-rw-r--r--Documentation/i2c/muxes/gpio-i2cmux65
-rw-r--r--Documentation/kernel-parameters.txt8
-rw-r--r--Documentation/networking/dccp.txt1
-rw-r--r--Documentation/powerpc/booting-without-of.txt4
-rw-r--r--Documentation/powerpc/dts-bindings/4xx/cpm.txt52
-rw-r--r--MAINTAINERS95
-rw-r--r--arch/arm/mach-dove/common.c4
-rw-r--r--arch/arm/mach-tegra/include/mach/sdhci.h29
-rw-r--r--arch/blackfin/Makefile9
-rw-r--r--arch/blackfin/boot/Makefile2
-rw-r--r--arch/blackfin/configs/BF561-EZKIT-SMP_defconfig113
-rw-r--r--arch/blackfin/configs/DNP5370_defconfig121
-rw-r--r--arch/blackfin/include/asm/bfin_dma.h91
-rw-r--r--arch/blackfin/include/asm/bfin_serial.h275
-rw-r--r--arch/blackfin/include/asm/bitops.h2
-rw-r--r--arch/blackfin/include/asm/cache.h2
-rw-r--r--arch/blackfin/include/asm/cacheflush.h3
-rw-r--r--arch/blackfin/include/asm/dma.h37
-rw-r--r--arch/blackfin/include/asm/dpmc.h2
-rw-r--r--arch/blackfin/include/asm/io.h268
-rw-r--r--arch/blackfin/include/asm/irqflags.h3
-rw-r--r--arch/blackfin/include/asm/processor.h4
-rw-r--r--arch/blackfin/include/asm/spinlock.h28
-rw-r--r--arch/blackfin/include/mach-common/pll.h86
-rw-r--r--arch/blackfin/include/mach-common/ports-a.h25
-rw-r--r--arch/blackfin/include/mach-common/ports-b.h25
-rw-r--r--arch/blackfin/include/mach-common/ports-c.h25
-rw-r--r--arch/blackfin/include/mach-common/ports-d.h25
-rw-r--r--arch/blackfin/include/mach-common/ports-e.h25
-rw-r--r--arch/blackfin/include/mach-common/ports-f.h25
-rw-r--r--arch/blackfin/include/mach-common/ports-g.h25
-rw-r--r--arch/blackfin/include/mach-common/ports-h.h25
-rw-r--r--arch/blackfin/include/mach-common/ports-i.h25
-rw-r--r--arch/blackfin/include/mach-common/ports-j.h25
-rw-r--r--arch/blackfin/kernel/cplb-nompu/cplbinit.c2
-rw-r--r--arch/blackfin/kernel/kgdb.c26
-rw-r--r--arch/blackfin/kernel/kgdb_test.c4
-rw-r--r--arch/blackfin/mach-bf518/boards/ezbrd.c29
-rw-r--r--arch/blackfin/mach-bf518/boards/tcm-bf518.c12
-rw-r--r--arch/blackfin/mach-bf518/dma.c2
-rw-r--r--arch/blackfin/mach-bf518/include/mach/bfin_serial.h14
-rw-r--r--arch/blackfin/mach-bf518/include/mach/bfin_serial_5xx.h73
-rw-r--r--arch/blackfin/mach-bf518/include/mach/blackfin.h68
-rw-r--r--arch/blackfin/mach-bf518/include/mach/cdefBF512.h1038
-rw-r--r--arch/blackfin/mach-bf518/include/mach/cdefBF514.h5
-rw-r--r--arch/blackfin/mach-bf518/include/mach/cdefBF516.h5
-rw-r--r--arch/blackfin/mach-bf518/include/mach/cdefBF518.h5
-rw-r--r--arch/blackfin/mach-bf518/include/mach/cdefBF51x_base.h1061
-rw-r--r--arch/blackfin/mach-bf518/include/mach/defBF512.h1388
-rw-r--r--arch/blackfin/mach-bf518/include/mach/defBF51x_base.h1495
-rw-r--r--arch/blackfin/mach-bf518/include/mach/gpio.h4
-rw-r--r--arch/blackfin/mach-bf518/include/mach/pll.h64
-rw-r--r--arch/blackfin/mach-bf527/boards/ad7160eval.c13
-rw-r--r--arch/blackfin/mach-bf527/boards/cm_bf527.c13
-rw-r--r--arch/blackfin/mach-bf527/boards/ezbrd.c13
-rw-r--r--arch/blackfin/mach-bf527/boards/ezkit.c18
-rw-r--r--arch/blackfin/mach-bf527/boards/tll6527m.c14
-rw-r--r--arch/blackfin/mach-bf527/dma.c2
-rw-r--r--arch/blackfin/mach-bf527/include/mach/bfin_serial.h14
-rw-r--r--arch/blackfin/mach-bf527/include/mach/bfin_serial_5xx.h73
-rw-r--r--arch/blackfin/mach-bf527/include/mach/blackfin.h48
-rw-r--r--arch/blackfin/mach-bf527/include/mach/cdefBF522.h1092
-rw-r--r--arch/blackfin/mach-bf527/include/mach/cdefBF525.h7
-rw-r--r--arch/blackfin/mach-bf527/include/mach/cdefBF527.h7
-rw-r--r--arch/blackfin/mach-bf527/include/mach/cdefBF52x_base.h1113
-rw-r--r--arch/blackfin/mach-bf527/include/mach/defBF522.h1393
-rw-r--r--arch/blackfin/mach-bf527/include/mach/defBF525.h2
-rw-r--r--arch/blackfin/mach-bf527/include/mach/defBF527.h2
-rw-r--r--arch/blackfin/mach-bf527/include/mach/defBF52x_base.h1506
-rw-r--r--arch/blackfin/mach-bf527/include/mach/gpio.h4
-rw-r--r--arch/blackfin/mach-bf527/include/mach/pll.h64
-rw-r--r--arch/blackfin/mach-bf533/boards/H8606.c2
-rw-r--r--arch/blackfin/mach-bf533/boards/blackstamp.c25
-rw-r--r--arch/blackfin/mach-bf533/boards/cm_bf533.c10
-rw-r--r--arch/blackfin/mach-bf533/boards/ezkit.c2
-rw-r--r--arch/blackfin/mach-bf533/boards/ip0x.c11
-rw-r--r--arch/blackfin/mach-bf533/boards/stamp.c30
-rw-r--r--arch/blackfin/mach-bf533/dma.c2
-rw-r--r--arch/blackfin/mach-bf533/include/mach/bfin_serial.h14
-rw-r--r--arch/blackfin/mach-bf533/include/mach/bfin_serial_5xx.h74
-rw-r--r--arch/blackfin/mach-bf533/include/mach/blackfin.h30
-rw-r--r--arch/blackfin/mach-bf533/include/mach/cdefBF532.h63
-rw-r--r--arch/blackfin/mach-bf533/include/mach/defBF532.h128
-rw-r--r--arch/blackfin/mach-bf533/include/mach/fio_flag.h55
-rw-r--r--arch/blackfin/mach-bf533/include/mach/gpio.h2
-rw-r--r--arch/blackfin/mach-bf533/include/mach/pll.h58
-rw-r--r--arch/blackfin/mach-bf537/boards/Kconfig6
-rw-r--r--arch/blackfin/mach-bf537/boards/Makefile1
-rw-r--r--arch/blackfin/mach-bf537/boards/cm_bf537e.c12
-rw-r--r--arch/blackfin/mach-bf537/boards/cm_bf537u.c12
-rw-r--r--arch/blackfin/mach-bf537/boards/dnp5370.c418
-rw-r--r--arch/blackfin/mach-bf537/boards/minotaur.c12
-rw-r--r--arch/blackfin/mach-bf537/boards/pnav10.c4
-rw-r--r--arch/blackfin/mach-bf537/boards/stamp.c18
-rw-r--r--arch/blackfin/mach-bf537/boards/tcm_bf537.c12
-rw-r--r--arch/blackfin/mach-bf537/dma.c2
-rw-r--r--arch/blackfin/mach-bf537/include/mach/bfin_serial.h14
-rw-r--r--arch/blackfin/mach-bf537/include/mach/bfin_serial_5xx.h72
-rw-r--r--arch/blackfin/mach-bf537/include/mach/blackfin.h38
-rw-r--r--arch/blackfin/mach-bf537/include/mach/cdefBF534.h27
-rw-r--r--arch/blackfin/mach-bf537/include/mach/cdefBF537.h5
-rw-r--r--arch/blackfin/mach-bf537/include/mach/defBF534.h111
-rw-r--r--arch/blackfin/mach-bf537/include/mach/defBF537.h5
-rw-r--r--arch/blackfin/mach-bf537/include/mach/gpio.h4
-rw-r--r--arch/blackfin/mach-bf537/include/mach/pll.h58
-rw-r--r--arch/blackfin/mach-bf538/boards/ezkit.c20
-rw-r--r--arch/blackfin/mach-bf538/dma.c18
-rw-r--r--arch/blackfin/mach-bf538/include/mach/bfin_serial.h14
-rw-r--r--arch/blackfin/mach-bf538/include/mach/bfin_serial_5xx.h73
-rw-r--r--arch/blackfin/mach-bf538/include/mach/blackfin.h37
-rw-r--r--arch/blackfin/mach-bf538/include/mach/cdefBF538.h504
-rw-r--r--arch/blackfin/mach-bf538/include/mach/cdefBF539.h8
-rw-r--r--arch/blackfin/mach-bf538/include/mach/defBF538.h1825
-rw-r--r--arch/blackfin/mach-bf538/include/mach/defBF539.h2095
-rw-r--r--arch/blackfin/mach-bf538/include/mach/gpio.h5
-rw-r--r--arch/blackfin/mach-bf538/include/mach/pll.h64
-rw-r--r--arch/blackfin/mach-bf548/boards/cm_bf548.c23
-rw-r--r--arch/blackfin/mach-bf548/boards/ezkit.c23
-rw-r--r--arch/blackfin/mach-bf548/dma.c2
-rw-r--r--arch/blackfin/mach-bf548/include/mach/bfin_serial.h16
-rw-r--r--arch/blackfin/mach-bf548/include/mach/bfin_serial_5xx.h60
-rw-r--r--arch/blackfin/mach-bf548/include/mach/blackfin.h70
-rw-r--r--arch/blackfin/mach-bf548/include/mach/cdefBF542.h10
-rw-r--r--arch/blackfin/mach-bf548/include/mach/cdefBF544.h10
-rw-r--r--arch/blackfin/mach-bf548/include/mach/cdefBF547.h10
-rw-r--r--arch/blackfin/mach-bf548/include/mach/cdefBF548.h10
-rw-r--r--arch/blackfin/mach-bf548/include/mach/cdefBF549.h10
-rw-r--r--arch/blackfin/mach-bf548/include/mach/cdefBF54x_base.h23
-rw-r--r--arch/blackfin/mach-bf548/include/mach/defBF542.h7
-rw-r--r--arch/blackfin/mach-bf548/include/mach/defBF544.h7
-rw-r--r--arch/blackfin/mach-bf548/include/mach/defBF547.h7
-rw-r--r--arch/blackfin/mach-bf548/include/mach/defBF548.h7
-rw-r--r--arch/blackfin/mach-bf548/include/mach/defBF549.h7
-rw-r--r--arch/blackfin/mach-bf548/include/mach/defBF54x_base.h321
-rw-r--r--arch/blackfin/mach-bf548/include/mach/gpio.h11
-rw-r--r--arch/blackfin/mach-bf548/include/mach/irq.h22
-rw-r--r--arch/blackfin/mach-bf548/include/mach/pll.h70
-rw-r--r--arch/blackfin/mach-bf561/atomic.S5
-rw-r--r--arch/blackfin/mach-bf561/boards/acvilon.c2
-rw-r--r--arch/blackfin/mach-bf561/boards/cm_bf561.c2
-rw-r--r--arch/blackfin/mach-bf561/boards/ezkit.c2
-rw-r--r--arch/blackfin/mach-bf561/boards/tepla.c2
-rw-r--r--arch/blackfin/mach-bf561/dma.c18
-rw-r--r--arch/blackfin/mach-bf561/hotplug.c2
-rw-r--r--arch/blackfin/mach-bf561/include/mach/anomaly.h6
-rw-r--r--arch/blackfin/mach-bf561/include/mach/bfin_serial.h14
-rw-r--r--arch/blackfin/mach-bf561/include/mach/bfin_serial_5xx.h74
-rw-r--r--arch/blackfin/mach-bf561/include/mach/blackfin.h26
-rw-r--r--arch/blackfin/mach-bf561/include/mach/cdefBF561.h503
-rw-r--r--arch/blackfin/mach-bf561/include/mach/defBF561.h408
-rw-r--r--arch/blackfin/mach-bf561/include/mach/gpio.h2
-rw-r--r--arch/blackfin/mach-bf561/include/mach/mem_map.h16
-rw-r--r--arch/blackfin/mach-bf561/include/mach/pll.h83
-rw-r--r--arch/blackfin/mach-bf561/include/mach/smp.h10
-rw-r--r--arch/blackfin/mach-bf561/smp.c29
-rw-r--r--arch/blackfin/mach-common/entry.S4
-rw-r--r--arch/blackfin/mach-common/ints-priority.c1
-rw-r--r--arch/blackfin/mach-common/pm.c10
-rw-r--r--arch/blackfin/mach-common/smp.c204
-rw-r--r--arch/blackfin/mm/sram-alloc.c18
-rw-r--r--arch/microblaze/Kconfig.debug4
-rw-r--r--arch/microblaze/Makefile2
-rw-r--r--arch/microblaze/configs/mmu_defconfig1
-rw-r--r--arch/microblaze/include/asm/pvr.h185
-rw-r--r--arch/microblaze/kernel/cpu/cpuinfo.c1
-rw-r--r--arch/microblaze/kernel/entry.S46
-rw-r--r--arch/microblaze/kernel/exceptions.c3
-rw-r--r--arch/microblaze/kernel/hw_exception_handler.S9
-rw-r--r--arch/microblaze/kernel/prom.c4
-rw-r--r--arch/microblaze/kernel/vmlinux.lds.S16
-rw-r--r--arch/microblaze/lib/memmove.c2
-rw-r--r--arch/microblaze/lib/muldi3.S121
-rw-r--r--arch/microblaze/lib/muldi3.c60
-rw-r--r--arch/powerpc/Kconfig16
-rw-r--r--arch/powerpc/boot/dts/canyonlands.dts31
-rw-r--r--arch/powerpc/boot/dts/kilauea.dts9
-rw-r--r--arch/powerpc/boot/dts/mpc8308_p1m.dts8
-rw-r--r--arch/powerpc/boot/dts/mpc8308rdb.dts8
-rw-r--r--arch/powerpc/configs/40x/kilauea_defconfig5
-rw-r--r--arch/powerpc/configs/44x/canyonlands_defconfig3
-rw-r--r--arch/powerpc/include/asm/bitops.h9
-rw-r--r--arch/powerpc/include/asm/cputable.h9
-rw-r--r--arch/powerpc/include/asm/cputhreads.h15
-rw-r--r--arch/powerpc/include/asm/device.h6
-rw-r--r--arch/powerpc/include/asm/firmware.h3
-rw-r--r--arch/powerpc/include/asm/hvcall.h4
-rw-r--r--arch/powerpc/include/asm/lppaca.h5
-rw-r--r--arch/powerpc/include/asm/machdep.h6
-rw-r--r--arch/powerpc/include/asm/mmzone.h5
-rw-r--r--arch/powerpc/include/asm/nvram.h52
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h8
-rw-r--r--arch/powerpc/include/asm/processor.h2
-rw-r--r--arch/powerpc/include/asm/topology.h15
-rw-r--r--arch/powerpc/include/asm/vdso_datapage.h2
-rw-r--r--arch/powerpc/kernel/Makefile9
-rw-r--r--arch/powerpc/kernel/asm-offsets.c1
-rw-r--r--arch/powerpc/kernel/cputable.c22
-rw-r--r--arch/powerpc/kernel/crash_dump.c33
-rw-r--r--arch/powerpc/kernel/dma-iommu.c2
-rw-r--r--arch/powerpc/kernel/entry_32.S1
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S1
-rw-r--r--arch/powerpc/kernel/fpu.S1
-rw-r--r--arch/powerpc/kernel/head_40x.S1
-rw-r--r--arch/powerpc/kernel/head_44x.S1
-rw-r--r--arch/powerpc/kernel/head_64.S7
-rw-r--r--arch/powerpc/kernel/head_8xx.S1
-rw-r--r--arch/powerpc/kernel/head_fsl_booke.S1
-rw-r--r--arch/powerpc/kernel/iommu.c14
-rw-r--r--arch/powerpc/kernel/misc.S5
-rw-r--r--arch/powerpc/kernel/misc_32.S1
-rw-r--r--arch/powerpc/kernel/misc_64.S1
-rw-r--r--arch/powerpc/kernel/nvram_64.c481
-rw-r--r--arch/powerpc/kernel/pci_64.c3
-rw-r--r--arch/powerpc/kernel/ppc_ksyms.c7
-rw-r--r--arch/powerpc/kernel/ppc_save_regs.S1
-rw-r--r--arch/powerpc/kernel/ptrace.c22
-rw-r--r--arch/powerpc/kernel/ptrace32.c7
-rw-r--r--arch/powerpc/kernel/rtas.c3
-rw-r--r--arch/powerpc/kernel/setup_64.c4
-rw-r--r--arch/powerpc/kernel/smp.c19
-rw-r--r--arch/powerpc/kernel/time.c2
-rw-r--r--arch/powerpc/kernel/vector.S1
-rw-r--r--arch/powerpc/kernel/vio.c15
-rw-r--r--arch/powerpc/lib/Makefile2
-rw-r--r--arch/powerpc/lib/hweight_64.S110
-rw-r--r--arch/powerpc/mm/hash_utils_64.c2
-rw-r--r--arch/powerpc/mm/mmu_context_nohash.c12
-rw-r--r--arch/powerpc/mm/numa.c311
-rw-r--r--arch/powerpc/mm/pgtable_32.c3
-rw-r--r--arch/powerpc/mm/pgtable_64.c2
-rw-r--r--arch/powerpc/platforms/44x/Makefile5
-rw-r--r--arch/powerpc/platforms/Kconfig7
-rw-r--r--arch/powerpc/platforms/cell/beat_iommu.c3
-rw-r--r--arch/powerpc/platforms/cell/spufs/lscsa_alloc.c3
-rw-r--r--arch/powerpc/platforms/chrp/time.c4
-rw-r--r--arch/powerpc/platforms/iseries/mf.c62
-rw-r--r--arch/powerpc/platforms/pasemi/iommu.c19
-rw-r--r--arch/powerpc/platforms/powermac/setup.c9
-rw-r--r--arch/powerpc/platforms/pseries/Kconfig10
-rw-r--r--arch/powerpc/platforms/pseries/Makefile1
-rw-r--r--arch/powerpc/platforms/pseries/firmware.c1
-rw-r--r--arch/powerpc/platforms/pseries/hvCall.S1
-rw-r--r--arch/powerpc/platforms/pseries/iommu.c49
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c12
-rw-r--r--arch/powerpc/platforms/pseries/nvram.c205
-rw-r--r--arch/powerpc/platforms/pseries/pseries_energy.c326
-rw-r--r--arch/powerpc/sysdev/Makefile1
-rw-r--r--arch/powerpc/sysdev/dart_iommu.c9
-rw-r--r--arch/powerpc/sysdev/mpc8xxx_gpio.c75
-rw-r--r--arch/powerpc/sysdev/ppc4xx_cpm.c346
-rw-r--r--arch/powerpc/sysdev/tsi108_dev.c8
-rw-r--r--arch/sh/boards/mach-ap325rxa/setup.c16
-rw-r--r--arch/sh/boards/mach-ecovec24/setup.c2
-rw-r--r--arch/sh/boards/mach-kfr2r09/setup.c2
-rw-r--r--arch/sh/boards/mach-migor/setup.c2
-rw-r--r--arch/sh/boards/mach-se/7724/setup.c3
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7343.c25
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7366.c24
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7722.c2
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7723.c4
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7724.c4
-rw-r--r--arch/sparc/kernel/cpu.c2
-rw-r--r--arch/sparc/kernel/pcr.c2
-rw-r--r--arch/x86/include/asm/acpi.h11
-rw-r--r--arch/x86/include/asm/amd_nb.h13
-rw-r--r--arch/x86/include/asm/fixmap.h4
-rw-r--r--arch/x86/include/asm/gpio.h5
-rw-r--r--arch/x86/include/asm/kdebug.h1
-rw-r--r--arch/x86/include/asm/mach_traps.h12
-rw-r--r--arch/x86/include/asm/nmi.h20
-rw-r--r--arch/x86/include/asm/numa_64.h2
-rw-r--r--arch/x86/include/asm/perf_event_p4.h3
-rw-r--r--arch/x86/kernel/amd_nb.c7
-rw-r--r--arch/x86/kernel/aperture_64.c44
-rw-r--r--arch/x86/kernel/apic/apic.c2
-rw-r--r--arch/x86/kernel/apic/hw_nmi.c3
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c4
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce-inject.c5
-rw-r--r--arch/x86/kernel/cpu/perf_event.c3
-rw-r--r--arch/x86/kernel/cpu/perf_event_p4.c28
-rw-r--r--arch/x86/kernel/dumpstack.c6
-rw-r--r--arch/x86/kernel/entry_64.S36
-rw-r--r--arch/x86/kernel/kgdb.c7
-rw-r--r--arch/x86/kernel/reboot.c5
-rw-r--r--arch/x86/kernel/smpboot.c4
-rw-r--r--arch/x86/kernel/traps.c102
-rw-r--r--arch/x86/kernel/tsc.c2
-rw-r--r--arch/x86/mm/amdtopology_64.c86
-rw-r--r--arch/x86/mm/numa_64.c157
-rw-r--r--arch/x86/mm/srat_64.c26
-rw-r--r--arch/x86/oprofile/nmi_int.c3
-rw-r--r--arch/x86/oprofile/nmi_timer_int.c2
-rw-r--r--arch/x86/pci/amd_bus.c33
-rw-r--r--drivers/atm/ambassador.c19
-rw-r--r--drivers/char/agp/agp.h1
-rw-r--r--drivers/char/agp/compat_ioctl.c1
-rw-r--r--drivers/char/agp/compat_ioctl.h1
-rw-r--r--drivers/char/agp/frontend.c8
-rw-r--r--drivers/char/agp/generic.c27
-rw-r--r--drivers/char/agp/intel-agp.c5
-rw-r--r--drivers/char/agp/intel-agp.h14
-rw-r--r--drivers/char/agp/intel-gtt.c778
-rw-r--r--drivers/char/hvc_vio.c2
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c2
-rw-r--r--drivers/dma/Kconfig2
-rw-r--r--drivers/dma/mpc512x_dma.c187
-rw-r--r--drivers/gpu/drm/drm_agpsupport.c6
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c18
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c61
-rw-r--r--drivers/gpu/drm/drm_fops.c2
-rw-r--r--drivers/gpu/drm/drm_irq.c566
-rw-r--r--drivers/gpu/drm/drm_mm.c40
-rw-r--r--drivers/gpu/drm/drm_stub.c10
-rw-r--r--drivers/gpu/drm/i915/Makefile2
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c471
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c794
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c80
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h605
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c3764
-rw-r--r--drivers/gpu/drm/i915/i915_gem_debug.c23
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c125
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c1343
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c99
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c139
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c724
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h193
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c104
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h91
-rw-r--r--drivers/gpu/drm/i915/intel_display.c999
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c3
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h22
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c32
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c21
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c43
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c8
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c116
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c52
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c1007
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h136
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c7
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c14
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig2
-rw-r--r--drivers/gpu/drm/nouveau/Makefile17
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c102
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c319
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_channel.c383
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c54
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c207
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c32
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.h9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.c58
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h425
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c189
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.h18
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c117
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c171
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hw.c11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_irq.c1210
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c426
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mm.c271
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mm.h67
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_notifier.c44
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_object.c754
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_pm.c33
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ramht.c11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ramht.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_reg.h75
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c212
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c300
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_util.c69
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_util.h45
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vm.c439
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vm.h113
-rw-r--r--drivers/gpu/drm/nouveau/nv04_crtc.c8
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dac.c12
-rw-r--r--drivers/gpu/drm/nouveau/nv04_display.c21
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fbcon.c102
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fifo.c240
-rw-r--r--drivers/gpu/drm/nouveau/nv04_graph.c645
-rw-r--r--drivers/gpu/drm/nouveau/nv04_instmem.c50
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fb.c124
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fifo.c19
-rw-r--r--drivers/gpu/drm/nouveau/nv10_graph.c203
-rw-r--r--drivers/gpu/drm/nouveau/nv20_graph.c244
-rw-r--r--drivers/gpu/drm/nouveau/nv30_fb.c23
-rw-r--r--drivers/gpu/drm/nouveau/nv40_fb.c22
-rw-r--r--drivers/gpu/drm/nouveau/nv40_fifo.c20
-rw-r--r--drivers/gpu/drm/nouveau/nv40_graph.c205
-rw-r--r--drivers/gpu/drm/nouveau/nv50_crtc.c27
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c422
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.h2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_evo.c345
-rw-r--r--drivers/gpu/drm/nouveau/nv50_evo.h10
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fb.c71
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c114
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fifo.c42
-rw-r--r--drivers/gpu/drm/nouveau/nv50_gpio.c198
-rw-r--r--drivers/gpu/drm/nouveau/nv50_graph.c677
-rw-r--r--drivers/gpu/drm/nouveau/nv50_instmem.c375
-rw-r--r--drivers/gpu/drm/nouveau/nv50_vm.c180
-rw-r--r--drivers/gpu/drm/nouveau/nv50_vram.c190
-rw-r--r--drivers/gpu/drm/nouveau/nv84_crypt.c140
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fbcon.c269
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fifo.c365
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_graph.c705
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_graph.h64
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_grctx.c2874
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_instmem.c317
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_vm.c123
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_vram.c99
-rw-r--r--drivers/gpu/drm/nouveau/nvreg.h3
-rw-r--r--drivers/gpu/drm/radeon/Makefile5
-rw-r--r--drivers/gpu/drm/radeon/ObjectID.h48
-rw-r--r--drivers/gpu/drm/radeon/atom.c14
-rw-r--r--drivers/gpu/drm/radeon/atombios.h997
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c57
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c806
-rw-r--r--drivers/gpu/drm/radeon/evergreen_blit_kms.c92
-rw-r--r--drivers/gpu/drm/radeon/evergreen_reg.h6
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h53
-rw-r--r--drivers/gpu/drm/radeon/ni.c316
-rw-r--r--drivers/gpu/drm/radeon/ni_reg.h86
-rw-r--r--drivers/gpu/drm/radeon/nid.h41
-rw-r--r--drivers/gpu/drm/radeon/r100.c78
-rw-r--r--drivers/gpu/drm/radeon/r100d.h2
-rw-r--r--drivers/gpu/drm/radeon/r300.c21
-rw-r--r--drivers/gpu/drm/radeon/r300d.h1
-rw-r--r--drivers/gpu/drm/radeon/r500_reg.h4
-rw-r--r--drivers/gpu/drm/radeon/r600.c357
-rw-r--r--drivers/gpu/drm/radeon/r600d.h48
-rw-r--r--drivers/gpu/drm/radeon/radeon.h151
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c153
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h65
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c1246
-rw-r--r--drivers/gpu/drm/radeon/radeon_bios.c41
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c17
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c35
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c391
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c205
-rw-r--r--drivers/gpu/drm/radeon/radeon_family.h4
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c46
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c64
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h16
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c57
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h7
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c94
-rw-r--r--drivers/gpu/drm/radeon/radeon_reg.h13
-rw-r--r--drivers/gpu/drm/radeon/radeon_trace.h82
-rw-r--r--drivers/gpu/drm/radeon/radeon_trace_points.c9
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/rv51516
-rw-r--r--drivers/gpu/drm/radeon/rs600.c118
-rw-r--r--drivers/gpu/drm/radeon/rv770.c196
-rw-r--r--drivers/gpu/drm/radeon/rv770d.h47
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c156
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c138
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c29
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c169
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c3
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c78
-rw-r--r--drivers/i2c/algos/i2c-algo-bit.c31
-rw-r--r--drivers/i2c/busses/i2c-i801.c1
-rw-r--r--drivers/i2c/busses/i2c-nforce2.c2
-rw-r--r--drivers/i2c/i2c-core.c29
-rw-r--r--drivers/i2c/muxes/Kconfig12
-rw-r--r--drivers/i2c/muxes/Makefile1
-rw-r--r--drivers/i2c/muxes/gpio-i2cmux.c184
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.c2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.h2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_qp.c56
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h1
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c32
-rw-r--r--drivers/infiniband/hw/ipath/ipath_driver.c5
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c9
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mad.c2
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c4
-rw-r--r--drivers/infiniband/hw/qib/qib.h2
-rw-r--r--drivers/infiniband/hw/qib/qib_cq.c3
-rw-r--r--drivers/infiniband/hw/qib/qib_driver.c155
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c10
-rw-r--r--drivers/infiniband/hw/qib/qib_iba6120.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7220.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c373
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_intr.c3
-rw-r--r--drivers/infiniband/hw/qib/qib_keys.c80
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.c45
-rw-r--r--drivers/infiniband/hw/qib/qib_mr.c8
-rw-r--r--drivers/infiniband/hw/qib/qib_qp.c32
-rw-r--r--drivers/infiniband/hw/qib/qib_rc.c24
-rw-r--r--drivers/infiniband/hw/qib/qib_ud.c57
-rw-r--r--drivers/infiniband/hw/qib/qib_user_sdma.c1
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.h11
-rw-r--r--drivers/infiniband/ulp/ipoib/Kconfig1
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h12
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c1
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ethtool.c51
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c8
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c62
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c392
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.h46
-rw-r--r--drivers/macintosh/macio_asic.c7
-rw-r--r--drivers/macintosh/therm_pm72.c30
-rw-r--r--drivers/mfd/sh_mobile_sdhi.c6
-rw-r--r--drivers/mmc/card/Kconfig1
-rw-r--r--drivers/mmc/core/Kconfig11
-rw-r--r--drivers/mmc/core/bus.c8
-rw-r--r--drivers/mmc/core/core.c206
-rw-r--r--drivers/mmc/core/core.h9
-rw-r--r--drivers/mmc/core/debugfs.c5
-rw-r--r--drivers/mmc/core/host.c206
-rw-r--r--drivers/mmc/core/host.h21
-rw-r--r--drivers/mmc/core/mmc.c91
-rw-r--r--drivers/mmc/core/mmc_ops.c101
-rw-r--r--drivers/mmc/core/mmc_ops.h1
-rw-r--r--drivers/mmc/core/sd.c16
-rw-r--r--drivers/mmc/core/sdio.c36
-rw-r--r--drivers/mmc/core/sdio_bus.c32
-rw-r--r--drivers/mmc/host/Kconfig37
-rw-r--r--drivers/mmc/host/Makefile3
-rw-r--r--drivers/mmc/host/davinci_mmc.c80
-rw-r--r--drivers/mmc/host/dw_mmc.c1796
-rw-r--r--drivers/mmc/host/dw_mmc.h168
-rw-r--r--drivers/mmc/host/mxcmmc.c53
-rw-r--r--drivers/mmc/host/sdhci-dove.c70
-rw-r--r--drivers/mmc/host/sdhci-pci.c161
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c6
-rw-r--r--drivers/mmc/host/sdhci-pltfm.h2
-rw-r--r--drivers/mmc/host/sdhci-s3c.c66
-rw-r--r--drivers/mmc/host/sdhci-tegra.c257
-rw-r--r--drivers/mmc/host/sdhci.c45
-rw-r--r--drivers/mmc/host/sdhci.h3
-rw-r--r--drivers/mmc/host/tmio_mmc.c561
-rw-r--r--drivers/mmc/host/tmio_mmc.h228
-rw-r--r--drivers/net/Kconfig9
-rw-r--r--drivers/net/bfin_mac.c74
-rw-r--r--drivers/net/bfin_mac.h11
-rw-r--r--drivers/net/bnx2x/bnx2x.h1
-rw-r--r--drivers/net/bnx2x/bnx2x_dump.h988
-rw-r--r--drivers/net/bnx2x/bnx2x_ethtool.c22
-rw-r--r--drivers/net/bnx2x/bnx2x_init.h220
-rw-r--r--drivers/net/bnx2x/bnx2x_main.c70
-rw-r--r--drivers/net/bnx2x/bnx2x_reg.h74
-rw-r--r--drivers/net/bnx2x/bnx2x_stats.c5
-rw-r--r--drivers/net/cxgb4vf/cxgb4vf_main.c15
-rw-r--r--drivers/net/cxgb4vf/t4vf_hw.c11
-rw-r--r--drivers/net/e1000/e1000_hw.c328
-rw-r--r--drivers/net/e1000/e1000_hw.h59
-rw-r--r--drivers/net/e1000/e1000_main.c35
-rw-r--r--drivers/net/e1000/e1000_osdep.h19
-rw-r--r--drivers/net/e1000e/82571.c77
-rw-r--r--drivers/net/e1000e/e1000.h3
-rw-r--r--drivers/net/e1000e/es2lan.c4
-rw-r--r--drivers/net/e1000e/ethtool.c54
-rw-r--r--drivers/net/e1000e/hw.h1
-rw-r--r--drivers/net/e1000e/ich8lan.c77
-rw-r--r--drivers/net/e1000e/lib.c3
-rw-r--r--drivers/net/e1000e/netdev.c53
-rw-r--r--drivers/net/e1000e/phy.c40
-rw-r--r--drivers/net/ehea/ehea.h2
-rw-r--r--drivers/net/ehea/ehea_main.c6
-rw-r--r--drivers/net/fec.c248
-rw-r--r--drivers/net/fec.h5
-rw-r--r--drivers/net/forcedeth.c34
-rw-r--r--drivers/net/hamradio/yam.c4
-rw-r--r--drivers/net/irda/bfin_sir.h2
-rw-r--r--drivers/net/ixgbe/ixgbe.h21
-rw-r--r--drivers/net/ixgbe/ixgbe_82599.c749
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c142
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c169
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h91
-rw-r--r--drivers/net/mlx4/alloc.c3
-rw-r--r--drivers/net/mlx4/en_netdev.c3
-rw-r--r--drivers/net/mlx4/fw.c4
-rw-r--r--drivers/net/pcmcia/pcnet_cs.c1
-rw-r--r--drivers/net/ppp_async.c10
-rw-r--r--drivers/net/ppp_deflate.c9
-rw-r--r--drivers/net/ppp_generic.c9
-rw-r--r--drivers/net/ppp_mppe.c7
-rw-r--r--drivers/net/ppp_synctty.c3
-rw-r--r--drivers/net/qlcnic/qlcnic.h24
-rw-r--r--drivers/net/qlcnic/qlcnic_ethtool.c2
-rw-r--r--drivers/net/qlcnic/qlcnic_init.c63
-rw-r--r--drivers/net/qlcnic/qlcnic_main.c10
-rw-r--r--drivers/net/r8169.c143
-rw-r--r--drivers/net/sky2.c143
-rw-r--r--drivers/net/sky2.h6
-rw-r--r--drivers/net/xen-netfront.c2
-rw-r--r--drivers/ps3/Makefile2
-rw-r--r--drivers/rtc/class.c13
-rw-r--r--drivers/rtc/interface.c574
-rw-r--r--drivers/rtc/rtc-cmos.c3
-rw-r--r--drivers/rtc/rtc-dev.c104
-rw-r--r--drivers/rtc/rtc-ds1307.c12
-rw-r--r--drivers/rtc/rtc-lib.c28
-rw-r--r--drivers/serial/Kconfig70
-rw-r--r--drivers/serial/bfin_5xx.c638
-rw-r--r--drivers/watchdog/hpwdt.c2
-rw-r--r--fs/9p/Kconfig5
-rw-r--r--fs/9p/Makefile1
-rw-r--r--fs/9p/acl.c4
-rw-r--r--fs/9p/v9fs.h42
-rw-r--r--fs/9p/vfs_inode.c871
-rw-r--r--fs/9p/vfs_inode_dotl.c824
-rw-r--r--fs/9p/xattr.c2
-rw-r--r--fs/ext2/dir.c19
-rw-r--r--fs/ext2/namei.c2
-rw-r--r--fs/ext2/super.c25
-rw-r--r--fs/ext2/xattr.c10
-rw-r--r--fs/ext3/balloc.c266
-rw-r--r--fs/ext3/dir.c15
-rw-r--r--fs/ext3/inode.c6
-rw-r--r--fs/ext3/ioctl.c22
-rw-r--r--fs/ext3/namei.c138
-rw-r--r--fs/ext3/resize.c65
-rw-r--r--fs/ext3/super.c64
-rw-r--r--fs/ext3/xattr.c2
-rw-r--r--fs/ext4/balloc.c3
-rw-r--r--fs/ext4/dir.c56
-rw-r--r--fs/ext4/ext4.h93
-rw-r--r--fs/ext4/ext4_extents.h8
-rw-r--r--fs/ext4/ext4_jbd2.h2
-rw-r--r--fs/ext4/extents.c88
-rw-r--r--fs/ext4/file.c22
-rw-r--r--fs/ext4/fsync.c4
-rw-r--r--fs/ext4/ialloc.c2
-rw-r--r--fs/ext4/inode.c74
-rw-r--r--fs/ext4/mballoc.c55
-rw-r--r--fs/ext4/migrate.c2
-rw-r--r--fs/ext4/namei.c69
-rw-r--r--fs/ext4/page-io.c7
-rw-r--r--fs/ext4/resize.c64
-rw-r--r--fs/ext4/super.c288
-rw-r--r--fs/ext4/xattr.c28
-rw-r--r--fs/jbd2/journal.c34
-rw-r--r--fs/jbd2/recovery.c2
-rw-r--r--fs/jbd2/transaction.c6
-rw-r--r--fs/lockd/Makefile6
-rw-r--r--fs/lockd/clnt4xdr.c605
-rw-r--r--fs/lockd/clntlock.c4
-rw-r--r--fs/lockd/clntproc.c18
-rw-r--r--fs/lockd/clntxdr.c627
-rw-r--r--fs/lockd/host.c409
-rw-r--r--fs/lockd/mon.c110
-rw-r--r--fs/lockd/svc4proc.c20
-rw-r--r--fs/lockd/svclock.c34
-rw-r--r--fs/lockd/svcproc.c28
-rw-r--r--fs/lockd/xdr.c287
-rw-r--r--fs/lockd/xdr4.c255
-rw-r--r--fs/mbcache.c12
-rw-r--r--fs/nfs/callback.c83
-rw-r--r--fs/nfs/callback.h59
-rw-r--r--fs/nfs/callback_proc.c326
-rw-r--r--fs/nfs/callback_xdr.c143
-rw-r--r--fs/nfs/client.c302
-rw-r--r--fs/nfs/delegation.c362
-rw-r--r--fs/nfs/delegation.h1
-rw-r--r--fs/nfs/dir.c72
-rw-r--r--fs/nfs/idmap.c2
-rw-r--r--fs/nfs/inode.c3
-rw-r--r--fs/nfs/internal.h19
-rw-r--r--fs/nfs/mount_clnt.c83
-rw-r--r--fs/nfs/nfs2xdr.c1294
-rw-r--r--fs/nfs/nfs3xdr.c2817
-rw-r--r--fs/nfs/nfs4_fs.h13
-rw-r--r--fs/nfs/nfs4filelayout.c6
-rw-r--r--fs/nfs/nfs4proc.c188
-rw-r--r--fs/nfs/nfs4renewd.c11
-rw-r--r--fs/nfs/nfs4state.c293
-rw-r--r--fs/nfs/nfs4xdr.c1426
-rw-r--r--fs/nfs/pagelist.c7
-rw-r--r--fs/nfs/pnfs.c524
-rw-r--r--fs/nfs/pnfs.h76
-rw-r--r--fs/nfs/proc.c5
-rw-r--r--fs/nfs/super.c18
-rw-r--r--fs/nfs/unlink.c2
-rw-r--r--fs/nfsd/nfs4callback.c690
-rw-r--r--fs/ocfs2/Kconfig2
-rw-r--r--fs/ocfs2/alloc.c77
-rw-r--r--fs/ocfs2/alloc.h4
-rw-r--r--fs/ocfs2/aops.c59
-rw-r--r--fs/ocfs2/cluster/heartbeat.c246
-rw-r--r--fs/ocfs2/cluster/netdebug.c286
-rw-r--r--fs/ocfs2/cluster/tcp.c145
-rw-r--r--fs/ocfs2/cluster/tcp_internal.h33
-rw-r--r--fs/ocfs2/dlm/dlmast.c76
-rw-r--r--fs/ocfs2/dlm/dlmcommon.h86
-rw-r--r--fs/ocfs2/dlm/dlmdebug.c200
-rw-r--r--fs/ocfs2/dlm/dlmdebug.h5
-rw-r--r--fs/ocfs2/dlm/dlmdomain.c10
-rw-r--r--fs/ocfs2/dlm/dlmlock.c3
-rw-r--r--fs/ocfs2/dlm/dlmthread.c132
-rw-r--r--fs/ocfs2/namei.c5
-rw-r--r--fs/ocfs2/ocfs2.h5
-rw-r--r--fs/quota/dquot.c18
-rw-r--r--fs/quota/quota_tree.c9
-rw-r--r--fs/udf/Kconfig1
-rw-r--r--fs/udf/balloc.c3
-rw-r--r--fs/udf/dir.c5
-rw-r--r--fs/udf/file.c11
-rw-r--r--fs/udf/ialloc.c21
-rw-r--r--fs/udf/inode.c51
-rw-r--r--fs/udf/namei.c107
-rw-r--r--fs/udf/partition.c27
-rw-r--r--fs/udf/super.c67
-rw-r--r--fs/udf/symlink.c12
-rw-r--r--fs/udf/udf_i.h13
-rw-r--r--fs/udf/udf_sb.h22
-rw-r--r--fs/udf/udfdecl.h4
-rw-r--r--fs/xfs/linux-2.6/sv.h59
-rw-r--r--fs/xfs/linux-2.6/xfs_aops.c425
-rw-r--r--fs/xfs/linux-2.6/xfs_aops.h16
-rw-r--r--fs/xfs/linux-2.6/xfs_buf.c235
-rw-r--r--fs/xfs/linux-2.6/xfs_buf.h22
-rw-r--r--fs/xfs/linux-2.6/xfs_export.c12
-rw-r--r--fs/xfs/linux-2.6/xfs_linux.h1
-rw-r--r--fs/xfs/linux-2.6/xfs_super.c22
-rw-r--r--fs/xfs/linux-2.6/xfs_sync.c92
-rw-r--r--fs/xfs/linux-2.6/xfs_trace.h59
-rw-r--r--fs/xfs/quota/xfs_dquot.c1
-rw-r--r--fs/xfs/xfs_ag.h2
-rw-r--r--fs/xfs/xfs_alloc.c351
-rw-r--r--fs/xfs/xfs_attr_leaf.c4
-rw-r--r--fs/xfs/xfs_btree.c9
-rw-r--r--fs/xfs/xfs_buf_item.c32
-rw-r--r--fs/xfs/xfs_buf_item.h11
-rw-r--r--fs/xfs/xfs_extfree_item.c97
-rw-r--r--fs/xfs/xfs_extfree_item.h11
-rw-r--r--fs/xfs/xfs_fsops.c1
-rw-r--r--fs/xfs/xfs_iget.c79
-rw-r--r--fs/xfs/xfs_inode.c54
-rw-r--r--fs/xfs/xfs_inode.h15
-rw-r--r--fs/xfs/xfs_inode_item.c90
-rw-r--r--fs/xfs/xfs_iomap.c233
-rw-r--r--fs/xfs/xfs_iomap.h27
-rw-r--r--fs/xfs/xfs_log.c739
-rw-r--r--fs/xfs/xfs_log_cil.c17
-rw-r--r--fs/xfs/xfs_log_priv.h127
-rw-r--r--fs/xfs/xfs_log_recover.c620
-rw-r--r--fs/xfs/xfs_mount.c23
-rw-r--r--fs/xfs/xfs_mount.h14
-rw-r--r--fs/xfs/xfs_trans.c79
-rw-r--r--fs/xfs/xfs_trans.h2
-rw-r--r--fs/xfs/xfs_trans_ail.c232
-rw-r--r--fs/xfs/xfs_trans_extfree.c8
-rw-r--r--fs/xfs/xfs_trans_priv.h35
-rw-r--r--fs/xfs/xfs_vnodeops.c61
-rw-r--r--include/asm-generic/io.h30
-rw-r--r--include/drm/drmP.h102
-rw-r--r--include/drm/drm_crtc.h9
-rw-r--r--include/drm/drm_fb_helper.h3
-rw-r--r--include/drm/drm_mm.h7
-rw-r--r--include/drm/drm_pciids.h40
-rw-r--r--include/drm/i915_drm.h12
-rw-r--r--include/drm/intel-gtt.h35
-rw-r--r--include/drm/nouveau_drm.h5
-rw-r--r--include/drm/radeon_drm.h1
-rw-r--r--include/drm/ttm/ttm_bo_api.h50
-rw-r--r--include/drm/ttm/ttm_bo_driver.h152
-rw-r--r--include/drm/ttm/ttm_execbuf_util.h11
-rw-r--r--include/linux/agp_backend.h2
-rw-r--r--include/linux/bfin_mac.h1
-rw-r--r--include/linux/dynamic_debug.h18
-rw-r--r--include/linux/etherdevice.h4
-rw-r--r--include/linux/ext3_fs.h10
-rw-r--r--include/linux/fec.h3
-rw-r--r--include/linux/gpio-i2cmux.h38
-rw-r--r--include/linux/i2c.h27
-rw-r--r--include/linux/if_bridge.h2
-rw-r--r--include/linux/intel-gtt.h20
-rw-r--r--include/linux/jbd2.h20
-rw-r--r--include/linux/kref.h2
-rw-r--r--include/linux/lockd/debug.h10
-rw-r--r--include/linux/lockd/lockd.h6
-rw-r--r--include/linux/mbcache.h11
-rw-r--r--include/linux/mfd/tmio.h5
-rw-r--r--include/linux/mmc/dw_mmc.h217
-rw-r--r--include/linux/mmc/host.h19
-rw-r--r--include/linux/mmc/mmc.h2
-rw-r--r--include/linux/mmc/sdhci.h6
-rw-r--r--include/linux/netdevice.h24
-rw-r--r--include/linux/netfilter/x_tables.h10
-rw-r--r--include/linux/nfs3.h3
-rw-r--r--include/linux/nfs4.h8
-rw-r--r--include/linux/nfs_fs_sb.h15
-rw-r--r--include/linux/nfs_xdr.h6
-rw-r--r--include/linux/pci_ids.h8
-rw-r--r--include/linux/quotaops.h5
-rw-r--r--include/linux/rtc.h51
-rw-r--r--include/linux/sunrpc/auth.h8
-rw-r--r--include/linux/sunrpc/bc_xprt.h15
-rw-r--r--include/linux/sunrpc/clnt.h4
-rw-r--r--include/linux/sunrpc/svc.h2
-rw-r--r--include/linux/sunrpc/svc_xprt.h1
-rw-r--r--include/linux/sunrpc/xdr.h14
-rw-r--r--include/linux/tracepoint.h4
-rw-r--r--include/linux/vga_switcheroo.h2
-rw-r--r--include/net/ah.h2
-rw-r--r--include/net/arp.h1
-rw-r--r--include/net/phonet/phonet.h4
-rw-r--r--include/net/sch_generic.h20
-rw-r--r--include/net/sock.h4
-rw-r--r--include/trace/define_trace.h10
-rw-r--r--include/trace/events/skb.h4
-rw-r--r--kernel/Makefile1
-rw-r--r--kernel/exit.c14
-rw-r--r--kernel/perf_event.c82
-rw-r--r--kernel/trace/Makefile2
-rw-r--r--kernel/trace/trace.c6
-rw-r--r--lib/dynamic_debug.c9
-rw-r--r--lib/kref.c30
-rw-r--r--net/9p/protocol.c22
-rw-r--r--net/caif/caif_socket.c2
-rw-r--r--net/caif/chnl_net.c18
-rw-r--r--net/core/dev.c149
-rw-r--r--net/core/filter.c2
-rw-r--r--net/core/rtnetlink.c2
-rw-r--r--net/dccp/dccp.h3
-rw-r--r--net/dccp/input.c2
-rw-r--r--net/dccp/sysctl.c4
-rw-r--r--net/ethernet/eth.c12
-rw-r--r--net/ipv4/ah4.c7
-rw-r--r--net/ipv4/arp.c29
-rw-r--r--net/ipv4/inet_connection_sock.c5
-rw-r--r--net/ipv4/inet_diag.c2
-rw-r--r--net/ipv4/netfilter/arp_tables.c45
-rw-r--r--net/ipv4/netfilter/ip_tables.c45
-rw-r--r--net/ipv6/ah6.c8
-rw-r--r--net/ipv6/inet6_connection_sock.c2
-rw-r--r--net/ipv6/netfilter/ip6_tables.c45
-rw-r--r--net/netfilter/nf_conntrack_netlink.c18
-rw-r--r--net/netfilter/x_tables.c3
-rw-r--r--net/netlink/genetlink.c2
-rw-r--r--net/phonet/af_phonet.c6
-rw-r--r--net/sched/act_csum.c3
-rw-r--r--net/sched/act_ipt.c3
-rw-r--r--net/sched/act_mirred.c3
-rw-r--r--net/sched/act_nat.c3
-rw-r--r--net/sched/act_pedit.c3
-rw-r--r--net/sched/act_police.c3
-rw-r--r--net/sched/act_simple.c3
-rw-r--r--net/sched/act_skbedit.c3
-rw-r--r--net/sched/sch_atm.c6
-rw-r--r--net/sched/sch_cbq.c6
-rw-r--r--net/sched/sch_drr.c8
-rw-r--r--net/sched/sch_dsmark.c3
-rw-r--r--net/sched/sch_hfsc.c6
-rw-r--r--net/sched/sch_htb.c17
-rw-r--r--net/sched/sch_ingress.c3
-rw-r--r--net/sched/sch_multiq.c3
-rw-r--r--net/sched/sch_netem.c6
-rw-r--r--net/sched/sch_prio.c3
-rw-r--r--net/sched/sch_red.c3
-rw-r--r--net/sched/sch_sfq.c3
-rw-r--r--net/sched/sch_tbf.c3
-rw-r--r--net/sched/sch_teql.c3
-rw-r--r--net/sunrpc/auth.c28
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c44
-rw-r--r--net/sunrpc/bc_svc.c2
-rw-r--r--net/sunrpc/clnt.c21
-rw-r--r--net/sunrpc/rpc_pipe.c2
-rw-r--r--net/sunrpc/rpcb_clnt.c147
-rw-r--r--net/sunrpc/svc.c36
-rw-r--r--net/sunrpc/svcsock.c106
-rw-r--r--net/sunrpc/xdr.c155
-rw-r--r--net/xfrm/xfrm_user.c6
-rw-r--r--tools/perf/Makefile2
-rw-r--r--tools/perf/builtin-record.c3
-rw-r--r--tools/perf/builtin-sched.c5
-rw-r--r--tools/perf/builtin-stat.c5
-rw-r--r--tools/perf/builtin-test.c116
-rw-r--r--tools/perf/builtin-top.c2
-rw-r--r--tools/perf/util/evsel.c87
-rw-r--r--tools/perf/util/evsel.h2
-rw-r--r--tools/perf/util/parse-events.c74
-rw-r--r--tools/perf/util/session.c2
885 files changed, 59306 insertions, 34521 deletions
diff --git a/Documentation/i2c/muxes/gpio-i2cmux b/Documentation/i2c/muxes/gpio-i2cmux
new file mode 100644
index 000000000000..811cd78d4cdc
--- /dev/null
+++ b/Documentation/i2c/muxes/gpio-i2cmux
@@ -0,0 +1,65 @@
+Kernel driver gpio-i2cmux
+
+Author: Peter Korsgaard <peter.korsgaard@barco.com>
+
+Description
+-----------
+
+gpio-i2cmux is an i2c mux driver providing access to I2C bus segments
+from a master I2C bus and a hardware MUX controlled through GPIO pins.
+
+E.G.:
+
+  ----------              ----------  Bus segment 1   - - - - -
+ |          | SCL/SDA    |          |-------------- |         |
+ |          |------------|          |
+ |          |            |          | Bus segment 2 |         |
+ |  Linux   | GPIO 1..N  |   MUX    |---------------  Devices
+ |          |------------|          |               |         |
+ |          |            |          | Bus segment M
+ |          |            |          |---------------|         |
+  ----------              ----------                 - - - - -
+
+SCL/SDA of the master I2C bus is multiplexed to bus segment 1..M
+according to the settings of the GPIO pins 1..N.
+
+Usage
+-----
+
+gpio-i2cmux uses the platform bus, so you need to provide a struct
+platform_device with the platform_data pointing to a struct
+gpio_i2cmux_platform_data with the I2C adapter number of the master
+bus, the number of bus segments to create and the GPIO pins used
+to control it. See include/linux/gpio-i2cmux.h for details.
+
+E.G. something like this for a MUX providing 4 bus segments
+controlled through 3 GPIO pins:
+
+#include <linux/gpio-i2cmux.h>
+#include <linux/platform_device.h>
+
+static const unsigned myboard_gpiomux_gpios[] = {
+	AT91_PIN_PC26, AT91_PIN_PC25, AT91_PIN_PC24
+};
+
+static const unsigned myboard_gpiomux_values[] = {
+	0, 1, 2, 3
+};
+
+static struct gpio_i2cmux_platform_data myboard_i2cmux_data = {
+	.parent		= 1,
+	.base_nr	= 2, /* optional */
+	.values		= myboard_gpiomux_values,
+	.n_values	= ARRAY_SIZE(myboard_gpiomux_values),
+	.gpios		= myboard_gpiomux_gpios,
+	.n_gpios	= ARRAY_SIZE(myboard_gpiomux_gpios),
+	.idle		= 4, /* optional */
+};
+
+static struct platform_device myboard_i2cmux = {
+	.name		= "gpio-i2cmux",
+	.id		= 0,
+	.dev		= {
+		.platform_data	= &myboard_i2cmux_data,
+	},
+};
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index f3dc951e949f..ed3708f8d0db 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -403,6 +403,10 @@ and is between 256 and 4096 characters. It is defined in the file
 	bttv.pll=	See Documentation/video4linux/bttv/Insmod-options
 	bttv.tuner=	and Documentation/video4linux/bttv/CARDLIST
 
+	bulk_remove=off	[PPC]  This parameter disables the use of the pSeries
+			firmware feature for flushing multiple hpte entries
+			at a time.
+
 	c101=		[NET] Moxa C101 synchronous serial card
 
 	cachesize=	[BUGS=X86-32] Override level 2 CPU cache size detection.
@@ -1490,6 +1494,10 @@ and is between 256 and 4096 characters. It is defined in the file
 	mtdparts=	[MTD]
 			See drivers/mtd/cmdlinepart.c.
 
+	multitce=off	[PPC]  This parameter disables the use of the pSeries
+			firmware feature for updating multiple TCE entries
+			at a time.
+
 	onenand.bdry=	[HW,MTD] Flex-OneNAND Boundary Configuration
 
 			Format: [die0_boundary][,die0_lock][,die1_boundary][,die1_lock]
diff --git a/Documentation/networking/dccp.txt b/Documentation/networking/dccp.txt
index b395ca6a49f2..811872b45bee 100644
--- a/Documentation/networking/dccp.txt
+++ b/Documentation/networking/dccp.txt
@@ -167,6 +167,7 @@ rx_ccid = 2
 seq_window = 100
 	The initial sequence window (sec. 7.5.2) of the sender. This influences
 	the local ackno validity and the remote seqno validity windows (7.5.1).
+	Values in the range Wmin = 32 (RFC 4340, 7.5.2) up to 2^32-1 can be set.
 
 tx_qlen = 5
 	The size of the transmit buffer in packets. A value of 0 corresponds
diff --git a/Documentation/powerpc/booting-without-of.txt b/Documentation/powerpc/booting-without-of.txt
index 302db5da49b3..3272ed59dec7 100644
--- a/Documentation/powerpc/booting-without-of.txt
+++ b/Documentation/powerpc/booting-without-of.txt
@@ -131,7 +131,7 @@ order to avoid the degeneration that had become the ppc32 kernel entry
 point and the way a new platform should be added to the kernel. The
 legacy iSeries platform breaks those rules as it predates this scheme,
 but no new board support will be accepted in the main tree that
-doesn't follows them properly. In addition, since the advent of the
+doesn't follow them properly. In addition, since the advent of the
 arch/powerpc merged architecture for ppc32 and ppc64, new 32-bit
 platforms and 32-bit platforms which move into arch/powerpc will be
 required to use these rules as well.
@@ -1025,7 +1025,7 @@ dtc source code can be found at
 
 WARNING: This version is still in early development stage; the
 resulting device-tree "blobs" have not yet been validated with the
-kernel. The current generated bloc lacks a useful reserve map (it will
+kernel. The current generated block lacks a useful reserve map (it will
 be fixed to generate an empty one, it's up to the bootloader to fill
 it up) among others. The error handling needs work, bugs are lurking,
 etc...
diff --git a/Documentation/powerpc/dts-bindings/4xx/cpm.txt b/Documentation/powerpc/dts-bindings/4xx/cpm.txt
new file mode 100644
index 000000000000..ee459806d35e
--- /dev/null
+++ b/Documentation/powerpc/dts-bindings/4xx/cpm.txt
@@ -0,0 +1,52 @@
+PPC4xx Clock Power Management (CPM) node
+
+Required properties:
+	- compatible		: compatible list, currently only "ibm,cpm"
+	- dcr-access-method	: "native"
+	- dcr-reg		: < DCR register range >
+
+Optional properties:
+	- er-offset		: All 4xx SoCs with a CPM controller have
+				  one of two different order for the CPM
+				  registers. Some have the CPM registers
+				  in the following order (ER,FR,SR). The
+				  others have them in the following order
+				  (SR,ER,FR). For the second case set
+				  er-offset = <1>.
+	- unused-units		: specifier consist of one cell. For each
+				  bit in the cell, the corresponding bit
+				  in CPM will be set to turn off unused
+				  devices.
+	- idle-doze		: specifier consist of one cell. For each
+				  bit in the cell, the corresponding bit
+				  in CPM will be set to turn off unused
+				  devices. This is usually just CPM[CPU].
+	- standby		: specifier consist of one cell. For each
+				  bit in the cell, the corresponding bit
+				  in CPM will be set on standby and
+				  restored on resume.
+	- suspend		: specifier consist of one cell. For each
+				  bit in the cell, the corresponding bit
+				  in CPM will be set on suspend (mem) and
+				  restored on resume. Note, for standby
+				  and suspend the corresponding bits can
+				  be different or the same. Usually for
+				  standby only class 2 and 3 units are set.
+				  However, the interface does not care.
+				  If they are the same, the additional
+				  power saving will be seeing if support
+				  is available to put the DDR in self
+				  refresh mode and any additional power
+				  saving techniques for the specific SoC.
+
+Example:
+	CPM0: cpm {
+		compatible = "ibm,cpm";
+		dcr-access-method = "native";
+		dcr-reg = <0x160 0x003>;
+		er-offset = <0>;
+		unused-units = <0x00000100>;
+		idle-doze = <0x02000000>;
+		standby = <0xfeff0000>;
+		suspend = <0xfeff791d>;
+};
diff --git a/MAINTAINERS b/MAINTAINERS
index aca102f758ba..42f991e5a85d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -285,6 +285,41 @@ L: linux-parisc@vger.kernel.org
 S:	Maintained
 F:	sound/pci/ad1889.*
 
+AD525X ANALOG DEVICES DIGITAL POTENTIOMETERS DRIVER
+M:	Michael Hennerich <michael.hennerich@analog.com>
+L:	device-driver-devel@blackfin.uclinux.org
+W:	http://wiki-analog.com/AD5254
+S:	Supported
+F:	drivers/misc/ad525x_dpot.c
+
+AD5398 CURRENT REGULATOR DRIVER (AD5398/AD5821)
+M:	Michael Hennerich <michael.hennerich@analog.com>
+L:	device-driver-devel@blackfin.uclinux.org
+W:	http://wiki-analog.com/AD5398
+S:	Supported
+F:	drivers/regulator/ad5398.c
+
+AD714X CAPACITANCE TOUCH SENSOR DRIVER (AD7142/3/7/8/7A)
+M:	Michael Hennerich <michael.hennerich@analog.com>
+L:	device-driver-devel@blackfin.uclinux.org
+W:	http://wiki-analog.com/AD7142
+S:	Supported
+F:	drivers/input/misc/ad714x.c
+
+AD7877 TOUCHSCREEN DRIVER
+M:	Michael Hennerich <michael.hennerich@analog.com>
+L:	device-driver-devel@blackfin.uclinux.org
+W:	http://wiki-analog.com/AD7877
+S:	Supported
+F:	drivers/input/touchscreen/ad7877.c
+
+AD7879 TOUCHSCREEN DRIVER (AD7879/AD7889)
+M:	Michael Hennerich <michael.hennerich@analog.com>
+L:	device-driver-devel@blackfin.uclinux.org
+W:	http://wiki-analog.com/AD7879
+S:	Supported
+F:	drivers/input/touchscreen/ad7879.c
+
 ADM1025 HARDWARE MONITOR DRIVER
 M:	Jean Delvare <khali@linux-fr.org>
 L:	lm-sensors@lm-sensors.org
@@ -304,6 +339,32 @@ W: http://linuxwireless.org/
 S:	Orphan
 F:	drivers/net/wireless/adm8211.*
 
+ADP5520 BACKLIGHT DRIVER WITH IO EXPANDER (ADP5520/ADP5501)
+M:	Michael Hennerich <michael.hennerich@analog.com>
+L:	device-driver-devel@blackfin.uclinux.org
+W:	http://wiki-analog.com/ADP5520
+S:	Supported
+F:	drivers/mfd/adp5520.c
+F:	drivers/video/backlight/adp5520_bl.c
+F:	drivers/led/leds-adp5520.c
+F:	drivers/gpio/adp5520-gpio.c
+F:	drivers/input/keyboard/adp5520-keys.c
+
+ADP5588 QWERTY KEYPAD AND IO EXPANDER DRIVER (ADP5588/ADP5587)
+M:	Michael Hennerich <michael.hennerich@analog.com>
+L:	device-driver-devel@blackfin.uclinux.org
+W:	http://wiki-analog.com/ADP5588
+S:	Supported
+F:	drivers/input/keyboard/adp5588-keys.c
+F:	drivers/gpio/adp5588-gpio.c
+
+ADP8860 BACKLIGHT DRIVER (ADP8860/ADP8861/ADP8863)
+M:	Michael Hennerich <michael.hennerich@analog.com>
+L:	device-driver-devel@blackfin.uclinux.org
+W:	http://wiki-analog.com/ADP8860
+S:	Supported
+F:	drivers/video/backlight/adp8860_bl.c
+
 ADT746X FAN DRIVER
 M:	Colin Leroy <colin@colino.net>
 S:	Maintained
@@ -316,6 +377,13 @@ S: Maintained
 F:	Documentation/hwmon/adt7475
 F:	drivers/hwmon/adt7475.c
 
+ADXL34X THREE-AXIS DIGITAL ACCELEROMETER DRIVER (ADXL345/ADXL346)
+M:	Michael Hennerich <michael.hennerich@analog.com>
+L:	device-driver-devel@blackfin.uclinux.org
+W:	http://wiki-analog.com/ADXL345
+S:	Supported
+F:	drivers/input/misc/adxl34x.c
+
 ADVANSYS SCSI DRIVER
 M:	Matthew Wilcox <matthew@wil.cx>
 L:	linux-scsi@vger.kernel.org
@@ -440,17 +508,23 @@ L: linux-rdma@vger.kernel.org
 S:	Maintained
 F:	drivers/infiniband/hw/amso1100/
 
-ANALOG DEVICES INC ASOC DRIVERS
-L:	uclinux-dist-devel@blackfin.uclinux.org
+ANALOG DEVICES INC ASOC CODEC DRIVERS
+L:	device-driver-devel@blackfin.uclinux.org
 L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
-W:	http://blackfin.uclinux.org/
+W:	http://wiki-analog.com/
 S:	Supported
-F:	sound/soc/blackfin/*
 F:	sound/soc/codecs/ad1*
 F:	sound/soc/codecs/adau*
 F:	sound/soc/codecs/adav*
 F:	sound/soc/codecs/ssm*
 
+ANALOG DEVICES INC ASOC DRIVERS
+L:	uclinux-dist-devel@blackfin.uclinux.org
+L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
+W:	http://blackfin.uclinux.org/
+S:	Supported
+F:	sound/soc/blackfin/*
+
 AOA (Apple Onboard Audio) ALSA DRIVER
 M:	Johannes Berg <johannes@sipsolutions.net>
 L:	linuxppc-dev@lists.ozlabs.org
@@ -1711,7 +1785,8 @@ S: Maintained
 F:	drivers/usb/atm/cxacru.c
 
 CONFIGFS
-M:	Joel Becker <joel.becker@oracle.com>
+M:	Joel Becker <jlbec@evilplan.org>
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jlbec/configfs.git
 S:	Supported
 F:	fs/configfs/
 F:	include/linux/configfs.h
@@ -2618,6 +2693,14 @@ S: Supported
 F:	drivers/i2c/busses/i2c-gpio.c
 F:	include/linux/i2c-gpio.h
 
+GENERIC GPIO I2C MULTIPLEXER DRIVER
+M:	Peter Korsgaard <peter.korsgaard@barco.com>
+L:	linux-i2c@vger.kernel.org
+S:	Supported
+F:	drivers/i2c/muxes/gpio-i2cmux.c
+F:	include/linux/gpio-i2cmux.h
+F:	Documentation/i2c/muxes/gpio-i2cmux
+
 GENERIC HDLC (WAN) DRIVERS
 M:	Krzysztof Halasa <khc@pm.waw.pl>
 W:	http://www.kernel.org/pub/linux/utils/net/hdlc/
@@ -4467,7 +4550,7 @@ F: include/linux/oprofile.h
 
 ORACLE CLUSTER FILESYSTEM 2 (OCFS2)
 M:	Mark Fasheh <mfasheh@suse.com>
-M:	Joel Becker <joel.becker@oracle.com>
+M:	Joel Becker <jlbec@evilplan.org>
 L:	ocfs2-devel@oss.oracle.com (moderated for non-subscribers)
 W:	http://oss.oracle.com/projects/ocfs2/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jlbec/ocfs2.git
diff --git a/arch/arm/mach-dove/common.c b/arch/arm/mach-dove/common.c
index f7a12586a1f5..fe627aba6da7 100644
--- a/arch/arm/mach-dove/common.c
+++ b/arch/arm/mach-dove/common.c
@@ -770,7 +770,7 @@ static struct resource dove_sdio0_resources[] = {
 };
 
 static struct platform_device dove_sdio0 = {
-	.name		= "sdhci-mv",
+	.name		= "sdhci-dove",
 	.id		= 0,
 	.dev		= {
 		.dma_mask		= &sdio_dmamask,
@@ -798,7 +798,7 @@ static struct resource dove_sdio1_resources[] = {
 };
 
 static struct platform_device dove_sdio1 = {
-	.name		= "sdhci-mv",
+	.name		= "sdhci-dove",
 	.id		= 1,
 	.dev		= {
 		.dma_mask		= &sdio_dmamask,
diff --git a/arch/arm/mach-tegra/include/mach/sdhci.h b/arch/arm/mach-tegra/include/mach/sdhci.h
new file mode 100644
index 000000000000..3ad086e859c3
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/sdhci.h
@@ -0,0 +1,29 @@
+/*
+ * include/asm-arm/arch-tegra/include/mach/sdhci.h
+ *
+ * Copyright (C) 2009 Palm, Inc.
+ * Author: Yvonne Yip <y@palm.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __ASM_ARM_ARCH_TEGRA_SDHCI_H
+#define __ASM_ARM_ARCH_TEGRA_SDHCI_H
+
+#include <linux/mmc/host.h>
+
+struct tegra_sdhci_platform_data {
+	int cd_gpio;
+	int wp_gpio;
+	int power_gpio;
+	int is_8bit;
+};
+
+#endif
diff --git a/arch/blackfin/Makefile b/arch/blackfin/Makefile
index 46738d49b7c8..46f42b2066e5 100644
--- a/arch/blackfin/Makefile
+++ b/arch/blackfin/Makefile
@@ -19,7 +19,7 @@ KBUILD_CFLAGS += -mlong-calls
 endif
 KBUILD_AFLAGS += $(call cc-option,-mno-fdpic)
 KBUILD_CFLAGS_MODULE += -mlong-calls
-KBUILD_LDFLAGS_MODULE += -m elf32bfin
+LDFLAGS += -m elf32bfin
 KALLSYMS += --symbol-prefix=_
 
 KBUILD_DEFCONFIG := BF537-STAMP_defconfig
@@ -97,8 +97,11 @@ rev-$(CONFIG_BF_REV_0_6) := 0.6
 rev-$(CONFIG_BF_REV_NONE) := none
 rev-$(CONFIG_BF_REV_ANY) := any
 
-KBUILD_CFLAGS += -mcpu=$(cpu-y)-$(rev-y)
-KBUILD_AFLAGS += -mcpu=$(cpu-y)-$(rev-y)
+CPU_REV := $(cpu-y)-$(rev-y)
+export CPU_REV
+
+KBUILD_CFLAGS += -mcpu=$(CPU_REV)
+KBUILD_AFLAGS += -mcpu=$(CPU_REV)
 
 # - we utilize the silicon rev from the toolchain, so move it over to the checkflags
 CHECKFLAGS_SILICON = $(shell echo "" | $(CPP) $(KBUILD_CFLAGS) -dD - 2>/dev/null | awk '$$2 == "__SILICON_REVISION__" { print $$3 }')
diff --git a/arch/blackfin/boot/Makefile b/arch/blackfin/boot/Makefile
index 13d2dbd658e3..0a49279e3428 100644
--- a/arch/blackfin/boot/Makefile
+++ b/arch/blackfin/boot/Makefile
@@ -17,7 +17,7 @@ UIMAGE_OPTS-$(CONFIG_ROMKERNEL) += -a $(CONFIG_ROM_BASE) -x
 
 quiet_cmd_uimage = UIMAGE $@
       cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A $(ARCH) -O linux -T kernel \
-                   -C $(2) -n '$(MACHINE)-$(KERNELRELEASE)' \
+                   -C $(2) -n '$(CPU_REV)-$(KERNELRELEASE)' \
                    -e $(shell $(NM) vmlinux | awk '$$NF == "__start" {print $$1}') \
                    $(UIMAGE_OPTS-y) -d $< $@
 
diff --git a/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig b/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig
new file mode 100644
index 000000000000..4cf451024fd8
--- /dev/null
+++ b/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig
@@ -0,0 +1,113 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_SYSVIPC=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_EMBEDDED=y
+# CONFIG_SYSCTL_SYSCALL is not set
+# CONFIG_ELF_CORE is not set
+# CONFIG_FUTEX is not set
+# CONFIG_SIGNALFD is not set
+# CONFIG_TIMERFD is not set
+# CONFIG_EVENTFD is not set
+# CONFIG_AIO is not set
+CONFIG_SLAB=y
+CONFIG_MMAP_ALLOW_UNINITIALIZED=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_PREEMPT_VOLUNTARY=y
+CONFIG_BF561=y
+CONFIG_SMP=y
+CONFIG_IRQ_TIMER0=10
+CONFIG_CLKIN_HZ=30000000
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0
+CONFIG_BFIN_GPTIMERS=m
+CONFIG_C_CDPRIO=y
+CONFIG_BANK_3=0xAAC2
+CONFIG_BINFMT_FLAT=y
+CONFIG_BINFMT_ZFLAT=y
+CONFIG_PM=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_IPV6 is not set
+CONFIG_IRDA=m
+CONFIG_IRLAN=m
+CONFIG_IRCOMM=m
+CONFIG_IRDA_CACHE_LAST_LSAP=y
+CONFIG_IRTTY_SIR=m
+# CONFIG_WIRELESS is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_FW_LOADER is not set
+CONFIG_MTD=y
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_CHAR=m
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=m
+CONFIG_MTD_CFI_AMDSTD=m
+CONFIG_MTD_RAM=y
+CONFIG_MTD_ROM=m
+CONFIG_MTD_PHYSMAP=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_NETDEVICES=y
+CONFIG_NET_ETHERNET=y
+CONFIG_SMC91X=y
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
+CONFIG_INPUT=m
+# CONFIG_INPUT_MOUSEDEV is not set
+CONFIG_INPUT_EVDEV=m
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_BFIN_JTAG_COMM=m
+CONFIG_SERIAL_BFIN=y
+CONFIG_SERIAL_BFIN_CONSOLE=y
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_HW_RANDOM is not set
+CONFIG_SPI=y
+CONFIG_SPI_BFIN=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_BFIN_WDT=y
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_DNOTIFY is not set
+CONFIG_JFFS2_FS=m
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3=y
+CONFIG_SMB_FS=m
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_FTRACE is not set
+CONFIG_DEBUG_MMRS=y
+CONFIG_DEBUG_HWERR=y
+CONFIG_EXACT_HWERR=y
+CONFIG_DEBUG_DOUBLEFAULT=y
+CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_CPLB_INFO=y
+CONFIG_CRYPTO=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/blackfin/configs/DNP5370_defconfig b/arch/blackfin/configs/DNP5370_defconfig
new file mode 100644
index 000000000000..0ebc7d9aa426
--- /dev/null
+++ b/arch/blackfin/configs/DNP5370_defconfig
@@ -0,0 +1,121 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_LOCALVERSION="DNP5370"
+CONFIG_SYSVIPC=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EMBEDDED=y
+CONFIG_SLOB=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_BF537=y
+CONFIG_BF_REV_0_3=y
+CONFIG_DNP5370=y
+CONFIG_IRQ_ERROR=7
+# CONFIG_CYCLES_CLOCKSOURCE is not set
+CONFIG_C_CDPRIO=y
+CONFIG_C_AMBEN_B0_B1_B2=y
+CONFIG_PM=y
+# CONFIG_SUSPEND is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_IPV6 is not set
+CONFIG_LLC2=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_MTD=y
+CONFIG_MTD_DEBUG=y
+CONFIG_MTD_DEBUG_VERBOSE=1
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_NFTL=y
+CONFIG_NFTL_RW=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_ROM=y
+CONFIG_MTD_ABSENT=y
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_UCLINUX=y
+CONFIG_MTD_PLATRAM=y
+CONFIG_MTD_DATAFLASH=y
+CONFIG_MTD_BLOCK2MTD=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_PLATFORM=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+# CONFIG_MISC_DEVICES is not set
+CONFIG_NETDEVICES=y
+CONFIG_DAVICOM_PHY=y
+CONFIG_NET_ETHERNET=y
+CONFIG_BFIN_MAC=y
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_BFIN_DMA_INTERFACE is not set
+# CONFIG_VT is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_BFIN_JTAG_COMM=y
+CONFIG_BFIN_JTAG_COMM_CONSOLE=y
+CONFIG_SERIAL_BFIN=y
+CONFIG_SERIAL_BFIN_CONSOLE=y
+CONFIG_SERIAL_BFIN_UART0=y
+CONFIG_LEGACY_PTY_COUNT=64
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_BLACKFIN_TWI=y
+CONFIG_SPI=y
+CONFIG_SPI_BFIN=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_SENSORS_LM75=y
+# CONFIG_USB_SUPPORT is not set
+CONFIG_MMC=y
+CONFIG_MMC_SPI=y
+CONFIG_DMADEVICES=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+# CONFIG_DNOTIFY is not set
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=850
+CONFIG_JFFS2_FS=y
+CONFIG_CRAMFS=y
+CONFIG_ROMFS_FS=y
+CONFIG_ROMFS_BACKED_BY_BOTH=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_850=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEBUG_OBJECTS=y
+CONFIG_DEBUG_LOCK_ALLOC=y
+CONFIG_DEBUG_KOBJECT=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_VM=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DEBUG_LIST=y
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_PAGE_POISONING=y
+# CONFIG_FTRACE is not set
+CONFIG_DEBUG_DOUBLEFAULT=y
+CONFIG_CPLB_INFO=y
+CONFIG_CRC_CCITT=y
diff --git a/arch/blackfin/include/asm/bfin_dma.h b/arch/blackfin/include/asm/bfin_dma.h
new file mode 100644
index 000000000000..d51120744148
--- /dev/null
+++ b/arch/blackfin/include/asm/bfin_dma.h
@@ -0,0 +1,91 @@
+/*
+ * bfin_dma.h - Blackfin DMA defines/structures/etc...
+ *
+ * Copyright 2004-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __ASM_BFIN_DMA_H__
+#define __ASM_BFIN_DMA_H__
+
+#include <linux/types.h>
+
+/* DMA_CONFIG Masks */
+#define DMAEN		0x0001	/* DMA Channel Enable */
+#define WNR		0x0002	/* Channel Direction (W/R*) */
+#define WDSIZE_8	0x0000	/* Transfer Word Size = 8 */
+#define WDSIZE_16	0x0004	/* Transfer Word Size = 16 */
+#define WDSIZE_32	0x0008	/* Transfer Word Size = 32 */
+#define DMA2D		0x0010	/* DMA Mode (2D/1D*) */
+#define RESTART		0x0020	/* DMA Buffer Clear */
+#define DI_SEL		0x0040	/* Data Interrupt Timing Select */
+#define DI_EN		0x0080	/* Data Interrupt Enable */
+#define NDSIZE_0	0x0000	/* Next Descriptor Size = 0 (Stop/Autobuffer) */
+#define NDSIZE_1	0x0100	/* Next Descriptor Size = 1 */
+#define NDSIZE_2	0x0200	/* Next Descriptor Size = 2 */
+#define NDSIZE_3	0x0300	/* Next Descriptor Size = 3 */
+#define NDSIZE_4	0x0400	/* Next Descriptor Size = 4 */
+#define NDSIZE_5	0x0500	/* Next Descriptor Size = 5 */
+#define NDSIZE_6	0x0600	/* Next Descriptor Size = 6 */
+#define NDSIZE_7	0x0700	/* Next Descriptor Size = 7 */
+#define NDSIZE_8	0x0800	/* Next Descriptor Size = 8 */
+#define NDSIZE_9	0x0900	/* Next Descriptor Size = 9 */
+#define NDSIZE		0x0f00	/* Next Descriptor Size */
+#define DMAFLOW		0x7000	/* Flow Control */
+#define DMAFLOW_STOP	0x0000	/* Stop Mode */
+#define DMAFLOW_AUTO	0x1000	/* Autobuffer Mode */
+#define DMAFLOW_ARRAY	0x4000	/* Descriptor Array Mode */
+#define DMAFLOW_SMALL	0x6000	/* Small Model Descriptor List Mode */
+#define DMAFLOW_LARGE	0x7000	/* Large Model Descriptor List Mode */
+
+/* DMA_IRQ_STATUS Masks */
+#define DMA_DONE	0x0001	/* DMA Completion Interrupt Status */
+#define DMA_ERR		0x0002	/* DMA Error Interrupt Status */
+#define DFETCH		0x0004	/* DMA Descriptor Fetch Indicator */
+#define DMA_RUN		0x0008	/* DMA Channel Running Indicator */
+
+/*
+ * All Blackfin system MMRs are padded to 32bits even if the register
+ * itself is only 16bits.  So use a helper macro to streamline this.
+ */
+#define __BFP(m) u16 m; u16 __pad_##m
+
+/*
+ * bfin dma registers layout
+ */
+struct bfin_dma_regs {
+	u32 next_desc_ptr;
+	u32 start_addr;
+	__BFP(config);
+	u32 __pad0;
+	__BFP(x_count);
+	__BFP(x_modify);
+	__BFP(y_count);
+	__BFP(y_modify);
+	u32 curr_desc_ptr;
+	u32 curr_addr;
+	__BFP(irq_status);
+	__BFP(peripheral_map);
+	__BFP(curr_x_count);
+	u32 __pad1;
+	__BFP(curr_y_count);
+	u32 __pad2;
+};
+
+/*
+ * bfin handshake mdma registers layout
+ */
+struct bfin_hmdma_regs {
+	__BFP(control);
+	__BFP(ecinit);
+	__BFP(bcinit);
+	__BFP(ecurgent);
+	__BFP(ecoverflow);
+	__BFP(ecount);
+	__BFP(bcount);
+};
+
+#undef __BFP
+
+#endif
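
The __BFP() helper keeps each 16-bit MMR glued to its pad word so the struct mirrors the 32-bit-strided register map. As an illustrative expansion (not part of the patch), the head of struct bfin_dma_regs comes out as:

    /* Illustrative expansion only; u16/u32 come from <linux/types.h>. */
    struct bfin_dma_regs_expanded {
    	u32 next_desc_ptr;	/* offset 0x00 */
    	u32 start_addr;		/* offset 0x04 */
    	u16 config;		/* offset 0x08: __BFP(config), the MMR itself */
    	u16 __pad_config;	/* offset 0x0a: pads the slot out to 32 bits */
    	u32 __pad0;		/* offset 0x0c */
    	/* ... remaining registers continue on 32-bit boundaries ... */
    };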
diff --git a/arch/blackfin/include/asm/bfin_serial.h b/arch/blackfin/include/asm/bfin_serial.h
new file mode 100644
index 000000000000..1ff9f1468c02
--- /dev/null
+++ b/arch/blackfin/include/asm/bfin_serial.h
@@ -0,0 +1,275 @@
+/*
+ * bfin_serial.h - Blackfin UART/Serial definitions
+ *
+ * Copyright 2006-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __BFIN_ASM_SERIAL_H__
+#define __BFIN_ASM_SERIAL_H__
+
+#include <linux/serial_core.h>
+#include <mach/anomaly.h>
+#include <mach/bfin_serial.h>
+
+#if defined(CONFIG_BFIN_UART0_CTSRTS) || \
+    defined(CONFIG_BFIN_UART1_CTSRTS) || \
+    defined(CONFIG_BFIN_UART2_CTSRTS) || \
+    defined(CONFIG_BFIN_UART3_CTSRTS)
+# ifdef BFIN_UART_BF54X_STYLE
+#  define CONFIG_SERIAL_BFIN_HARD_CTSRTS
+# else
+#  define CONFIG_SERIAL_BFIN_CTSRTS
+# endif
+#endif
+
+struct circ_buf;
+struct timer_list;
+struct work_struct;
+
+struct bfin_serial_port {
+	struct uart_port port;
+	unsigned int old_status;
+	int status_irq;
+#ifndef BFIN_UART_BF54X_STYLE
+	unsigned int lsr;
+#endif
+#ifdef CONFIG_SERIAL_BFIN_DMA
+	int tx_done;
+	int tx_count;
+	struct circ_buf rx_dma_buf;
+	struct timer_list rx_dma_timer;
+	int rx_dma_nrows;
+	unsigned int tx_dma_channel;
+	unsigned int rx_dma_channel;
+	struct work_struct tx_dma_workqueue;
+#elif ANOMALY_05000363
+	unsigned int anomaly_threshold;
+#endif
+#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
+	int scts;
+#endif
+#if defined(CONFIG_SERIAL_BFIN_CTSRTS) || \
+    defined(CONFIG_SERIAL_BFIN_HARD_CTSRTS)
+	int cts_pin;
+	int rts_pin;
+#endif
+};
+
+/* UART_LCR Masks */
+#define WLS(x)		(((x)-5) & 0x03)	/* Word Length Select */
+#define STB		0x04			/* Stop Bits */
+#define PEN		0x08			/* Parity Enable */
+#define EPS		0x10			/* Even Parity Select */
+#define STP		0x20			/* Stick Parity */
+#define SB		0x40			/* Set Break */
+#define DLAB		0x80			/* Divisor Latch Access */
+
+/* UART_LSR Masks */
+#define DR		0x01	/* Data Ready */
+#define OE		0x02	/* Overrun Error */
+#define PE		0x04	/* Parity Error */
+#define FE		0x08	/* Framing Error */
+#define BI		0x10	/* Break Interrupt */
+#define THRE		0x20	/* THR Empty */
+#define TEMT		0x40	/* TSR and UART_THR Empty */
+#define TFI		0x80	/* Transmission Finished Indicator */
+
+/* UART_IER Masks */
+#define ERBFI		0x01	/* Enable Receive Buffer Full Interrupt */
+#define ETBEI		0x02	/* Enable Transmit Buffer Empty Interrupt */
+#define ELSI		0x04	/* Enable RX Status Interrupt */
+#define EDSSI		0x08	/* Enable Modem Status Interrupt */
+#define EDTPTI		0x10	/* Enable DMA Transmit PIRQ Interrupt */
+#define ETFI		0x20	/* Enable Transmission Finished Interrupt */
+#define ERFCI		0x40	/* Enable Receive FIFO Count Interrupt */
+
+/* UART_MCR Masks */
+#define XOFF		0x01	/* Transmitter Off */
+#define MRTS		0x02	/* Manual Request To Send */
+#define RFIT		0x04	/* Receive FIFO IRQ Threshold */
+#define RFRT		0x08	/* Receive FIFO RTS Threshold */
+#define LOOP_ENA	0x10	/* Loopback Mode Enable */
+#define FCPOL		0x20	/* Flow Control Pin Polarity */
+#define ARTS		0x40	/* Automatic Request To Send */
+#define ACTS		0x80	/* Automatic Clear To Send */
+
+/* UART_MSR Masks */
+#define SCTS		0x01	/* Sticky CTS */
+#define CTS		0x10	/* Clear To Send */
+#define RFCS		0x20	/* Receive FIFO Count Status */
+
+/* UART_GCTL Masks */
+#define UCEN		0x01	/* Enable UARTx Clocks */
+#define IREN		0x02	/* Enable IrDA Mode */
+#define TPOLC		0x04	/* IrDA TX Polarity Change */
+#define RPOLC		0x08	/* IrDA RX Polarity Change */
+#define FPE		0x10	/* Force Parity Error On Transmit */
+#define FFE		0x20	/* Force Framing Error On Transmit */
+
+#ifdef BFIN_UART_BF54X_STYLE
+# define OFFSET_DLL		0x00	/* Divisor Latch (Low-Byte) */
+# define OFFSET_DLH		0x04	/* Divisor Latch (High-Byte) */
+# define OFFSET_GCTL		0x08	/* Global Control Register */
+# define OFFSET_LCR		0x0C	/* Line Control Register */
+# define OFFSET_MCR		0x10	/* Modem Control Register */
+# define OFFSET_LSR		0x14	/* Line Status Register */
+# define OFFSET_MSR		0x18	/* Modem Status Register */
+# define OFFSET_SCR		0x1C	/* SCR Scratch Register */
+# define OFFSET_IER_SET		0x20	/* Set Interrupt Enable Register */
+# define OFFSET_IER_CLEAR	0x24	/* Clear Interrupt Enable Register */
+# define OFFSET_THR		0x28	/* Transmit Holding register */
+# define OFFSET_RBR		0x2C	/* Receive Buffer register */
+#else /* BF533 style */
+# define OFFSET_THR		0x00	/* Transmit Holding register */
+# define OFFSET_RBR		0x00	/* Receive Buffer register */
+# define OFFSET_DLL		0x00	/* Divisor Latch (Low-Byte) */
+# define OFFSET_DLH		0x04	/* Divisor Latch (High-Byte) */
+# define OFFSET_IER		0x04	/* Interrupt Enable Register */
+# define OFFSET_IIR		0x08	/* Interrupt Identification Register */
+# define OFFSET_LCR		0x0C	/* Line Control Register */
+# define OFFSET_MCR		0x10	/* Modem Control Register */
+# define OFFSET_LSR		0x14	/* Line Status Register */
+# define OFFSET_MSR		0x18	/* Modem Status Register */
+# define OFFSET_SCR		0x1C	/* SCR Scratch Register */
+# define OFFSET_GCTL		0x24	/* Global Control Register */
+/* code should not need IIR, so force build error if they use it */
+# undef OFFSET_IIR
+#endif
+
+/*
+ * All Blackfin system MMRs are padded to 32bits even if the register
+ * itself is only 16bits.  So use a helper macro to streamline this.
+ */
+#define __BFP(m) u16 m; u16 __pad_##m
+struct bfin_uart_regs {
+#ifdef BFIN_UART_BF54X_STYLE
+	__BFP(dll);
+	__BFP(dlh);
+	__BFP(gctl);
+	__BFP(lcr);
+	__BFP(mcr);
+	__BFP(lsr);
+	__BFP(msr);
+	__BFP(scr);
+	__BFP(ier_set);
+	__BFP(ier_clear);
+	__BFP(thr);
+	__BFP(rbr);
+#else
+	union {
+		u16 dll;
+		u16 thr;
+		const u16 rbr;
+	};
+	const u16 __pad0;
+	union {
+		u16 dlh;
+		u16 ier;
+	};
+	const u16 __pad1;
+	const __BFP(iir);
+	__BFP(lcr);
+	__BFP(mcr);
+	__BFP(lsr);
+	__BFP(msr);
+	__BFP(scr);
+	const u32 __pad2;
+	__BFP(gctl);
+#endif
+};
+#undef __BFP
+
+#ifndef port_membase
+# define port_membase(p) (((struct bfin_serial_port *)(p))->port.membase)
+#endif
+
+#define UART_GET_CHAR(p)      bfin_read16(port_membase(p) + OFFSET_RBR)
+#define UART_GET_DLL(p)       bfin_read16(port_membase(p) + OFFSET_DLL)
+#define UART_GET_DLH(p)       bfin_read16(port_membase(p) + OFFSET_DLH)
+#define UART_GET_GCTL(p)      bfin_read16(port_membase(p) + OFFSET_GCTL)
+#define UART_GET_LCR(p)       bfin_read16(port_membase(p) + OFFSET_LCR)
+#define UART_GET_MCR(p)       bfin_read16(port_membase(p) + OFFSET_MCR)
+#define UART_GET_MSR(p)       bfin_read16(port_membase(p) + OFFSET_MSR)
+
+#define UART_PUT_CHAR(p, v)   bfin_write16(port_membase(p) + OFFSET_THR, v)
+#define UART_PUT_DLL(p, v)    bfin_write16(port_membase(p) + OFFSET_DLL, v)
+#define UART_PUT_DLH(p, v)    bfin_write16(port_membase(p) + OFFSET_DLH, v)
+#define UART_PUT_GCTL(p, v)   bfin_write16(port_membase(p) + OFFSET_GCTL, v)
+#define UART_PUT_LCR(p, v)    bfin_write16(port_membase(p) + OFFSET_LCR, v)
+#define UART_PUT_MCR(p, v)    bfin_write16(port_membase(p) + OFFSET_MCR, v)
+
+#ifdef BFIN_UART_BF54X_STYLE
+
+#define UART_CLEAR_IER(p, v)  bfin_write16(port_membase(p) + OFFSET_IER_CLEAR, v)
+#define UART_GET_IER(p)       bfin_read16(port_membase(p) + OFFSET_IER_SET)
+#define UART_SET_IER(p, v)    bfin_write16(port_membase(p) + OFFSET_IER_SET, v)
+
+#define UART_CLEAR_DLAB(p)    /* MMRs not muxed on BF54x */
+#define UART_SET_DLAB(p)      /* MMRs not muxed on BF54x */
+
+#define UART_CLEAR_LSR(p)     bfin_write16(port_membase(p) + OFFSET_LSR, -1)
+#define UART_GET_LSR(p)       bfin_read16(port_membase(p) + OFFSET_LSR)
+#define UART_PUT_LSR(p, v)    bfin_write16(port_membase(p) + OFFSET_LSR, v)
+
+/* This handles hard CTS/RTS */
+#define BFIN_UART_CTSRTS_HARD
+#define UART_CLEAR_SCTS(p)      bfin_write16((port_membase(p) + OFFSET_MSR), SCTS)
+#define UART_GET_CTS(x)         (UART_GET_MSR(x) & CTS)
+#define UART_DISABLE_RTS(x)     UART_PUT_MCR(x, UART_GET_MCR(x) & ~(ARTS | MRTS))
+#define UART_ENABLE_RTS(x)      UART_PUT_MCR(x, UART_GET_MCR(x) | MRTS | ARTS)
+#define UART_ENABLE_INTS(x, v)  UART_SET_IER(x, v)
+#define UART_DISABLE_INTS(x)    UART_CLEAR_IER(x, 0xF)
+
+#else /* BF533 style */
+
+#define UART_CLEAR_IER(p, v)  UART_PUT_IER(p, UART_GET_IER(p) & ~(v))
+#define UART_GET_IER(p)       bfin_read16(port_membase(p) + OFFSET_IER)
+#define UART_PUT_IER(p, v)    bfin_write16(port_membase(p) + OFFSET_IER, v)
+#define UART_SET_IER(p, v)    UART_PUT_IER(p, UART_GET_IER(p) | (v))
+
+#define UART_CLEAR_DLAB(p)    do { UART_PUT_LCR(p, UART_GET_LCR(p) & ~DLAB); SSYNC(); } while (0)
+#define UART_SET_DLAB(p)      do { UART_PUT_LCR(p, UART_GET_LCR(p) | DLAB); SSYNC(); } while (0)
+
+#ifndef put_lsr_cache
+# define put_lsr_cache(p, v)  (((struct bfin_serial_port *)(p))->lsr = (v))
+#endif
+#ifndef get_lsr_cache
+# define get_lsr_cache(p)     (((struct bfin_serial_port *)(p))->lsr)
+#endif
+
+/* The hardware clears the LSR bits upon read, so we need to cache
+ * some of the more fun bits in software so they don't get lost
+ * when checking the LSR in other code paths (TX).
+ */
+static inline void UART_CLEAR_LSR(void *p)
+{
+	put_lsr_cache(p, 0);
+	bfin_write16(port_membase(p) + OFFSET_LSR, -1);
+}
+static inline unsigned int UART_GET_LSR(void *p)
+{
+	unsigned int lsr = bfin_read16(port_membase(p) + OFFSET_LSR);
+	put_lsr_cache(p, get_lsr_cache(p) | (lsr & (BI|FE|PE|OE)));
+	return lsr | get_lsr_cache(p);
+}
+static inline void UART_PUT_LSR(void *p, uint16_t val)
+{
+	put_lsr_cache(p, get_lsr_cache(p) & ~val);
+}
+
+/* This handles soft CTS/RTS */
+#define UART_GET_CTS(x)        gpio_get_value((x)->cts_pin)
+#define UART_DISABLE_RTS(x)    gpio_set_value((x)->rts_pin, 1)
+#define UART_ENABLE_RTS(x)     gpio_set_value((x)->rts_pin, 0)
+#define UART_ENABLE_INTS(x, v) UART_PUT_IER(x, v)
+#define UART_DISABLE_INTS(x)   UART_PUT_IER(x, 0)
+
+#endif
+
+#ifndef BFIN_UART_TX_FIFO_SIZE
+# define BFIN_UART_TX_FIFO_SIZE 2
+#endif
+
+#endif /* __BFIN_ASM_SERIAL_H__ */
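
As a usage sketch built only on the macros above (the helper name and divisor handling are illustrative, not from the patch), programming 8N1 framing and a baud divisor looks like this; on BF54x parts the DLAB macros collapse to no-ops because DLL/DLH are not muxed behind THR/RBR/IER:

    /* Hypothetical helper: set 8N1 and a baud divisor via the accessors
     * defined above.  Works with either register layout. */
    static void example_uart_set_8n1(struct bfin_serial_port *uart, u16 divisor)
    {
    	UART_SET_DLAB(uart);			/* expose the divisor latch */
    	UART_PUT_DLL(uart, divisor & 0xff);
    	UART_PUT_DLH(uart, divisor >> 8);
    	UART_CLEAR_DLAB(uart);			/* back to THR/RBR/IER */
    	UART_PUT_LCR(uart, WLS(8));		/* 8 data, no parity, 1 stop */
    }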
diff --git a/arch/blackfin/include/asm/bitops.h b/arch/blackfin/include/asm/bitops.h
index 3f7ef4d97791..29f4fd886174 100644
--- a/arch/blackfin/include/asm/bitops.h
+++ b/arch/blackfin/include/asm/bitops.h
@@ -108,7 +108,9 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
 #define smp_mb__before_clear_bit()	barrier()
 #define smp_mb__after_clear_bit()	barrier()
 
+#define test_bit __skip_test_bit
 #include <asm-generic/bitops/non-atomic.h>
+#undef test_bit
 
 #endif /* CONFIG_SMP */
 
diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
index bd0641a267f1..568885a2c286 100644
--- a/arch/blackfin/include/asm/cache.h
+++ b/arch/blackfin/include/asm/cache.h
@@ -7,6 +7,8 @@
 #ifndef __ARCH_BLACKFIN_CACHE_H
 #define __ARCH_BLACKFIN_CACHE_H
 
+#include <linux/linkage.h>	/* for asmlinkage */
+
 /*
  * Bytes per L1 cache line
  * Blackfin loads 32 bytes for cache
diff --git a/arch/blackfin/include/asm/cacheflush.h b/arch/blackfin/include/asm/cacheflush.h
index 2666ff8ea952..77135b62818e 100644
--- a/arch/blackfin/include/asm/cacheflush.h
+++ b/arch/blackfin/include/asm/cacheflush.h
@@ -11,6 +11,9 @@
 
 #include <asm/blackfin.h>	/* for SSYNC() */
 #include <asm/sections.h>	/* for _ramend */
+#ifdef CONFIG_SMP
+#include <asm/smp.h>
+#endif
 
 extern void blackfin_icache_flush_range(unsigned long start_address, unsigned long end_address);
 extern void blackfin_dcache_flush_range(unsigned long start_address, unsigned long end_address);
diff --git a/arch/blackfin/include/asm/dma.h b/arch/blackfin/include/asm/dma.h
index eedf3ca65ba2..d9dbc1a53534 100644
--- a/arch/blackfin/include/asm/dma.h
+++ b/arch/blackfin/include/asm/dma.h
@@ -14,40 +14,7 @@
 #include <asm/blackfin.h>
 #include <asm/page.h>
 #include <asm-generic/dma.h>
-
-/* DMA_CONFIG Masks */
-#define DMAEN		0x0001	/* DMA Channel Enable */
-#define WNR		0x0002	/* Channel Direction (W/R*) */
-#define WDSIZE_8	0x0000	/* Transfer Word Size = 8 */
-#define WDSIZE_16	0x0004	/* Transfer Word Size = 16 */
-#define WDSIZE_32	0x0008	/* Transfer Word Size = 32 */
-#define DMA2D		0x0010	/* DMA Mode (2D/1D*) */
-#define RESTART		0x0020	/* DMA Buffer Clear */
-#define DI_SEL		0x0040	/* Data Interrupt Timing Select */
-#define DI_EN		0x0080	/* Data Interrupt Enable */
-#define NDSIZE_0	0x0000	/* Next Descriptor Size = 0 (Stop/Autobuffer) */
-#define NDSIZE_1	0x0100	/* Next Descriptor Size = 1 */
-#define NDSIZE_2	0x0200	/* Next Descriptor Size = 2 */
-#define NDSIZE_3	0x0300	/* Next Descriptor Size = 3 */
-#define NDSIZE_4	0x0400	/* Next Descriptor Size = 4 */
-#define NDSIZE_5	0x0500	/* Next Descriptor Size = 5 */
-#define NDSIZE_6	0x0600	/* Next Descriptor Size = 6 */
-#define NDSIZE_7	0x0700	/* Next Descriptor Size = 7 */
-#define NDSIZE_8	0x0800	/* Next Descriptor Size = 8 */
-#define NDSIZE_9	0x0900	/* Next Descriptor Size = 9 */
-#define NDSIZE		0x0f00	/* Next Descriptor Size */
-#define DMAFLOW		0x7000	/* Flow Control */
-#define DMAFLOW_STOP	0x0000	/* Stop Mode */
-#define DMAFLOW_AUTO	0x1000	/* Autobuffer Mode */
-#define DMAFLOW_ARRAY	0x4000	/* Descriptor Array Mode */
-#define DMAFLOW_SMALL	0x6000	/* Small Model Descriptor List Mode */
-#define DMAFLOW_LARGE	0x7000	/* Large Model Descriptor List Mode */
-
-/* DMA_IRQ_STATUS Masks */
-#define DMA_DONE	0x0001	/* DMA Completion Interrupt Status */
-#define DMA_ERR		0x0002	/* DMA Error Interrupt Status */
-#define DFETCH		0x0004	/* DMA Descriptor Fetch Indicator */
-#define DMA_RUN		0x0008	/* DMA Channel Running Indicator */
+#include <asm/bfin_dma.h>
 
 /*-------------------------
  * config reg bits value
@@ -149,7 +116,7 @@ void blackfin_dma_resume(void);
 * DMA API's
 *******************************************************************************/
 extern struct dma_channel dma_ch[MAX_DMA_CHANNELS];
-extern struct dma_register *dma_io_base_addr[MAX_DMA_CHANNELS];
+extern struct dma_register * const dma_io_base_addr[MAX_DMA_CHANNELS];
 extern int channel2irq(unsigned int channel);
 
 static inline void set_dma_start_addr(unsigned int channel, unsigned long addr)
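
The added const makes the pointer table itself read-only while the registers it points at stay writable; a minimal stand-alone illustration (names invented for the example):

    /* Illustration only -- not from the patch. */
    struct reg { unsigned short config; };

    static struct reg r0, r1;
    static struct reg * const table[] = { &r0, &r1 };

    static void demo(void)
    {
    	table[0]->config = 1;	/* fine: the register itself is writable */
    	/* table[0] = &r1;	   error: the table entry is const       */
    }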
diff --git a/arch/blackfin/include/asm/dpmc.h b/arch/blackfin/include/asm/dpmc.h
index efcc3aebeae4..3047120cfcff 100644
--- a/arch/blackfin/include/asm/dpmc.h
+++ b/arch/blackfin/include/asm/dpmc.h
@@ -9,6 +9,8 @@
 #ifndef _BLACKFIN_DPMC_H_
 #define _BLACKFIN_DPMC_H_
 
+#include <mach/pll.h>
+
 /* PLL_CTL Masks */
 #define DF		0x0001	/* 0: PLL = CLKIN, 1: PLL = CLKIN/2 */
 #define PLL_OFF		0x0002	/* PLL Not Powered */
diff --git a/arch/blackfin/include/asm/io.h b/arch/blackfin/include/asm/io.h
index 234fbac17ec1..dccae26805b0 100644
--- a/arch/blackfin/include/asm/io.h
+++ b/arch/blackfin/include/asm/io.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2004-2009 Analog Devices Inc.
+ * Copyright 2004-2010 Analog Devices Inc.
  *
  * Licensed under the GPL-2 or later.
  */
@@ -7,148 +7,48 @@
 #ifndef _BFIN_IO_H
 #define _BFIN_IO_H
 
-#ifdef __KERNEL__
-
-#ifndef __ASSEMBLY__
-#include <linux/types.h>
-#endif
 #include <linux/compiler.h>
-
-/*
- * These are for ISA/PCI shared memory _only_ and should never be used
- * on any other type of memory, including Zorro memory. They are meant to
- * access the bus in the bus byte order which is little-endian!.
- *
- * readX/writeX() are used to access memory mapped devices. On some
- * architectures the memory mapped IO stuff needs to be accessed
- * differently. On the bfin architecture, we just read/write the
- * memory location directly.
- */
-#ifndef __ASSEMBLY__
-
-static inline unsigned char readb(const volatile void __iomem *addr)
-{
-	unsigned int val;
-	int tmp;
-
-	__asm__ __volatile__ (
-		"cli %1;"
-		"NOP; NOP; SSYNC;"
-		"%0 = b [%2] (z);"
-		"sti %1;"
-		: "=d"(val), "=d"(tmp)
-		: "a"(addr)
-	);
-
-	return (unsigned char) val;
-}
-
-static inline unsigned short readw(const volatile void __iomem *addr)
-{
-	unsigned int val;
-	int tmp;
-
-	__asm__ __volatile__ (
-		"cli %1;"
-		"NOP; NOP; SSYNC;"
-		"%0 = w [%2] (z);"
-		"sti %1;"
-		: "=d"(val), "=d"(tmp)
-		: "a"(addr)
-	);
-
-	return (unsigned short) val;
-}
-
-static inline unsigned int readl(const volatile void __iomem *addr)
-{
-	unsigned int val;
-	int tmp;
-
-	__asm__ __volatile__ (
-		"cli %1;"
-		"NOP; NOP; SSYNC;"
-		"%0 = [%2];"
-		"sti %1;"
-		: "=d"(val), "=d"(tmp)
-		: "a"(addr)
-	);
-
-	return val;
+#include <linux/types.h>
+#include <asm/byteorder.h>
+
+#define DECLARE_BFIN_RAW_READX(size, type, asm, asm_sign) \
+static inline type __raw_read##size(const volatile void __iomem *addr) \
+{ \
+	unsigned int val; \
+	int tmp; \
+	__asm__ __volatile__ ( \
+		"cli %1;" \
+		"NOP; NOP; SSYNC;" \
+		"%0 = "#asm" [%2] "#asm_sign";" \
+		"sti %1;" \
+		: "=d"(val), "=d"(tmp) \
+		: "a"(addr) \
+	); \
+	return (type) val; \
 }
-
-#endif /* __ASSEMBLY__ */
-
-#define writeb(b, addr) (void)((*(volatile unsigned char *) (addr)) = (b))
-#define writew(b, addr) (void)((*(volatile unsigned short *) (addr)) = (b))
-#define writel(b, addr) (void)((*(volatile unsigned int *) (addr)) = (b))
-
-#define __raw_readb readb
-#define __raw_readw readw
-#define __raw_readl readl
-#define __raw_writeb writeb
-#define __raw_writew writew
-#define __raw_writel writel
-#define memset_io(a, b, c)	memset((void *)(a), (b), (c))
-#define memcpy_fromio(a, b, c)	memcpy((a), (void *)(b), (c))
-#define memcpy_toio(a, b, c)	memcpy((void *)(a), (b), (c))
-
-/* Convert "I/O port addresses" to actual addresses.  i.e. ugly casts. */
-#define __io(port) ((void *)(unsigned long)(port))
-
-#define inb(port)	readb(__io(port))
-#define inw(port)	readw(__io(port))
-#define inl(port)	readl(__io(port))
-#define outb(x, port)	writeb(x, __io(port))
-#define outw(x, port)	writew(x, __io(port))
-#define outl(x, port)	writel(x, __io(port))
-
-#define inb_p(port)	inb(__io(port))
-#define inw_p(port)	inw(__io(port))
-#define inl_p(port)	inl(__io(port))
-#define outb_p(x, port)	outb(x, __io(port))
-#define outw_p(x, port)	outw(x, __io(port))
-#define outl_p(x, port)	outl(x, __io(port))
-
-#define ioread8_rep(a, d, c)	readsb(a, d, c)
-#define ioread16_rep(a, d, c)	readsw(a, d, c)
-#define ioread32_rep(a, d, c)	readsl(a, d, c)
-#define iowrite8_rep(a, s, c)	writesb(a, s, c)
-#define iowrite16_rep(a, s, c)	writesw(a, s, c)
-#define iowrite32_rep(a, s, c)	writesl(a, s, c)
-
-#define ioread8(x)		readb(x)
-#define ioread16(x)		readw(x)
-#define ioread32(x)		readl(x)
-#define iowrite8(val, x)	writeb(val, x)
-#define iowrite16(val, x)	writew(val, x)
-#define iowrite32(val, x)	writel(val, x)
-
-/**
- * I/O write barrier
- *
- * Ensure ordering of I/O space writes. This will make sure that writes
- * following the barrier will arrive after all previous writes.
- */
-#define mmiowb() do { SSYNC(); wmb(); } while (0)
-
-#define IO_SPACE_LIMIT 0xffffffff
-
-/* Values for nocacheflag and cmode */
-#define IOMAP_NOCACHE_SER 1
-
-#ifndef __ASSEMBLY__
+DECLARE_BFIN_RAW_READX(b, u8, b, (z))
+#define __raw_readb __raw_readb
+DECLARE_BFIN_RAW_READX(w, u16, w, (z))
+#define __raw_readw __raw_readw
+DECLARE_BFIN_RAW_READX(l, u32, , )
+#define __raw_readl __raw_readl
 
 extern void outsb(unsigned long port, const void *addr, unsigned long count);
 extern void outsw(unsigned long port, const void *addr, unsigned long count);
 extern void outsw_8(unsigned long port, const void *addr, unsigned long count);
 extern void outsl(unsigned long port, const void *addr, unsigned long count);
+#define outsb outsb
+#define outsw outsw
+#define outsl outsl
 
 extern void insb(unsigned long port, void *addr, unsigned long count);
 extern void insw(unsigned long port, void *addr, unsigned long count);
 extern void insw_8(unsigned long port, void *addr, unsigned long count);
 extern void insl(unsigned long port, void *addr, unsigned long count);
 extern void insl_16(unsigned long port, void *addr, unsigned long count);
+#define insb insb
+#define insw insw
+#define insl insl
 
 extern void dma_outsb(unsigned long port, const void *addr, unsigned short count);
 extern void dma_outsw(unsigned long port, const void *addr, unsigned short count);
@@ -158,108 +58,14 @@ extern void dma_insb(unsigned long port, void *addr, unsigned short count);
 extern void dma_insw(unsigned long port, void *addr, unsigned short count);
 extern void dma_insl(unsigned long port, void *addr, unsigned short count);
 
-static inline void readsl(const void __iomem *addr, void *buf, int len)
-{
-	insl((unsigned long)addr, buf, len);
-}
-
-static inline void readsw(const void __iomem *addr, void *buf, int len)
-{
-	insw((unsigned long)addr, buf, len);
-}
-
-static inline void readsb(const void __iomem *addr, void *buf, int len)
-{
-	insb((unsigned long)addr, buf, len);
-}
-
-static inline void writesl(const void __iomem *addr, const void *buf, int len)
-{
-	outsl((unsigned long)addr, buf, len);
-}
-
-static inline void writesw(const void __iomem *addr, const void *buf, int len)
-{
-	outsw((unsigned long)addr, buf, len);
-}
-
-static inline void writesb(const void __iomem *addr, const void *buf, int len)
-{
-	outsb((unsigned long)addr, buf, len);
-}
-
-/*
- * Map some physical address range into the kernel address space.
- */
-static inline void __iomem *__ioremap(unsigned long physaddr, unsigned long size,
-				int cacheflag)
-{
-	return (void __iomem *)physaddr;
-}
-
-/*
- * Unmap a ioremap()ed region again
- */
-static inline void iounmap(void *addr)
-{
-}
-
-/*
- * __iounmap unmaps nearly everything, so be careful
- * it doesn't free currently pointer/page tables anymore but it
- * wans't used anyway and might be added later.
- */
-static inline void __iounmap(void *addr, unsigned long size)
-{
-}
-
-/*
- * Set new cache mode for some kernel address space.
- * The caller must push data for that range itself, if such data may already
- * be in the cache.
+/**
+ * I/O write barrier
+ *
+ * Ensure ordering of I/O space writes. This will make sure that writes
+ * following the barrier will arrive after all previous writes.
  */
-static inline void kernel_set_cachemode(void *addr, unsigned long size,
-					int cmode)
-{
-}
-
-static inline void __iomem *ioremap(unsigned long physaddr, unsigned long size)
-{
-	return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
-}
-static inline void __iomem *ioremap_nocache(unsigned long physaddr,
-				unsigned long size)
-{
-	return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
-}
+#define mmiowb() do { SSYNC(); wmb(); } while (0)
 
-extern void blkfin_inv_cache_all(void);
+#include <asm-generic/io.h>
 
 #endif
-
-#define ioport_map(port, nr)	((void __iomem*)(port))
-#define ioport_unmap(addr)
-
-/* Pages to physical address... */
-#define page_to_bus(page)	((page - mem_map) << PAGE_SHIFT)
-
-#define phys_to_virt(vaddr)	((void *) (vaddr))
-#define virt_to_phys(vaddr)	((unsigned long) (vaddr))
-
-#define virt_to_bus virt_to_phys
-#define bus_to_virt phys_to_virt
-
-/*
- * Convert a physical pointer to a virtual kernel pointer for /dev/mem
- * access
- */
-#define xlate_dev_mem_ptr(p)	__va(p)
-
-/*
- * Convert a virtual cached pointer to an uncached pointer
- */
-#define xlate_dev_kmem_ptr(p)	p
-
-#endif	/* __KERNEL__ */
-
-#endif	/* _BFIN_IO_H */
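
The DECLARE_BFIN_RAW_READX() macro above stamps out the three __raw_read accessors. As an illustration (not part of the patch), DECLARE_BFIN_RAW_READX(w, u16, w, (z)) expands to:

    /* Illustrative expansion of DECLARE_BFIN_RAW_READX(w, u16, w, (z)).
     * Interrupts are masked around the access so the load cannot race an
     * MMR write issued from an interrupt handler. */
    static inline u16 __raw_readw(const volatile void __iomem *addr)
    {
    	unsigned int val;
    	int tmp;
    	__asm__ __volatile__ (
    		"cli %1;"		/* mask interrupts, old mask in tmp */
    		"NOP; NOP; SSYNC;"	/* drain pending system writes */
    		"%0 = w [%2] (z);"	/* 16-bit zero-extended load */
    		"sti %1;"		/* restore interrupt mask */
    		: "=d"(val), "=d"(tmp)
    		: "a"(addr)
    	);
    	return (u16) val;
    }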
diff --git a/arch/blackfin/include/asm/irqflags.h b/arch/blackfin/include/asm/irqflags.h
index 41c4d70544ef..3365cb97f539 100644
--- a/arch/blackfin/include/asm/irqflags.h
+++ b/arch/blackfin/include/asm/irqflags.h
@@ -13,9 +13,6 @@
 #ifdef CONFIG_SMP
 # include <asm/pda.h>
 # include <asm/processor.h>
-/* Forward decl needed due to cdef inter dependencies */
-static inline uint32_t __pure bfin_dspid(void);
-# define blackfin_core_id() (bfin_dspid() & 0xff)
 # define bfin_irq_flags cpu_pda[blackfin_core_id()].imask
 #else
 extern unsigned long bfin_irq_flags;
diff --git a/arch/blackfin/include/asm/processor.h b/arch/blackfin/include/asm/processor.h
index aea880274de7..8af7772e84cc 100644
--- a/arch/blackfin/include/asm/processor.h
+++ b/arch/blackfin/include/asm/processor.h
@@ -14,7 +14,7 @@
 #define current_text_addr() ({ __label__ _l; _l: &&_l;})
 
 #include <asm/ptrace.h>
-#include <asm/blackfin.h>
+#include <mach/blackfin.h>
 
 static inline unsigned long rdusp(void)
 {
@@ -134,6 +134,8 @@ static inline uint32_t __pure bfin_dspid(void)
 	return bfin_read_DSPID();
 }
 
+#define blackfin_core_id() (bfin_dspid() & 0xff)
+
 static inline uint32_t __pure bfin_compiled_revid(void)
 {
 #if defined(CONFIG_BF_REV_0_0)
diff --git a/arch/blackfin/include/asm/spinlock.h b/arch/blackfin/include/asm/spinlock.h
index 1942ccfedbe0..1f286e71c21f 100644
--- a/arch/blackfin/include/asm/spinlock.h
+++ b/arch/blackfin/include/asm/spinlock.h
@@ -17,12 +17,12 @@ asmlinkage int __raw_spin_is_locked_asm(volatile int *ptr);
 asmlinkage void __raw_spin_lock_asm(volatile int *ptr);
 asmlinkage int __raw_spin_trylock_asm(volatile int *ptr);
 asmlinkage void __raw_spin_unlock_asm(volatile int *ptr);
-asmlinkage void arch_read_lock_asm(volatile int *ptr);
-asmlinkage int arch_read_trylock_asm(volatile int *ptr);
-asmlinkage void arch_read_unlock_asm(volatile int *ptr);
-asmlinkage void arch_write_lock_asm(volatile int *ptr);
-asmlinkage int arch_write_trylock_asm(volatile int *ptr);
-asmlinkage void arch_write_unlock_asm(volatile int *ptr);
+asmlinkage void __raw_read_lock_asm(volatile int *ptr);
+asmlinkage int __raw_read_trylock_asm(volatile int *ptr);
+asmlinkage void __raw_read_unlock_asm(volatile int *ptr);
+asmlinkage void __raw_write_lock_asm(volatile int *ptr);
+asmlinkage int __raw_write_trylock_asm(volatile int *ptr);
+asmlinkage void __raw_write_unlock_asm(volatile int *ptr);
 
 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
@@ -64,32 +64,36 @@ static inline int arch_write_can_lock(arch_rwlock_t *rw)
 
 static inline void arch_read_lock(arch_rwlock_t *rw)
 {
-	arch_read_lock_asm(&rw->lock);
+	__raw_read_lock_asm(&rw->lock);
 }
 
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+
 static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
-	return arch_read_trylock_asm(&rw->lock);
+	return __raw_read_trylock_asm(&rw->lock);
 }
 
 static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
-	arch_read_unlock_asm(&rw->lock);
+	__raw_read_unlock_asm(&rw->lock);
 }
 
 static inline void arch_write_lock(arch_rwlock_t *rw)
 {
-	arch_write_lock_asm(&rw->lock);
+	__raw_write_lock_asm(&rw->lock);
 }
 
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
+
 static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
-	return arch_write_trylock_asm(&rw->lock);
+	return __raw_write_trylock_asm(&rw->lock);
 }
 
 static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
-	arch_write_unlock_asm(&rw->lock);
+	__raw_write_unlock_asm(&rw->lock);
 }
 
 #define arch_spin_relax(lock)	cpu_relax()
diff --git a/arch/blackfin/include/mach-common/pll.h b/arch/blackfin/include/mach-common/pll.h
new file mode 100644
index 000000000000..382178b361af
--- /dev/null
+++ b/arch/blackfin/include/mach-common/pll.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2005-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef _MACH_COMMON_PLL_H
+#define _MACH_COMMON_PLL_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/blackfin.h>
+#include <asm/irqflags.h>
+
+#ifndef bfin_iwr_restore
+static inline void
+bfin_iwr_restore(unsigned long iwr0, unsigned long iwr1, unsigned long iwr2)
+{
+#ifdef SIC_IWR
+	bfin_write_SIC_IWR(iwr0);
+#else
+	bfin_write_SIC_IWR0(iwr0);
+# ifdef SIC_IWR1
+	bfin_write_SIC_IWR1(iwr1);
+# endif
+# ifdef SIC_IWR2
+	bfin_write_SIC_IWR2(iwr2);
+# endif
+#endif
+}
+#endif
+
+#ifndef bfin_iwr_save
+static inline void
+bfin_iwr_save(unsigned long niwr0, unsigned long niwr1, unsigned long niwr2,
+              unsigned long *iwr0, unsigned long *iwr1, unsigned long *iwr2)
+{
+#ifdef SIC_IWR
+	*iwr0 = bfin_read_SIC_IWR();
+#else
+	*iwr0 = bfin_read_SIC_IWR0();
+# ifdef SIC_IWR1
+	*iwr1 = bfin_read_SIC_IWR1();
+# endif
+# ifdef SIC_IWR2
+	*iwr2 = bfin_read_SIC_IWR2();
+# endif
+#endif
+	bfin_iwr_restore(niwr0, niwr1, niwr2);
+}
+#endif
+
+static inline void _bfin_write_pll_relock(u32 addr, unsigned int val)
+{
+	unsigned long flags, iwr0, iwr1, iwr2;
+
+	if (val == bfin_read_PLL_CTL())
+		return;
+
+	flags = hard_local_irq_save();
+	/* Enable the PLL Wakeup bit in SIC IWR */
+	bfin_iwr_save(IWR_ENABLE(0), 0, 0, &iwr0, &iwr1, &iwr2);
+
+	bfin_write16(addr, val);
+	SSYNC();
+	asm("IDLE;");
+
+	bfin_iwr_restore(iwr0, iwr1, iwr2);
+	hard_local_irq_restore(flags);
+}
+
+/* Writing to PLL_CTL initiates a PLL relock sequence */
+static inline void bfin_write_PLL_CTL(unsigned int val)
+{
+	_bfin_write_pll_relock(PLL_CTL, val);
+}
+
+/* Writing to VR_CTL initiates a PLL relock sequence */
+static inline void bfin_write_VR_CTL(unsigned int val)
+{
+	_bfin_write_pll_relock(VR_CTL, val);
+}
+
+#endif
+
+#endif
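
Callers never poke PLL_CTL directly; the helper above hides the IDLE-based relock sequence. A hypothetical caller (the scenario is invented; the DF bit comes from dpmc.h, which now includes this header):

    /* Hypothetical caller -- halve the PLL input clock.  The write goes
     * through bfin_write_PLL_CTL() above, which performs the relock. */
    static void example_halve_pll_input(void)
    {
    	unsigned int ctl = bfin_read_PLL_CTL();
    	bfin_write_PLL_CTL(ctl | DF);	/* PLL input = CLKIN/2 */
    }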
diff --git a/arch/blackfin/include/mach-common/ports-a.h b/arch/blackfin/include/mach-common/ports-a.h
new file mode 100644
index 000000000000..9f78a761c40a
--- /dev/null
+++ b/arch/blackfin/include/mach-common/ports-a.h
@@ -0,0 +1,25 @@
+/*
+ * Port A Masks
+ */
+
+#ifndef __BFIN_PERIPHERAL_PORT_A__
+#define __BFIN_PERIPHERAL_PORT_A__
+
+#define PA0	(1 << 0)
+#define PA1	(1 << 1)
+#define PA2	(1 << 2)
+#define PA3	(1 << 3)
+#define PA4	(1 << 4)
+#define PA5	(1 << 5)
+#define PA6	(1 << 6)
+#define PA7	(1 << 7)
+#define PA8	(1 << 8)
+#define PA9	(1 << 9)
+#define PA10	(1 << 10)
+#define PA11	(1 << 11)
+#define PA12	(1 << 12)
+#define PA13	(1 << 13)
+#define PA14	(1 << 14)
+#define PA15	(1 << 15)
+
+#endif
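
These masks (and those of the sibling port-b through port-j headers below) are plain bit positions, so they compose with ordinary bitwise operators; a small illustration (the function is invented):

    /* Illustration only: test two port A pins in a sampled data word. */
    static int pins_3_and_7_high(unsigned short data)
    {
    	const unsigned short mask = PA3 | PA7;	/* 0x0008 | 0x0080 */
    	return (data & mask) == mask;
    }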
diff --git a/arch/blackfin/include/mach-common/ports-b.h b/arch/blackfin/include/mach-common/ports-b.h
new file mode 100644
index 000000000000..b81702f09ec6
--- /dev/null
+++ b/arch/blackfin/include/mach-common/ports-b.h
@@ -0,0 +1,25 @@
+/*
+ * Port B Masks
+ */
+
+#ifndef __BFIN_PERIPHERAL_PORT_B__
+#define __BFIN_PERIPHERAL_PORT_B__
+
+#define PB0	(1 << 0)
+#define PB1	(1 << 1)
+#define PB2	(1 << 2)
+#define PB3	(1 << 3)
+#define PB4	(1 << 4)
+#define PB5	(1 << 5)
+#define PB6	(1 << 6)
+#define PB7	(1 << 7)
+#define PB8	(1 << 8)
+#define PB9	(1 << 9)
+#define PB10	(1 << 10)
+#define PB11	(1 << 11)
+#define PB12	(1 << 12)
+#define PB13	(1 << 13)
+#define PB14	(1 << 14)
+#define PB15	(1 << 15)
+
+#endif
diff --git a/arch/blackfin/include/mach-common/ports-c.h b/arch/blackfin/include/mach-common/ports-c.h
new file mode 100644
index 000000000000..3cc665e0ba08
--- /dev/null
+++ b/arch/blackfin/include/mach-common/ports-c.h
@@ -0,0 +1,25 @@
+/*
+ * Port C Masks
+ */
+
+#ifndef __BFIN_PERIPHERAL_PORT_C__
+#define __BFIN_PERIPHERAL_PORT_C__
+
+#define PC0	(1 << 0)
+#define PC1	(1 << 1)
+#define PC2	(1 << 2)
+#define PC3	(1 << 3)
+#define PC4	(1 << 4)
+#define PC5	(1 << 5)
+#define PC6	(1 << 6)
+#define PC7	(1 << 7)
+#define PC8	(1 << 8)
+#define PC9	(1 << 9)
+#define PC10	(1 << 10)
+#define PC11	(1 << 11)
+#define PC12	(1 << 12)
+#define PC13	(1 << 13)
+#define PC14	(1 << 14)
+#define PC15	(1 << 15)
+
+#endif
diff --git a/arch/blackfin/include/mach-common/ports-d.h b/arch/blackfin/include/mach-common/ports-d.h
new file mode 100644
index 000000000000..868c6a01f1b2
--- /dev/null
+++ b/arch/blackfin/include/mach-common/ports-d.h
@@ -0,0 +1,25 @@
+/*
+ * Port D Masks
+ */
+
+#ifndef __BFIN_PERIPHERAL_PORT_D__
+#define __BFIN_PERIPHERAL_PORT_D__
+
+#define PD0	(1 << 0)
+#define PD1	(1 << 1)
+#define PD2	(1 << 2)
+#define PD3	(1 << 3)
+#define PD4	(1 << 4)
+#define PD5	(1 << 5)
+#define PD6	(1 << 6)
+#define PD7	(1 << 7)
+#define PD8	(1 << 8)
+#define PD9	(1 << 9)
+#define PD10	(1 << 10)
+#define PD11	(1 << 11)
+#define PD12	(1 << 12)
+#define PD13	(1 << 13)
+#define PD14	(1 << 14)
+#define PD15	(1 << 15)
+
+#endif
diff --git a/arch/blackfin/include/mach-common/ports-e.h b/arch/blackfin/include/mach-common/ports-e.h
new file mode 100644
index 000000000000..c88b0d0dd443
--- /dev/null
+++ b/arch/blackfin/include/mach-common/ports-e.h
@@ -0,0 +1,25 @@
+/*
+ * Port E Masks
+ */
+
+#ifndef __BFIN_PERIPHERAL_PORT_E__
+#define __BFIN_PERIPHERAL_PORT_E__
+
+#define PE0	(1 << 0)
+#define PE1	(1 << 1)
+#define PE2	(1 << 2)
+#define PE3	(1 << 3)
+#define PE4	(1 << 4)
+#define PE5	(1 << 5)
+#define PE6	(1 << 6)
+#define PE7	(1 << 7)
+#define PE8	(1 << 8)
+#define PE9	(1 << 9)
+#define PE10	(1 << 10)
+#define PE11	(1 << 11)
+#define PE12	(1 << 12)
+#define PE13	(1 << 13)
+#define PE14	(1 << 14)
+#define PE15	(1 << 15)
+
+#endif
diff --git a/arch/blackfin/include/mach-common/ports-f.h b/arch/blackfin/include/mach-common/ports-f.h
new file mode 100644
index 000000000000..d6af20633278
--- /dev/null
+++ b/arch/blackfin/include/mach-common/ports-f.h
@@ -0,0 +1,25 @@
+/*
+ * Port F Masks
+ */
+
+#ifndef __BFIN_PERIPHERAL_PORT_F__
+#define __BFIN_PERIPHERAL_PORT_F__
+
+#define PF0	(1 << 0)
+#define PF1	(1 << 1)
+#define PF2	(1 << 2)
+#define PF3	(1 << 3)
+#define PF4	(1 << 4)
+#define PF5	(1 << 5)
+#define PF6	(1 << 6)
+#define PF7	(1 << 7)
+#define PF8	(1 << 8)
+#define PF9	(1 << 9)
+#define PF10	(1 << 10)
+#define PF11	(1 << 11)
+#define PF12	(1 << 12)
+#define PF13	(1 << 13)
+#define PF14	(1 << 14)
+#define PF15	(1 << 15)
+
+#endif
diff --git a/arch/blackfin/include/mach-common/ports-g.h b/arch/blackfin/include/mach-common/ports-g.h
new file mode 100644
index 000000000000..09355d333c0e
--- /dev/null
+++ b/arch/blackfin/include/mach-common/ports-g.h
@@ -0,0 +1,25 @@
+/*
+ * Port G Masks
+ */
+
+#ifndef __BFIN_PERIPHERAL_PORT_G__
+#define __BFIN_PERIPHERAL_PORT_G__
+
+#define PG0	(1 << 0)
+#define PG1	(1 << 1)
+#define PG2	(1 << 2)
+#define PG3	(1 << 3)
+#define PG4	(1 << 4)
+#define PG5	(1 << 5)
+#define PG6	(1 << 6)
+#define PG7	(1 << 7)
+#define PG8	(1 << 8)
+#define PG9	(1 << 9)
+#define PG10	(1 << 10)
+#define PG11	(1 << 11)
+#define PG12	(1 << 12)
+#define PG13	(1 << 13)
+#define PG14	(1 << 14)
+#define PG15	(1 << 15)
+
+#endif
diff --git a/arch/blackfin/include/mach-common/ports-h.h b/arch/blackfin/include/mach-common/ports-h.h
new file mode 100644
index 000000000000..fa3910c6fbd4
--- /dev/null
+++ b/arch/blackfin/include/mach-common/ports-h.h
@@ -0,0 +1,25 @@
+/*
+ * Port H Masks
+ */
+
+#ifndef __BFIN_PERIPHERAL_PORT_H__
+#define __BFIN_PERIPHERAL_PORT_H__
+
+#define PH0	(1 << 0)
+#define PH1	(1 << 1)
+#define PH2	(1 << 2)
+#define PH3	(1 << 3)
+#define PH4	(1 << 4)
+#define PH5	(1 << 5)
+#define PH6	(1 << 6)
+#define PH7	(1 << 7)
+#define PH8	(1 << 8)
+#define PH9	(1 << 9)
+#define PH10	(1 << 10)
+#define PH11	(1 << 11)
+#define PH12	(1 << 12)
+#define PH13	(1 << 13)
+#define PH14	(1 << 14)
+#define PH15	(1 << 15)
+
+#endif
diff --git a/arch/blackfin/include/mach-common/ports-i.h b/arch/blackfin/include/mach-common/ports-i.h
new file mode 100644
index 000000000000..f176f08af624
--- /dev/null
+++ b/arch/blackfin/include/mach-common/ports-i.h
@@ -0,0 +1,25 @@
+/*
+ * Port I Masks
+ */
+
+#ifndef __BFIN_PERIPHERAL_PORT_I__
+#define __BFIN_PERIPHERAL_PORT_I__
+
+#define PI0	(1 << 0)
+#define PI1	(1 << 1)
+#define PI2	(1 << 2)
+#define PI3	(1 << 3)
+#define PI4	(1 << 4)
+#define PI5	(1 << 5)
+#define PI6	(1 << 6)
+#define PI7	(1 << 7)
+#define PI8	(1 << 8)
+#define PI9	(1 << 9)
+#define PI10	(1 << 10)
+#define PI11	(1 << 11)
+#define PI12	(1 << 12)
+#define PI13	(1 << 13)
+#define PI14	(1 << 14)
+#define PI15	(1 << 15)
+
+#endif
diff --git a/arch/blackfin/include/mach-common/ports-j.h b/arch/blackfin/include/mach-common/ports-j.h
new file mode 100644
index 000000000000..924123ecec5a
--- /dev/null
+++ b/arch/blackfin/include/mach-common/ports-j.h
@@ -0,0 +1,25 @@
+/*
+ * Port J Masks
+ */
+
+#ifndef __BFIN_PERIPHERAL_PORT_J__
+#define __BFIN_PERIPHERAL_PORT_J__
+
+#define PJ0	(1 << 0)
+#define PJ1	(1 << 1)
+#define PJ2	(1 << 2)
+#define PJ3	(1 << 3)
+#define PJ4	(1 << 4)
+#define PJ5	(1 << 5)
+#define PJ6	(1 << 6)
+#define PJ7	(1 << 7)
+#define PJ8	(1 << 8)
+#define PJ9	(1 << 9)
+#define PJ10	(1 << 10)
+#define PJ11	(1 << 11)
+#define PJ12	(1 << 12)
+#define PJ13	(1 << 13)
+#define PJ14	(1 << 14)
+#define PJ15	(1 << 15)
+
+#endif
diff --git a/arch/blackfin/kernel/cplb-nompu/cplbinit.c b/arch/blackfin/kernel/cplb-nompu/cplbinit.c
index bfe75af4e8bd..886e00014d75 100644
--- a/arch/blackfin/kernel/cplb-nompu/cplbinit.c
+++ b/arch/blackfin/kernel/cplb-nompu/cplbinit.c
@@ -116,7 +116,7 @@ void __init generate_cplb_tables_all(void)
 	    ((_ramend - uncached_end) >= 1 * 1024 * 1024))
 		dcplb_bounds[i_d].eaddr = uncached_end;
 	else
-		dcplb_bounds[i_d].eaddr = uncached_end & ~(1 * 1024 * 1024);
+		dcplb_bounds[i_d].eaddr = uncached_end & ~(1 * 1024 * 1024 - 1);
 	dcplb_bounds[i_d++].data = SDRAM_DGENERIC;
 	/* DMA uncached region. */
 	if (DMA_UNCACHED_REGION) {
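
The one-character fix above matters: masking with ~SZ clears a single bit, while ~(SZ - 1) rounds down to an SZ boundary. A quick illustration with an invented address:

    /* Illustration of the alignment fix (the address is invented). */
    #define SZ (1 * 1024 * 1024)			/* 0x00100000 */

    unsigned long end   = 0x03f81234;
    unsigned long wrong = end & ~SZ;		/* 0x03e81234: only clears bit 20 */
    unsigned long right = end & ~(SZ - 1);	/* 0x03f00000: 1 MiB aligned */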
diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
index edae461b1c54..eb92592fd80c 100644
--- a/arch/blackfin/kernel/kgdb.c
+++ b/arch/blackfin/kernel/kgdb.c
@@ -345,6 +345,23 @@ void kgdb_roundup_cpu(int cpu, unsigned long flags)
 }
 #endif
 
+#ifdef CONFIG_IPIPE
+static unsigned long kgdb_arch_imask;
+#endif
+
+void kgdb_post_primary_code(struct pt_regs *regs, int e_vector, int err_code)
+{
+	if (kgdb_single_step)
+		preempt_enable();
+
+#ifdef CONFIG_IPIPE
+	if (kgdb_arch_imask) {
+		cpu_pda[raw_smp_processor_id()].ex_imask = kgdb_arch_imask;
+		kgdb_arch_imask = 0;
+	}
+#endif
+}
+
 int kgdb_arch_handle_exception(int vector, int signo,
 			       int err_code, char *remcom_in_buffer,
 			       char *remcom_out_buffer,
@@ -388,6 +405,12 @@ int kgdb_arch_handle_exception(int vector, int signo,
 		 * kgdb_single_step > 0 means in single step mode
 		 */
 		kgdb_single_step = i + 1;
+
+		preempt_disable();
+#ifdef CONFIG_IPIPE
+		kgdb_arch_imask = cpu_pda[raw_smp_processor_id()].ex_imask;
+		cpu_pda[raw_smp_processor_id()].ex_imask = 0;
+#endif
 	}
 
 	bfin_correct_hw_break();
@@ -448,6 +471,9 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
 int kgdb_arch_init(void)
 {
 	kgdb_single_step = 0;
+#ifdef CONFIG_IPIPE
+	kgdb_arch_imask = 0;
+#endif
 
 	bfin_remove_all_hw_break();
 	return 0;
diff --git a/arch/blackfin/kernel/kgdb_test.c b/arch/blackfin/kernel/kgdb_test.c
index 08c0236acf3c..2a6e9dbb62a5 100644
--- a/arch/blackfin/kernel/kgdb_test.c
+++ b/arch/blackfin/kernel/kgdb_test.c
@@ -95,6 +95,10 @@ static int __init kgdbtest_init(void)
 {
 	struct proc_dir_entry *entry;
 
+#if L2_LENGTH
+	num2 = 0;
+#endif
+
 	entry = proc_create("kgdbtest", 0, NULL, &kgdb_test_proc_fops);
 	if (entry == NULL)
 		return -ENOMEM;
diff --git a/arch/blackfin/mach-bf518/boards/ezbrd.c b/arch/blackfin/mach-bf518/boards/ezbrd.c
index b894c8abe7ec..c0ccadcfa44e 100644
--- a/arch/blackfin/mach-bf518/boards/ezbrd.c
+++ b/arch/blackfin/mach-bf518/boards/ezbrd.c
@@ -104,24 +104,23 @@ static const unsigned short bfin_mac_peripherals[] = {
 
 static struct bfin_phydev_platform_data bfin_phydev_data[] = {
 	{
-		.addr = 1,
-		.irq = IRQ_MAC_PHYINT,
-	},
-	{
-		.addr = 2,
-		.irq = IRQ_MAC_PHYINT,
-	},
-	{
+#if defined(CONFIG_NET_DSA_KSZ8893M) || defined(CONFIG_NET_DSA_KSZ8893M_MODULE)
 		.addr = 3,
+#else
+		.addr = 1,
+#endif
 		.irq = IRQ_MAC_PHYINT,
 	},
 };
 
 static struct bfin_mii_bus_platform_data bfin_mii_bus_data = {
-	.phydev_number = 3,
+	.phydev_number = 1,
 	.phydev_data = bfin_phydev_data,
 	.phy_mode = PHY_INTERFACE_MODE_MII,
 	.mac_peripherals = bfin_mac_peripherals,
+#if defined(CONFIG_NET_DSA_KSZ8893M) || defined(CONFIG_NET_DSA_KSZ8893M_MODULE)
+	.phy_mask = 0xfff7, /* Only probe the port phy connect to the on chip MAC */
+#endif
 };
 
 static struct platform_device bfin_mii_bus = {
@@ -453,7 +452,7 @@ static struct resource bfin_uart0_resources[] = {
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
@@ -496,7 +495,7 @@ static struct resource bfin_uart1_resources[] = {
 	},
 };
 
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
 	P_UART1_TX, P_UART1_RX, 0
 };
 
@@ -636,9 +635,9 @@ static struct resource bfin_sport0_uart_resources[] = {
 	},
 };
 
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
 	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
-	P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
 };
 
 static struct platform_device bfin_sport0_uart_device = {
@@ -670,9 +669,9 @@ static struct resource bfin_sport1_uart_resources[] = {
670 }, 669 },
671}; 670};
672 671
673unsigned short bfin_sport1_peripherals[] = { 672static unsigned short bfin_sport1_peripherals[] = {
674 P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, 673 P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
675 P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0 674 P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
676}; 675};
677 676
678static struct platform_device bfin_sport1_uart_device = { 677static struct platform_device bfin_sport1_uart_device = {
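
A note on the phy_mask added above: in the Linux mdio bus layer, phy_mask is a bitmap of PHY addresses to skip during probing, a set bit suppressing that address. 0xfff7 has every bit set except bit 3, so when the KSZ8893M DSA switch is enabled only PHY address 3 (the switch port wired to the on-chip MAC) is scanned, matching the .addr = 3 entry. A quick standalone check of the arithmetic:

#include <stdio.h>

/* Standalone check: which addresses survive a phy_mask of 0xfff7?
 * Mirrors the mdio-bus rule that a set bit means "do not probe".
 */
int main(void)
{
	unsigned int phy_mask = 0xfff7;
	int addr;

	for (addr = 0; addr < 16; addr++)
		if (!(phy_mask & (1u << addr)))
			printf("probing PHY address %d\n", addr);
	return 0;	/* prints only "probing PHY address 3" */
}
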
diff --git a/arch/blackfin/mach-bf518/boards/tcm-bf518.c b/arch/blackfin/mach-bf518/boards/tcm-bf518.c
index e6ce1d7c523a..50fc5c89e379 100644
--- a/arch/blackfin/mach-bf518/boards/tcm-bf518.c
+++ b/arch/blackfin/mach-bf518/boards/tcm-bf518.c
@@ -377,7 +377,7 @@ static struct resource bfin_uart0_resources[] = {
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
@@ -420,7 +420,7 @@ static struct resource bfin_uart1_resources[] = {
 	},
 };
 
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
 	P_UART1_TX, P_UART1_RX, 0
 };
 
@@ -547,9 +547,9 @@ static struct resource bfin_sport0_uart_resources[] = {
 	},
 };
 
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
 	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
-	P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
 };
 
 static struct platform_device bfin_sport0_uart_device = {
@@ -581,9 +581,9 @@ static struct resource bfin_sport1_uart_resources[] = {
 	},
 };
 
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
 	P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
-	P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+	P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
 };
 
 static struct platform_device bfin_sport1_uart_device = {
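
The peripheral arrays in this file (and in ezbrd.c above) are zero-terminated pin lists consumed by the Blackfin portmux layer; dropping P_SPORT0_DRSEC/P_SPORT0_DTSEC stops the board from claiming secondary-channel pins that the SPORT-as-UART mode never uses. A sketch of how such a list is typically claimed, assuming the peripheral_request_list() helper from <asm/portmux.h>:

/* Sketch, assuming <asm/portmux.h>: the trailing 0 terminates the
 * list, and peripheral_request_list() claims every pin before it.
 */
static unsigned short sport0_uart_pins[] = {
	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0	/* terminator */
};

static int claim_sport0_pins(void)
{
	return peripheral_request_list(sport0_uart_pins, "bfin-sport-uart");
}
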
diff --git a/arch/blackfin/mach-bf518/dma.c b/arch/blackfin/mach-bf518/dma.c
index 78b43605a0b5..bcd1fbc8c543 100644
--- a/arch/blackfin/mach-bf518/dma.c
+++ b/arch/blackfin/mach-bf518/dma.c
@@ -11,7 +11,7 @@
 #include <asm/blackfin.h>
 #include <asm/dma.h>
 
-struct dma_register *dma_io_base_addr[MAX_DMA_CHANNELS] = {
+struct dma_register * const dma_io_base_addr[MAX_DMA_CHANNELS] = {
 	(struct dma_register *) DMA0_NEXT_DESC_PTR,
 	(struct dma_register *) DMA1_NEXT_DESC_PTR,
 	(struct dma_register *) DMA2_NEXT_DESC_PTR,
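
The dma.c change hinges on where const sits: written after the *, it makes the array elements constant pointers, so the channel table itself can be placed in read-only memory while the DMA registers behind each pointer remain writable. In brief:

/* Illustrative declarations only; no real register addresses. */
struct dma_register;

struct dma_register *p;			/* pointer may be reassigned         */
const struct dma_register *q;		/* pointee is read-only instead      */
struct dma_register * const r = (void *)0;	/* pointer fixed, pointee writable */
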
diff --git a/arch/blackfin/mach-bf518/include/mach/bfin_serial.h b/arch/blackfin/mach-bf518/include/mach/bfin_serial.h
new file mode 100644
index 000000000000..00c603fe8218
--- /dev/null
+++ b/arch/blackfin/mach-bf518/include/mach/bfin_serial.h
@@ -0,0 +1,14 @@
+/*
+ * mach/bfin_serial.h - Blackfin UART/Serial definitions
+ *
+ * Copyright 2006-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __BFIN_MACH_SERIAL_H__
+#define __BFIN_MACH_SERIAL_H__
+
+#define BFIN_UART_NR_PORTS 2
+
+#endif
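
This new header exists so the shared serial code can pick up per-machine constants without dragging in the whole board header; BFIN_UART_NR_PORTS then sizes the common driver's port table. A plausible consumer, sketched (the real driver's layout may differ):

#include <mach/bfin_serial.h>	/* BFIN_UART_NR_PORTS */

struct bfin_serial_port;	/* defined by the common serial code */

/* one slot per on-chip UART; a sketch, not the driver's actual table */
static struct bfin_serial_port *bfin_serial_ports[BFIN_UART_NR_PORTS];
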
diff --git a/arch/blackfin/mach-bf518/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf518/include/mach/bfin_serial_5xx.h
index 970d310021e7..f6d924ac0c44 100644
--- a/arch/blackfin/mach-bf518/include/mach/bfin_serial_5xx.h
+++ b/arch/blackfin/mach-bf518/include/mach/bfin_serial_5xx.h
@@ -4,36 +4,9 @@
  * Licensed under the GPL-2 or later
  */
 
-#include <linux/serial.h>
 #include <asm/dma.h>
 #include <asm/portmux.h>
 
-#define UART_GET_CHAR(uart) bfin_read16(((uart)->port.membase + OFFSET_RBR))
-#define UART_GET_DLL(uart) bfin_read16(((uart)->port.membase + OFFSET_DLL))
-#define UART_GET_IER(uart) bfin_read16(((uart)->port.membase + OFFSET_IER))
-#define UART_GET_DLH(uart) bfin_read16(((uart)->port.membase + OFFSET_DLH))
-#define UART_GET_IIR(uart) bfin_read16(((uart)->port.membase + OFFSET_IIR))
-#define UART_GET_LCR(uart) bfin_read16(((uart)->port.membase + OFFSET_LCR))
-#define UART_GET_GCTL(uart) bfin_read16(((uart)->port.membase + OFFSET_GCTL))
-
-#define UART_PUT_CHAR(uart, v) bfin_write16(((uart)->port.membase + OFFSET_THR), v)
-#define UART_PUT_DLL(uart, v) bfin_write16(((uart)->port.membase + OFFSET_DLL), v)
-#define UART_PUT_IER(uart, v) bfin_write16(((uart)->port.membase + OFFSET_IER), v)
-#define UART_SET_IER(uart, v) UART_PUT_IER(uart, UART_GET_IER(uart) | (v))
-#define UART_CLEAR_IER(uart, v) UART_PUT_IER(uart, UART_GET_IER(uart) & ~(v))
-#define UART_PUT_DLH(uart, v) bfin_write16(((uart)->port.membase + OFFSET_DLH), v)
-#define UART_PUT_LCR(uart, v) bfin_write16(((uart)->port.membase + OFFSET_LCR), v)
-#define UART_PUT_GCTL(uart, v) bfin_write16(((uart)->port.membase + OFFSET_GCTL), v)
-
-#define UART_SET_DLAB(uart) do { UART_PUT_LCR(uart, UART_GET_LCR(uart) | DLAB); SSYNC(); } while (0)
-#define UART_CLEAR_DLAB(uart) do { UART_PUT_LCR(uart, UART_GET_LCR(uart) & ~DLAB); SSYNC(); } while (0)
-
-#define UART_GET_CTS(x) gpio_get_value(x->cts_pin)
-#define UART_DISABLE_RTS(x) gpio_set_value(x->rts_pin, 1)
-#define UART_ENABLE_RTS(x) gpio_set_value(x->rts_pin, 0)
-#define UART_ENABLE_INTS(x, v) UART_PUT_IER(x, v)
-#define UART_DISABLE_INTS(x) UART_PUT_IER(x, 0)
-
 #if defined(CONFIG_BFIN_UART0_CTSRTS) || defined(CONFIG_BFIN_UART1_CTSRTS)
 # define CONFIG_SERIAL_BFIN_CTSRTS
 
@@ -54,50 +27,6 @@
 # endif
 #endif
 
-#define BFIN_UART_TX_FIFO_SIZE 2
-
-/*
- * The pin configuration is different from schematic
- */
-struct bfin_serial_port {
-	struct uart_port port;
-	unsigned int old_status;
-	int status_irq;
-	unsigned int lsr;
-#ifdef CONFIG_SERIAL_BFIN_DMA
-	int tx_done;
-	int tx_count;
-	struct circ_buf rx_dma_buf;
-	struct timer_list rx_dma_timer;
-	int rx_dma_nrows;
-	unsigned int tx_dma_channel;
-	unsigned int rx_dma_channel;
-	struct work_struct tx_dma_workqueue;
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
-	struct timer_list cts_timer;
-	int cts_pin;
-	int rts_pin;
-#endif
-};
-
-/* The hardware clears the LSR bits upon read, so we need to cache
- * some of the more fun bits in software so they don't get lost
- * when checking the LSR in other code paths (TX).
- */
-static inline unsigned int UART_GET_LSR(struct bfin_serial_port *uart)
-{
-	unsigned int lsr = bfin_read16(uart->port.membase + OFFSET_LSR);
-	uart->lsr |= (lsr & (BI|FE|PE|OE));
-	return lsr | uart->lsr;
-}
-
-static inline void UART_CLEAR_LSR(struct bfin_serial_port *uart)
-{
-	uart->lsr = 0;
-	bfin_write16(uart->port.membase + OFFSET_LSR, -1);
-}
-
 struct bfin_serial_res {
 	unsigned long uart_base_addr;
 	int uart_irq;
@@ -146,3 +75,5 @@ struct bfin_serial_res bfin_serial_resource[] = {
 };
 
 #define DRIVER_NAME "bfin-uart"
+
+#include <asm/bfin_serial.h>
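
The UART_GET_LSR()/UART_CLEAR_LSR() helpers removed here exist because the Blackfin LSR is read-to-clear: error bits vanish on the first read, so the driver caches them until they have been acted on. The pattern, reduced to its core (read_hw_lsr() is a placeholder for the real 16-bit MMR read):

/* Sticky-status caching for a read-to-clear register.  read_hw_lsr()
 * and the BI/FE/PE/OE masks are placeholders for this sketch.
 */
static unsigned int cached_lsr;

static unsigned int uart_get_lsr(void)
{
	unsigned int lsr = read_hw_lsr();	/* hardware clears on read */

	cached_lsr |= lsr & (BI | FE | PE | OE);	/* remember error bits */
	return lsr | cached_lsr;
}

static void uart_clear_lsr(void)
{
	cached_lsr = 0;		/* errors handled; forget them */
}
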
diff --git a/arch/blackfin/mach-bf518/include/mach/blackfin.h b/arch/blackfin/mach-bf518/include/mach/blackfin.h
index 9053462be4b1..a8828863226e 100644
--- a/arch/blackfin/mach-bf518/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf518/include/mach/blackfin.h
@@ -1,61 +1,43 @@
 /*
- * Copyright 2008-2009 Analog Devices Inc.
+ * Copyright 2008-2010 Analog Devices Inc.
  *
- * Licensed under the GPL-2 or later
+ * Licensed under the GPL-2 or later.
  */
 
 #ifndef _MACH_BLACKFIN_H_
 #define _MACH_BLACKFIN_H_
 
 #include "bf518.h"
-#include "defBF512.h"
 #include "anomaly.h"
 
-#if defined(CONFIG_BF518)
-#include "defBF518.h"
+#include <asm/def_LPBlackfin.h>
+#ifdef CONFIG_BF512
+# include "defBF512.h"
 #endif
-
-#if defined(CONFIG_BF516)
-#include "defBF516.h"
+#ifdef CONFIG_BF514
+# include "defBF514.h"
 #endif
-
-#if defined(CONFIG_BF514)
-#include "defBF514.h"
+#ifdef CONFIG_BF516
+# include "defBF516.h"
 #endif
-
-#if defined(CONFIG_BF512)
-#include "defBF512.h"
+#ifdef CONFIG_BF518
+# include "defBF518.h"
 #endif
 
-#if !defined(__ASSEMBLY__)
-#include "cdefBF512.h"
-
-#if defined(CONFIG_BF518)
-#include "cdefBF518.h"
-#endif
-
-#if defined(CONFIG_BF516)
-#include "cdefBF516.h"
+#ifndef __ASSEMBLY__
+# include <asm/cdef_LPBlackfin.h>
+# ifdef CONFIG_BF512
+#  include "cdefBF512.h"
+# endif
+# ifdef CONFIG_BF514
+#  include "cdefBF514.h"
+# endif
+# ifdef CONFIG_BF516
+#  include "cdefBF516.h"
+# endif
+# ifdef CONFIG_BF518
+#  include "cdefBF518.h"
+# endif
 #endif
 
-#if defined(CONFIG_BF514)
-#include "cdefBF514.h"
-#endif
-#endif
-
-#define BFIN_UART_NR_PORTS 2
-
-#define OFFSET_THR 0x00 /* Transmit Holding register */
-#define OFFSET_RBR 0x00 /* Receive Buffer register */
-#define OFFSET_DLL 0x00 /* Divisor Latch (Low-Byte) */
-#define OFFSET_IER 0x04 /* Interrupt Enable Register */
-#define OFFSET_DLH 0x04 /* Divisor Latch (High-Byte) */
-#define OFFSET_IIR 0x08 /* Interrupt Identification Register */
-#define OFFSET_LCR 0x0C /* Line Control Register */
-#define OFFSET_MCR 0x10 /* Modem Control Register */
-#define OFFSET_LSR 0x14 /* Line Status Register */
-#define OFFSET_MSR 0x18 /* Modem Status Register */
-#define OFFSET_SCR 0x1C /* SCR Scratch Register */
-#define OFFSET_GCTL 0x24 /* Global Control Register */
-
 #endif
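
In the cdefBF512.h diff below, banked accessors such as bfin_read_SIC_IMASK(x) compute the target address as SIC_IMASK0 + (x << 6): each SIC bank is 0x40 bytes wide, which is why the bank-1 registers (0xFFC0014C onward) sit exactly 0x40 above their bank-0 counterparts. The addressing, as a sketch:

/* Sketch of banked MMR addressing: bank x lives x * 0x40 bytes above
 * bank 0, so '<< 6' selects the bank.
 */
static unsigned long sic_bank_reg(unsigned long bank0_addr, int bank)
{
	return bank0_addr + ((unsigned long)bank << 6);
}
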
diff --git a/arch/blackfin/mach-bf518/include/mach/cdefBF512.h b/arch/blackfin/mach-bf518/include/mach/cdefBF512.h
index 493020d0a65a..b657d37a3402 100644
--- a/arch/blackfin/mach-bf518/include/mach/cdefBF512.h
+++ b/arch/blackfin/mach-bf518/include/mach/cdefBF512.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008-2009 Analog Devices Inc.
+ * Copyright 2008-2010 Analog Devices Inc.
  *
  * Licensed under the ADI BSD license or the GPL-2 (or later)
  */
@@ -7,15 +7,1037 @@
 #ifndef _CDEF_BF512_H
 #define _CDEF_BF512_H
 
-/* include all Core registers and bit definitions */
-#include "defBF512.h"
+/* Clock and System Control (0xFFC00000 - 0xFFC000FF) */
+#define bfin_read_PLL_CTL() bfin_read16(PLL_CTL)
+#define bfin_read_PLL_DIV() bfin_read16(PLL_DIV)
+#define bfin_write_PLL_DIV(val) bfin_write16(PLL_DIV, val)
+#define bfin_read_VR_CTL() bfin_read16(VR_CTL)
+#define bfin_read_PLL_STAT() bfin_read16(PLL_STAT)
+#define bfin_write_PLL_STAT(val) bfin_write16(PLL_STAT, val)
+#define bfin_read_PLL_LOCKCNT() bfin_read16(PLL_LOCKCNT)
+#define bfin_write_PLL_LOCKCNT(val) bfin_write16(PLL_LOCKCNT, val)
+#define bfin_read_CHIPID() bfin_read32(CHIPID)
+#define bfin_write_CHIPID(val) bfin_write32(CHIPID, val)
 
-/* include core specific register pointer definitions */
-#include <asm/cdef_LPBlackfin.h>
 
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF512 */
+/* System Interrupt Controller (0xFFC00100 - 0xFFC001FF) */
+#define bfin_read_SWRST() bfin_read16(SWRST)
+#define bfin_write_SWRST(val) bfin_write16(SWRST, val)
+#define bfin_read_SYSCR() bfin_read16(SYSCR)
+#define bfin_write_SYSCR(val) bfin_write16(SYSCR, val)
 
-/* include cdefBF51x_base.h for the set of #defines that are common to all ADSP-BF51x processors */
-#include "cdefBF51x_base.h"
+#define bfin_read_SIC_RVECT() bfin_read32(SIC_RVECT)
+#define bfin_write_SIC_RVECT(val) bfin_write32(SIC_RVECT, val)
31#define bfin_read_SIC_IMASK0() bfin_read32(SIC_IMASK0)
32#define bfin_write_SIC_IMASK0(val) bfin_write32(SIC_IMASK0, val)
33#define bfin_read_SIC_IMASK(x) bfin_read32(SIC_IMASK0 + (x << 6))
34#define bfin_write_SIC_IMASK(x, val) bfin_write32((SIC_IMASK0 + (x << 6)), val)
35
36#define bfin_read_SIC_IAR0() bfin_read32(SIC_IAR0)
37#define bfin_write_SIC_IAR0(val) bfin_write32(SIC_IAR0, val)
38#define bfin_read_SIC_IAR1() bfin_read32(SIC_IAR1)
39#define bfin_write_SIC_IAR1(val) bfin_write32(SIC_IAR1, val)
40#define bfin_read_SIC_IAR2() bfin_read32(SIC_IAR2)
41#define bfin_write_SIC_IAR2(val) bfin_write32(SIC_IAR2, val)
42#define bfin_read_SIC_IAR3() bfin_read32(SIC_IAR3)
43#define bfin_write_SIC_IAR3(val) bfin_write32(SIC_IAR3, val)
44
45#define bfin_read_SIC_ISR0() bfin_read32(SIC_ISR0)
46#define bfin_write_SIC_ISR0(val) bfin_write32(SIC_ISR0, val)
47#define bfin_read_SIC_ISR(x) bfin_read32(SIC_ISR0 + (x << 6))
48#define bfin_write_SIC_ISR(x, val) bfin_write32((SIC_ISR0 + (x << 6)), val)
49
50#define bfin_read_SIC_IWR0() bfin_read32(SIC_IWR0)
51#define bfin_write_SIC_IWR0(val) bfin_write32(SIC_IWR0, val)
52#define bfin_read_SIC_IWR(x) bfin_read32(SIC_IWR0 + (x << 6))
53#define bfin_write_SIC_IWR(x, val) bfin_write32((SIC_IWR0 + (x << 6)), val)
54
55/* SIC Additions to ADSP-BF51x (0xFFC0014C - 0xFFC00162) */
56
57#define bfin_read_SIC_IMASK1() bfin_read32(SIC_IMASK1)
58#define bfin_write_SIC_IMASK1(val) bfin_write32(SIC_IMASK1, val)
59#define bfin_read_SIC_IAR4() bfin_read32(SIC_IAR4)
60#define bfin_write_SIC_IAR4(val) bfin_write32(SIC_IAR4, val)
61#define bfin_read_SIC_IAR5() bfin_read32(SIC_IAR5)
62#define bfin_write_SIC_IAR5(val) bfin_write32(SIC_IAR5, val)
63#define bfin_read_SIC_IAR6() bfin_read32(SIC_IAR6)
64#define bfin_write_SIC_IAR6(val) bfin_write32(SIC_IAR6, val)
65#define bfin_read_SIC_IAR7() bfin_read32(SIC_IAR7)
66#define bfin_write_SIC_IAR7(val) bfin_write32(SIC_IAR7, val)
67#define bfin_read_SIC_ISR1() bfin_read32(SIC_ISR1)
68#define bfin_write_SIC_ISR1(val) bfin_write32(SIC_ISR1, val)
69#define bfin_read_SIC_IWR1() bfin_read32(SIC_IWR1)
70#define bfin_write_SIC_IWR1(val) bfin_write32(SIC_IWR1, val)
71
72/* Watchdog Timer (0xFFC00200 - 0xFFC002FF) */
73#define bfin_read_WDOG_CTL() bfin_read16(WDOG_CTL)
74#define bfin_write_WDOG_CTL(val) bfin_write16(WDOG_CTL, val)
75#define bfin_read_WDOG_CNT() bfin_read32(WDOG_CNT)
76#define bfin_write_WDOG_CNT(val) bfin_write32(WDOG_CNT, val)
77#define bfin_read_WDOG_STAT() bfin_read32(WDOG_STAT)
78#define bfin_write_WDOG_STAT(val) bfin_write32(WDOG_STAT, val)
79
80
81/* Real Time Clock (0xFFC00300 - 0xFFC003FF) */
82#define bfin_read_RTC_STAT() bfin_read32(RTC_STAT)
83#define bfin_write_RTC_STAT(val) bfin_write32(RTC_STAT, val)
84#define bfin_read_RTC_ICTL() bfin_read16(RTC_ICTL)
85#define bfin_write_RTC_ICTL(val) bfin_write16(RTC_ICTL, val)
86#define bfin_read_RTC_ISTAT() bfin_read16(RTC_ISTAT)
87#define bfin_write_RTC_ISTAT(val) bfin_write16(RTC_ISTAT, val)
88#define bfin_read_RTC_SWCNT() bfin_read16(RTC_SWCNT)
89#define bfin_write_RTC_SWCNT(val) bfin_write16(RTC_SWCNT, val)
90#define bfin_read_RTC_ALARM() bfin_read32(RTC_ALARM)
91#define bfin_write_RTC_ALARM(val) bfin_write32(RTC_ALARM, val)
92#define bfin_read_RTC_FAST() bfin_read16(RTC_FAST)
93#define bfin_write_RTC_FAST(val) bfin_write16(RTC_FAST, val)
94#define bfin_read_RTC_PREN() bfin_read16(RTC_PREN)
95#define bfin_write_RTC_PREN(val) bfin_write16(RTC_PREN, val)
96
97
98/* UART0 Controller (0xFFC00400 - 0xFFC004FF) */
99#define bfin_read_UART0_THR() bfin_read16(UART0_THR)
100#define bfin_write_UART0_THR(val) bfin_write16(UART0_THR, val)
101#define bfin_read_UART0_RBR() bfin_read16(UART0_RBR)
102#define bfin_write_UART0_RBR(val) bfin_write16(UART0_RBR, val)
103#define bfin_read_UART0_DLL() bfin_read16(UART0_DLL)
104#define bfin_write_UART0_DLL(val) bfin_write16(UART0_DLL, val)
105#define bfin_read_UART0_IER() bfin_read16(UART0_IER)
106#define bfin_write_UART0_IER(val) bfin_write16(UART0_IER, val)
107#define bfin_read_UART0_DLH() bfin_read16(UART0_DLH)
108#define bfin_write_UART0_DLH(val) bfin_write16(UART0_DLH, val)
109#define bfin_read_UART0_IIR() bfin_read16(UART0_IIR)
110#define bfin_write_UART0_IIR(val) bfin_write16(UART0_IIR, val)
111#define bfin_read_UART0_LCR() bfin_read16(UART0_LCR)
112#define bfin_write_UART0_LCR(val) bfin_write16(UART0_LCR, val)
113#define bfin_read_UART0_MCR() bfin_read16(UART0_MCR)
114#define bfin_write_UART0_MCR(val) bfin_write16(UART0_MCR, val)
115#define bfin_read_UART0_LSR() bfin_read16(UART0_LSR)
116#define bfin_write_UART0_LSR(val) bfin_write16(UART0_LSR, val)
117#define bfin_read_UART0_MSR() bfin_read16(UART0_MSR)
118#define bfin_write_UART0_MSR(val) bfin_write16(UART0_MSR, val)
119#define bfin_read_UART0_SCR() bfin_read16(UART0_SCR)
120#define bfin_write_UART0_SCR(val) bfin_write16(UART0_SCR, val)
121#define bfin_read_UART0_GCTL() bfin_read16(UART0_GCTL)
122#define bfin_write_UART0_GCTL(val) bfin_write16(UART0_GCTL, val)
123
124
125/* TIMER0-7 Registers (0xFFC00600 - 0xFFC006FF) */
126#define bfin_read_TIMER0_CONFIG() bfin_read16(TIMER0_CONFIG)
127#define bfin_write_TIMER0_CONFIG(val) bfin_write16(TIMER0_CONFIG, val)
128#define bfin_read_TIMER0_COUNTER() bfin_read32(TIMER0_COUNTER)
129#define bfin_write_TIMER0_COUNTER(val) bfin_write32(TIMER0_COUNTER, val)
130#define bfin_read_TIMER0_PERIOD() bfin_read32(TIMER0_PERIOD)
131#define bfin_write_TIMER0_PERIOD(val) bfin_write32(TIMER0_PERIOD, val)
132#define bfin_read_TIMER0_WIDTH() bfin_read32(TIMER0_WIDTH)
133#define bfin_write_TIMER0_WIDTH(val) bfin_write32(TIMER0_WIDTH, val)
134
135#define bfin_read_TIMER1_CONFIG() bfin_read16(TIMER1_CONFIG)
136#define bfin_write_TIMER1_CONFIG(val) bfin_write16(TIMER1_CONFIG, val)
137#define bfin_read_TIMER1_COUNTER() bfin_read32(TIMER1_COUNTER)
138#define bfin_write_TIMER1_COUNTER(val) bfin_write32(TIMER1_COUNTER, val)
139#define bfin_read_TIMER1_PERIOD() bfin_read32(TIMER1_PERIOD)
140#define bfin_write_TIMER1_PERIOD(val) bfin_write32(TIMER1_PERIOD, val)
141#define bfin_read_TIMER1_WIDTH() bfin_read32(TIMER1_WIDTH)
142#define bfin_write_TIMER1_WIDTH(val) bfin_write32(TIMER1_WIDTH, val)
143
144#define bfin_read_TIMER2_CONFIG() bfin_read16(TIMER2_CONFIG)
145#define bfin_write_TIMER2_CONFIG(val) bfin_write16(TIMER2_CONFIG, val)
146#define bfin_read_TIMER2_COUNTER() bfin_read32(TIMER2_COUNTER)
147#define bfin_write_TIMER2_COUNTER(val) bfin_write32(TIMER2_COUNTER, val)
148#define bfin_read_TIMER2_PERIOD() bfin_read32(TIMER2_PERIOD)
149#define bfin_write_TIMER2_PERIOD(val) bfin_write32(TIMER2_PERIOD, val)
150#define bfin_read_TIMER2_WIDTH() bfin_read32(TIMER2_WIDTH)
151#define bfin_write_TIMER2_WIDTH(val) bfin_write32(TIMER2_WIDTH, val)
152
153#define bfin_read_TIMER3_CONFIG() bfin_read16(TIMER3_CONFIG)
154#define bfin_write_TIMER3_CONFIG(val) bfin_write16(TIMER3_CONFIG, val)
155#define bfin_read_TIMER3_COUNTER() bfin_read32(TIMER3_COUNTER)
156#define bfin_write_TIMER3_COUNTER(val) bfin_write32(TIMER3_COUNTER, val)
157#define bfin_read_TIMER3_PERIOD() bfin_read32(TIMER3_PERIOD)
158#define bfin_write_TIMER3_PERIOD(val) bfin_write32(TIMER3_PERIOD, val)
159#define bfin_read_TIMER3_WIDTH() bfin_read32(TIMER3_WIDTH)
160#define bfin_write_TIMER3_WIDTH(val) bfin_write32(TIMER3_WIDTH, val)
161
162#define bfin_read_TIMER4_CONFIG() bfin_read16(TIMER4_CONFIG)
163#define bfin_write_TIMER4_CONFIG(val) bfin_write16(TIMER4_CONFIG, val)
164#define bfin_read_TIMER4_COUNTER() bfin_read32(TIMER4_COUNTER)
165#define bfin_write_TIMER4_COUNTER(val) bfin_write32(TIMER4_COUNTER, val)
166#define bfin_read_TIMER4_PERIOD() bfin_read32(TIMER4_PERIOD)
167#define bfin_write_TIMER4_PERIOD(val) bfin_write32(TIMER4_PERIOD, val)
168#define bfin_read_TIMER4_WIDTH() bfin_read32(TIMER4_WIDTH)
169#define bfin_write_TIMER4_WIDTH(val) bfin_write32(TIMER4_WIDTH, val)
170
171#define bfin_read_TIMER5_CONFIG() bfin_read16(TIMER5_CONFIG)
172#define bfin_write_TIMER5_CONFIG(val) bfin_write16(TIMER5_CONFIG, val)
173#define bfin_read_TIMER5_COUNTER() bfin_read32(TIMER5_COUNTER)
174#define bfin_write_TIMER5_COUNTER(val) bfin_write32(TIMER5_COUNTER, val)
175#define bfin_read_TIMER5_PERIOD() bfin_read32(TIMER5_PERIOD)
176#define bfin_write_TIMER5_PERIOD(val) bfin_write32(TIMER5_PERIOD, val)
177#define bfin_read_TIMER5_WIDTH() bfin_read32(TIMER5_WIDTH)
178#define bfin_write_TIMER5_WIDTH(val) bfin_write32(TIMER5_WIDTH, val)
179
180#define bfin_read_TIMER6_CONFIG() bfin_read16(TIMER6_CONFIG)
181#define bfin_write_TIMER6_CONFIG(val) bfin_write16(TIMER6_CONFIG, val)
182#define bfin_read_TIMER6_COUNTER() bfin_read32(TIMER6_COUNTER)
183#define bfin_write_TIMER6_COUNTER(val) bfin_write32(TIMER6_COUNTER, val)
184#define bfin_read_TIMER6_PERIOD() bfin_read32(TIMER6_PERIOD)
185#define bfin_write_TIMER6_PERIOD(val) bfin_write32(TIMER6_PERIOD, val)
186#define bfin_read_TIMER6_WIDTH() bfin_read32(TIMER6_WIDTH)
187#define bfin_write_TIMER6_WIDTH(val) bfin_write32(TIMER6_WIDTH, val)
188
189#define bfin_read_TIMER7_CONFIG() bfin_read16(TIMER7_CONFIG)
190#define bfin_write_TIMER7_CONFIG(val) bfin_write16(TIMER7_CONFIG, val)
191#define bfin_read_TIMER7_COUNTER() bfin_read32(TIMER7_COUNTER)
192#define bfin_write_TIMER7_COUNTER(val) bfin_write32(TIMER7_COUNTER, val)
193#define bfin_read_TIMER7_PERIOD() bfin_read32(TIMER7_PERIOD)
194#define bfin_write_TIMER7_PERIOD(val) bfin_write32(TIMER7_PERIOD, val)
195#define bfin_read_TIMER7_WIDTH() bfin_read32(TIMER7_WIDTH)
196#define bfin_write_TIMER7_WIDTH(val) bfin_write32(TIMER7_WIDTH, val)
197
198#define bfin_read_TIMER_ENABLE() bfin_read16(TIMER_ENABLE)
199#define bfin_write_TIMER_ENABLE(val) bfin_write16(TIMER_ENABLE, val)
200#define bfin_read_TIMER_DISABLE() bfin_read16(TIMER_DISABLE)
201#define bfin_write_TIMER_DISABLE(val) bfin_write16(TIMER_DISABLE, val)
202#define bfin_read_TIMER_STATUS() bfin_read32(TIMER_STATUS)
203#define bfin_write_TIMER_STATUS(val) bfin_write32(TIMER_STATUS, val)
204
205
206/* General Purpose I/O Port F (0xFFC00700 - 0xFFC007FF) */
207#define bfin_read_PORTFIO() bfin_read16(PORTFIO)
208#define bfin_write_PORTFIO(val) bfin_write16(PORTFIO, val)
209#define bfin_read_PORTFIO_CLEAR() bfin_read16(PORTFIO_CLEAR)
210#define bfin_write_PORTFIO_CLEAR(val) bfin_write16(PORTFIO_CLEAR, val)
211#define bfin_read_PORTFIO_SET() bfin_read16(PORTFIO_SET)
212#define bfin_write_PORTFIO_SET(val) bfin_write16(PORTFIO_SET, val)
213#define bfin_read_PORTFIO_TOGGLE() bfin_read16(PORTFIO_TOGGLE)
214#define bfin_write_PORTFIO_TOGGLE(val) bfin_write16(PORTFIO_TOGGLE, val)
215#define bfin_read_PORTFIO_MASKA() bfin_read16(PORTFIO_MASKA)
216#define bfin_write_PORTFIO_MASKA(val) bfin_write16(PORTFIO_MASKA, val)
217#define bfin_read_PORTFIO_MASKA_CLEAR() bfin_read16(PORTFIO_MASKA_CLEAR)
218#define bfin_write_PORTFIO_MASKA_CLEAR(val) bfin_write16(PORTFIO_MASKA_CLEAR, val)
219#define bfin_read_PORTFIO_MASKA_SET() bfin_read16(PORTFIO_MASKA_SET)
220#define bfin_write_PORTFIO_MASKA_SET(val) bfin_write16(PORTFIO_MASKA_SET, val)
221#define bfin_read_PORTFIO_MASKA_TOGGLE() bfin_read16(PORTFIO_MASKA_TOGGLE)
222#define bfin_write_PORTFIO_MASKA_TOGGLE(val) bfin_write16(PORTFIO_MASKA_TOGGLE, val)
223#define bfin_read_PORTFIO_MASKB() bfin_read16(PORTFIO_MASKB)
224#define bfin_write_PORTFIO_MASKB(val) bfin_write16(PORTFIO_MASKB, val)
225#define bfin_read_PORTFIO_MASKB_CLEAR() bfin_read16(PORTFIO_MASKB_CLEAR)
226#define bfin_write_PORTFIO_MASKB_CLEAR(val) bfin_write16(PORTFIO_MASKB_CLEAR, val)
227#define bfin_read_PORTFIO_MASKB_SET() bfin_read16(PORTFIO_MASKB_SET)
228#define bfin_write_PORTFIO_MASKB_SET(val) bfin_write16(PORTFIO_MASKB_SET, val)
229#define bfin_read_PORTFIO_MASKB_TOGGLE() bfin_read16(PORTFIO_MASKB_TOGGLE)
230#define bfin_write_PORTFIO_MASKB_TOGGLE(val) bfin_write16(PORTFIO_MASKB_TOGGLE, val)
231#define bfin_read_PORTFIO_DIR() bfin_read16(PORTFIO_DIR)
232#define bfin_write_PORTFIO_DIR(val) bfin_write16(PORTFIO_DIR, val)
233#define bfin_read_PORTFIO_POLAR() bfin_read16(PORTFIO_POLAR)
234#define bfin_write_PORTFIO_POLAR(val) bfin_write16(PORTFIO_POLAR, val)
235#define bfin_read_PORTFIO_EDGE() bfin_read16(PORTFIO_EDGE)
236#define bfin_write_PORTFIO_EDGE(val) bfin_write16(PORTFIO_EDGE, val)
237#define bfin_read_PORTFIO_BOTH() bfin_read16(PORTFIO_BOTH)
238#define bfin_write_PORTFIO_BOTH(val) bfin_write16(PORTFIO_BOTH, val)
239#define bfin_read_PORTFIO_INEN() bfin_read16(PORTFIO_INEN)
240#define bfin_write_PORTFIO_INEN(val) bfin_write16(PORTFIO_INEN, val)
241
242
243/* SPORT0 Controller (0xFFC00800 - 0xFFC008FF) */
244#define bfin_read_SPORT0_TCR1() bfin_read16(SPORT0_TCR1)
245#define bfin_write_SPORT0_TCR1(val) bfin_write16(SPORT0_TCR1, val)
246#define bfin_read_SPORT0_TCR2() bfin_read16(SPORT0_TCR2)
247#define bfin_write_SPORT0_TCR2(val) bfin_write16(SPORT0_TCR2, val)
248#define bfin_read_SPORT0_TCLKDIV() bfin_read16(SPORT0_TCLKDIV)
249#define bfin_write_SPORT0_TCLKDIV(val) bfin_write16(SPORT0_TCLKDIV, val)
250#define bfin_read_SPORT0_TFSDIV() bfin_read16(SPORT0_TFSDIV)
251#define bfin_write_SPORT0_TFSDIV(val) bfin_write16(SPORT0_TFSDIV, val)
252#define bfin_read_SPORT0_TX() bfin_read32(SPORT0_TX)
253#define bfin_write_SPORT0_TX(val) bfin_write32(SPORT0_TX, val)
254#define bfin_read_SPORT0_RX() bfin_read32(SPORT0_RX)
255#define bfin_write_SPORT0_RX(val) bfin_write32(SPORT0_RX, val)
256#define bfin_read_SPORT0_TX32() bfin_read32(SPORT0_TX)
257#define bfin_write_SPORT0_TX32(val) bfin_write32(SPORT0_TX, val)
258#define bfin_read_SPORT0_RX32() bfin_read32(SPORT0_RX)
259#define bfin_write_SPORT0_RX32(val) bfin_write32(SPORT0_RX, val)
260#define bfin_read_SPORT0_TX16() bfin_read16(SPORT0_TX)
261#define bfin_write_SPORT0_TX16(val) bfin_write16(SPORT0_TX, val)
262#define bfin_read_SPORT0_RX16() bfin_read16(SPORT0_RX)
263#define bfin_write_SPORT0_RX16(val) bfin_write16(SPORT0_RX, val)
264#define bfin_read_SPORT0_RCR1() bfin_read16(SPORT0_RCR1)
265#define bfin_write_SPORT0_RCR1(val) bfin_write16(SPORT0_RCR1, val)
266#define bfin_read_SPORT0_RCR2() bfin_read16(SPORT0_RCR2)
267#define bfin_write_SPORT0_RCR2(val) bfin_write16(SPORT0_RCR2, val)
268#define bfin_read_SPORT0_RCLKDIV() bfin_read16(SPORT0_RCLKDIV)
269#define bfin_write_SPORT0_RCLKDIV(val) bfin_write16(SPORT0_RCLKDIV, val)
270#define bfin_read_SPORT0_RFSDIV() bfin_read16(SPORT0_RFSDIV)
271#define bfin_write_SPORT0_RFSDIV(val) bfin_write16(SPORT0_RFSDIV, val)
272#define bfin_read_SPORT0_STAT() bfin_read16(SPORT0_STAT)
273#define bfin_write_SPORT0_STAT(val) bfin_write16(SPORT0_STAT, val)
274#define bfin_read_SPORT0_CHNL() bfin_read16(SPORT0_CHNL)
275#define bfin_write_SPORT0_CHNL(val) bfin_write16(SPORT0_CHNL, val)
276#define bfin_read_SPORT0_MCMC1() bfin_read16(SPORT0_MCMC1)
277#define bfin_write_SPORT0_MCMC1(val) bfin_write16(SPORT0_MCMC1, val)
278#define bfin_read_SPORT0_MCMC2() bfin_read16(SPORT0_MCMC2)
279#define bfin_write_SPORT0_MCMC2(val) bfin_write16(SPORT0_MCMC2, val)
280#define bfin_read_SPORT0_MTCS0() bfin_read32(SPORT0_MTCS0)
281#define bfin_write_SPORT0_MTCS0(val) bfin_write32(SPORT0_MTCS0, val)
282#define bfin_read_SPORT0_MTCS1() bfin_read32(SPORT0_MTCS1)
283#define bfin_write_SPORT0_MTCS1(val) bfin_write32(SPORT0_MTCS1, val)
284#define bfin_read_SPORT0_MTCS2() bfin_read32(SPORT0_MTCS2)
285#define bfin_write_SPORT0_MTCS2(val) bfin_write32(SPORT0_MTCS2, val)
286#define bfin_read_SPORT0_MTCS3() bfin_read32(SPORT0_MTCS3)
287#define bfin_write_SPORT0_MTCS3(val) bfin_write32(SPORT0_MTCS3, val)
288#define bfin_read_SPORT0_MRCS0() bfin_read32(SPORT0_MRCS0)
289#define bfin_write_SPORT0_MRCS0(val) bfin_write32(SPORT0_MRCS0, val)
290#define bfin_read_SPORT0_MRCS1() bfin_read32(SPORT0_MRCS1)
291#define bfin_write_SPORT0_MRCS1(val) bfin_write32(SPORT0_MRCS1, val)
292#define bfin_read_SPORT0_MRCS2() bfin_read32(SPORT0_MRCS2)
293#define bfin_write_SPORT0_MRCS2(val) bfin_write32(SPORT0_MRCS2, val)
294#define bfin_read_SPORT0_MRCS3() bfin_read32(SPORT0_MRCS3)
295#define bfin_write_SPORT0_MRCS3(val) bfin_write32(SPORT0_MRCS3, val)
296
297
298/* SPORT1 Controller (0xFFC00900 - 0xFFC009FF) */
299#define bfin_read_SPORT1_TCR1() bfin_read16(SPORT1_TCR1)
300#define bfin_write_SPORT1_TCR1(val) bfin_write16(SPORT1_TCR1, val)
301#define bfin_read_SPORT1_TCR2() bfin_read16(SPORT1_TCR2)
302#define bfin_write_SPORT1_TCR2(val) bfin_write16(SPORT1_TCR2, val)
303#define bfin_read_SPORT1_TCLKDIV() bfin_read16(SPORT1_TCLKDIV)
304#define bfin_write_SPORT1_TCLKDIV(val) bfin_write16(SPORT1_TCLKDIV, val)
305#define bfin_read_SPORT1_TFSDIV() bfin_read16(SPORT1_TFSDIV)
306#define bfin_write_SPORT1_TFSDIV(val) bfin_write16(SPORT1_TFSDIV, val)
307#define bfin_read_SPORT1_TX() bfin_read32(SPORT1_TX)
308#define bfin_write_SPORT1_TX(val) bfin_write32(SPORT1_TX, val)
309#define bfin_read_SPORT1_RX() bfin_read32(SPORT1_RX)
310#define bfin_write_SPORT1_RX(val) bfin_write32(SPORT1_RX, val)
311#define bfin_read_SPORT1_TX32() bfin_read32(SPORT1_TX)
312#define bfin_write_SPORT1_TX32(val) bfin_write32(SPORT1_TX, val)
313#define bfin_read_SPORT1_RX32() bfin_read32(SPORT1_RX)
314#define bfin_write_SPORT1_RX32(val) bfin_write32(SPORT1_RX, val)
315#define bfin_read_SPORT1_TX16() bfin_read16(SPORT1_TX)
316#define bfin_write_SPORT1_TX16(val) bfin_write16(SPORT1_TX, val)
317#define bfin_read_SPORT1_RX16() bfin_read16(SPORT1_RX)
318#define bfin_write_SPORT1_RX16(val) bfin_write16(SPORT1_RX, val)
319#define bfin_read_SPORT1_RCR1() bfin_read16(SPORT1_RCR1)
320#define bfin_write_SPORT1_RCR1(val) bfin_write16(SPORT1_RCR1, val)
321#define bfin_read_SPORT1_RCR2() bfin_read16(SPORT1_RCR2)
322#define bfin_write_SPORT1_RCR2(val) bfin_write16(SPORT1_RCR2, val)
323#define bfin_read_SPORT1_RCLKDIV() bfin_read16(SPORT1_RCLKDIV)
324#define bfin_write_SPORT1_RCLKDIV(val) bfin_write16(SPORT1_RCLKDIV, val)
325#define bfin_read_SPORT1_RFSDIV() bfin_read16(SPORT1_RFSDIV)
326#define bfin_write_SPORT1_RFSDIV(val) bfin_write16(SPORT1_RFSDIV, val)
327#define bfin_read_SPORT1_STAT() bfin_read16(SPORT1_STAT)
328#define bfin_write_SPORT1_STAT(val) bfin_write16(SPORT1_STAT, val)
329#define bfin_read_SPORT1_CHNL() bfin_read16(SPORT1_CHNL)
330#define bfin_write_SPORT1_CHNL(val) bfin_write16(SPORT1_CHNL, val)
331#define bfin_read_SPORT1_MCMC1() bfin_read16(SPORT1_MCMC1)
332#define bfin_write_SPORT1_MCMC1(val) bfin_write16(SPORT1_MCMC1, val)
333#define bfin_read_SPORT1_MCMC2() bfin_read16(SPORT1_MCMC2)
334#define bfin_write_SPORT1_MCMC2(val) bfin_write16(SPORT1_MCMC2, val)
335#define bfin_read_SPORT1_MTCS0() bfin_read32(SPORT1_MTCS0)
336#define bfin_write_SPORT1_MTCS0(val) bfin_write32(SPORT1_MTCS0, val)
337#define bfin_read_SPORT1_MTCS1() bfin_read32(SPORT1_MTCS1)
338#define bfin_write_SPORT1_MTCS1(val) bfin_write32(SPORT1_MTCS1, val)
339#define bfin_read_SPORT1_MTCS2() bfin_read32(SPORT1_MTCS2)
340#define bfin_write_SPORT1_MTCS2(val) bfin_write32(SPORT1_MTCS2, val)
341#define bfin_read_SPORT1_MTCS3() bfin_read32(SPORT1_MTCS3)
342#define bfin_write_SPORT1_MTCS3(val) bfin_write32(SPORT1_MTCS3, val)
343#define bfin_read_SPORT1_MRCS0() bfin_read32(SPORT1_MRCS0)
344#define bfin_write_SPORT1_MRCS0(val) bfin_write32(SPORT1_MRCS0, val)
345#define bfin_read_SPORT1_MRCS1() bfin_read32(SPORT1_MRCS1)
346#define bfin_write_SPORT1_MRCS1(val) bfin_write32(SPORT1_MRCS1, val)
347#define bfin_read_SPORT1_MRCS2() bfin_read32(SPORT1_MRCS2)
348#define bfin_write_SPORT1_MRCS2(val) bfin_write32(SPORT1_MRCS2, val)
349#define bfin_read_SPORT1_MRCS3() bfin_read32(SPORT1_MRCS3)
350#define bfin_write_SPORT1_MRCS3(val) bfin_write32(SPORT1_MRCS3, val)
351
352
353/* External Bus Interface Unit (0xFFC00A00 - 0xFFC00AFF) */
354#define bfin_read_EBIU_AMGCTL() bfin_read16(EBIU_AMGCTL)
355#define bfin_write_EBIU_AMGCTL(val) bfin_write16(EBIU_AMGCTL, val)
356#define bfin_read_EBIU_AMBCTL0() bfin_read32(EBIU_AMBCTL0)
357#define bfin_write_EBIU_AMBCTL0(val) bfin_write32(EBIU_AMBCTL0, val)
358#define bfin_read_EBIU_AMBCTL1() bfin_read32(EBIU_AMBCTL1)
359#define bfin_write_EBIU_AMBCTL1(val) bfin_write32(EBIU_AMBCTL1, val)
360#define bfin_read_EBIU_SDGCTL() bfin_read32(EBIU_SDGCTL)
361#define bfin_write_EBIU_SDGCTL(val) bfin_write32(EBIU_SDGCTL, val)
362#define bfin_read_EBIU_SDBCTL() bfin_read16(EBIU_SDBCTL)
363#define bfin_write_EBIU_SDBCTL(val) bfin_write16(EBIU_SDBCTL, val)
364#define bfin_read_EBIU_SDRRC() bfin_read16(EBIU_SDRRC)
365#define bfin_write_EBIU_SDRRC(val) bfin_write16(EBIU_SDRRC, val)
366#define bfin_read_EBIU_SDSTAT() bfin_read16(EBIU_SDSTAT)
367#define bfin_write_EBIU_SDSTAT(val) bfin_write16(EBIU_SDSTAT, val)
368
369
370/* DMA Traffic Control Registers */
371#define bfin_read_DMAC_TC_PER() bfin_read16(DMAC_TC_PER)
372#define bfin_write_DMAC_TC_PER(val) bfin_write16(DMAC_TC_PER, val)
373#define bfin_read_DMAC_TC_CNT() bfin_read16(DMAC_TC_CNT)
374#define bfin_write_DMAC_TC_CNT(val) bfin_write16(DMAC_TC_CNT, val)
375
376/* DMA Controller */
377#define bfin_read_DMA0_CONFIG() bfin_read16(DMA0_CONFIG)
378#define bfin_write_DMA0_CONFIG(val) bfin_write16(DMA0_CONFIG, val)
379#define bfin_read_DMA0_NEXT_DESC_PTR() bfin_read32(DMA0_NEXT_DESC_PTR)
380#define bfin_write_DMA0_NEXT_DESC_PTR(val) bfin_write32(DMA0_NEXT_DESC_PTR, val)
381#define bfin_read_DMA0_START_ADDR() bfin_read32(DMA0_START_ADDR)
382#define bfin_write_DMA0_START_ADDR(val) bfin_write32(DMA0_START_ADDR, val)
383#define bfin_read_DMA0_X_COUNT() bfin_read16(DMA0_X_COUNT)
384#define bfin_write_DMA0_X_COUNT(val) bfin_write16(DMA0_X_COUNT, val)
385#define bfin_read_DMA0_Y_COUNT() bfin_read16(DMA0_Y_COUNT)
386#define bfin_write_DMA0_Y_COUNT(val) bfin_write16(DMA0_Y_COUNT, val)
387#define bfin_read_DMA0_X_MODIFY() bfin_read16(DMA0_X_MODIFY)
388#define bfin_write_DMA0_X_MODIFY(val) bfin_write16(DMA0_X_MODIFY, val)
389#define bfin_read_DMA0_Y_MODIFY() bfin_read16(DMA0_Y_MODIFY)
390#define bfin_write_DMA0_Y_MODIFY(val) bfin_write16(DMA0_Y_MODIFY, val)
391#define bfin_read_DMA0_CURR_DESC_PTR() bfin_read32(DMA0_CURR_DESC_PTR)
392#define bfin_write_DMA0_CURR_DESC_PTR(val) bfin_write32(DMA0_CURR_DESC_PTR, val)
393#define bfin_read_DMA0_CURR_ADDR() bfin_read32(DMA0_CURR_ADDR)
394#define bfin_write_DMA0_CURR_ADDR(val) bfin_write32(DMA0_CURR_ADDR, val)
395#define bfin_read_DMA0_CURR_X_COUNT() bfin_read16(DMA0_CURR_X_COUNT)
396#define bfin_write_DMA0_CURR_X_COUNT(val) bfin_write16(DMA0_CURR_X_COUNT, val)
397#define bfin_read_DMA0_CURR_Y_COUNT() bfin_read16(DMA0_CURR_Y_COUNT)
398#define bfin_write_DMA0_CURR_Y_COUNT(val) bfin_write16(DMA0_CURR_Y_COUNT, val)
399#define bfin_read_DMA0_IRQ_STATUS() bfin_read16(DMA0_IRQ_STATUS)
400#define bfin_write_DMA0_IRQ_STATUS(val) bfin_write16(DMA0_IRQ_STATUS, val)
401#define bfin_read_DMA0_PERIPHERAL_MAP() bfin_read16(DMA0_PERIPHERAL_MAP)
402#define bfin_write_DMA0_PERIPHERAL_MAP(val) bfin_write16(DMA0_PERIPHERAL_MAP, val)
403
404#define bfin_read_DMA1_CONFIG() bfin_read16(DMA1_CONFIG)
405#define bfin_write_DMA1_CONFIG(val) bfin_write16(DMA1_CONFIG, val)
406#define bfin_read_DMA1_NEXT_DESC_PTR() bfin_read32(DMA1_NEXT_DESC_PTR)
407#define bfin_write_DMA1_NEXT_DESC_PTR(val) bfin_write32(DMA1_NEXT_DESC_PTR, val)
408#define bfin_read_DMA1_START_ADDR() bfin_read32(DMA1_START_ADDR)
409#define bfin_write_DMA1_START_ADDR(val) bfin_write32(DMA1_START_ADDR, val)
410#define bfin_read_DMA1_X_COUNT() bfin_read16(DMA1_X_COUNT)
411#define bfin_write_DMA1_X_COUNT(val) bfin_write16(DMA1_X_COUNT, val)
412#define bfin_read_DMA1_Y_COUNT() bfin_read16(DMA1_Y_COUNT)
413#define bfin_write_DMA1_Y_COUNT(val) bfin_write16(DMA1_Y_COUNT, val)
414#define bfin_read_DMA1_X_MODIFY() bfin_read16(DMA1_X_MODIFY)
415#define bfin_write_DMA1_X_MODIFY(val) bfin_write16(DMA1_X_MODIFY, val)
416#define bfin_read_DMA1_Y_MODIFY() bfin_read16(DMA1_Y_MODIFY)
417#define bfin_write_DMA1_Y_MODIFY(val) bfin_write16(DMA1_Y_MODIFY, val)
418#define bfin_read_DMA1_CURR_DESC_PTR() bfin_read32(DMA1_CURR_DESC_PTR)
419#define bfin_write_DMA1_CURR_DESC_PTR(val) bfin_write32(DMA1_CURR_DESC_PTR, val)
420#define bfin_read_DMA1_CURR_ADDR() bfin_read32(DMA1_CURR_ADDR)
421#define bfin_write_DMA1_CURR_ADDR(val) bfin_write32(DMA1_CURR_ADDR, val)
422#define bfin_read_DMA1_CURR_X_COUNT() bfin_read16(DMA1_CURR_X_COUNT)
423#define bfin_write_DMA1_CURR_X_COUNT(val) bfin_write16(DMA1_CURR_X_COUNT, val)
424#define bfin_read_DMA1_CURR_Y_COUNT() bfin_read16(DMA1_CURR_Y_COUNT)
425#define bfin_write_DMA1_CURR_Y_COUNT(val) bfin_write16(DMA1_CURR_Y_COUNT, val)
426#define bfin_read_DMA1_IRQ_STATUS() bfin_read16(DMA1_IRQ_STATUS)
427#define bfin_write_DMA1_IRQ_STATUS(val) bfin_write16(DMA1_IRQ_STATUS, val)
428#define bfin_read_DMA1_PERIPHERAL_MAP() bfin_read16(DMA1_PERIPHERAL_MAP)
429#define bfin_write_DMA1_PERIPHERAL_MAP(val) bfin_write16(DMA1_PERIPHERAL_MAP, val)
430
431#define bfin_read_DMA2_CONFIG() bfin_read16(DMA2_CONFIG)
432#define bfin_write_DMA2_CONFIG(val) bfin_write16(DMA2_CONFIG, val)
433#define bfin_read_DMA2_NEXT_DESC_PTR() bfin_read32(DMA2_NEXT_DESC_PTR)
434#define bfin_write_DMA2_NEXT_DESC_PTR(val) bfin_write32(DMA2_NEXT_DESC_PTR, val)
435#define bfin_read_DMA2_START_ADDR() bfin_read32(DMA2_START_ADDR)
436#define bfin_write_DMA2_START_ADDR(val) bfin_write32(DMA2_START_ADDR, val)
437#define bfin_read_DMA2_X_COUNT() bfin_read16(DMA2_X_COUNT)
438#define bfin_write_DMA2_X_COUNT(val) bfin_write16(DMA2_X_COUNT, val)
439#define bfin_read_DMA2_Y_COUNT() bfin_read16(DMA2_Y_COUNT)
440#define bfin_write_DMA2_Y_COUNT(val) bfin_write16(DMA2_Y_COUNT, val)
441#define bfin_read_DMA2_X_MODIFY() bfin_read16(DMA2_X_MODIFY)
442#define bfin_write_DMA2_X_MODIFY(val) bfin_write16(DMA2_X_MODIFY, val)
443#define bfin_read_DMA2_Y_MODIFY() bfin_read16(DMA2_Y_MODIFY)
444#define bfin_write_DMA2_Y_MODIFY(val) bfin_write16(DMA2_Y_MODIFY, val)
445#define bfin_read_DMA2_CURR_DESC_PTR() bfin_read32(DMA2_CURR_DESC_PTR)
446#define bfin_write_DMA2_CURR_DESC_PTR(val) bfin_write32(DMA2_CURR_DESC_PTR, val)
447#define bfin_read_DMA2_CURR_ADDR() bfin_read32(DMA2_CURR_ADDR)
448#define bfin_write_DMA2_CURR_ADDR(val) bfin_write32(DMA2_CURR_ADDR, val)
449#define bfin_read_DMA2_CURR_X_COUNT() bfin_read16(DMA2_CURR_X_COUNT)
450#define bfin_write_DMA2_CURR_X_COUNT(val) bfin_write16(DMA2_CURR_X_COUNT, val)
451#define bfin_read_DMA2_CURR_Y_COUNT() bfin_read16(DMA2_CURR_Y_COUNT)
452#define bfin_write_DMA2_CURR_Y_COUNT(val) bfin_write16(DMA2_CURR_Y_COUNT, val)
453#define bfin_read_DMA2_IRQ_STATUS() bfin_read16(DMA2_IRQ_STATUS)
454#define bfin_write_DMA2_IRQ_STATUS(val) bfin_write16(DMA2_IRQ_STATUS, val)
455#define bfin_read_DMA2_PERIPHERAL_MAP() bfin_read16(DMA2_PERIPHERAL_MAP)
456#define bfin_write_DMA2_PERIPHERAL_MAP(val) bfin_write16(DMA2_PERIPHERAL_MAP, val)
457
458#define bfin_read_DMA3_CONFIG() bfin_read16(DMA3_CONFIG)
459#define bfin_write_DMA3_CONFIG(val) bfin_write16(DMA3_CONFIG, val)
460#define bfin_read_DMA3_NEXT_DESC_PTR() bfin_read32(DMA3_NEXT_DESC_PTR)
461#define bfin_write_DMA3_NEXT_DESC_PTR(val) bfin_write32(DMA3_NEXT_DESC_PTR, val)
462#define bfin_read_DMA3_START_ADDR() bfin_read32(DMA3_START_ADDR)
463#define bfin_write_DMA3_START_ADDR(val) bfin_write32(DMA3_START_ADDR, val)
464#define bfin_read_DMA3_X_COUNT() bfin_read16(DMA3_X_COUNT)
465#define bfin_write_DMA3_X_COUNT(val) bfin_write16(DMA3_X_COUNT, val)
466#define bfin_read_DMA3_Y_COUNT() bfin_read16(DMA3_Y_COUNT)
467#define bfin_write_DMA3_Y_COUNT(val) bfin_write16(DMA3_Y_COUNT, val)
468#define bfin_read_DMA3_X_MODIFY() bfin_read16(DMA3_X_MODIFY)
469#define bfin_write_DMA3_X_MODIFY(val) bfin_write16(DMA3_X_MODIFY, val)
470#define bfin_read_DMA3_Y_MODIFY() bfin_read16(DMA3_Y_MODIFY)
471#define bfin_write_DMA3_Y_MODIFY(val) bfin_write16(DMA3_Y_MODIFY, val)
472#define bfin_read_DMA3_CURR_DESC_PTR() bfin_read32(DMA3_CURR_DESC_PTR)
473#define bfin_write_DMA3_CURR_DESC_PTR(val) bfin_write32(DMA3_CURR_DESC_PTR, val)
474#define bfin_read_DMA3_CURR_ADDR() bfin_read32(DMA3_CURR_ADDR)
475#define bfin_write_DMA3_CURR_ADDR(val) bfin_write32(DMA3_CURR_ADDR, val)
476#define bfin_read_DMA3_CURR_X_COUNT() bfin_read16(DMA3_CURR_X_COUNT)
477#define bfin_write_DMA3_CURR_X_COUNT(val) bfin_write16(DMA3_CURR_X_COUNT, val)
478#define bfin_read_DMA3_CURR_Y_COUNT() bfin_read16(DMA3_CURR_Y_COUNT)
479#define bfin_write_DMA3_CURR_Y_COUNT(val) bfin_write16(DMA3_CURR_Y_COUNT, val)
480#define bfin_read_DMA3_IRQ_STATUS() bfin_read16(DMA3_IRQ_STATUS)
481#define bfin_write_DMA3_IRQ_STATUS(val) bfin_write16(DMA3_IRQ_STATUS, val)
482#define bfin_read_DMA3_PERIPHERAL_MAP() bfin_read16(DMA3_PERIPHERAL_MAP)
483#define bfin_write_DMA3_PERIPHERAL_MAP(val) bfin_write16(DMA3_PERIPHERAL_MAP, val)
484
485#define bfin_read_DMA4_CONFIG() bfin_read16(DMA4_CONFIG)
486#define bfin_write_DMA4_CONFIG(val) bfin_write16(DMA4_CONFIG, val)
487#define bfin_read_DMA4_NEXT_DESC_PTR() bfin_read32(DMA4_NEXT_DESC_PTR)
488#define bfin_write_DMA4_NEXT_DESC_PTR(val) bfin_write32(DMA4_NEXT_DESC_PTR, val)
489#define bfin_read_DMA4_START_ADDR() bfin_read32(DMA4_START_ADDR)
490#define bfin_write_DMA4_START_ADDR(val) bfin_write32(DMA4_START_ADDR, val)
491#define bfin_read_DMA4_X_COUNT() bfin_read16(DMA4_X_COUNT)
492#define bfin_write_DMA4_X_COUNT(val) bfin_write16(DMA4_X_COUNT, val)
493#define bfin_read_DMA4_Y_COUNT() bfin_read16(DMA4_Y_COUNT)
494#define bfin_write_DMA4_Y_COUNT(val) bfin_write16(DMA4_Y_COUNT, val)
495#define bfin_read_DMA4_X_MODIFY() bfin_read16(DMA4_X_MODIFY)
496#define bfin_write_DMA4_X_MODIFY(val) bfin_write16(DMA4_X_MODIFY, val)
497#define bfin_read_DMA4_Y_MODIFY() bfin_read16(DMA4_Y_MODIFY)
498#define bfin_write_DMA4_Y_MODIFY(val) bfin_write16(DMA4_Y_MODIFY, val)
499#define bfin_read_DMA4_CURR_DESC_PTR() bfin_read32(DMA4_CURR_DESC_PTR)
500#define bfin_write_DMA4_CURR_DESC_PTR(val) bfin_write32(DMA4_CURR_DESC_PTR, val)
501#define bfin_read_DMA4_CURR_ADDR() bfin_read32(DMA4_CURR_ADDR)
502#define bfin_write_DMA4_CURR_ADDR(val) bfin_write32(DMA4_CURR_ADDR, val)
503#define bfin_read_DMA4_CURR_X_COUNT() bfin_read16(DMA4_CURR_X_COUNT)
504#define bfin_write_DMA4_CURR_X_COUNT(val) bfin_write16(DMA4_CURR_X_COUNT, val)
505#define bfin_read_DMA4_CURR_Y_COUNT() bfin_read16(DMA4_CURR_Y_COUNT)
506#define bfin_write_DMA4_CURR_Y_COUNT(val) bfin_write16(DMA4_CURR_Y_COUNT, val)
507#define bfin_read_DMA4_IRQ_STATUS() bfin_read16(DMA4_IRQ_STATUS)
508#define bfin_write_DMA4_IRQ_STATUS(val) bfin_write16(DMA4_IRQ_STATUS, val)
509#define bfin_read_DMA4_PERIPHERAL_MAP() bfin_read16(DMA4_PERIPHERAL_MAP)
510#define bfin_write_DMA4_PERIPHERAL_MAP(val) bfin_write16(DMA4_PERIPHERAL_MAP, val)
511
512#define bfin_read_DMA5_CONFIG() bfin_read16(DMA5_CONFIG)
513#define bfin_write_DMA5_CONFIG(val) bfin_write16(DMA5_CONFIG, val)
514#define bfin_read_DMA5_NEXT_DESC_PTR() bfin_read32(DMA5_NEXT_DESC_PTR)
515#define bfin_write_DMA5_NEXT_DESC_PTR(val) bfin_write32(DMA5_NEXT_DESC_PTR, val)
516#define bfin_read_DMA5_START_ADDR() bfin_read32(DMA5_START_ADDR)
517#define bfin_write_DMA5_START_ADDR(val) bfin_write32(DMA5_START_ADDR, val)
518#define bfin_read_DMA5_X_COUNT() bfin_read16(DMA5_X_COUNT)
519#define bfin_write_DMA5_X_COUNT(val) bfin_write16(DMA5_X_COUNT, val)
520#define bfin_read_DMA5_Y_COUNT() bfin_read16(DMA5_Y_COUNT)
521#define bfin_write_DMA5_Y_COUNT(val) bfin_write16(DMA5_Y_COUNT, val)
522#define bfin_read_DMA5_X_MODIFY() bfin_read16(DMA5_X_MODIFY)
523#define bfin_write_DMA5_X_MODIFY(val) bfin_write16(DMA5_X_MODIFY, val)
524#define bfin_read_DMA5_Y_MODIFY() bfin_read16(DMA5_Y_MODIFY)
525#define bfin_write_DMA5_Y_MODIFY(val) bfin_write16(DMA5_Y_MODIFY, val)
526#define bfin_read_DMA5_CURR_DESC_PTR() bfin_read32(DMA5_CURR_DESC_PTR)
527#define bfin_write_DMA5_CURR_DESC_PTR(val) bfin_write32(DMA5_CURR_DESC_PTR, val)
528#define bfin_read_DMA5_CURR_ADDR() bfin_read32(DMA5_CURR_ADDR)
529#define bfin_write_DMA5_CURR_ADDR(val) bfin_write32(DMA5_CURR_ADDR, val)
530#define bfin_read_DMA5_CURR_X_COUNT() bfin_read16(DMA5_CURR_X_COUNT)
531#define bfin_write_DMA5_CURR_X_COUNT(val) bfin_write16(DMA5_CURR_X_COUNT, val)
532#define bfin_read_DMA5_CURR_Y_COUNT() bfin_read16(DMA5_CURR_Y_COUNT)
533#define bfin_write_DMA5_CURR_Y_COUNT(val) bfin_write16(DMA5_CURR_Y_COUNT, val)
534#define bfin_read_DMA5_IRQ_STATUS() bfin_read16(DMA5_IRQ_STATUS)
535#define bfin_write_DMA5_IRQ_STATUS(val) bfin_write16(DMA5_IRQ_STATUS, val)
536#define bfin_read_DMA5_PERIPHERAL_MAP() bfin_read16(DMA5_PERIPHERAL_MAP)
537#define bfin_write_DMA5_PERIPHERAL_MAP(val) bfin_write16(DMA5_PERIPHERAL_MAP, val)
538
539#define bfin_read_DMA6_CONFIG() bfin_read16(DMA6_CONFIG)
540#define bfin_write_DMA6_CONFIG(val) bfin_write16(DMA6_CONFIG, val)
541#define bfin_read_DMA6_NEXT_DESC_PTR() bfin_read32(DMA6_NEXT_DESC_PTR)
542#define bfin_write_DMA6_NEXT_DESC_PTR(val) bfin_write32(DMA6_NEXT_DESC_PTR, val)
543#define bfin_read_DMA6_START_ADDR() bfin_read32(DMA6_START_ADDR)
544#define bfin_write_DMA6_START_ADDR(val) bfin_write32(DMA6_START_ADDR, val)
545#define bfin_read_DMA6_X_COUNT() bfin_read16(DMA6_X_COUNT)
546#define bfin_write_DMA6_X_COUNT(val) bfin_write16(DMA6_X_COUNT, val)
547#define bfin_read_DMA6_Y_COUNT() bfin_read16(DMA6_Y_COUNT)
548#define bfin_write_DMA6_Y_COUNT(val) bfin_write16(DMA6_Y_COUNT, val)
549#define bfin_read_DMA6_X_MODIFY() bfin_read16(DMA6_X_MODIFY)
550#define bfin_write_DMA6_X_MODIFY(val) bfin_write16(DMA6_X_MODIFY, val)
551#define bfin_read_DMA6_Y_MODIFY() bfin_read16(DMA6_Y_MODIFY)
552#define bfin_write_DMA6_Y_MODIFY(val) bfin_write16(DMA6_Y_MODIFY, val)
553#define bfin_read_DMA6_CURR_DESC_PTR() bfin_read32(DMA6_CURR_DESC_PTR)
554#define bfin_write_DMA6_CURR_DESC_PTR(val) bfin_write32(DMA6_CURR_DESC_PTR, val)
555#define bfin_read_DMA6_CURR_ADDR() bfin_read32(DMA6_CURR_ADDR)
556#define bfin_write_DMA6_CURR_ADDR(val) bfin_write32(DMA6_CURR_ADDR, val)
557#define bfin_read_DMA6_CURR_X_COUNT() bfin_read16(DMA6_CURR_X_COUNT)
558#define bfin_write_DMA6_CURR_X_COUNT(val) bfin_write16(DMA6_CURR_X_COUNT, val)
559#define bfin_read_DMA6_CURR_Y_COUNT() bfin_read16(DMA6_CURR_Y_COUNT)
560#define bfin_write_DMA6_CURR_Y_COUNT(val) bfin_write16(DMA6_CURR_Y_COUNT, val)
561#define bfin_read_DMA6_IRQ_STATUS() bfin_read16(DMA6_IRQ_STATUS)
562#define bfin_write_DMA6_IRQ_STATUS(val) bfin_write16(DMA6_IRQ_STATUS, val)
563#define bfin_read_DMA6_PERIPHERAL_MAP() bfin_read16(DMA6_PERIPHERAL_MAP)
564#define bfin_write_DMA6_PERIPHERAL_MAP(val) bfin_write16(DMA6_PERIPHERAL_MAP, val)
565
566#define bfin_read_DMA7_CONFIG() bfin_read16(DMA7_CONFIG)
567#define bfin_write_DMA7_CONFIG(val) bfin_write16(DMA7_CONFIG, val)
568#define bfin_read_DMA7_NEXT_DESC_PTR() bfin_read32(DMA7_NEXT_DESC_PTR)
569#define bfin_write_DMA7_NEXT_DESC_PTR(val) bfin_write32(DMA7_NEXT_DESC_PTR, val)
570#define bfin_read_DMA7_START_ADDR() bfin_read32(DMA7_START_ADDR)
571#define bfin_write_DMA7_START_ADDR(val) bfin_write32(DMA7_START_ADDR, val)
572#define bfin_read_DMA7_X_COUNT() bfin_read16(DMA7_X_COUNT)
573#define bfin_write_DMA7_X_COUNT(val) bfin_write16(DMA7_X_COUNT, val)
574#define bfin_read_DMA7_Y_COUNT() bfin_read16(DMA7_Y_COUNT)
575#define bfin_write_DMA7_Y_COUNT(val) bfin_write16(DMA7_Y_COUNT, val)
576#define bfin_read_DMA7_X_MODIFY() bfin_read16(DMA7_X_MODIFY)
577#define bfin_write_DMA7_X_MODIFY(val) bfin_write16(DMA7_X_MODIFY, val)
578#define bfin_read_DMA7_Y_MODIFY() bfin_read16(DMA7_Y_MODIFY)
579#define bfin_write_DMA7_Y_MODIFY(val) bfin_write16(DMA7_Y_MODIFY, val)
580#define bfin_read_DMA7_CURR_DESC_PTR() bfin_read32(DMA7_CURR_DESC_PTR)
581#define bfin_write_DMA7_CURR_DESC_PTR(val) bfin_write32(DMA7_CURR_DESC_PTR, val)
582#define bfin_read_DMA7_CURR_ADDR() bfin_read32(DMA7_CURR_ADDR)
583#define bfin_write_DMA7_CURR_ADDR(val) bfin_write32(DMA7_CURR_ADDR, val)
584#define bfin_read_DMA7_CURR_X_COUNT() bfin_read16(DMA7_CURR_X_COUNT)
585#define bfin_write_DMA7_CURR_X_COUNT(val) bfin_write16(DMA7_CURR_X_COUNT, val)
586#define bfin_read_DMA7_CURR_Y_COUNT() bfin_read16(DMA7_CURR_Y_COUNT)
587#define bfin_write_DMA7_CURR_Y_COUNT(val) bfin_write16(DMA7_CURR_Y_COUNT, val)
588#define bfin_read_DMA7_IRQ_STATUS() bfin_read16(DMA7_IRQ_STATUS)
589#define bfin_write_DMA7_IRQ_STATUS(val) bfin_write16(DMA7_IRQ_STATUS, val)
590#define bfin_read_DMA7_PERIPHERAL_MAP() bfin_read16(DMA7_PERIPHERAL_MAP)
591#define bfin_write_DMA7_PERIPHERAL_MAP(val) bfin_write16(DMA7_PERIPHERAL_MAP, val)
592
593#define bfin_read_DMA8_CONFIG() bfin_read16(DMA8_CONFIG)
594#define bfin_write_DMA8_CONFIG(val) bfin_write16(DMA8_CONFIG, val)
595#define bfin_read_DMA8_NEXT_DESC_PTR() bfin_read32(DMA8_NEXT_DESC_PTR)
596#define bfin_write_DMA8_NEXT_DESC_PTR(val) bfin_write32(DMA8_NEXT_DESC_PTR, val)
597#define bfin_read_DMA8_START_ADDR() bfin_read32(DMA8_START_ADDR)
598#define bfin_write_DMA8_START_ADDR(val) bfin_write32(DMA8_START_ADDR, val)
599#define bfin_read_DMA8_X_COUNT() bfin_read16(DMA8_X_COUNT)
600#define bfin_write_DMA8_X_COUNT(val) bfin_write16(DMA8_X_COUNT, val)
601#define bfin_read_DMA8_Y_COUNT() bfin_read16(DMA8_Y_COUNT)
602#define bfin_write_DMA8_Y_COUNT(val) bfin_write16(DMA8_Y_COUNT, val)
603#define bfin_read_DMA8_X_MODIFY() bfin_read16(DMA8_X_MODIFY)
604#define bfin_write_DMA8_X_MODIFY(val) bfin_write16(DMA8_X_MODIFY, val)
605#define bfin_read_DMA8_Y_MODIFY() bfin_read16(DMA8_Y_MODIFY)
606#define bfin_write_DMA8_Y_MODIFY(val) bfin_write16(DMA8_Y_MODIFY, val)
607#define bfin_read_DMA8_CURR_DESC_PTR() bfin_read32(DMA8_CURR_DESC_PTR)
608#define bfin_write_DMA8_CURR_DESC_PTR(val) bfin_write32(DMA8_CURR_DESC_PTR, val)
609#define bfin_read_DMA8_CURR_ADDR() bfin_read32(DMA8_CURR_ADDR)
610#define bfin_write_DMA8_CURR_ADDR(val) bfin_write32(DMA8_CURR_ADDR, val)
611#define bfin_read_DMA8_CURR_X_COUNT() bfin_read16(DMA8_CURR_X_COUNT)
612#define bfin_write_DMA8_CURR_X_COUNT(val) bfin_write16(DMA8_CURR_X_COUNT, val)
613#define bfin_read_DMA8_CURR_Y_COUNT() bfin_read16(DMA8_CURR_Y_COUNT)
614#define bfin_write_DMA8_CURR_Y_COUNT(val) bfin_write16(DMA8_CURR_Y_COUNT, val)
615#define bfin_read_DMA8_IRQ_STATUS() bfin_read16(DMA8_IRQ_STATUS)
616#define bfin_write_DMA8_IRQ_STATUS(val) bfin_write16(DMA8_IRQ_STATUS, val)
617#define bfin_read_DMA8_PERIPHERAL_MAP() bfin_read16(DMA8_PERIPHERAL_MAP)
618#define bfin_write_DMA8_PERIPHERAL_MAP(val) bfin_write16(DMA8_PERIPHERAL_MAP, val)
619
620#define bfin_read_DMA9_CONFIG() bfin_read16(DMA9_CONFIG)
621#define bfin_write_DMA9_CONFIG(val) bfin_write16(DMA9_CONFIG, val)
622#define bfin_read_DMA9_NEXT_DESC_PTR() bfin_read32(DMA9_NEXT_DESC_PTR)
623#define bfin_write_DMA9_NEXT_DESC_PTR(val) bfin_write32(DMA9_NEXT_DESC_PTR, val)
624#define bfin_read_DMA9_START_ADDR() bfin_read32(DMA9_START_ADDR)
625#define bfin_write_DMA9_START_ADDR(val) bfin_write32(DMA9_START_ADDR, val)
626#define bfin_read_DMA9_X_COUNT() bfin_read16(DMA9_X_COUNT)
627#define bfin_write_DMA9_X_COUNT(val) bfin_write16(DMA9_X_COUNT, val)
628#define bfin_read_DMA9_Y_COUNT() bfin_read16(DMA9_Y_COUNT)
629#define bfin_write_DMA9_Y_COUNT(val) bfin_write16(DMA9_Y_COUNT, val)
630#define bfin_read_DMA9_X_MODIFY() bfin_read16(DMA9_X_MODIFY)
631#define bfin_write_DMA9_X_MODIFY(val) bfin_write16(DMA9_X_MODIFY, val)
632#define bfin_read_DMA9_Y_MODIFY() bfin_read16(DMA9_Y_MODIFY)
633#define bfin_write_DMA9_Y_MODIFY(val) bfin_write16(DMA9_Y_MODIFY, val)
634#define bfin_read_DMA9_CURR_DESC_PTR() bfin_read32(DMA9_CURR_DESC_PTR)
635#define bfin_write_DMA9_CURR_DESC_PTR(val) bfin_write32(DMA9_CURR_DESC_PTR, val)
636#define bfin_read_DMA9_CURR_ADDR() bfin_read32(DMA9_CURR_ADDR)
637#define bfin_write_DMA9_CURR_ADDR(val) bfin_write32(DMA9_CURR_ADDR, val)
638#define bfin_read_DMA9_CURR_X_COUNT() bfin_read16(DMA9_CURR_X_COUNT)
639#define bfin_write_DMA9_CURR_X_COUNT(val) bfin_write16(DMA9_CURR_X_COUNT, val)
640#define bfin_read_DMA9_CURR_Y_COUNT() bfin_read16(DMA9_CURR_Y_COUNT)
641#define bfin_write_DMA9_CURR_Y_COUNT(val) bfin_write16(DMA9_CURR_Y_COUNT, val)
642#define bfin_read_DMA9_IRQ_STATUS() bfin_read16(DMA9_IRQ_STATUS)
643#define bfin_write_DMA9_IRQ_STATUS(val) bfin_write16(DMA9_IRQ_STATUS, val)
644#define bfin_read_DMA9_PERIPHERAL_MAP() bfin_read16(DMA9_PERIPHERAL_MAP)
645#define bfin_write_DMA9_PERIPHERAL_MAP(val) bfin_write16(DMA9_PERIPHERAL_MAP, val)
646
647#define bfin_read_DMA10_CONFIG() bfin_read16(DMA10_CONFIG)
648#define bfin_write_DMA10_CONFIG(val) bfin_write16(DMA10_CONFIG, val)
649#define bfin_read_DMA10_NEXT_DESC_PTR() bfin_read32(DMA10_NEXT_DESC_PTR)
650#define bfin_write_DMA10_NEXT_DESC_PTR(val) bfin_write32(DMA10_NEXT_DESC_PTR, val)
651#define bfin_read_DMA10_START_ADDR() bfin_read32(DMA10_START_ADDR)
652#define bfin_write_DMA10_START_ADDR(val) bfin_write32(DMA10_START_ADDR, val)
653#define bfin_read_DMA10_X_COUNT() bfin_read16(DMA10_X_COUNT)
654#define bfin_write_DMA10_X_COUNT(val) bfin_write16(DMA10_X_COUNT, val)
655#define bfin_read_DMA10_Y_COUNT() bfin_read16(DMA10_Y_COUNT)
656#define bfin_write_DMA10_Y_COUNT(val) bfin_write16(DMA10_Y_COUNT, val)
657#define bfin_read_DMA10_X_MODIFY() bfin_read16(DMA10_X_MODIFY)
658#define bfin_write_DMA10_X_MODIFY(val) bfin_write16(DMA10_X_MODIFY, val)
659#define bfin_read_DMA10_Y_MODIFY() bfin_read16(DMA10_Y_MODIFY)
660#define bfin_write_DMA10_Y_MODIFY(val) bfin_write16(DMA10_Y_MODIFY, val)
661#define bfin_read_DMA10_CURR_DESC_PTR() bfin_read32(DMA10_CURR_DESC_PTR)
662#define bfin_write_DMA10_CURR_DESC_PTR(val) bfin_write32(DMA10_CURR_DESC_PTR, val)
663#define bfin_read_DMA10_CURR_ADDR() bfin_read32(DMA10_CURR_ADDR)
664#define bfin_write_DMA10_CURR_ADDR(val) bfin_write32(DMA10_CURR_ADDR, val)
665#define bfin_read_DMA10_CURR_X_COUNT() bfin_read16(DMA10_CURR_X_COUNT)
666#define bfin_write_DMA10_CURR_X_COUNT(val) bfin_write16(DMA10_CURR_X_COUNT, val)
667#define bfin_read_DMA10_CURR_Y_COUNT() bfin_read16(DMA10_CURR_Y_COUNT)
668#define bfin_write_DMA10_CURR_Y_COUNT(val) bfin_write16(DMA10_CURR_Y_COUNT, val)
669#define bfin_read_DMA10_IRQ_STATUS() bfin_read16(DMA10_IRQ_STATUS)
670#define bfin_write_DMA10_IRQ_STATUS(val) bfin_write16(DMA10_IRQ_STATUS, val)
671#define bfin_read_DMA10_PERIPHERAL_MAP() bfin_read16(DMA10_PERIPHERAL_MAP)
672#define bfin_write_DMA10_PERIPHERAL_MAP(val) bfin_write16(DMA10_PERIPHERAL_MAP, val)
673
674#define bfin_read_DMA11_CONFIG() bfin_read16(DMA11_CONFIG)
675#define bfin_write_DMA11_CONFIG(val) bfin_write16(DMA11_CONFIG, val)
676#define bfin_read_DMA11_NEXT_DESC_PTR() bfin_read32(DMA11_NEXT_DESC_PTR)
677#define bfin_write_DMA11_NEXT_DESC_PTR(val) bfin_write32(DMA11_NEXT_DESC_PTR, val)
678#define bfin_read_DMA11_START_ADDR() bfin_read32(DMA11_START_ADDR)
679#define bfin_write_DMA11_START_ADDR(val) bfin_write32(DMA11_START_ADDR, val)
680#define bfin_read_DMA11_X_COUNT() bfin_read16(DMA11_X_COUNT)
681#define bfin_write_DMA11_X_COUNT(val) bfin_write16(DMA11_X_COUNT, val)
682#define bfin_read_DMA11_Y_COUNT() bfin_read16(DMA11_Y_COUNT)
683#define bfin_write_DMA11_Y_COUNT(val) bfin_write16(DMA11_Y_COUNT, val)
684#define bfin_read_DMA11_X_MODIFY() bfin_read16(DMA11_X_MODIFY)
685#define bfin_write_DMA11_X_MODIFY(val) bfin_write16(DMA11_X_MODIFY, val)
686#define bfin_read_DMA11_Y_MODIFY() bfin_read16(DMA11_Y_MODIFY)
687#define bfin_write_DMA11_Y_MODIFY(val) bfin_write16(DMA11_Y_MODIFY, val)
688#define bfin_read_DMA11_CURR_DESC_PTR() bfin_read32(DMA11_CURR_DESC_PTR)
689#define bfin_write_DMA11_CURR_DESC_PTR(val) bfin_write32(DMA11_CURR_DESC_PTR, val)
690#define bfin_read_DMA11_CURR_ADDR() bfin_read32(DMA11_CURR_ADDR)
691#define bfin_write_DMA11_CURR_ADDR(val) bfin_write32(DMA11_CURR_ADDR, val)
692#define bfin_read_DMA11_CURR_X_COUNT() bfin_read16(DMA11_CURR_X_COUNT)
693#define bfin_write_DMA11_CURR_X_COUNT(val) bfin_write16(DMA11_CURR_X_COUNT, val)
694#define bfin_read_DMA11_CURR_Y_COUNT() bfin_read16(DMA11_CURR_Y_COUNT)
695#define bfin_write_DMA11_CURR_Y_COUNT(val) bfin_write16(DMA11_CURR_Y_COUNT, val)
696#define bfin_read_DMA11_IRQ_STATUS() bfin_read16(DMA11_IRQ_STATUS)
697#define bfin_write_DMA11_IRQ_STATUS(val) bfin_write16(DMA11_IRQ_STATUS, val)
698#define bfin_read_DMA11_PERIPHERAL_MAP() bfin_read16(DMA11_PERIPHERAL_MAP)
699#define bfin_write_DMA11_PERIPHERAL_MAP(val) bfin_write16(DMA11_PERIPHERAL_MAP, val)
700
701#define bfin_read_MDMA_D0_CONFIG() bfin_read16(MDMA_D0_CONFIG)
702#define bfin_write_MDMA_D0_CONFIG(val) bfin_write16(MDMA_D0_CONFIG, val)
703#define bfin_read_MDMA_D0_NEXT_DESC_PTR() bfin_read32(MDMA_D0_NEXT_DESC_PTR)
704#define bfin_write_MDMA_D0_NEXT_DESC_PTR(val) bfin_write32(MDMA_D0_NEXT_DESC_PTR, val)
705#define bfin_read_MDMA_D0_START_ADDR() bfin_read32(MDMA_D0_START_ADDR)
706#define bfin_write_MDMA_D0_START_ADDR(val) bfin_write32(MDMA_D0_START_ADDR, val)
707#define bfin_read_MDMA_D0_X_COUNT() bfin_read16(MDMA_D0_X_COUNT)
708#define bfin_write_MDMA_D0_X_COUNT(val) bfin_write16(MDMA_D0_X_COUNT, val)
709#define bfin_read_MDMA_D0_Y_COUNT() bfin_read16(MDMA_D0_Y_COUNT)
710#define bfin_write_MDMA_D0_Y_COUNT(val) bfin_write16(MDMA_D0_Y_COUNT, val)
711#define bfin_read_MDMA_D0_X_MODIFY() bfin_read16(MDMA_D0_X_MODIFY)
712#define bfin_write_MDMA_D0_X_MODIFY(val) bfin_write16(MDMA_D0_X_MODIFY, val)
713#define bfin_read_MDMA_D0_Y_MODIFY() bfin_read16(MDMA_D0_Y_MODIFY)
714#define bfin_write_MDMA_D0_Y_MODIFY(val) bfin_write16(MDMA_D0_Y_MODIFY, val)
715#define bfin_read_MDMA_D0_CURR_DESC_PTR() bfin_read32(MDMA_D0_CURR_DESC_PTR)
716#define bfin_write_MDMA_D0_CURR_DESC_PTR(val) bfin_write32(MDMA_D0_CURR_DESC_PTR, val)
717#define bfin_read_MDMA_D0_CURR_ADDR() bfin_read32(MDMA_D0_CURR_ADDR)
718#define bfin_write_MDMA_D0_CURR_ADDR(val) bfin_write32(MDMA_D0_CURR_ADDR, val)
719#define bfin_read_MDMA_D0_CURR_X_COUNT() bfin_read16(MDMA_D0_CURR_X_COUNT)
720#define bfin_write_MDMA_D0_CURR_X_COUNT(val) bfin_write16(MDMA_D0_CURR_X_COUNT, val)
721#define bfin_read_MDMA_D0_CURR_Y_COUNT() bfin_read16(MDMA_D0_CURR_Y_COUNT)
722#define bfin_write_MDMA_D0_CURR_Y_COUNT(val) bfin_write16(MDMA_D0_CURR_Y_COUNT, val)
723#define bfin_read_MDMA_D0_IRQ_STATUS() bfin_read16(MDMA_D0_IRQ_STATUS)
724#define bfin_write_MDMA_D0_IRQ_STATUS(val) bfin_write16(MDMA_D0_IRQ_STATUS, val)
725#define bfin_read_MDMA_D0_PERIPHERAL_MAP() bfin_read16(MDMA_D0_PERIPHERAL_MAP)
726#define bfin_write_MDMA_D0_PERIPHERAL_MAP(val) bfin_write16(MDMA_D0_PERIPHERAL_MAP, val)
727
728#define bfin_read_MDMA_S0_CONFIG() bfin_read16(MDMA_S0_CONFIG)
729#define bfin_write_MDMA_S0_CONFIG(val) bfin_write16(MDMA_S0_CONFIG, val)
730#define bfin_read_MDMA_S0_NEXT_DESC_PTR() bfin_read32(MDMA_S0_NEXT_DESC_PTR)
731#define bfin_write_MDMA_S0_NEXT_DESC_PTR(val) bfin_write32(MDMA_S0_NEXT_DESC_PTR, val)
732#define bfin_read_MDMA_S0_START_ADDR() bfin_read32(MDMA_S0_START_ADDR)
733#define bfin_write_MDMA_S0_START_ADDR(val) bfin_write32(MDMA_S0_START_ADDR, val)
734#define bfin_read_MDMA_S0_X_COUNT() bfin_read16(MDMA_S0_X_COUNT)
735#define bfin_write_MDMA_S0_X_COUNT(val) bfin_write16(MDMA_S0_X_COUNT, val)
736#define bfin_read_MDMA_S0_Y_COUNT() bfin_read16(MDMA_S0_Y_COUNT)
737#define bfin_write_MDMA_S0_Y_COUNT(val) bfin_write16(MDMA_S0_Y_COUNT, val)
738#define bfin_read_MDMA_S0_X_MODIFY() bfin_read16(MDMA_S0_X_MODIFY)
739#define bfin_write_MDMA_S0_X_MODIFY(val) bfin_write16(MDMA_S0_X_MODIFY, val)
740#define bfin_read_MDMA_S0_Y_MODIFY() bfin_read16(MDMA_S0_Y_MODIFY)
741#define bfin_write_MDMA_S0_Y_MODIFY(val) bfin_write16(MDMA_S0_Y_MODIFY, val)
742#define bfin_read_MDMA_S0_CURR_DESC_PTR() bfin_read32(MDMA_S0_CURR_DESC_PTR)
743#define bfin_write_MDMA_S0_CURR_DESC_PTR(val) bfin_write32(MDMA_S0_CURR_DESC_PTR, val)
744#define bfin_read_MDMA_S0_CURR_ADDR() bfin_read32(MDMA_S0_CURR_ADDR)
745#define bfin_write_MDMA_S0_CURR_ADDR(val) bfin_write32(MDMA_S0_CURR_ADDR, val)
746#define bfin_read_MDMA_S0_CURR_X_COUNT() bfin_read16(MDMA_S0_CURR_X_COUNT)
747#define bfin_write_MDMA_S0_CURR_X_COUNT(val) bfin_write16(MDMA_S0_CURR_X_COUNT, val)
748#define bfin_read_MDMA_S0_CURR_Y_COUNT() bfin_read16(MDMA_S0_CURR_Y_COUNT)
749#define bfin_write_MDMA_S0_CURR_Y_COUNT(val) bfin_write16(MDMA_S0_CURR_Y_COUNT, val)
750#define bfin_read_MDMA_S0_IRQ_STATUS() bfin_read16(MDMA_S0_IRQ_STATUS)
751#define bfin_write_MDMA_S0_IRQ_STATUS(val) bfin_write16(MDMA_S0_IRQ_STATUS, val)
752#define bfin_read_MDMA_S0_PERIPHERAL_MAP() bfin_read16(MDMA_S0_PERIPHERAL_MAP)
753#define bfin_write_MDMA_S0_PERIPHERAL_MAP(val) bfin_write16(MDMA_S0_PERIPHERAL_MAP, val)
754
755#define bfin_read_MDMA_D1_CONFIG() bfin_read16(MDMA_D1_CONFIG)
756#define bfin_write_MDMA_D1_CONFIG(val) bfin_write16(MDMA_D1_CONFIG, val)
757#define bfin_read_MDMA_D1_NEXT_DESC_PTR() bfin_read32(MDMA_D1_NEXT_DESC_PTR)
758#define bfin_write_MDMA_D1_NEXT_DESC_PTR(val) bfin_write32(MDMA_D1_NEXT_DESC_PTR, val)
759#define bfin_read_MDMA_D1_START_ADDR() bfin_read32(MDMA_D1_START_ADDR)
760#define bfin_write_MDMA_D1_START_ADDR(val) bfin_write32(MDMA_D1_START_ADDR, val)
761#define bfin_read_MDMA_D1_X_COUNT() bfin_read16(MDMA_D1_X_COUNT)
762#define bfin_write_MDMA_D1_X_COUNT(val) bfin_write16(MDMA_D1_X_COUNT, val)
763#define bfin_read_MDMA_D1_Y_COUNT() bfin_read16(MDMA_D1_Y_COUNT)
764#define bfin_write_MDMA_D1_Y_COUNT(val) bfin_write16(MDMA_D1_Y_COUNT, val)
765#define bfin_read_MDMA_D1_X_MODIFY() bfin_read16(MDMA_D1_X_MODIFY)
766#define bfin_write_MDMA_D1_X_MODIFY(val) bfin_write16(MDMA_D1_X_MODIFY, val)
767#define bfin_read_MDMA_D1_Y_MODIFY() bfin_read16(MDMA_D1_Y_MODIFY)
768#define bfin_write_MDMA_D1_Y_MODIFY(val) bfin_write16(MDMA_D1_Y_MODIFY, val)
769#define bfin_read_MDMA_D1_CURR_DESC_PTR() bfin_read32(MDMA_D1_CURR_DESC_PTR)
770#define bfin_write_MDMA_D1_CURR_DESC_PTR(val) bfin_write32(MDMA_D1_CURR_DESC_PTR, val)
771#define bfin_read_MDMA_D1_CURR_ADDR() bfin_read32(MDMA_D1_CURR_ADDR)
772#define bfin_write_MDMA_D1_CURR_ADDR(val) bfin_write32(MDMA_D1_CURR_ADDR, val)
773#define bfin_read_MDMA_D1_CURR_X_COUNT() bfin_read16(MDMA_D1_CURR_X_COUNT)
774#define bfin_write_MDMA_D1_CURR_X_COUNT(val) bfin_write16(MDMA_D1_CURR_X_COUNT, val)
775#define bfin_read_MDMA_D1_CURR_Y_COUNT() bfin_read16(MDMA_D1_CURR_Y_COUNT)
776#define bfin_write_MDMA_D1_CURR_Y_COUNT(val) bfin_write16(MDMA_D1_CURR_Y_COUNT, val)
777#define bfin_read_MDMA_D1_IRQ_STATUS() bfin_read16(MDMA_D1_IRQ_STATUS)
778#define bfin_write_MDMA_D1_IRQ_STATUS(val) bfin_write16(MDMA_D1_IRQ_STATUS, val)
779#define bfin_read_MDMA_D1_PERIPHERAL_MAP() bfin_read16(MDMA_D1_PERIPHERAL_MAP)
780#define bfin_write_MDMA_D1_PERIPHERAL_MAP(val) bfin_write16(MDMA_D1_PERIPHERAL_MAP, val)
781
782#define bfin_read_MDMA_S1_CONFIG() bfin_read16(MDMA_S1_CONFIG)
783#define bfin_write_MDMA_S1_CONFIG(val) bfin_write16(MDMA_S1_CONFIG, val)
784#define bfin_read_MDMA_S1_NEXT_DESC_PTR() bfin_read32(MDMA_S1_NEXT_DESC_PTR)
785#define bfin_write_MDMA_S1_NEXT_DESC_PTR(val) bfin_write32(MDMA_S1_NEXT_DESC_PTR, val)
786#define bfin_read_MDMA_S1_START_ADDR() bfin_read32(MDMA_S1_START_ADDR)
787#define bfin_write_MDMA_S1_START_ADDR(val) bfin_write32(MDMA_S1_START_ADDR, val)
788#define bfin_read_MDMA_S1_X_COUNT() bfin_read16(MDMA_S1_X_COUNT)
789#define bfin_write_MDMA_S1_X_COUNT(val) bfin_write16(MDMA_S1_X_COUNT, val)
790#define bfin_read_MDMA_S1_Y_COUNT() bfin_read16(MDMA_S1_Y_COUNT)
791#define bfin_write_MDMA_S1_Y_COUNT(val) bfin_write16(MDMA_S1_Y_COUNT, val)
792#define bfin_read_MDMA_S1_X_MODIFY() bfin_read16(MDMA_S1_X_MODIFY)
793#define bfin_write_MDMA_S1_X_MODIFY(val) bfin_write16(MDMA_S1_X_MODIFY, val)
794#define bfin_read_MDMA_S1_Y_MODIFY() bfin_read16(MDMA_S1_Y_MODIFY)
795#define bfin_write_MDMA_S1_Y_MODIFY(val) bfin_write16(MDMA_S1_Y_MODIFY, val)
796#define bfin_read_MDMA_S1_CURR_DESC_PTR() bfin_read32(MDMA_S1_CURR_DESC_PTR)
797#define bfin_write_MDMA_S1_CURR_DESC_PTR(val) bfin_write32(MDMA_S1_CURR_DESC_PTR, val)
798#define bfin_read_MDMA_S1_CURR_ADDR() bfin_read32(MDMA_S1_CURR_ADDR)
799#define bfin_write_MDMA_S1_CURR_ADDR(val) bfin_write32(MDMA_S1_CURR_ADDR, val)
800#define bfin_read_MDMA_S1_CURR_X_COUNT() bfin_read16(MDMA_S1_CURR_X_COUNT)
801#define bfin_write_MDMA_S1_CURR_X_COUNT(val) bfin_write16(MDMA_S1_CURR_X_COUNT, val)
802#define bfin_read_MDMA_S1_CURR_Y_COUNT() bfin_read16(MDMA_S1_CURR_Y_COUNT)
803#define bfin_write_MDMA_S1_CURR_Y_COUNT(val) bfin_write16(MDMA_S1_CURR_Y_COUNT, val)
804#define bfin_read_MDMA_S1_IRQ_STATUS() bfin_read16(MDMA_S1_IRQ_STATUS)
805#define bfin_write_MDMA_S1_IRQ_STATUS(val) bfin_write16(MDMA_S1_IRQ_STATUS, val)
806#define bfin_read_MDMA_S1_PERIPHERAL_MAP() bfin_read16(MDMA_S1_PERIPHERAL_MAP)
807#define bfin_write_MDMA_S1_PERIPHERAL_MAP(val) bfin_write16(MDMA_S1_PERIPHERAL_MAP, val)
808
809
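The MDMA destination/source pairs above are normally programmed together for memory-to-memory moves. A minimal sketch, not part of this header; the CONFIG bit names (DMAEN, WNR, WDSIZE_16, DMA_DONE) are assumed from the standard Blackfin DMA definitions:

/* Sketch: 16-bit memory-to-memory copy via MDMA stream 0.
 * DMAEN, WNR, WDSIZE_16 and DMA_DONE are assumed from the
 * Blackfin DMA definitions; error handling omitted. */
static void mdma0_memcpy16(void *dst, const void *src, u16 nwords)
{
	bfin_write_MDMA_S0_START_ADDR((unsigned long)src);
	bfin_write_MDMA_S0_X_COUNT(nwords);
	bfin_write_MDMA_S0_X_MODIFY(2);          /* step 2 bytes per element */
	bfin_write_MDMA_D0_START_ADDR((unsigned long)dst);
	bfin_write_MDMA_D0_X_COUNT(nwords);
	bfin_write_MDMA_D0_X_MODIFY(2);
	bfin_write_MDMA_S0_CONFIG(DMAEN | WDSIZE_16);
	bfin_write_MDMA_D0_CONFIG(DMAEN | WNR | WDSIZE_16); /* WNR: write to memory */
	while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE))
		continue;                        /* poll for completion */
	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE); /* W1C: acknowledge */
}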
810/* Parallel Peripheral Interface (0xFFC01000 - 0xFFC010FF) */
811#define bfin_read_PPI_CONTROL() bfin_read16(PPI_CONTROL)
812#define bfin_write_PPI_CONTROL(val) bfin_write16(PPI_CONTROL, val)
813#define bfin_read_PPI_STATUS() bfin_read16(PPI_STATUS)
814#define bfin_write_PPI_STATUS(val) bfin_write16(PPI_STATUS, val)
815#define bfin_clear_PPI_STATUS() bfin_write_PPI_STATUS(0xFFFF)
816#define bfin_read_PPI_DELAY() bfin_read16(PPI_DELAY)
817#define bfin_write_PPI_DELAY(val) bfin_write16(PPI_DELAY, val)
818#define bfin_read_PPI_COUNT() bfin_read16(PPI_COUNT)
819#define bfin_write_PPI_COUNT(val) bfin_write16(PPI_COUNT, val)
820#define bfin_read_PPI_FRAME() bfin_read16(PPI_FRAME)
821#define bfin_write_PPI_FRAME(val) bfin_write16(PPI_FRAME, val)
822
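bfin_clear_PPI_STATUS() above is a write-1-to-clear helper, so a typical start sequence drops stale status before enabling the port. A minimal sketch; PORT_EN is assumed from the PPI bit definitions, not from this file:

/* Sketch: enable the PPI after clearing latched error flags.
 * PORT_EN (bit 0 of PPI_CONTROL) is an assumption from the
 * defBF51x bit definitions. */
static void ppi_enable(u16 mode_bits)
{
	bfin_clear_PPI_STATUS();                     /* drop stale OVR/UNDR/FLD flags */
	bfin_write_PPI_CONTROL(mode_bits);           /* program the mode first ... */
	bfin_write_PPI_CONTROL(mode_bits | PORT_EN); /* ... then enable */
}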
823
824/* Two-Wire Interface (0xFFC01400 - 0xFFC014FF) */
825
826/* General Purpose I/O Port G (0xFFC01500 - 0xFFC015FF) */
827#define bfin_read_PORTGIO() bfin_read16(PORTGIO)
828#define bfin_write_PORTGIO(val) bfin_write16(PORTGIO, val)
829#define bfin_read_PORTGIO_CLEAR() bfin_read16(PORTGIO_CLEAR)
830#define bfin_write_PORTGIO_CLEAR(val) bfin_write16(PORTGIO_CLEAR, val)
831#define bfin_read_PORTGIO_SET() bfin_read16(PORTGIO_SET)
832#define bfin_write_PORTGIO_SET(val) bfin_write16(PORTGIO_SET, val)
833#define bfin_read_PORTGIO_TOGGLE() bfin_read16(PORTGIO_TOGGLE)
834#define bfin_write_PORTGIO_TOGGLE(val) bfin_write16(PORTGIO_TOGGLE, val)
835#define bfin_read_PORTGIO_MASKA() bfin_read16(PORTGIO_MASKA)
836#define bfin_write_PORTGIO_MASKA(val) bfin_write16(PORTGIO_MASKA, val)
837#define bfin_read_PORTGIO_MASKA_CLEAR() bfin_read16(PORTGIO_MASKA_CLEAR)
838#define bfin_write_PORTGIO_MASKA_CLEAR(val) bfin_write16(PORTGIO_MASKA_CLEAR, val)
839#define bfin_read_PORTGIO_MASKA_SET() bfin_read16(PORTGIO_MASKA_SET)
840#define bfin_write_PORTGIO_MASKA_SET(val) bfin_write16(PORTGIO_MASKA_SET, val)
841#define bfin_read_PORTGIO_MASKA_TOGGLE() bfin_read16(PORTGIO_MASKA_TOGGLE)
842#define bfin_write_PORTGIO_MASKA_TOGGLE(val) bfin_write16(PORTGIO_MASKA_TOGGLE, val)
843#define bfin_read_PORTGIO_MASKB() bfin_read16(PORTGIO_MASKB)
844#define bfin_write_PORTGIO_MASKB(val) bfin_write16(PORTGIO_MASKB, val)
845#define bfin_read_PORTGIO_MASKB_CLEAR() bfin_read16(PORTGIO_MASKB_CLEAR)
846#define bfin_write_PORTGIO_MASKB_CLEAR(val) bfin_write16(PORTGIO_MASKB_CLEAR, val)
847#define bfin_read_PORTGIO_MASKB_SET() bfin_read16(PORTGIO_MASKB_SET)
848#define bfin_write_PORTGIO_MASKB_SET(val) bfin_write16(PORTGIO_MASKB_SET, val)
849#define bfin_read_PORTGIO_MASKB_TOGGLE() bfin_read16(PORTGIO_MASKB_TOGGLE)
850#define bfin_write_PORTGIO_MASKB_TOGGLE(val) bfin_write16(PORTGIO_MASKB_TOGGLE, val)
851#define bfin_read_PORTGIO_DIR() bfin_read16(PORTGIO_DIR)
852#define bfin_write_PORTGIO_DIR(val) bfin_write16(PORTGIO_DIR, val)
853#define bfin_read_PORTGIO_POLAR() bfin_read16(PORTGIO_POLAR)
854#define bfin_write_PORTGIO_POLAR(val) bfin_write16(PORTGIO_POLAR, val)
855#define bfin_read_PORTGIO_EDGE() bfin_read16(PORTGIO_EDGE)
856#define bfin_write_PORTGIO_EDGE(val) bfin_write16(PORTGIO_EDGE, val)
857#define bfin_read_PORTGIO_BOTH() bfin_read16(PORTGIO_BOTH)
858#define bfin_write_PORTGIO_BOTH(val) bfin_write16(PORTGIO_BOTH, val)
859#define bfin_read_PORTGIO_INEN() bfin_read16(PORTGIO_INEN)
860#define bfin_write_PORTGIO_INEN(val) bfin_write16(PORTGIO_INEN, val)
861
862
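The SET/CLEAR/TOGGLE registers make single-pin updates atomic with respect to other writers, so read-modify-write of PORTGIO itself is rarely needed. A minimal sketch, pin choice illustrative:

/* Sketch: drive PG3 high as a GPIO output using the atomic
 * SET register defined above; only bit 3 is affected. */
static void pg3_output_high(void)
{
	bfin_write_PORTGIO_INEN(bfin_read_PORTGIO_INEN() & ~(1 << 3)); /* input buffer off */
	bfin_write_PORTGIO_DIR(bfin_read_PORTGIO_DIR() | (1 << 3));    /* output direction */
	bfin_write_PORTGIO_SET(1 << 3);
}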
863/* General Purpose I/O Port H (0xFFC01700 - 0xFFC017FF) */
864#define bfin_read_PORTHIO() bfin_read16(PORTHIO)
865#define bfin_write_PORTHIO(val) bfin_write16(PORTHIO, val)
866#define bfin_read_PORTHIO_CLEAR() bfin_read16(PORTHIO_CLEAR)
867#define bfin_write_PORTHIO_CLEAR(val) bfin_write16(PORTHIO_CLEAR, val)
868#define bfin_read_PORTHIO_SET() bfin_read16(PORTHIO_SET)
869#define bfin_write_PORTHIO_SET(val) bfin_write16(PORTHIO_SET, val)
870#define bfin_read_PORTHIO_TOGGLE() bfin_read16(PORTHIO_TOGGLE)
871#define bfin_write_PORTHIO_TOGGLE(val) bfin_write16(PORTHIO_TOGGLE, val)
872#define bfin_read_PORTHIO_MASKA() bfin_read16(PORTHIO_MASKA)
873#define bfin_write_PORTHIO_MASKA(val) bfin_write16(PORTHIO_MASKA, val)
874#define bfin_read_PORTHIO_MASKA_CLEAR() bfin_read16(PORTHIO_MASKA_CLEAR)
875#define bfin_write_PORTHIO_MASKA_CLEAR(val) bfin_write16(PORTHIO_MASKA_CLEAR, val)
876#define bfin_read_PORTHIO_MASKA_SET() bfin_read16(PORTHIO_MASKA_SET)
877#define bfin_write_PORTHIO_MASKA_SET(val) bfin_write16(PORTHIO_MASKA_SET, val)
878#define bfin_read_PORTHIO_MASKA_TOGGLE() bfin_read16(PORTHIO_MASKA_TOGGLE)
879#define bfin_write_PORTHIO_MASKA_TOGGLE(val) bfin_write16(PORTHIO_MASKA_TOGGLE, val)
880#define bfin_read_PORTHIO_MASKB() bfin_read16(PORTHIO_MASKB)
881#define bfin_write_PORTHIO_MASKB(val) bfin_write16(PORTHIO_MASKB, val)
882#define bfin_read_PORTHIO_MASKB_CLEAR() bfin_read16(PORTHIO_MASKB_CLEAR)
883#define bfin_write_PORTHIO_MASKB_CLEAR(val) bfin_write16(PORTHIO_MASKB_CLEAR, val)
884#define bfin_read_PORTHIO_MASKB_SET() bfin_read16(PORTHIO_MASKB_SET)
885#define bfin_write_PORTHIO_MASKB_SET(val) bfin_write16(PORTHIO_MASKB_SET, val)
886#define bfin_read_PORTHIO_MASKB_TOGGLE() bfin_read16(PORTHIO_MASKB_TOGGLE)
887#define bfin_write_PORTHIO_MASKB_TOGGLE(val) bfin_write16(PORTHIO_MASKB_TOGGLE, val)
888#define bfin_read_PORTHIO_DIR() bfin_read16(PORTHIO_DIR)
889#define bfin_write_PORTHIO_DIR(val) bfin_write16(PORTHIO_DIR, val)
890#define bfin_read_PORTHIO_POLAR() bfin_read16(PORTHIO_POLAR)
891#define bfin_write_PORTHIO_POLAR(val) bfin_write16(PORTHIO_POLAR, val)
892#define bfin_read_PORTHIO_EDGE() bfin_read16(PORTHIO_EDGE)
893#define bfin_write_PORTHIO_EDGE(val) bfin_write16(PORTHIO_EDGE, val)
894#define bfin_read_PORTHIO_BOTH() bfin_read16(PORTHIO_BOTH)
895#define bfin_write_PORTHIO_BOTH(val) bfin_write16(PORTHIO_BOTH, val)
896#define bfin_read_PORTHIO_INEN() bfin_read16(PORTHIO_INEN)
897#define bfin_write_PORTHIO_INEN(val) bfin_write16(PORTHIO_INEN, val)
898
899
900/* UART1 Controller (0xFFC02000 - 0xFFC020FF) */
901#define bfin_read_UART1_THR() bfin_read16(UART1_THR)
902#define bfin_write_UART1_THR(val) bfin_write16(UART1_THR, val)
903#define bfin_read_UART1_RBR() bfin_read16(UART1_RBR)
904#define bfin_write_UART1_RBR(val) bfin_write16(UART1_RBR, val)
905#define bfin_read_UART1_DLL() bfin_read16(UART1_DLL)
906#define bfin_write_UART1_DLL(val) bfin_write16(UART1_DLL, val)
907#define bfin_read_UART1_IER() bfin_read16(UART1_IER)
908#define bfin_write_UART1_IER(val) bfin_write16(UART1_IER, val)
909#define bfin_read_UART1_DLH() bfin_read16(UART1_DLH)
910#define bfin_write_UART1_DLH(val) bfin_write16(UART1_DLH, val)
911#define bfin_read_UART1_IIR() bfin_read16(UART1_IIR)
912#define bfin_write_UART1_IIR(val) bfin_write16(UART1_IIR, val)
913#define bfin_read_UART1_LCR() bfin_read16(UART1_LCR)
914#define bfin_write_UART1_LCR(val) bfin_write16(UART1_LCR, val)
915#define bfin_read_UART1_MCR() bfin_read16(UART1_MCR)
916#define bfin_write_UART1_MCR(val) bfin_write16(UART1_MCR, val)
917#define bfin_read_UART1_LSR() bfin_read16(UART1_LSR)
918#define bfin_write_UART1_LSR(val) bfin_write16(UART1_LSR, val)
919#define bfin_read_UART1_MSR() bfin_read16(UART1_MSR)
920#define bfin_write_UART1_MSR(val) bfin_write16(UART1_MSR, val)
921#define bfin_read_UART1_SCR() bfin_read16(UART1_SCR)
922#define bfin_write_UART1_SCR(val) bfin_write16(UART1_SCR, val)
923#define bfin_read_UART1_GCTL() bfin_read16(UART1_GCTL)
924#define bfin_write_UART1_GCTL(val) bfin_write16(UART1_GCTL, val)
925
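These accessors follow the 16550-style register layout, so polled output waits on the LSR. A minimal sketch; THRE (transmitter holding register empty, 0x20) is assumed from the UART bit definitions, not from this file:

/* Sketch: polled transmit on UART1.  THRE is an assumption
 * from the standard Blackfin UART bit definitions. */
static void uart1_putc(char c)
{
	while (!(bfin_read_UART1_LSR() & THRE))
		continue;             /* wait for the holding register */
	bfin_write_UART1_THR(c);
}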
926/* CAN register sets from cdefBF534.h are omitted (CAN is not present in the ADSP-BF51x processor) */
927
928/* Pin Control Registers (0xFFC03200 - 0xFFC032FF) */
929#define bfin_read_PORTF_FER() bfin_read16(PORTF_FER)
930#define bfin_write_PORTF_FER(val) bfin_write16(PORTF_FER, val)
931#define bfin_read_PORTG_FER() bfin_read16(PORTG_FER)
932#define bfin_write_PORTG_FER(val) bfin_write16(PORTG_FER, val)
933#define bfin_read_PORTH_FER() bfin_read16(PORTH_FER)
934#define bfin_write_PORTH_FER(val) bfin_write16(PORTH_FER, val)
935#define bfin_read_PORT_MUX() bfin_read16(PORT_MUX)
936#define bfin_write_PORT_MUX(val) bfin_write16(PORT_MUX, val)
937
938
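PORTx_FER selects between GPIO and peripheral function per pin: a set bit hands the pin to the peripheral, a clear bit keeps it as GPIO. A sketch of the usual read-modify-write, pin choice illustrative:

/* Sketch: hand pins PF0 and PF1 to their peripheral function
 * via the function-enable register defined above. */
static void pf01_peripheral_mode(void)
{
	bfin_write_PORTF_FER(bfin_read_PORTF_FER() | (1 << 0) | (1 << 1));
}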
939/* Handshake MDMA Registers (0xFFC03300 - 0xFFC033FF) */
940#define bfin_read_HMDMA0_CONTROL() bfin_read16(HMDMA0_CONTROL)
941#define bfin_write_HMDMA0_CONTROL(val) bfin_write16(HMDMA0_CONTROL, val)
942#define bfin_read_HMDMA0_ECINIT() bfin_read16(HMDMA0_ECINIT)
943#define bfin_write_HMDMA0_ECINIT(val) bfin_write16(HMDMA0_ECINIT, val)
944#define bfin_read_HMDMA0_BCINIT() bfin_read16(HMDMA0_BCINIT)
945#define bfin_write_HMDMA0_BCINIT(val) bfin_write16(HMDMA0_BCINIT, val)
946#define bfin_read_HMDMA0_ECURGENT() bfin_read16(HMDMA0_ECURGENT)
947#define bfin_write_HMDMA0_ECURGENT(val) bfin_write16(HMDMA0_ECURGENT, val)
948#define bfin_read_HMDMA0_ECOVERFLOW() bfin_read16(HMDMA0_ECOVERFLOW)
949#define bfin_write_HMDMA0_ECOVERFLOW(val) bfin_write16(HMDMA0_ECOVERFLOW, val)
950#define bfin_read_HMDMA0_ECOUNT() bfin_read16(HMDMA0_ECOUNT)
951#define bfin_write_HMDMA0_ECOUNT(val) bfin_write16(HMDMA0_ECOUNT, val)
952#define bfin_read_HMDMA0_BCOUNT() bfin_read16(HMDMA0_BCOUNT)
953#define bfin_write_HMDMA0_BCOUNT(val) bfin_write16(HMDMA0_BCOUNT, val)
954
955#define bfin_read_HMDMA1_CONTROL() bfin_read16(HMDMA1_CONTROL)
956#define bfin_write_HMDMA1_CONTROL(val) bfin_write16(HMDMA1_CONTROL, val)
957#define bfin_read_HMDMA1_ECINIT() bfin_read16(HMDMA1_ECINIT)
958#define bfin_write_HMDMA1_ECINIT(val) bfin_write16(HMDMA1_ECINIT, val)
959#define bfin_read_HMDMA1_BCINIT() bfin_read16(HMDMA1_BCINIT)
960#define bfin_write_HMDMA1_BCINIT(val) bfin_write16(HMDMA1_BCINIT, val)
961#define bfin_read_HMDMA1_ECURGENT() bfin_read16(HMDMA1_ECURGENT)
962#define bfin_write_HMDMA1_ECURGENT(val) bfin_write16(HMDMA1_ECURGENT, val)
963#define bfin_read_HMDMA1_ECOVERFLOW() bfin_read16(HMDMA1_ECOVERFLOW)
964#define bfin_write_HMDMA1_ECOVERFLOW(val) bfin_write16(HMDMA1_ECOVERFLOW, val)
965#define bfin_read_HMDMA1_ECOUNT() bfin_read16(HMDMA1_ECOUNT)
966#define bfin_write_HMDMA1_ECOUNT(val) bfin_write16(HMDMA1_ECOUNT, val)
967#define bfin_read_HMDMA1_BCOUNT() bfin_read16(HMDMA1_BCOUNT)
968#define bfin_write_HMDMA1_BCOUNT(val) bfin_write16(HMDMA1_BCOUNT, val)
969
970/* ==== end from cdefBF534.h ==== */
971
972/* GPIO PIN mux (0xFFC03210 - 0xFFC03288) */
973
974#define bfin_read_PORTF_MUX() bfin_read16(PORTF_MUX)
975#define bfin_write_PORTF_MUX(val) bfin_write16(PORTF_MUX, val)
976#define bfin_read_PORTG_MUX() bfin_read16(PORTG_MUX)
977#define bfin_write_PORTG_MUX(val) bfin_write16(PORTG_MUX, val)
978#define bfin_read_PORTH_MUX() bfin_read16(PORTH_MUX)
979#define bfin_write_PORTH_MUX(val) bfin_write16(PORTH_MUX, val)
980
981#define bfin_read_PORTF_DRIVE() bfin_read16(PORTF_DRIVE)
982#define bfin_write_PORTF_DRIVE(val) bfin_write16(PORTF_DRIVE, val)
983#define bfin_read_PORTG_DRIVE() bfin_read16(PORTG_DRIVE)
984#define bfin_write_PORTG_DRIVE(val) bfin_write16(PORTG_DRIVE, val)
985#define bfin_read_PORTH_DRIVE() bfin_read16(PORTH_DRIVE)
986#define bfin_write_PORTH_DRIVE(val) bfin_write16(PORTH_DRIVE, val)
987#define bfin_read_PORTF_SLEW() bfin_read16(PORTF_SLEW)
988#define bfin_write_PORTF_SLEW(val) bfin_write16(PORTF_SLEW, val)
989#define bfin_read_PORTG_SLEW() bfin_read16(PORTG_SLEW)
990#define bfin_write_PORTG_SLEW(val) bfin_write16(PORTG_SLEW, val)
991#define bfin_read_PORTH_SLEW() bfin_read16(PORTH_SLEW)
992#define bfin_write_PORTH_SLEW(val) bfin_write16(PORTH_SLEW, val)
993#define bfin_read_PORTF_HYSTERISIS() bfin_read16(PORTF_HYSTERISIS)
994#define bfin_write_PORTF_HYSTERISIS(val) bfin_write16(PORTF_HYSTERISIS, val)
995#define bfin_read_PORTG_HYSTERISIS() bfin_read16(PORTG_HYSTERISIS)
996#define bfin_write_PORTG_HYSTERISIS(val) bfin_write16(PORTG_HYSTERISIS, val)
997#define bfin_read_PORTH_HYSTERISIS() bfin_read16(PORTH_HYSTERISIS)
998#define bfin_write_PORTH_HYSTERISIS(val) bfin_write16(PORTH_HYSTERISIS, val)
999#define bfin_read_MISCPORT_DRIVE() bfin_read16(MISCPORT_DRIVE)
1000#define bfin_write_MISCPORT_DRIVE(val) bfin_write16(MISCPORT_DRIVE, val)
1001#define bfin_read_MISCPORT_SLEW() bfin_read16(MISCPORT_SLEW)
1002#define bfin_write_MISCPORT_SLEW(val) bfin_write16(MISCPORT_SLEW, val)
1003#define bfin_read_MISCPORT_HYSTERISIS() bfin_read16(MISCPORT_HYSTERISIS)
1004#define bfin_write_MISCPORT_HYSTERISIS(val) bfin_write16(MISCPORT_HYSTERISIS, val)
1005
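The per-port MUX registers added on BF51x carry small per-pin-group fields that pick among competing peripherals. A hedged sketch of the read-modify-write pattern; the field position is illustrative, not taken from this file:

/* Sketch: select alternate function 1 in a hypothetical 2-bit
 * mux field at bits [3:2] of PORTF_MUX.  The field layout here
 * is illustrative only -- consult the HRM for real assignments. */
static void portf_mux_sel(void)
{
	u16 mux = bfin_read_PORTF_MUX();

	mux = (mux & ~(0x3 << 2)) | (0x1 << 2);
	bfin_write_PORTF_MUX(mux);
}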
1006/* HOST Port Registers */
1007
1008#define bfin_read_HOST_CONTROL() bfin_read16(HOST_CONTROL)
1009#define bfin_write_HOST_CONTROL(val) bfin_write16(HOST_CONTROL, val)
1010#define bfin_read_HOST_STATUS() bfin_read16(HOST_STATUS)
1011#define bfin_write_HOST_STATUS(val) bfin_write16(HOST_STATUS, val)
1012#define bfin_read_HOST_TIMEOUT() bfin_read16(HOST_TIMEOUT)
1013#define bfin_write_HOST_TIMEOUT(val) bfin_write16(HOST_TIMEOUT, val)
1014
1015/* Counter Registers */
1016
1017#define bfin_read_CNT_CONFIG() bfin_read16(CNT_CONFIG)
1018#define bfin_write_CNT_CONFIG(val) bfin_write16(CNT_CONFIG, val)
1019#define bfin_read_CNT_IMASK() bfin_read16(CNT_IMASK)
1020#define bfin_write_CNT_IMASK(val) bfin_write16(CNT_IMASK, val)
1021#define bfin_read_CNT_STATUS() bfin_read16(CNT_STATUS)
1022#define bfin_write_CNT_STATUS(val) bfin_write16(CNT_STATUS, val)
1023#define bfin_read_CNT_COMMAND() bfin_read16(CNT_COMMAND)
1024#define bfin_write_CNT_COMMAND(val) bfin_write16(CNT_COMMAND, val)
1025#define bfin_read_CNT_DEBOUNCE() bfin_read16(CNT_DEBOUNCE)
1026#define bfin_write_CNT_DEBOUNCE(val) bfin_write16(CNT_DEBOUNCE, val)
1027#define bfin_read_CNT_COUNTER() bfin_read32(CNT_COUNTER)
1028#define bfin_write_CNT_COUNTER(val) bfin_write32(CNT_COUNTER, val)
1029#define bfin_read_CNT_MAX() bfin_read32(CNT_MAX)
1030#define bfin_write_CNT_MAX(val) bfin_write32(CNT_MAX, val)
1031#define bfin_read_CNT_MIN() bfin_read32(CNT_MIN)
1032#define bfin_write_CNT_MIN(val) bfin_write32(CNT_MIN, val)
1033
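CNT_COUNTER, CNT_MAX and CNT_MIN are among the few 32-bit registers in this block, hence bfin_read32/bfin_write32 while the control registers stay 16-bit. A minimal sketch of sampling the rotary counter:

/* Sketch: sample the 32-bit rotary counter.  The hardware
 * counts signed, so cast the raw value before taking deltas. */
static int cnt_position(void)
{
	return (int)bfin_read_CNT_COUNTER();
}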
1034/* Security Registers */
1035
1036#define bfin_read_SECURE_SYSSWT() bfin_read32(SECURE_SYSSWT)
1037#define bfin_write_SECURE_SYSSWT(val) bfin_write32(SECURE_SYSSWT, val)
1038#define bfin_read_SECURE_CONTROL() bfin_read16(SECURE_CONTROL)
1039#define bfin_write_SECURE_CONTROL(val) bfin_write16(SECURE_CONTROL, val)
1040#define bfin_read_SECURE_STATUS() bfin_read16(SECURE_STATUS)
1041#define bfin_write_SECURE_STATUS(val) bfin_write16(SECURE_STATUS, val)
1042
1043#endif /* _CDEF_BF512_H */
diff --git a/arch/blackfin/mach-bf518/include/mach/cdefBF514.h b/arch/blackfin/mach-bf518/include/mach/cdefBF514.h
index 108fa4bde277..dc988668203e 100644
--- a/arch/blackfin/mach-bf518/include/mach/cdefBF514.h
+++ b/arch/blackfin/mach-bf518/include/mach/cdefBF514.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008-2009 Analog Devices Inc.
+ * Copyright 2008-2010 Analog Devices Inc.
  *
  * Licensed under the ADI BSD license or the GPL-2 (or later)
  */
@@ -7,9 +7,6 @@
 #ifndef _CDEF_BF514_H
 #define _CDEF_BF514_H
 
-/* include all Core registers and bit definitions */
-#include "defBF514.h"
-
 /* BF514 is BF512 + RSI */
 #include "cdefBF512.h"
 
diff --git a/arch/blackfin/mach-bf518/include/mach/cdefBF516.h b/arch/blackfin/mach-bf518/include/mach/cdefBF516.h
index 2751592ef1c1..142e45cbc253 100644
--- a/arch/blackfin/mach-bf518/include/mach/cdefBF516.h
+++ b/arch/blackfin/mach-bf518/include/mach/cdefBF516.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008-2009 Analog Devices Inc.
+ * Copyright 2008-2010 Analog Devices Inc.
  *
  * Licensed under the ADI BSD license or the GPL-2 (or later)
  */
@@ -7,9 +7,6 @@
 #ifndef _CDEF_BF516_H
 #define _CDEF_BF516_H
 
-/* include all Core registers and bit definitions */
-#include "defBF516.h"
-
 /* BF516 is BF514 + EMAC */
 #include "cdefBF514.h"
 
diff --git a/arch/blackfin/mach-bf518/include/mach/cdefBF518.h b/arch/blackfin/mach-bf518/include/mach/cdefBF518.h
index 7fb7f0eab990..e638197bf8b1 100644
--- a/arch/blackfin/mach-bf518/include/mach/cdefBF518.h
+++ b/arch/blackfin/mach-bf518/include/mach/cdefBF518.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008-2009 Analog Devices Inc.
+ * Copyright 2008-2010 Analog Devices Inc.
  *
  * Licensed under the ADI BSD license or the GPL-2 (or later)
  */
@@ -7,9 +7,6 @@
 #ifndef _CDEF_BF518_H
 #define _CDEF_BF518_H
 
-/* include all Core registers and bit definitions */
-#include "defBF518.h"
-
 /* BF518 is BF516 + IEEE-1588 */
 #include "cdefBF516.h"
 
diff --git a/arch/blackfin/mach-bf518/include/mach/cdefBF51x_base.h b/arch/blackfin/mach-bf518/include/mach/cdefBF51x_base.h
deleted file mode 100644
index e16969f24ffd..000000000000
--- a/arch/blackfin/mach-bf518/include/mach/cdefBF51x_base.h
+++ /dev/null
@@ -1,1061 +0,0 @@
1/*
2 * Copyright 2008 Analog Devices Inc.
3 *
4 * Licensed under the GPL-2 or later
5 */
6
7#ifndef _CDEF_BF52X_H
8#define _CDEF_BF52X_H
9
10#include <asm/blackfin.h>
11
12#include "defBF51x_base.h"
13
14/* Include core specific register pointer definitions */
15#include <asm/cdef_LPBlackfin.h>
16
17/* ==== begin from cdefBF534.h ==== */
18
19/* Clock and System Control (0xFFC00000 - 0xFFC000FF) */
20#define bfin_read_PLL_CTL() bfin_read16(PLL_CTL)
21#define bfin_read_PLL_DIV() bfin_read16(PLL_DIV)
22#define bfin_write_PLL_DIV(val) bfin_write16(PLL_DIV, val)
23#define bfin_read_VR_CTL() bfin_read16(VR_CTL)
24#define bfin_read_PLL_STAT() bfin_read16(PLL_STAT)
25#define bfin_write_PLL_STAT(val) bfin_write16(PLL_STAT, val)
26#define bfin_read_PLL_LOCKCNT() bfin_read16(PLL_LOCKCNT)
27#define bfin_write_PLL_LOCKCNT(val) bfin_write16(PLL_LOCKCNT, val)
28#define bfin_read_CHIPID() bfin_read32(CHIPID)
29#define bfin_write_CHIPID(val) bfin_write32(CHIPID, val)
30
31
32/* System Interrupt Controller (0xFFC00100 - 0xFFC001FF) */
33#define bfin_read_SWRST() bfin_read16(SWRST)
34#define bfin_write_SWRST(val) bfin_write16(SWRST, val)
35#define bfin_read_SYSCR() bfin_read16(SYSCR)
36#define bfin_write_SYSCR(val) bfin_write16(SYSCR, val)
37
38#define bfin_read_SIC_RVECT() bfin_read32(SIC_RVECT)
39#define bfin_write_SIC_RVECT(val) bfin_write32(SIC_RVECT, val)
40#define bfin_read_SIC_IMASK0() bfin_read32(SIC_IMASK0)
41#define bfin_write_SIC_IMASK0(val) bfin_write32(SIC_IMASK0, val)
42#define bfin_read_SIC_IMASK(x) bfin_read32(SIC_IMASK0 + (x << 6))
43#define bfin_write_SIC_IMASK(x, val) bfin_write32((SIC_IMASK0 + (x << 6)), val)
44
45#define bfin_read_SIC_IAR0() bfin_read32(SIC_IAR0)
46#define bfin_write_SIC_IAR0(val) bfin_write32(SIC_IAR0, val)
47#define bfin_read_SIC_IAR1() bfin_read32(SIC_IAR1)
48#define bfin_write_SIC_IAR1(val) bfin_write32(SIC_IAR1, val)
49#define bfin_read_SIC_IAR2() bfin_read32(SIC_IAR2)
50#define bfin_write_SIC_IAR2(val) bfin_write32(SIC_IAR2, val)
51#define bfin_read_SIC_IAR3() bfin_read32(SIC_IAR3)
52#define bfin_write_SIC_IAR3(val) bfin_write32(SIC_IAR3, val)
53
54#define bfin_read_SIC_ISR0() bfin_read32(SIC_ISR0)
55#define bfin_write_SIC_ISR0(val) bfin_write32(SIC_ISR0, val)
56#define bfin_read_SIC_ISR(x) bfin_read32(SIC_ISR0 + (x << 6))
57#define bfin_write_SIC_ISR(x, val) bfin_write32((SIC_ISR0 + (x << 6)), val)
58
59#define bfin_read_SIC_IWR0() bfin_read32(SIC_IWR0)
60#define bfin_write_SIC_IWR0(val) bfin_write32(SIC_IWR0, val)
61#define bfin_read_SIC_IWR(x) bfin_read32(SIC_IWR0 + (x << 6))
62#define bfin_write_SIC_IWR(x, val) bfin_write32((SIC_IWR0 + (x << 6)), val)
63
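The banked forms encode the bank in the address: bank x sits 0x40 bytes (x << 6) above bank 0, which is why SIC_IMASK(x), SIC_ISR(x) and SIC_IWR(x) all shift by 6. A sketch of unmasking one system interrupt; bank and bit values are illustrative:

/* Sketch: unmask system interrupt 'bit' in IMASK bank 'bank'.
 * A set IMASK bit enables the source; the banked macro above
 * resolves to SIC_IMASK0 + (bank << 6). */
static void sic_unmask(int bank, int bit)
{
	bfin_write_SIC_IMASK(bank, bfin_read_SIC_IMASK(bank) | (1 << bit));
}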
64/* SIC Additions to ADSP-BF51x (0xFFC0014C - 0xFFC00162) */
65
66#define bfin_read_SIC_IMASK1() bfin_read32(SIC_IMASK1)
67#define bfin_write_SIC_IMASK1(val) bfin_write32(SIC_IMASK1, val)
68#define bfin_read_SIC_IAR4() bfin_read32(SIC_IAR4)
69#define bfin_write_SIC_IAR4(val) bfin_write32(SIC_IAR4, val)
70#define bfin_read_SIC_IAR5() bfin_read32(SIC_IAR5)
71#define bfin_write_SIC_IAR5(val) bfin_write32(SIC_IAR5, val)
72#define bfin_read_SIC_IAR6() bfin_read32(SIC_IAR6)
73#define bfin_write_SIC_IAR6(val) bfin_write32(SIC_IAR6, val)
74#define bfin_read_SIC_IAR7() bfin_read32(SIC_IAR7)
75#define bfin_write_SIC_IAR7(val) bfin_write32(SIC_IAR7, val)
76#define bfin_read_SIC_ISR1() bfin_read32(SIC_ISR1)
77#define bfin_write_SIC_ISR1(val) bfin_write32(SIC_ISR1, val)
78#define bfin_read_SIC_IWR1() bfin_read32(SIC_IWR1)
79#define bfin_write_SIC_IWR1(val) bfin_write32(SIC_IWR1, val)
80
81/* Watchdog Timer (0xFFC00200 - 0xFFC002FF) */
82#define bfin_read_WDOG_CTL() bfin_read16(WDOG_CTL)
83#define bfin_write_WDOG_CTL(val) bfin_write16(WDOG_CTL, val)
84#define bfin_read_WDOG_CNT() bfin_read32(WDOG_CNT)
85#define bfin_write_WDOG_CNT(val) bfin_write32(WDOG_CNT, val)
86#define bfin_read_WDOG_STAT() bfin_read32(WDOG_STAT)
87#define bfin_write_WDOG_STAT(val) bfin_write32(WDOG_STAT, val)
88
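Writing WDOG_STAT does not store the value written; it reloads the down-counter from WDOG_CNT, which is how the watchdog is serviced. A minimal sketch:

/* Sketch: service (ping) the watchdog.  Any write to WDOG_STAT
 * reloads the counter from WDOG_CNT; the value is ignored. */
static void wdog_ping(void)
{
	bfin_write_WDOG_STAT(0);
}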
89
90/* Real Time Clock (0xFFC00300 - 0xFFC003FF) */
91#define bfin_read_RTC_STAT() bfin_read32(RTC_STAT)
92#define bfin_write_RTC_STAT(val) bfin_write32(RTC_STAT, val)
93#define bfin_read_RTC_ICTL() bfin_read16(RTC_ICTL)
94#define bfin_write_RTC_ICTL(val) bfin_write16(RTC_ICTL, val)
95#define bfin_read_RTC_ISTAT() bfin_read16(RTC_ISTAT)
96#define bfin_write_RTC_ISTAT(val) bfin_write16(RTC_ISTAT, val)
97#define bfin_read_RTC_SWCNT() bfin_read16(RTC_SWCNT)
98#define bfin_write_RTC_SWCNT(val) bfin_write16(RTC_SWCNT, val)
99#define bfin_read_RTC_ALARM() bfin_read32(RTC_ALARM)
100#define bfin_write_RTC_ALARM(val) bfin_write32(RTC_ALARM, val)
101#define bfin_read_RTC_FAST() bfin_read16(RTC_FAST)
102#define bfin_write_RTC_FAST(val) bfin_write16(RTC_FAST, val)
103#define bfin_read_RTC_PREN() bfin_read16(RTC_PREN)
104#define bfin_write_RTC_PREN(val) bfin_write16(RTC_PREN, val)
105
106
107/* UART0 Controller (0xFFC00400 - 0xFFC004FF) */
108#define bfin_read_UART0_THR() bfin_read16(UART0_THR)
109#define bfin_write_UART0_THR(val) bfin_write16(UART0_THR, val)
110#define bfin_read_UART0_RBR() bfin_read16(UART0_RBR)
111#define bfin_write_UART0_RBR(val) bfin_write16(UART0_RBR, val)
112#define bfin_read_UART0_DLL() bfin_read16(UART0_DLL)
113#define bfin_write_UART0_DLL(val) bfin_write16(UART0_DLL, val)
114#define bfin_read_UART0_IER() bfin_read16(UART0_IER)
115#define bfin_write_UART0_IER(val) bfin_write16(UART0_IER, val)
116#define bfin_read_UART0_DLH() bfin_read16(UART0_DLH)
117#define bfin_write_UART0_DLH(val) bfin_write16(UART0_DLH, val)
118#define bfin_read_UART0_IIR() bfin_read16(UART0_IIR)
119#define bfin_write_UART0_IIR(val) bfin_write16(UART0_IIR, val)
120#define bfin_read_UART0_LCR() bfin_read16(UART0_LCR)
121#define bfin_write_UART0_LCR(val) bfin_write16(UART0_LCR, val)
122#define bfin_read_UART0_MCR() bfin_read16(UART0_MCR)
123#define bfin_write_UART0_MCR(val) bfin_write16(UART0_MCR, val)
124#define bfin_read_UART0_LSR() bfin_read16(UART0_LSR)
125#define bfin_write_UART0_LSR(val) bfin_write16(UART0_LSR, val)
126#define bfin_read_UART0_MSR() bfin_read16(UART0_MSR)
127#define bfin_write_UART0_MSR(val) bfin_write16(UART0_MSR, val)
128#define bfin_read_UART0_SCR() bfin_read16(UART0_SCR)
129#define bfin_write_UART0_SCR(val) bfin_write16(UART0_SCR, val)
130#define bfin_read_UART0_GCTL() bfin_read16(UART0_GCTL)
131#define bfin_write_UART0_GCTL(val) bfin_write16(UART0_GCTL, val)
132
133
134/* TIMER0-7 Registers (0xFFC00600 - 0xFFC006FF) */
135#define bfin_read_TIMER0_CONFIG() bfin_read16(TIMER0_CONFIG)
136#define bfin_write_TIMER0_CONFIG(val) bfin_write16(TIMER0_CONFIG, val)
137#define bfin_read_TIMER0_COUNTER() bfin_read32(TIMER0_COUNTER)
138#define bfin_write_TIMER0_COUNTER(val) bfin_write32(TIMER0_COUNTER, val)
139#define bfin_read_TIMER0_PERIOD() bfin_read32(TIMER0_PERIOD)
140#define bfin_write_TIMER0_PERIOD(val) bfin_write32(TIMER0_PERIOD, val)
141#define bfin_read_TIMER0_WIDTH() bfin_read32(TIMER0_WIDTH)
142#define bfin_write_TIMER0_WIDTH(val) bfin_write32(TIMER0_WIDTH, val)
143
144#define bfin_read_TIMER1_CONFIG() bfin_read16(TIMER1_CONFIG)
145#define bfin_write_TIMER1_CONFIG(val) bfin_write16(TIMER1_CONFIG, val)
146#define bfin_read_TIMER1_COUNTER() bfin_read32(TIMER1_COUNTER)
147#define bfin_write_TIMER1_COUNTER(val) bfin_write32(TIMER1_COUNTER, val)
148#define bfin_read_TIMER1_PERIOD() bfin_read32(TIMER1_PERIOD)
149#define bfin_write_TIMER1_PERIOD(val) bfin_write32(TIMER1_PERIOD, val)
150#define bfin_read_TIMER1_WIDTH() bfin_read32(TIMER1_WIDTH)
151#define bfin_write_TIMER1_WIDTH(val) bfin_write32(TIMER1_WIDTH, val)
152
153#define bfin_read_TIMER2_CONFIG() bfin_read16(TIMER2_CONFIG)
154#define bfin_write_TIMER2_CONFIG(val) bfin_write16(TIMER2_CONFIG, val)
155#define bfin_read_TIMER2_COUNTER() bfin_read32(TIMER2_COUNTER)
156#define bfin_write_TIMER2_COUNTER(val) bfin_write32(TIMER2_COUNTER, val)
157#define bfin_read_TIMER2_PERIOD() bfin_read32(TIMER2_PERIOD)
158#define bfin_write_TIMER2_PERIOD(val) bfin_write32(TIMER2_PERIOD, val)
159#define bfin_read_TIMER2_WIDTH() bfin_read32(TIMER2_WIDTH)
160#define bfin_write_TIMER2_WIDTH(val) bfin_write32(TIMER2_WIDTH, val)
161
162#define bfin_read_TIMER3_CONFIG() bfin_read16(TIMER3_CONFIG)
163#define bfin_write_TIMER3_CONFIG(val) bfin_write16(TIMER3_CONFIG, val)
164#define bfin_read_TIMER3_COUNTER() bfin_read32(TIMER3_COUNTER)
165#define bfin_write_TIMER3_COUNTER(val) bfin_write32(TIMER3_COUNTER, val)
166#define bfin_read_TIMER3_PERIOD() bfin_read32(TIMER3_PERIOD)
167#define bfin_write_TIMER3_PERIOD(val) bfin_write32(TIMER3_PERIOD, val)
168#define bfin_read_TIMER3_WIDTH() bfin_read32(TIMER3_WIDTH)
169#define bfin_write_TIMER3_WIDTH(val) bfin_write32(TIMER3_WIDTH, val)
170
171#define bfin_read_TIMER4_CONFIG() bfin_read16(TIMER4_CONFIG)
172#define bfin_write_TIMER4_CONFIG(val) bfin_write16(TIMER4_CONFIG, val)
173#define bfin_read_TIMER4_COUNTER() bfin_read32(TIMER4_COUNTER)
174#define bfin_write_TIMER4_COUNTER(val) bfin_write32(TIMER4_COUNTER, val)
175#define bfin_read_TIMER4_PERIOD() bfin_read32(TIMER4_PERIOD)
176#define bfin_write_TIMER4_PERIOD(val) bfin_write32(TIMER4_PERIOD, val)
177#define bfin_read_TIMER4_WIDTH() bfin_read32(TIMER4_WIDTH)
178#define bfin_write_TIMER4_WIDTH(val) bfin_write32(TIMER4_WIDTH, val)
179
180#define bfin_read_TIMER5_CONFIG() bfin_read16(TIMER5_CONFIG)
181#define bfin_write_TIMER5_CONFIG(val) bfin_write16(TIMER5_CONFIG, val)
182#define bfin_read_TIMER5_COUNTER() bfin_read32(TIMER5_COUNTER)
183#define bfin_write_TIMER5_COUNTER(val) bfin_write32(TIMER5_COUNTER, val)
184#define bfin_read_TIMER5_PERIOD() bfin_read32(TIMER5_PERIOD)
185#define bfin_write_TIMER5_PERIOD(val) bfin_write32(TIMER5_PERIOD, val)
186#define bfin_read_TIMER5_WIDTH() bfin_read32(TIMER5_WIDTH)
187#define bfin_write_TIMER5_WIDTH(val) bfin_write32(TIMER5_WIDTH, val)
188
189#define bfin_read_TIMER6_CONFIG() bfin_read16(TIMER6_CONFIG)
190#define bfin_write_TIMER6_CONFIG(val) bfin_write16(TIMER6_CONFIG, val)
191#define bfin_read_TIMER6_COUNTER() bfin_read32(TIMER6_COUNTER)
192#define bfin_write_TIMER6_COUNTER(val) bfin_write32(TIMER6_COUNTER, val)
193#define bfin_read_TIMER6_PERIOD() bfin_read32(TIMER6_PERIOD)
194#define bfin_write_TIMER6_PERIOD(val) bfin_write32(TIMER6_PERIOD, val)
195#define bfin_read_TIMER6_WIDTH() bfin_read32(TIMER6_WIDTH)
196#define bfin_write_TIMER6_WIDTH(val) bfin_write32(TIMER6_WIDTH, val)
197
198#define bfin_read_TIMER7_CONFIG() bfin_read16(TIMER7_CONFIG)
199#define bfin_write_TIMER7_CONFIG(val) bfin_write16(TIMER7_CONFIG, val)
200#define bfin_read_TIMER7_COUNTER() bfin_read32(TIMER7_COUNTER)
201#define bfin_write_TIMER7_COUNTER(val) bfin_write32(TIMER7_COUNTER, val)
202#define bfin_read_TIMER7_PERIOD() bfin_read32(TIMER7_PERIOD)
203#define bfin_write_TIMER7_PERIOD(val) bfin_write32(TIMER7_PERIOD, val)
204#define bfin_read_TIMER7_WIDTH() bfin_read32(TIMER7_WIDTH)
205#define bfin_write_TIMER7_WIDTH(val) bfin_write32(TIMER7_WIDTH, val)
206
207#define bfin_read_TIMER_ENABLE() bfin_read16(TIMER_ENABLE)
208#define bfin_write_TIMER_ENABLE(val) bfin_write16(TIMER_ENABLE, val)
209#define bfin_read_TIMER_DISABLE() bfin_read16(TIMER_DISABLE)
210#define bfin_write_TIMER_DISABLE(val) bfin_write16(TIMER_DISABLE, val)
211#define bfin_read_TIMER_STATUS() bfin_read32(TIMER_STATUS)
212#define bfin_write_TIMER_STATUS(val) bfin_write32(TIMER_STATUS, val)
213
214
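Each timer is programmed through its own CONFIG/PERIOD/WIDTH registers but only starts once its bit in the shared TIMER_ENABLE register is set. A sketch of a PWM setup; PWM_OUT, PERIOD_CNT and TIMEN0 are assumed from the Blackfin timer bit definitions:

/* Sketch: 50% duty PWM on TIMER0.  PWM_OUT, PERIOD_CNT and
 * TIMEN0 are assumptions from the Blackfin timer definitions;
 * TIMER_ENABLE is write-1-to-set, so only TIMER0 starts. */
static void timer0_pwm(u32 period)
{
	bfin_write_TIMER0_CONFIG(PWM_OUT | PERIOD_CNT);
	bfin_write_TIMER0_PERIOD(period);
	bfin_write_TIMER0_WIDTH(period / 2);  /* 50% duty cycle */
	bfin_write_TIMER_ENABLE(TIMEN0);
}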
215/* General Purpose I/O Port F (0xFFC00700 - 0xFFC007FF) */
216#define bfin_read_PORTFIO() bfin_read16(PORTFIO)
217#define bfin_write_PORTFIO(val) bfin_write16(PORTFIO, val)
218#define bfin_read_PORTFIO_CLEAR() bfin_read16(PORTFIO_CLEAR)
219#define bfin_write_PORTFIO_CLEAR(val) bfin_write16(PORTFIO_CLEAR, val)
220#define bfin_read_PORTFIO_SET() bfin_read16(PORTFIO_SET)
221#define bfin_write_PORTFIO_SET(val) bfin_write16(PORTFIO_SET, val)
222#define bfin_read_PORTFIO_TOGGLE() bfin_read16(PORTFIO_TOGGLE)
223#define bfin_write_PORTFIO_TOGGLE(val) bfin_write16(PORTFIO_TOGGLE, val)
224#define bfin_read_PORTFIO_MASKA() bfin_read16(PORTFIO_MASKA)
225#define bfin_write_PORTFIO_MASKA(val) bfin_write16(PORTFIO_MASKA, val)
226#define bfin_read_PORTFIO_MASKA_CLEAR() bfin_read16(PORTFIO_MASKA_CLEAR)
227#define bfin_write_PORTFIO_MASKA_CLEAR(val) bfin_write16(PORTFIO_MASKA_CLEAR, val)
228#define bfin_read_PORTFIO_MASKA_SET() bfin_read16(PORTFIO_MASKA_SET)
229#define bfin_write_PORTFIO_MASKA_SET(val) bfin_write16(PORTFIO_MASKA_SET, val)
230#define bfin_read_PORTFIO_MASKA_TOGGLE() bfin_read16(PORTFIO_MASKA_TOGGLE)
231#define bfin_write_PORTFIO_MASKA_TOGGLE(val) bfin_write16(PORTFIO_MASKA_TOGGLE, val)
232#define bfin_read_PORTFIO_MASKB() bfin_read16(PORTFIO_MASKB)
233#define bfin_write_PORTFIO_MASKB(val) bfin_write16(PORTFIO_MASKB, val)
234#define bfin_read_PORTFIO_MASKB_CLEAR() bfin_read16(PORTFIO_MASKB_CLEAR)
235#define bfin_write_PORTFIO_MASKB_CLEAR(val) bfin_write16(PORTFIO_MASKB_CLEAR, val)
236#define bfin_read_PORTFIO_MASKB_SET() bfin_read16(PORTFIO_MASKB_SET)
237#define bfin_write_PORTFIO_MASKB_SET(val) bfin_write16(PORTFIO_MASKB_SET, val)
238#define bfin_read_PORTFIO_MASKB_TOGGLE() bfin_read16(PORTFIO_MASKB_TOGGLE)
239#define bfin_write_PORTFIO_MASKB_TOGGLE(val) bfin_write16(PORTFIO_MASKB_TOGGLE, val)
240#define bfin_read_PORTFIO_DIR() bfin_read16(PORTFIO_DIR)
241#define bfin_write_PORTFIO_DIR(val) bfin_write16(PORTFIO_DIR, val)
242#define bfin_read_PORTFIO_POLAR() bfin_read16(PORTFIO_POLAR)
243#define bfin_write_PORTFIO_POLAR(val) bfin_write16(PORTFIO_POLAR, val)
244#define bfin_read_PORTFIO_EDGE() bfin_read16(PORTFIO_EDGE)
245#define bfin_write_PORTFIO_EDGE(val) bfin_write16(PORTFIO_EDGE, val)
246#define bfin_read_PORTFIO_BOTH() bfin_read16(PORTFIO_BOTH)
247#define bfin_write_PORTFIO_BOTH(val) bfin_write16(PORTFIO_BOTH, val)
248#define bfin_read_PORTFIO_INEN() bfin_read16(PORTFIO_INEN)
249#define bfin_write_PORTFIO_INEN(val) bfin_write16(PORTFIO_INEN, val)
250
251
252/* SPORT0 Controller (0xFFC00800 - 0xFFC008FF) */
253#define bfin_read_SPORT0_TCR1() bfin_read16(SPORT0_TCR1)
254#define bfin_write_SPORT0_TCR1(val) bfin_write16(SPORT0_TCR1, val)
255#define bfin_read_SPORT0_TCR2() bfin_read16(SPORT0_TCR2)
256#define bfin_write_SPORT0_TCR2(val) bfin_write16(SPORT0_TCR2, val)
257#define bfin_read_SPORT0_TCLKDIV() bfin_read16(SPORT0_TCLKDIV)
258#define bfin_write_SPORT0_TCLKDIV(val) bfin_write16(SPORT0_TCLKDIV, val)
259#define bfin_read_SPORT0_TFSDIV() bfin_read16(SPORT0_TFSDIV)
260#define bfin_write_SPORT0_TFSDIV(val) bfin_write16(SPORT0_TFSDIV, val)
261#define bfin_read_SPORT0_TX() bfin_read32(SPORT0_TX)
262#define bfin_write_SPORT0_TX(val) bfin_write32(SPORT0_TX, val)
263#define bfin_read_SPORT0_RX() bfin_read32(SPORT0_RX)
264#define bfin_write_SPORT0_RX(val) bfin_write32(SPORT0_RX, val)
265#define bfin_read_SPORT0_TX32() bfin_read32(SPORT0_TX)
266#define bfin_write_SPORT0_TX32(val) bfin_write32(SPORT0_TX, val)
267#define bfin_read_SPORT0_RX32() bfin_read32(SPORT0_RX)
268#define bfin_write_SPORT0_RX32(val) bfin_write32(SPORT0_RX, val)
269#define bfin_read_SPORT0_TX16() bfin_read16(SPORT0_TX)
270#define bfin_write_SPORT0_TX16(val) bfin_write16(SPORT0_TX, val)
271#define bfin_read_SPORT0_RX16() bfin_read16(SPORT0_RX)
272#define bfin_write_SPORT0_RX16(val) bfin_write16(SPORT0_RX, val)
273#define bfin_read_SPORT0_RCR1() bfin_read16(SPORT0_RCR1)
274#define bfin_write_SPORT0_RCR1(val) bfin_write16(SPORT0_RCR1, val)
275#define bfin_read_SPORT0_RCR2() bfin_read16(SPORT0_RCR2)
276#define bfin_write_SPORT0_RCR2(val) bfin_write16(SPORT0_RCR2, val)
277#define bfin_read_SPORT0_RCLKDIV() bfin_read16(SPORT0_RCLKDIV)
278#define bfin_write_SPORT0_RCLKDIV(val) bfin_write16(SPORT0_RCLKDIV, val)
279#define bfin_read_SPORT0_RFSDIV() bfin_read16(SPORT0_RFSDIV)
280#define bfin_write_SPORT0_RFSDIV(val) bfin_write16(SPORT0_RFSDIV, val)
281#define bfin_read_SPORT0_STAT() bfin_read16(SPORT0_STAT)
282#define bfin_write_SPORT0_STAT(val) bfin_write16(SPORT0_STAT, val)
283#define bfin_read_SPORT0_CHNL() bfin_read16(SPORT0_CHNL)
284#define bfin_write_SPORT0_CHNL(val) bfin_write16(SPORT0_CHNL, val)
285#define bfin_read_SPORT0_MCMC1() bfin_read16(SPORT0_MCMC1)
286#define bfin_write_SPORT0_MCMC1(val) bfin_write16(SPORT0_MCMC1, val)
287#define bfin_read_SPORT0_MCMC2() bfin_read16(SPORT0_MCMC2)
288#define bfin_write_SPORT0_MCMC2(val) bfin_write16(SPORT0_MCMC2, val)
289#define bfin_read_SPORT0_MTCS0() bfin_read32(SPORT0_MTCS0)
290#define bfin_write_SPORT0_MTCS0(val) bfin_write32(SPORT0_MTCS0, val)
291#define bfin_read_SPORT0_MTCS1() bfin_read32(SPORT0_MTCS1)
292#define bfin_write_SPORT0_MTCS1(val) bfin_write32(SPORT0_MTCS1, val)
293#define bfin_read_SPORT0_MTCS2() bfin_read32(SPORT0_MTCS2)
294#define bfin_write_SPORT0_MTCS2(val) bfin_write32(SPORT0_MTCS2, val)
295#define bfin_read_SPORT0_MTCS3() bfin_read32(SPORT0_MTCS3)
296#define bfin_write_SPORT0_MTCS3(val) bfin_write32(SPORT0_MTCS3, val)
297#define bfin_read_SPORT0_MRCS0() bfin_read32(SPORT0_MRCS0)
298#define bfin_write_SPORT0_MRCS0(val) bfin_write32(SPORT0_MRCS0, val)
299#define bfin_read_SPORT0_MRCS1() bfin_read32(SPORT0_MRCS1)
300#define bfin_write_SPORT0_MRCS1(val) bfin_write32(SPORT0_MRCS1, val)
301#define bfin_read_SPORT0_MRCS2() bfin_read32(SPORT0_MRCS2)
302#define bfin_write_SPORT0_MRCS2(val) bfin_write32(SPORT0_MRCS2, val)
303#define bfin_read_SPORT0_MRCS3() bfin_read32(SPORT0_MRCS3)
304#define bfin_write_SPORT0_MRCS3(val) bfin_write32(SPORT0_MRCS3, val)
305
306
307/* SPORT1 Controller (0xFFC00900 - 0xFFC009FF) */
308#define bfin_read_SPORT1_TCR1() bfin_read16(SPORT1_TCR1)
309#define bfin_write_SPORT1_TCR1(val) bfin_write16(SPORT1_TCR1, val)
310#define bfin_read_SPORT1_TCR2() bfin_read16(SPORT1_TCR2)
311#define bfin_write_SPORT1_TCR2(val) bfin_write16(SPORT1_TCR2, val)
312#define bfin_read_SPORT1_TCLKDIV() bfin_read16(SPORT1_TCLKDIV)
313#define bfin_write_SPORT1_TCLKDIV(val) bfin_write16(SPORT1_TCLKDIV, val)
314#define bfin_read_SPORT1_TFSDIV() bfin_read16(SPORT1_TFSDIV)
315#define bfin_write_SPORT1_TFSDIV(val) bfin_write16(SPORT1_TFSDIV, val)
316#define bfin_read_SPORT1_TX() bfin_read32(SPORT1_TX)
317#define bfin_write_SPORT1_TX(val) bfin_write32(SPORT1_TX, val)
318#define bfin_read_SPORT1_RX() bfin_read32(SPORT1_RX)
319#define bfin_write_SPORT1_RX(val) bfin_write32(SPORT1_RX, val)
320#define bfin_read_SPORT1_TX32() bfin_read32(SPORT1_TX)
321#define bfin_write_SPORT1_TX32(val) bfin_write32(SPORT1_TX, val)
322#define bfin_read_SPORT1_RX32() bfin_read32(SPORT1_RX)
323#define bfin_write_SPORT1_RX32(val) bfin_write32(SPORT1_RX, val)
324#define bfin_read_SPORT1_TX16() bfin_read16(SPORT1_TX)
325#define bfin_write_SPORT1_TX16(val) bfin_write16(SPORT1_TX, val)
326#define bfin_read_SPORT1_RX16() bfin_read16(SPORT1_RX)
327#define bfin_write_SPORT1_RX16(val) bfin_write16(SPORT1_RX, val)
328#define bfin_read_SPORT1_RCR1() bfin_read16(SPORT1_RCR1)
329#define bfin_write_SPORT1_RCR1(val) bfin_write16(SPORT1_RCR1, val)
330#define bfin_read_SPORT1_RCR2() bfin_read16(SPORT1_RCR2)
331#define bfin_write_SPORT1_RCR2(val) bfin_write16(SPORT1_RCR2, val)
332#define bfin_read_SPORT1_RCLKDIV() bfin_read16(SPORT1_RCLKDIV)
333#define bfin_write_SPORT1_RCLKDIV(val) bfin_write16(SPORT1_RCLKDIV, val)
334#define bfin_read_SPORT1_RFSDIV() bfin_read16(SPORT1_RFSDIV)
335#define bfin_write_SPORT1_RFSDIV(val) bfin_write16(SPORT1_RFSDIV, val)
336#define bfin_read_SPORT1_STAT() bfin_read16(SPORT1_STAT)
337#define bfin_write_SPORT1_STAT(val) bfin_write16(SPORT1_STAT, val)
338#define bfin_read_SPORT1_CHNL() bfin_read16(SPORT1_CHNL)
339#define bfin_write_SPORT1_CHNL(val) bfin_write16(SPORT1_CHNL, val)
340#define bfin_read_SPORT1_MCMC1() bfin_read16(SPORT1_MCMC1)
341#define bfin_write_SPORT1_MCMC1(val) bfin_write16(SPORT1_MCMC1, val)
342#define bfin_read_SPORT1_MCMC2() bfin_read16(SPORT1_MCMC2)
343#define bfin_write_SPORT1_MCMC2(val) bfin_write16(SPORT1_MCMC2, val)
344#define bfin_read_SPORT1_MTCS0() bfin_read32(SPORT1_MTCS0)
345#define bfin_write_SPORT1_MTCS0(val) bfin_write32(SPORT1_MTCS0, val)
346#define bfin_read_SPORT1_MTCS1() bfin_read32(SPORT1_MTCS1)
347#define bfin_write_SPORT1_MTCS1(val) bfin_write32(SPORT1_MTCS1, val)
348#define bfin_read_SPORT1_MTCS2() bfin_read32(SPORT1_MTCS2)
349#define bfin_write_SPORT1_MTCS2(val) bfin_write32(SPORT1_MTCS2, val)
350#define bfin_read_SPORT1_MTCS3() bfin_read32(SPORT1_MTCS3)
351#define bfin_write_SPORT1_MTCS3(val) bfin_write32(SPORT1_MTCS3, val)
352#define bfin_read_SPORT1_MRCS0() bfin_read32(SPORT1_MRCS0)
353#define bfin_write_SPORT1_MRCS0(val) bfin_write32(SPORT1_MRCS0, val)
354#define bfin_read_SPORT1_MRCS1() bfin_read32(SPORT1_MRCS1)
355#define bfin_write_SPORT1_MRCS1(val) bfin_write32(SPORT1_MRCS1, val)
356#define bfin_read_SPORT1_MRCS2() bfin_read32(SPORT1_MRCS2)
357#define bfin_write_SPORT1_MRCS2(val) bfin_write32(SPORT1_MRCS2, val)
358#define bfin_read_SPORT1_MRCS3() bfin_read32(SPORT1_MRCS3)
359#define bfin_write_SPORT1_MRCS3(val) bfin_write32(SPORT1_MRCS3, val)
360
361
362/* External Bus Interface Unit (0xFFC00A00 - 0xFFC00AFF) */
363#define bfin_read_EBIU_AMGCTL() bfin_read16(EBIU_AMGCTL)
364#define bfin_write_EBIU_AMGCTL(val) bfin_write16(EBIU_AMGCTL, val)
365#define bfin_read_EBIU_AMBCTL0() bfin_read32(EBIU_AMBCTL0)
366#define bfin_write_EBIU_AMBCTL0(val) bfin_write32(EBIU_AMBCTL0, val)
367#define bfin_read_EBIU_AMBCTL1() bfin_read32(EBIU_AMBCTL1)
368#define bfin_write_EBIU_AMBCTL1(val) bfin_write32(EBIU_AMBCTL1, val)
369#define bfin_read_EBIU_SDGCTL() bfin_read32(EBIU_SDGCTL)
370#define bfin_write_EBIU_SDGCTL(val) bfin_write32(EBIU_SDGCTL, val)
371#define bfin_read_EBIU_SDBCTL() bfin_read16(EBIU_SDBCTL)
372#define bfin_write_EBIU_SDBCTL(val) bfin_write16(EBIU_SDBCTL, val)
373#define bfin_read_EBIU_SDRRC() bfin_read16(EBIU_SDRRC)
374#define bfin_write_EBIU_SDRRC(val) bfin_write16(EBIU_SDRRC, val)
375#define bfin_read_EBIU_SDSTAT() bfin_read16(EBIU_SDSTAT)
376#define bfin_write_EBIU_SDSTAT(val) bfin_write16(EBIU_SDSTAT, val)
377
378
379/* DMA Traffic Control Registers */
380#define bfin_read_DMA_TC_PER() bfin_read16(DMA_TC_PER)
381#define bfin_write_DMA_TC_PER(val) bfin_write16(DMA_TC_PER, val)
382#define bfin_read_DMA_TC_CNT() bfin_read16(DMA_TC_CNT)
383#define bfin_write_DMA_TC_CNT(val) bfin_write16(DMA_TC_CNT, val)
384
385/* Alternate deprecated register names (below) provided for backwards code compatibility */
386#define bfin_read_DMA_TCPER() bfin_read16(DMA_TCPER)
387#define bfin_write_DMA_TCPER(val) bfin_write16(DMA_TCPER, val)
388#define bfin_read_DMA_TCCNT() bfin_read16(DMA_TCCNT)
389#define bfin_write_DMA_TCCNT(val) bfin_write16(DMA_TCCNT, val)
390
391/* DMA Controller */
392#define bfin_read_DMA0_CONFIG() bfin_read16(DMA0_CONFIG)
393#define bfin_write_DMA0_CONFIG(val) bfin_write16(DMA0_CONFIG, val)
394#define bfin_read_DMA0_NEXT_DESC_PTR() bfin_read32(DMA0_NEXT_DESC_PTR)
395#define bfin_write_DMA0_NEXT_DESC_PTR(val) bfin_write32(DMA0_NEXT_DESC_PTR, val)
396#define bfin_read_DMA0_START_ADDR() bfin_read32(DMA0_START_ADDR)
397#define bfin_write_DMA0_START_ADDR(val) bfin_write32(DMA0_START_ADDR, val)
398#define bfin_read_DMA0_X_COUNT() bfin_read16(DMA0_X_COUNT)
399#define bfin_write_DMA0_X_COUNT(val) bfin_write16(DMA0_X_COUNT, val)
400#define bfin_read_DMA0_Y_COUNT() bfin_read16(DMA0_Y_COUNT)
401#define bfin_write_DMA0_Y_COUNT(val) bfin_write16(DMA0_Y_COUNT, val)
402#define bfin_read_DMA0_X_MODIFY() bfin_read16(DMA0_X_MODIFY)
403#define bfin_write_DMA0_X_MODIFY(val) bfin_write16(DMA0_X_MODIFY, val)
404#define bfin_read_DMA0_Y_MODIFY() bfin_read16(DMA0_Y_MODIFY)
405#define bfin_write_DMA0_Y_MODIFY(val) bfin_write16(DMA0_Y_MODIFY, val)
406#define bfin_read_DMA0_CURR_DESC_PTR() bfin_read32(DMA0_CURR_DESC_PTR)
407#define bfin_write_DMA0_CURR_DESC_PTR(val) bfin_write32(DMA0_CURR_DESC_PTR, val)
408#define bfin_read_DMA0_CURR_ADDR() bfin_read32(DMA0_CURR_ADDR)
409#define bfin_write_DMA0_CURR_ADDR(val) bfin_write32(DMA0_CURR_ADDR, val)
410#define bfin_read_DMA0_CURR_X_COUNT() bfin_read16(DMA0_CURR_X_COUNT)
411#define bfin_write_DMA0_CURR_X_COUNT(val) bfin_write16(DMA0_CURR_X_COUNT, val)
412#define bfin_read_DMA0_CURR_Y_COUNT() bfin_read16(DMA0_CURR_Y_COUNT)
413#define bfin_write_DMA0_CURR_Y_COUNT(val) bfin_write16(DMA0_CURR_Y_COUNT, val)
414#define bfin_read_DMA0_IRQ_STATUS() bfin_read16(DMA0_IRQ_STATUS)
415#define bfin_write_DMA0_IRQ_STATUS(val) bfin_write16(DMA0_IRQ_STATUS, val)
416#define bfin_read_DMA0_PERIPHERAL_MAP() bfin_read16(DMA0_PERIPHERAL_MAP)
417#define bfin_write_DMA0_PERIPHERAL_MAP(val) bfin_write16(DMA0_PERIPHERAL_MAP, val)
418
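Every peripheral DMA channel repeats this same register file; a register-mode (non-descriptor) setup touches only START_ADDR, the counts/modifies, and CONFIG. A sketch of a one-shot receive; the CONFIG bit names are assumed from the Blackfin DMA definitions:

/* Sketch: one-shot register-mode 16-bit receive on DMA0.
 * DMAEN, WNR, WDSIZE_16 and DI_EN are assumptions from the
 * Blackfin DMA definitions; stop mode, one dimension. */
static void dma0_rx16(void *buf, u16 nwords)
{
	bfin_write_DMA0_START_ADDR((unsigned long)buf);
	bfin_write_DMA0_X_COUNT(nwords);
	bfin_write_DMA0_X_MODIFY(2);          /* 2 bytes per element */
	bfin_write_DMA0_CONFIG(DMAEN | WNR | WDSIZE_16 | DI_EN);
}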
419#define bfin_read_DMA1_CONFIG() bfin_read16(DMA1_CONFIG)
420#define bfin_write_DMA1_CONFIG(val) bfin_write16(DMA1_CONFIG, val)
421#define bfin_read_DMA1_NEXT_DESC_PTR() bfin_read32(DMA1_NEXT_DESC_PTR)
422#define bfin_write_DMA1_NEXT_DESC_PTR(val) bfin_write32(DMA1_NEXT_DESC_PTR, val)
423#define bfin_read_DMA1_START_ADDR() bfin_read32(DMA1_START_ADDR)
424#define bfin_write_DMA1_START_ADDR(val) bfin_write32(DMA1_START_ADDR, val)
425#define bfin_read_DMA1_X_COUNT() bfin_read16(DMA1_X_COUNT)
426#define bfin_write_DMA1_X_COUNT(val) bfin_write16(DMA1_X_COUNT, val)
427#define bfin_read_DMA1_Y_COUNT() bfin_read16(DMA1_Y_COUNT)
428#define bfin_write_DMA1_Y_COUNT(val) bfin_write16(DMA1_Y_COUNT, val)
429#define bfin_read_DMA1_X_MODIFY() bfin_read16(DMA1_X_MODIFY)
430#define bfin_write_DMA1_X_MODIFY(val) bfin_write16(DMA1_X_MODIFY, val)
431#define bfin_read_DMA1_Y_MODIFY() bfin_read16(DMA1_Y_MODIFY)
432#define bfin_write_DMA1_Y_MODIFY(val) bfin_write16(DMA1_Y_MODIFY, val)
433#define bfin_read_DMA1_CURR_DESC_PTR() bfin_read32(DMA1_CURR_DESC_PTR)
434#define bfin_write_DMA1_CURR_DESC_PTR(val) bfin_write32(DMA1_CURR_DESC_PTR, val)
435#define bfin_read_DMA1_CURR_ADDR() bfin_read32(DMA1_CURR_ADDR)
436#define bfin_write_DMA1_CURR_ADDR(val) bfin_write32(DMA1_CURR_ADDR, val)
437#define bfin_read_DMA1_CURR_X_COUNT() bfin_read16(DMA1_CURR_X_COUNT)
438#define bfin_write_DMA1_CURR_X_COUNT(val) bfin_write16(DMA1_CURR_X_COUNT, val)
439#define bfin_read_DMA1_CURR_Y_COUNT() bfin_read16(DMA1_CURR_Y_COUNT)
440#define bfin_write_DMA1_CURR_Y_COUNT(val) bfin_write16(DMA1_CURR_Y_COUNT, val)
441#define bfin_read_DMA1_IRQ_STATUS() bfin_read16(DMA1_IRQ_STATUS)
442#define bfin_write_DMA1_IRQ_STATUS(val) bfin_write16(DMA1_IRQ_STATUS, val)
443#define bfin_read_DMA1_PERIPHERAL_MAP() bfin_read16(DMA1_PERIPHERAL_MAP)
444#define bfin_write_DMA1_PERIPHERAL_MAP(val) bfin_write16(DMA1_PERIPHERAL_MAP, val)
445
446#define bfin_read_DMA2_CONFIG() bfin_read16(DMA2_CONFIG)
447#define bfin_write_DMA2_CONFIG(val) bfin_write16(DMA2_CONFIG, val)
448#define bfin_read_DMA2_NEXT_DESC_PTR() bfin_read32(DMA2_NEXT_DESC_PTR)
449#define bfin_write_DMA2_NEXT_DESC_PTR(val) bfin_write32(DMA2_NEXT_DESC_PTR, val)
450#define bfin_read_DMA2_START_ADDR() bfin_read32(DMA2_START_ADDR)
451#define bfin_write_DMA2_START_ADDR(val) bfin_write32(DMA2_START_ADDR, val)
452#define bfin_read_DMA2_X_COUNT() bfin_read16(DMA2_X_COUNT)
453#define bfin_write_DMA2_X_COUNT(val) bfin_write16(DMA2_X_COUNT, val)
454#define bfin_read_DMA2_Y_COUNT() bfin_read16(DMA2_Y_COUNT)
455#define bfin_write_DMA2_Y_COUNT(val) bfin_write16(DMA2_Y_COUNT, val)
456#define bfin_read_DMA2_X_MODIFY() bfin_read16(DMA2_X_MODIFY)
457#define bfin_write_DMA2_X_MODIFY(val) bfin_write16(DMA2_X_MODIFY, val)
458#define bfin_read_DMA2_Y_MODIFY() bfin_read16(DMA2_Y_MODIFY)
459#define bfin_write_DMA2_Y_MODIFY(val) bfin_write16(DMA2_Y_MODIFY, val)
460#define bfin_read_DMA2_CURR_DESC_PTR() bfin_read32(DMA2_CURR_DESC_PTR)
461#define bfin_write_DMA2_CURR_DESC_PTR(val) bfin_write32(DMA2_CURR_DESC_PTR, val)
462#define bfin_read_DMA2_CURR_ADDR() bfin_read32(DMA2_CURR_ADDR)
463#define bfin_write_DMA2_CURR_ADDR(val) bfin_write32(DMA2_CURR_ADDR, val)
464#define bfin_read_DMA2_CURR_X_COUNT() bfin_read16(DMA2_CURR_X_COUNT)
465#define bfin_write_DMA2_CURR_X_COUNT(val) bfin_write16(DMA2_CURR_X_COUNT, val)
466#define bfin_read_DMA2_CURR_Y_COUNT() bfin_read16(DMA2_CURR_Y_COUNT)
467#define bfin_write_DMA2_CURR_Y_COUNT(val) bfin_write16(DMA2_CURR_Y_COUNT, val)
468#define bfin_read_DMA2_IRQ_STATUS() bfin_read16(DMA2_IRQ_STATUS)
469#define bfin_write_DMA2_IRQ_STATUS(val) bfin_write16(DMA2_IRQ_STATUS, val)
470#define bfin_read_DMA2_PERIPHERAL_MAP() bfin_read16(DMA2_PERIPHERAL_MAP)
471#define bfin_write_DMA2_PERIPHERAL_MAP(val) bfin_write16(DMA2_PERIPHERAL_MAP, val)
472
473#define bfin_read_DMA3_CONFIG() bfin_read16(DMA3_CONFIG)
474#define bfin_write_DMA3_CONFIG(val) bfin_write16(DMA3_CONFIG, val)
475#define bfin_read_DMA3_NEXT_DESC_PTR() bfin_read32(DMA3_NEXT_DESC_PTR)
476#define bfin_write_DMA3_NEXT_DESC_PTR(val) bfin_write32(DMA3_NEXT_DESC_PTR, val)
477#define bfin_read_DMA3_START_ADDR() bfin_read32(DMA3_START_ADDR)
478#define bfin_write_DMA3_START_ADDR(val) bfin_write32(DMA3_START_ADDR, val)
479#define bfin_read_DMA3_X_COUNT() bfin_read16(DMA3_X_COUNT)
480#define bfin_write_DMA3_X_COUNT(val) bfin_write16(DMA3_X_COUNT, val)
481#define bfin_read_DMA3_Y_COUNT() bfin_read16(DMA3_Y_COUNT)
482#define bfin_write_DMA3_Y_COUNT(val) bfin_write16(DMA3_Y_COUNT, val)
483#define bfin_read_DMA3_X_MODIFY() bfin_read16(DMA3_X_MODIFY)
484#define bfin_write_DMA3_X_MODIFY(val) bfin_write16(DMA3_X_MODIFY, val)
485#define bfin_read_DMA3_Y_MODIFY() bfin_read16(DMA3_Y_MODIFY)
486#define bfin_write_DMA3_Y_MODIFY(val) bfin_write16(DMA3_Y_MODIFY, val)
487#define bfin_read_DMA3_CURR_DESC_PTR() bfin_read32(DMA3_CURR_DESC_PTR)
488#define bfin_write_DMA3_CURR_DESC_PTR(val) bfin_write32(DMA3_CURR_DESC_PTR, val)
489#define bfin_read_DMA3_CURR_ADDR() bfin_read32(DMA3_CURR_ADDR)
490#define bfin_write_DMA3_CURR_ADDR(val) bfin_write32(DMA3_CURR_ADDR, val)
491#define bfin_read_DMA3_CURR_X_COUNT() bfin_read16(DMA3_CURR_X_COUNT)
492#define bfin_write_DMA3_CURR_X_COUNT(val) bfin_write16(DMA3_CURR_X_COUNT, val)
493#define bfin_read_DMA3_CURR_Y_COUNT() bfin_read16(DMA3_CURR_Y_COUNT)
494#define bfin_write_DMA3_CURR_Y_COUNT(val) bfin_write16(DMA3_CURR_Y_COUNT, val)
495#define bfin_read_DMA3_IRQ_STATUS() bfin_read16(DMA3_IRQ_STATUS)
496#define bfin_write_DMA3_IRQ_STATUS(val) bfin_write16(DMA3_IRQ_STATUS, val)
497#define bfin_read_DMA3_PERIPHERAL_MAP() bfin_read16(DMA3_PERIPHERAL_MAP)
498#define bfin_write_DMA3_PERIPHERAL_MAP(val) bfin_write16(DMA3_PERIPHERAL_MAP, val)
499
500#define bfin_read_DMA4_CONFIG() bfin_read16(DMA4_CONFIG)
501#define bfin_write_DMA4_CONFIG(val) bfin_write16(DMA4_CONFIG, val)
502#define bfin_read_DMA4_NEXT_DESC_PTR() bfin_read32(DMA4_NEXT_DESC_PTR)
503#define bfin_write_DMA4_NEXT_DESC_PTR(val) bfin_write32(DMA4_NEXT_DESC_PTR, val)
504#define bfin_read_DMA4_START_ADDR() bfin_read32(DMA4_START_ADDR)
505#define bfin_write_DMA4_START_ADDR(val) bfin_write32(DMA4_START_ADDR, val)
506#define bfin_read_DMA4_X_COUNT() bfin_read16(DMA4_X_COUNT)
507#define bfin_write_DMA4_X_COUNT(val) bfin_write16(DMA4_X_COUNT, val)
508#define bfin_read_DMA4_Y_COUNT() bfin_read16(DMA4_Y_COUNT)
509#define bfin_write_DMA4_Y_COUNT(val) bfin_write16(DMA4_Y_COUNT, val)
510#define bfin_read_DMA4_X_MODIFY() bfin_read16(DMA4_X_MODIFY)
511#define bfin_write_DMA4_X_MODIFY(val) bfin_write16(DMA4_X_MODIFY, val)
512#define bfin_read_DMA4_Y_MODIFY() bfin_read16(DMA4_Y_MODIFY)
513#define bfin_write_DMA4_Y_MODIFY(val) bfin_write16(DMA4_Y_MODIFY, val)
514#define bfin_read_DMA4_CURR_DESC_PTR() bfin_read32(DMA4_CURR_DESC_PTR)
515#define bfin_write_DMA4_CURR_DESC_PTR(val) bfin_write32(DMA4_CURR_DESC_PTR, val)
516#define bfin_read_DMA4_CURR_ADDR() bfin_read32(DMA4_CURR_ADDR)
517#define bfin_write_DMA4_CURR_ADDR(val) bfin_write32(DMA4_CURR_ADDR, val)
518#define bfin_read_DMA4_CURR_X_COUNT() bfin_read16(DMA4_CURR_X_COUNT)
519#define bfin_write_DMA4_CURR_X_COUNT(val) bfin_write16(DMA4_CURR_X_COUNT, val)
520#define bfin_read_DMA4_CURR_Y_COUNT() bfin_read16(DMA4_CURR_Y_COUNT)
521#define bfin_write_DMA4_CURR_Y_COUNT(val) bfin_write16(DMA4_CURR_Y_COUNT, val)
522#define bfin_read_DMA4_IRQ_STATUS() bfin_read16(DMA4_IRQ_STATUS)
523#define bfin_write_DMA4_IRQ_STATUS(val) bfin_write16(DMA4_IRQ_STATUS, val)
524#define bfin_read_DMA4_PERIPHERAL_MAP() bfin_read16(DMA4_PERIPHERAL_MAP)
525#define bfin_write_DMA4_PERIPHERAL_MAP(val) bfin_write16(DMA4_PERIPHERAL_MAP, val)
526
527#define bfin_read_DMA5_CONFIG() bfin_read16(DMA5_CONFIG)
528#define bfin_write_DMA5_CONFIG(val) bfin_write16(DMA5_CONFIG, val)
529#define bfin_read_DMA5_NEXT_DESC_PTR() bfin_read32(DMA5_NEXT_DESC_PTR)
530#define bfin_write_DMA5_NEXT_DESC_PTR(val) bfin_write32(DMA5_NEXT_DESC_PTR, val)
531#define bfin_read_DMA5_START_ADDR() bfin_read32(DMA5_START_ADDR)
532#define bfin_write_DMA5_START_ADDR(val) bfin_write32(DMA5_START_ADDR, val)
533#define bfin_read_DMA5_X_COUNT() bfin_read16(DMA5_X_COUNT)
534#define bfin_write_DMA5_X_COUNT(val) bfin_write16(DMA5_X_COUNT, val)
535#define bfin_read_DMA5_Y_COUNT() bfin_read16(DMA5_Y_COUNT)
536#define bfin_write_DMA5_Y_COUNT(val) bfin_write16(DMA5_Y_COUNT, val)
537#define bfin_read_DMA5_X_MODIFY() bfin_read16(DMA5_X_MODIFY)
538#define bfin_write_DMA5_X_MODIFY(val) bfin_write16(DMA5_X_MODIFY, val)
539#define bfin_read_DMA5_Y_MODIFY() bfin_read16(DMA5_Y_MODIFY)
540#define bfin_write_DMA5_Y_MODIFY(val) bfin_write16(DMA5_Y_MODIFY, val)
541#define bfin_read_DMA5_CURR_DESC_PTR() bfin_read32(DMA5_CURR_DESC_PTR)
542#define bfin_write_DMA5_CURR_DESC_PTR(val) bfin_write32(DMA5_CURR_DESC_PTR, val)
543#define bfin_read_DMA5_CURR_ADDR() bfin_read32(DMA5_CURR_ADDR)
544#define bfin_write_DMA5_CURR_ADDR(val) bfin_write32(DMA5_CURR_ADDR, val)
545#define bfin_read_DMA5_CURR_X_COUNT() bfin_read16(DMA5_CURR_X_COUNT)
546#define bfin_write_DMA5_CURR_X_COUNT(val) bfin_write16(DMA5_CURR_X_COUNT, val)
547#define bfin_read_DMA5_CURR_Y_COUNT() bfin_read16(DMA5_CURR_Y_COUNT)
548#define bfin_write_DMA5_CURR_Y_COUNT(val) bfin_write16(DMA5_CURR_Y_COUNT, val)
549#define bfin_read_DMA5_IRQ_STATUS() bfin_read16(DMA5_IRQ_STATUS)
550#define bfin_write_DMA5_IRQ_STATUS(val) bfin_write16(DMA5_IRQ_STATUS, val)
551#define bfin_read_DMA5_PERIPHERAL_MAP() bfin_read16(DMA5_PERIPHERAL_MAP)
552#define bfin_write_DMA5_PERIPHERAL_MAP(val) bfin_write16(DMA5_PERIPHERAL_MAP, val)
553
554#define bfin_read_DMA6_CONFIG() bfin_read16(DMA6_CONFIG)
555#define bfin_write_DMA6_CONFIG(val) bfin_write16(DMA6_CONFIG, val)
556#define bfin_read_DMA6_NEXT_DESC_PTR() bfin_read32(DMA6_NEXT_DESC_PTR)
557#define bfin_write_DMA6_NEXT_DESC_PTR(val) bfin_write32(DMA6_NEXT_DESC_PTR, val)
558#define bfin_read_DMA6_START_ADDR() bfin_read32(DMA6_START_ADDR)
559#define bfin_write_DMA6_START_ADDR(val) bfin_write32(DMA6_START_ADDR, val)
560#define bfin_read_DMA6_X_COUNT() bfin_read16(DMA6_X_COUNT)
561#define bfin_write_DMA6_X_COUNT(val) bfin_write16(DMA6_X_COUNT, val)
562#define bfin_read_DMA6_Y_COUNT() bfin_read16(DMA6_Y_COUNT)
563#define bfin_write_DMA6_Y_COUNT(val) bfin_write16(DMA6_Y_COUNT, val)
564#define bfin_read_DMA6_X_MODIFY() bfin_read16(DMA6_X_MODIFY)
565#define bfin_write_DMA6_X_MODIFY(val) bfin_write16(DMA6_X_MODIFY, val)
566#define bfin_read_DMA6_Y_MODIFY() bfin_read16(DMA6_Y_MODIFY)
567#define bfin_write_DMA6_Y_MODIFY(val) bfin_write16(DMA6_Y_MODIFY, val)
568#define bfin_read_DMA6_CURR_DESC_PTR() bfin_read32(DMA6_CURR_DESC_PTR)
569#define bfin_write_DMA6_CURR_DESC_PTR(val) bfin_write32(DMA6_CURR_DESC_PTR, val)
570#define bfin_read_DMA6_CURR_ADDR() bfin_read32(DMA6_CURR_ADDR)
571#define bfin_write_DMA6_CURR_ADDR(val) bfin_write32(DMA6_CURR_ADDR, val)
572#define bfin_read_DMA6_CURR_X_COUNT() bfin_read16(DMA6_CURR_X_COUNT)
573#define bfin_write_DMA6_CURR_X_COUNT(val) bfin_write16(DMA6_CURR_X_COUNT, val)
574#define bfin_read_DMA6_CURR_Y_COUNT() bfin_read16(DMA6_CURR_Y_COUNT)
575#define bfin_write_DMA6_CURR_Y_COUNT(val) bfin_write16(DMA6_CURR_Y_COUNT, val)
576#define bfin_read_DMA6_IRQ_STATUS() bfin_read16(DMA6_IRQ_STATUS)
577#define bfin_write_DMA6_IRQ_STATUS(val) bfin_write16(DMA6_IRQ_STATUS, val)
578#define bfin_read_DMA6_PERIPHERAL_MAP() bfin_read16(DMA6_PERIPHERAL_MAP)
579#define bfin_write_DMA6_PERIPHERAL_MAP(val) bfin_write16(DMA6_PERIPHERAL_MAP, val)
580
581#define bfin_read_DMA7_CONFIG() bfin_read16(DMA7_CONFIG)
582#define bfin_write_DMA7_CONFIG(val) bfin_write16(DMA7_CONFIG, val)
583#define bfin_read_DMA7_NEXT_DESC_PTR() bfin_read32(DMA7_NEXT_DESC_PTR)
584#define bfin_write_DMA7_NEXT_DESC_PTR(val) bfin_write32(DMA7_NEXT_DESC_PTR, val)
585#define bfin_read_DMA7_START_ADDR() bfin_read32(DMA7_START_ADDR)
586#define bfin_write_DMA7_START_ADDR(val) bfin_write32(DMA7_START_ADDR, val)
587#define bfin_read_DMA7_X_COUNT() bfin_read16(DMA7_X_COUNT)
588#define bfin_write_DMA7_X_COUNT(val) bfin_write16(DMA7_X_COUNT, val)
589#define bfin_read_DMA7_Y_COUNT() bfin_read16(DMA7_Y_COUNT)
590#define bfin_write_DMA7_Y_COUNT(val) bfin_write16(DMA7_Y_COUNT, val)
591#define bfin_read_DMA7_X_MODIFY() bfin_read16(DMA7_X_MODIFY)
592#define bfin_write_DMA7_X_MODIFY(val) bfin_write16(DMA7_X_MODIFY, val)
593#define bfin_read_DMA7_Y_MODIFY() bfin_read16(DMA7_Y_MODIFY)
594#define bfin_write_DMA7_Y_MODIFY(val) bfin_write16(DMA7_Y_MODIFY, val)
595#define bfin_read_DMA7_CURR_DESC_PTR() bfin_read32(DMA7_CURR_DESC_PTR)
596#define bfin_write_DMA7_CURR_DESC_PTR(val) bfin_write32(DMA7_CURR_DESC_PTR, val)
597#define bfin_read_DMA7_CURR_ADDR() bfin_read32(DMA7_CURR_ADDR)
598#define bfin_write_DMA7_CURR_ADDR(val) bfin_write32(DMA7_CURR_ADDR, val)
599#define bfin_read_DMA7_CURR_X_COUNT() bfin_read16(DMA7_CURR_X_COUNT)
600#define bfin_write_DMA7_CURR_X_COUNT(val) bfin_write16(DMA7_CURR_X_COUNT, val)
601#define bfin_read_DMA7_CURR_Y_COUNT() bfin_read16(DMA7_CURR_Y_COUNT)
602#define bfin_write_DMA7_CURR_Y_COUNT(val) bfin_write16(DMA7_CURR_Y_COUNT, val)
603#define bfin_read_DMA7_IRQ_STATUS() bfin_read16(DMA7_IRQ_STATUS)
604#define bfin_write_DMA7_IRQ_STATUS(val) bfin_write16(DMA7_IRQ_STATUS, val)
605#define bfin_read_DMA7_PERIPHERAL_MAP() bfin_read16(DMA7_PERIPHERAL_MAP)
606#define bfin_write_DMA7_PERIPHERAL_MAP(val) bfin_write16(DMA7_PERIPHERAL_MAP, val)
607
608#define bfin_read_DMA8_CONFIG() bfin_read16(DMA8_CONFIG)
609#define bfin_write_DMA8_CONFIG(val) bfin_write16(DMA8_CONFIG, val)
610#define bfin_read_DMA8_NEXT_DESC_PTR() bfin_read32(DMA8_NEXT_DESC_PTR)
611#define bfin_write_DMA8_NEXT_DESC_PTR(val) bfin_write32(DMA8_NEXT_DESC_PTR, val)
612#define bfin_read_DMA8_START_ADDR() bfin_read32(DMA8_START_ADDR)
613#define bfin_write_DMA8_START_ADDR(val) bfin_write32(DMA8_START_ADDR, val)
614#define bfin_read_DMA8_X_COUNT() bfin_read16(DMA8_X_COUNT)
615#define bfin_write_DMA8_X_COUNT(val) bfin_write16(DMA8_X_COUNT, val)
616#define bfin_read_DMA8_Y_COUNT() bfin_read16(DMA8_Y_COUNT)
617#define bfin_write_DMA8_Y_COUNT(val) bfin_write16(DMA8_Y_COUNT, val)
618#define bfin_read_DMA8_X_MODIFY() bfin_read16(DMA8_X_MODIFY)
619#define bfin_write_DMA8_X_MODIFY(val) bfin_write16(DMA8_X_MODIFY, val)
620#define bfin_read_DMA8_Y_MODIFY() bfin_read16(DMA8_Y_MODIFY)
621#define bfin_write_DMA8_Y_MODIFY(val) bfin_write16(DMA8_Y_MODIFY, val)
622#define bfin_read_DMA8_CURR_DESC_PTR() bfin_read32(DMA8_CURR_DESC_PTR)
623#define bfin_write_DMA8_CURR_DESC_PTR(val) bfin_write32(DMA8_CURR_DESC_PTR, val)
624#define bfin_read_DMA8_CURR_ADDR() bfin_read32(DMA8_CURR_ADDR)
625#define bfin_write_DMA8_CURR_ADDR(val) bfin_write32(DMA8_CURR_ADDR, val)
626#define bfin_read_DMA8_CURR_X_COUNT() bfin_read16(DMA8_CURR_X_COUNT)
627#define bfin_write_DMA8_CURR_X_COUNT(val) bfin_write16(DMA8_CURR_X_COUNT, val)
628#define bfin_read_DMA8_CURR_Y_COUNT() bfin_read16(DMA8_CURR_Y_COUNT)
629#define bfin_write_DMA8_CURR_Y_COUNT(val) bfin_write16(DMA8_CURR_Y_COUNT, val)
630#define bfin_read_DMA8_IRQ_STATUS() bfin_read16(DMA8_IRQ_STATUS)
631#define bfin_write_DMA8_IRQ_STATUS(val) bfin_write16(DMA8_IRQ_STATUS, val)
632#define bfin_read_DMA8_PERIPHERAL_MAP() bfin_read16(DMA8_PERIPHERAL_MAP)
633#define bfin_write_DMA8_PERIPHERAL_MAP(val) bfin_write16(DMA8_PERIPHERAL_MAP, val)
634
635#define bfin_read_DMA9_CONFIG() bfin_read16(DMA9_CONFIG)
636#define bfin_write_DMA9_CONFIG(val) bfin_write16(DMA9_CONFIG, val)
637#define bfin_read_DMA9_NEXT_DESC_PTR() bfin_read32(DMA9_NEXT_DESC_PTR)
638#define bfin_write_DMA9_NEXT_DESC_PTR(val) bfin_write32(DMA9_NEXT_DESC_PTR, val)
639#define bfin_read_DMA9_START_ADDR() bfin_read32(DMA9_START_ADDR)
640#define bfin_write_DMA9_START_ADDR(val) bfin_write32(DMA9_START_ADDR, val)
641#define bfin_read_DMA9_X_COUNT() bfin_read16(DMA9_X_COUNT)
642#define bfin_write_DMA9_X_COUNT(val) bfin_write16(DMA9_X_COUNT, val)
643#define bfin_read_DMA9_Y_COUNT() bfin_read16(DMA9_Y_COUNT)
644#define bfin_write_DMA9_Y_COUNT(val) bfin_write16(DMA9_Y_COUNT, val)
645#define bfin_read_DMA9_X_MODIFY() bfin_read16(DMA9_X_MODIFY)
646#define bfin_write_DMA9_X_MODIFY(val) bfin_write16(DMA9_X_MODIFY, val)
647#define bfin_read_DMA9_Y_MODIFY() bfin_read16(DMA9_Y_MODIFY)
648#define bfin_write_DMA9_Y_MODIFY(val) bfin_write16(DMA9_Y_MODIFY, val)
649#define bfin_read_DMA9_CURR_DESC_PTR() bfin_read32(DMA9_CURR_DESC_PTR)
650#define bfin_write_DMA9_CURR_DESC_PTR(val) bfin_write32(DMA9_CURR_DESC_PTR, val)
651#define bfin_read_DMA9_CURR_ADDR() bfin_read32(DMA9_CURR_ADDR)
652#define bfin_write_DMA9_CURR_ADDR(val) bfin_write32(DMA9_CURR_ADDR, val)
653#define bfin_read_DMA9_CURR_X_COUNT() bfin_read16(DMA9_CURR_X_COUNT)
654#define bfin_write_DMA9_CURR_X_COUNT(val) bfin_write16(DMA9_CURR_X_COUNT, val)
655#define bfin_read_DMA9_CURR_Y_COUNT() bfin_read16(DMA9_CURR_Y_COUNT)
656#define bfin_write_DMA9_CURR_Y_COUNT(val) bfin_write16(DMA9_CURR_Y_COUNT, val)
657#define bfin_read_DMA9_IRQ_STATUS() bfin_read16(DMA9_IRQ_STATUS)
658#define bfin_write_DMA9_IRQ_STATUS(val) bfin_write16(DMA9_IRQ_STATUS, val)
659#define bfin_read_DMA9_PERIPHERAL_MAP() bfin_read16(DMA9_PERIPHERAL_MAP)
660#define bfin_write_DMA9_PERIPHERAL_MAP(val) bfin_write16(DMA9_PERIPHERAL_MAP, val)
661
662#define bfin_read_DMA10_CONFIG() bfin_read16(DMA10_CONFIG)
663#define bfin_write_DMA10_CONFIG(val) bfin_write16(DMA10_CONFIG, val)
664#define bfin_read_DMA10_NEXT_DESC_PTR() bfin_read32(DMA10_NEXT_DESC_PTR)
665#define bfin_write_DMA10_NEXT_DESC_PTR(val) bfin_write32(DMA10_NEXT_DESC_PTR, val)
666#define bfin_read_DMA10_START_ADDR() bfin_read32(DMA10_START_ADDR)
667#define bfin_write_DMA10_START_ADDR(val) bfin_write32(DMA10_START_ADDR, val)
668#define bfin_read_DMA10_X_COUNT() bfin_read16(DMA10_X_COUNT)
669#define bfin_write_DMA10_X_COUNT(val) bfin_write16(DMA10_X_COUNT, val)
670#define bfin_read_DMA10_Y_COUNT() bfin_read16(DMA10_Y_COUNT)
671#define bfin_write_DMA10_Y_COUNT(val) bfin_write16(DMA10_Y_COUNT, val)
672#define bfin_read_DMA10_X_MODIFY() bfin_read16(DMA10_X_MODIFY)
673#define bfin_write_DMA10_X_MODIFY(val) bfin_write16(DMA10_X_MODIFY, val)
674#define bfin_read_DMA10_Y_MODIFY() bfin_read16(DMA10_Y_MODIFY)
675#define bfin_write_DMA10_Y_MODIFY(val) bfin_write16(DMA10_Y_MODIFY, val)
676#define bfin_read_DMA10_CURR_DESC_PTR() bfin_read32(DMA10_CURR_DESC_PTR)
677#define bfin_write_DMA10_CURR_DESC_PTR(val) bfin_write32(DMA10_CURR_DESC_PTR, val)
678#define bfin_read_DMA10_CURR_ADDR() bfin_read32(DMA10_CURR_ADDR)
679#define bfin_write_DMA10_CURR_ADDR(val) bfin_write32(DMA10_CURR_ADDR, val)
680#define bfin_read_DMA10_CURR_X_COUNT() bfin_read16(DMA10_CURR_X_COUNT)
681#define bfin_write_DMA10_CURR_X_COUNT(val) bfin_write16(DMA10_CURR_X_COUNT, val)
682#define bfin_read_DMA10_CURR_Y_COUNT() bfin_read16(DMA10_CURR_Y_COUNT)
683#define bfin_write_DMA10_CURR_Y_COUNT(val) bfin_write16(DMA10_CURR_Y_COUNT, val)
684#define bfin_read_DMA10_IRQ_STATUS() bfin_read16(DMA10_IRQ_STATUS)
685#define bfin_write_DMA10_IRQ_STATUS(val) bfin_write16(DMA10_IRQ_STATUS, val)
686#define bfin_read_DMA10_PERIPHERAL_MAP() bfin_read16(DMA10_PERIPHERAL_MAP)
687#define bfin_write_DMA10_PERIPHERAL_MAP(val) bfin_write16(DMA10_PERIPHERAL_MAP, val)
688
689#define bfin_read_DMA11_CONFIG() bfin_read16(DMA11_CONFIG)
690#define bfin_write_DMA11_CONFIG(val) bfin_write16(DMA11_CONFIG, val)
691#define bfin_read_DMA11_NEXT_DESC_PTR() bfin_read32(DMA11_NEXT_DESC_PTR)
692#define bfin_write_DMA11_NEXT_DESC_PTR(val) bfin_write32(DMA11_NEXT_DESC_PTR, val)
693#define bfin_read_DMA11_START_ADDR() bfin_read32(DMA11_START_ADDR)
694#define bfin_write_DMA11_START_ADDR(val) bfin_write32(DMA11_START_ADDR, val)
695#define bfin_read_DMA11_X_COUNT() bfin_read16(DMA11_X_COUNT)
696#define bfin_write_DMA11_X_COUNT(val) bfin_write16(DMA11_X_COUNT, val)
697#define bfin_read_DMA11_Y_COUNT() bfin_read16(DMA11_Y_COUNT)
698#define bfin_write_DMA11_Y_COUNT(val) bfin_write16(DMA11_Y_COUNT, val)
699#define bfin_read_DMA11_X_MODIFY() bfin_read16(DMA11_X_MODIFY)
700#define bfin_write_DMA11_X_MODIFY(val) bfin_write16(DMA11_X_MODIFY, val)
701#define bfin_read_DMA11_Y_MODIFY() bfin_read16(DMA11_Y_MODIFY)
702#define bfin_write_DMA11_Y_MODIFY(val) bfin_write16(DMA11_Y_MODIFY, val)
703#define bfin_read_DMA11_CURR_DESC_PTR() bfin_read32(DMA11_CURR_DESC_PTR)
704#define bfin_write_DMA11_CURR_DESC_PTR(val) bfin_write32(DMA11_CURR_DESC_PTR, val)
705#define bfin_read_DMA11_CURR_ADDR() bfin_read32(DMA11_CURR_ADDR)
706#define bfin_write_DMA11_CURR_ADDR(val) bfin_write32(DMA11_CURR_ADDR, val)
707#define bfin_read_DMA11_CURR_X_COUNT() bfin_read16(DMA11_CURR_X_COUNT)
708#define bfin_write_DMA11_CURR_X_COUNT(val) bfin_write16(DMA11_CURR_X_COUNT, val)
709#define bfin_read_DMA11_CURR_Y_COUNT() bfin_read16(DMA11_CURR_Y_COUNT)
710#define bfin_write_DMA11_CURR_Y_COUNT(val) bfin_write16(DMA11_CURR_Y_COUNT, val)
711#define bfin_read_DMA11_IRQ_STATUS() bfin_read16(DMA11_IRQ_STATUS)
712#define bfin_write_DMA11_IRQ_STATUS(val) bfin_write16(DMA11_IRQ_STATUS, val)
713#define bfin_read_DMA11_PERIPHERAL_MAP() bfin_read16(DMA11_PERIPHERAL_MAP)
714#define bfin_write_DMA11_PERIPHERAL_MAP(val) bfin_write16(DMA11_PERIPHERAL_MAP, val)
715
716#define bfin_read_MDMA_D0_CONFIG() bfin_read16(MDMA_D0_CONFIG)
717#define bfin_write_MDMA_D0_CONFIG(val) bfin_write16(MDMA_D0_CONFIG, val)
718#define bfin_read_MDMA_D0_NEXT_DESC_PTR() bfin_read32(MDMA_D0_NEXT_DESC_PTR)
719#define bfin_write_MDMA_D0_NEXT_DESC_PTR(val) bfin_write32(MDMA_D0_NEXT_DESC_PTR, val)
720#define bfin_read_MDMA_D0_START_ADDR() bfin_read32(MDMA_D0_START_ADDR)
721#define bfin_write_MDMA_D0_START_ADDR(val) bfin_write32(MDMA_D0_START_ADDR, val)
722#define bfin_read_MDMA_D0_X_COUNT() bfin_read16(MDMA_D0_X_COUNT)
723#define bfin_write_MDMA_D0_X_COUNT(val) bfin_write16(MDMA_D0_X_COUNT, val)
724#define bfin_read_MDMA_D0_Y_COUNT() bfin_read16(MDMA_D0_Y_COUNT)
725#define bfin_write_MDMA_D0_Y_COUNT(val) bfin_write16(MDMA_D0_Y_COUNT, val)
726#define bfin_read_MDMA_D0_X_MODIFY() bfin_read16(MDMA_D0_X_MODIFY)
727#define bfin_write_MDMA_D0_X_MODIFY(val) bfin_write16(MDMA_D0_X_MODIFY, val)
728#define bfin_read_MDMA_D0_Y_MODIFY() bfin_read16(MDMA_D0_Y_MODIFY)
729#define bfin_write_MDMA_D0_Y_MODIFY(val) bfin_write16(MDMA_D0_Y_MODIFY, val)
730#define bfin_read_MDMA_D0_CURR_DESC_PTR() bfin_read32(MDMA_D0_CURR_DESC_PTR)
731#define bfin_write_MDMA_D0_CURR_DESC_PTR(val) bfin_write32(MDMA_D0_CURR_DESC_PTR, val)
732#define bfin_read_MDMA_D0_CURR_ADDR() bfin_read32(MDMA_D0_CURR_ADDR)
733#define bfin_write_MDMA_D0_CURR_ADDR(val) bfin_write32(MDMA_D0_CURR_ADDR, val)
734#define bfin_read_MDMA_D0_CURR_X_COUNT() bfin_read16(MDMA_D0_CURR_X_COUNT)
735#define bfin_write_MDMA_D0_CURR_X_COUNT(val) bfin_write16(MDMA_D0_CURR_X_COUNT, val)
736#define bfin_read_MDMA_D0_CURR_Y_COUNT() bfin_read16(MDMA_D0_CURR_Y_COUNT)
737#define bfin_write_MDMA_D0_CURR_Y_COUNT(val) bfin_write16(MDMA_D0_CURR_Y_COUNT, val)
738#define bfin_read_MDMA_D0_IRQ_STATUS() bfin_read16(MDMA_D0_IRQ_STATUS)
739#define bfin_write_MDMA_D0_IRQ_STATUS(val) bfin_write16(MDMA_D0_IRQ_STATUS, val)
740#define bfin_read_MDMA_D0_PERIPHERAL_MAP() bfin_read16(MDMA_D0_PERIPHERAL_MAP)
741#define bfin_write_MDMA_D0_PERIPHERAL_MAP(val) bfin_write16(MDMA_D0_PERIPHERAL_MAP, val)
742
743#define bfin_read_MDMA_S0_CONFIG() bfin_read16(MDMA_S0_CONFIG)
744#define bfin_write_MDMA_S0_CONFIG(val) bfin_write16(MDMA_S0_CONFIG, val)
745#define bfin_read_MDMA_S0_NEXT_DESC_PTR() bfin_read32(MDMA_S0_NEXT_DESC_PTR)
746#define bfin_write_MDMA_S0_NEXT_DESC_PTR(val) bfin_write32(MDMA_S0_NEXT_DESC_PTR, val)
747#define bfin_read_MDMA_S0_START_ADDR() bfin_read32(MDMA_S0_START_ADDR)
748#define bfin_write_MDMA_S0_START_ADDR(val) bfin_write32(MDMA_S0_START_ADDR, val)
749#define bfin_read_MDMA_S0_X_COUNT() bfin_read16(MDMA_S0_X_COUNT)
750#define bfin_write_MDMA_S0_X_COUNT(val) bfin_write16(MDMA_S0_X_COUNT, val)
751#define bfin_read_MDMA_S0_Y_COUNT() bfin_read16(MDMA_S0_Y_COUNT)
752#define bfin_write_MDMA_S0_Y_COUNT(val) bfin_write16(MDMA_S0_Y_COUNT, val)
753#define bfin_read_MDMA_S0_X_MODIFY() bfin_read16(MDMA_S0_X_MODIFY)
754#define bfin_write_MDMA_S0_X_MODIFY(val) bfin_write16(MDMA_S0_X_MODIFY, val)
755#define bfin_read_MDMA_S0_Y_MODIFY() bfin_read16(MDMA_S0_Y_MODIFY)
756#define bfin_write_MDMA_S0_Y_MODIFY(val) bfin_write16(MDMA_S0_Y_MODIFY, val)
757#define bfin_read_MDMA_S0_CURR_DESC_PTR() bfin_read32(MDMA_S0_CURR_DESC_PTR)
758#define bfin_write_MDMA_S0_CURR_DESC_PTR(val) bfin_write32(MDMA_S0_CURR_DESC_PTR, val)
759#define bfin_read_MDMA_S0_CURR_ADDR() bfin_read32(MDMA_S0_CURR_ADDR)
760#define bfin_write_MDMA_S0_CURR_ADDR(val) bfin_write32(MDMA_S0_CURR_ADDR, val)
761#define bfin_read_MDMA_S0_CURR_X_COUNT() bfin_read16(MDMA_S0_CURR_X_COUNT)
762#define bfin_write_MDMA_S0_CURR_X_COUNT(val) bfin_write16(MDMA_S0_CURR_X_COUNT, val)
763#define bfin_read_MDMA_S0_CURR_Y_COUNT() bfin_read16(MDMA_S0_CURR_Y_COUNT)
764#define bfin_write_MDMA_S0_CURR_Y_COUNT(val) bfin_write16(MDMA_S0_CURR_Y_COUNT, val)
765#define bfin_read_MDMA_S0_IRQ_STATUS() bfin_read16(MDMA_S0_IRQ_STATUS)
766#define bfin_write_MDMA_S0_IRQ_STATUS(val) bfin_write16(MDMA_S0_IRQ_STATUS, val)
767#define bfin_read_MDMA_S0_PERIPHERAL_MAP() bfin_read16(MDMA_S0_PERIPHERAL_MAP)
768#define bfin_write_MDMA_S0_PERIPHERAL_MAP(val) bfin_write16(MDMA_S0_PERIPHERAL_MAP, val)
769
770#define bfin_read_MDMA_D1_CONFIG() bfin_read16(MDMA_D1_CONFIG)
771#define bfin_write_MDMA_D1_CONFIG(val) bfin_write16(MDMA_D1_CONFIG, val)
772#define bfin_read_MDMA_D1_NEXT_DESC_PTR() bfin_read32(MDMA_D1_NEXT_DESC_PTR)
773#define bfin_write_MDMA_D1_NEXT_DESC_PTR(val) bfin_write32(MDMA_D1_NEXT_DESC_PTR, val)
774#define bfin_read_MDMA_D1_START_ADDR() bfin_read32(MDMA_D1_START_ADDR)
775#define bfin_write_MDMA_D1_START_ADDR(val) bfin_write32(MDMA_D1_START_ADDR, val)
776#define bfin_read_MDMA_D1_X_COUNT() bfin_read16(MDMA_D1_X_COUNT)
777#define bfin_write_MDMA_D1_X_COUNT(val) bfin_write16(MDMA_D1_X_COUNT, val)
778#define bfin_read_MDMA_D1_Y_COUNT() bfin_read16(MDMA_D1_Y_COUNT)
779#define bfin_write_MDMA_D1_Y_COUNT(val) bfin_write16(MDMA_D1_Y_COUNT, val)
780#define bfin_read_MDMA_D1_X_MODIFY() bfin_read16(MDMA_D1_X_MODIFY)
781#define bfin_write_MDMA_D1_X_MODIFY(val) bfin_write16(MDMA_D1_X_MODIFY, val)
782#define bfin_read_MDMA_D1_Y_MODIFY() bfin_read16(MDMA_D1_Y_MODIFY)
783#define bfin_write_MDMA_D1_Y_MODIFY(val) bfin_write16(MDMA_D1_Y_MODIFY, val)
784#define bfin_read_MDMA_D1_CURR_DESC_PTR() bfin_read32(MDMA_D1_CURR_DESC_PTR)
785#define bfin_write_MDMA_D1_CURR_DESC_PTR(val) bfin_write32(MDMA_D1_CURR_DESC_PTR, val)
786#define bfin_read_MDMA_D1_CURR_ADDR() bfin_read32(MDMA_D1_CURR_ADDR)
787#define bfin_write_MDMA_D1_CURR_ADDR(val) bfin_write32(MDMA_D1_CURR_ADDR, val)
788#define bfin_read_MDMA_D1_CURR_X_COUNT() bfin_read16(MDMA_D1_CURR_X_COUNT)
789#define bfin_write_MDMA_D1_CURR_X_COUNT(val) bfin_write16(MDMA_D1_CURR_X_COUNT, val)
790#define bfin_read_MDMA_D1_CURR_Y_COUNT() bfin_read16(MDMA_D1_CURR_Y_COUNT)
791#define bfin_write_MDMA_D1_CURR_Y_COUNT(val) bfin_write16(MDMA_D1_CURR_Y_COUNT, val)
792#define bfin_read_MDMA_D1_IRQ_STATUS() bfin_read16(MDMA_D1_IRQ_STATUS)
793#define bfin_write_MDMA_D1_IRQ_STATUS(val) bfin_write16(MDMA_D1_IRQ_STATUS, val)
794#define bfin_read_MDMA_D1_PERIPHERAL_MAP() bfin_read16(MDMA_D1_PERIPHERAL_MAP)
795#define bfin_write_MDMA_D1_PERIPHERAL_MAP(val) bfin_write16(MDMA_D1_PERIPHERAL_MAP, val)
796
797#define bfin_read_MDMA_S1_CONFIG() bfin_read16(MDMA_S1_CONFIG)
798#define bfin_write_MDMA_S1_CONFIG(val) bfin_write16(MDMA_S1_CONFIG, val)
799#define bfin_read_MDMA_S1_NEXT_DESC_PTR() bfin_read32(MDMA_S1_NEXT_DESC_PTR)
800#define bfin_write_MDMA_S1_NEXT_DESC_PTR(val) bfin_write32(MDMA_S1_NEXT_DESC_PTR, val)
801#define bfin_read_MDMA_S1_START_ADDR() bfin_read32(MDMA_S1_START_ADDR)
802#define bfin_write_MDMA_S1_START_ADDR(val) bfin_write32(MDMA_S1_START_ADDR, val)
803#define bfin_read_MDMA_S1_X_COUNT() bfin_read16(MDMA_S1_X_COUNT)
804#define bfin_write_MDMA_S1_X_COUNT(val) bfin_write16(MDMA_S1_X_COUNT, val)
805#define bfin_read_MDMA_S1_Y_COUNT() bfin_read16(MDMA_S1_Y_COUNT)
806#define bfin_write_MDMA_S1_Y_COUNT(val) bfin_write16(MDMA_S1_Y_COUNT, val)
807#define bfin_read_MDMA_S1_X_MODIFY() bfin_read16(MDMA_S1_X_MODIFY)
808#define bfin_write_MDMA_S1_X_MODIFY(val) bfin_write16(MDMA_S1_X_MODIFY, val)
809#define bfin_read_MDMA_S1_Y_MODIFY() bfin_read16(MDMA_S1_Y_MODIFY)
810#define bfin_write_MDMA_S1_Y_MODIFY(val) bfin_write16(MDMA_S1_Y_MODIFY, val)
811#define bfin_read_MDMA_S1_CURR_DESC_PTR() bfin_read32(MDMA_S1_CURR_DESC_PTR)
812#define bfin_write_MDMA_S1_CURR_DESC_PTR(val) bfin_write32(MDMA_S1_CURR_DESC_PTR, val)
813#define bfin_read_MDMA_S1_CURR_ADDR() bfin_read32(MDMA_S1_CURR_ADDR)
814#define bfin_write_MDMA_S1_CURR_ADDR(val) bfin_write32(MDMA_S1_CURR_ADDR, val)
815#define bfin_read_MDMA_S1_CURR_X_COUNT() bfin_read16(MDMA_S1_CURR_X_COUNT)
816#define bfin_write_MDMA_S1_CURR_X_COUNT(val) bfin_write16(MDMA_S1_CURR_X_COUNT, val)
817#define bfin_read_MDMA_S1_CURR_Y_COUNT() bfin_read16(MDMA_S1_CURR_Y_COUNT)
818#define bfin_write_MDMA_S1_CURR_Y_COUNT(val) bfin_write16(MDMA_S1_CURR_Y_COUNT, val)
819#define bfin_read_MDMA_S1_IRQ_STATUS() bfin_read16(MDMA_S1_IRQ_STATUS)
820#define bfin_write_MDMA_S1_IRQ_STATUS(val) bfin_write16(MDMA_S1_IRQ_STATUS, val)
821#define bfin_read_MDMA_S1_PERIPHERAL_MAP() bfin_read16(MDMA_S1_PERIPHERAL_MAP)
822#define bfin_write_MDMA_S1_PERIPHERAL_MAP(val) bfin_write16(MDMA_S1_PERIPHERAL_MAP, val)
823
824
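/*
 * Editor's sketch, not part of the original header: how the MDMA
 * stream 0 accessors above are typically combined for a one-shot
 * 16-bit memory-to-memory copy. The config bits (DMAEN, WNR,
 * WDSIZE_16) and the DMA_DONE status bit are assumed to come from the
 * companion defBF51x headers; the function and its arguments are
 * hypothetical. A real driver would also SSYNC and disable the
 * channels when done.
 */
static void mdma0_copy16(void *dst, const void *src, unsigned short nwords)
{
	bfin_write_MDMA_S0_START_ADDR((unsigned long)src);
	bfin_write_MDMA_S0_X_COUNT(nwords);
	bfin_write_MDMA_S0_X_MODIFY(2);				/* 2 bytes per element */
	bfin_write_MDMA_D0_START_ADDR((unsigned long)dst);
	bfin_write_MDMA_D0_X_COUNT(nwords);
	bfin_write_MDMA_D0_X_MODIFY(2);
	bfin_write_MDMA_S0_CONFIG(DMAEN | WDSIZE_16);		/* source side: read */
	bfin_write_MDMA_D0_CONFIG(DMAEN | WNR | WDSIZE_16);	/* dest side: write, go */
	while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE))
		continue;					/* poll for completion */
	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE);		/* W1C: ack the done bit */
}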
825/* Parallel Peripheral Interface (0xFFC01000 - 0xFFC010FF) */
826#define bfin_read_PPI_CONTROL() bfin_read16(PPI_CONTROL)
827#define bfin_write_PPI_CONTROL(val) bfin_write16(PPI_CONTROL, val)
828#define bfin_read_PPI_STATUS() bfin_read16(PPI_STATUS)
829#define bfin_write_PPI_STATUS(val) bfin_write16(PPI_STATUS, val)
830#define bfin_clear_PPI_STATUS() bfin_write_PPI_STATUS(0xFFFF)
831#define bfin_read_PPI_DELAY() bfin_read16(PPI_DELAY)
832#define bfin_write_PPI_DELAY(val) bfin_write16(PPI_DELAY, val)
833#define bfin_read_PPI_COUNT() bfin_read16(PPI_COUNT)
834#define bfin_write_PPI_COUNT(val) bfin_write16(PPI_COUNT, val)
835#define bfin_read_PPI_FRAME() bfin_read16(PPI_FRAME)
836#define bfin_write_PPI_FRAME(val) bfin_write16(PPI_FRAME, val)
837
838
839/* Two-Wire Interface (0xFFC01400 - 0xFFC014FF) */
840
841/* General Purpose I/O Port G (0xFFC01500 - 0xFFC015FF) */
842#define bfin_read_PORTGIO() bfin_read16(PORTGIO)
843#define bfin_write_PORTGIO(val) bfin_write16(PORTGIO, val)
844#define bfin_read_PORTGIO_CLEAR() bfin_read16(PORTGIO_CLEAR)
845#define bfin_write_PORTGIO_CLEAR(val) bfin_write16(PORTGIO_CLEAR, val)
846#define bfin_read_PORTGIO_SET() bfin_read16(PORTGIO_SET)
847#define bfin_write_PORTGIO_SET(val) bfin_write16(PORTGIO_SET, val)
848#define bfin_read_PORTGIO_TOGGLE() bfin_read16(PORTGIO_TOGGLE)
849#define bfin_write_PORTGIO_TOGGLE(val) bfin_write16(PORTGIO_TOGGLE, val)
850#define bfin_read_PORTGIO_MASKA() bfin_read16(PORTGIO_MASKA)
851#define bfin_write_PORTGIO_MASKA(val) bfin_write16(PORTGIO_MASKA, val)
852#define bfin_read_PORTGIO_MASKA_CLEAR() bfin_read16(PORTGIO_MASKA_CLEAR)
853#define bfin_write_PORTGIO_MASKA_CLEAR(val) bfin_write16(PORTGIO_MASKA_CLEAR, val)
854#define bfin_read_PORTGIO_MASKA_SET() bfin_read16(PORTGIO_MASKA_SET)
855#define bfin_write_PORTGIO_MASKA_SET(val) bfin_write16(PORTGIO_MASKA_SET, val)
856#define bfin_read_PORTGIO_MASKA_TOGGLE() bfin_read16(PORTGIO_MASKA_TOGGLE)
857#define bfin_write_PORTGIO_MASKA_TOGGLE(val) bfin_write16(PORTGIO_MASKA_TOGGLE, val)
858#define bfin_read_PORTGIO_MASKB() bfin_read16(PORTGIO_MASKB)
859#define bfin_write_PORTGIO_MASKB(val) bfin_write16(PORTGIO_MASKB, val)
860#define bfin_read_PORTGIO_MASKB_CLEAR() bfin_read16(PORTGIO_MASKB_CLEAR)
861#define bfin_write_PORTGIO_MASKB_CLEAR(val) bfin_write16(PORTGIO_MASKB_CLEAR, val)
862#define bfin_read_PORTGIO_MASKB_SET() bfin_read16(PORTGIO_MASKB_SET)
863#define bfin_write_PORTGIO_MASKB_SET(val) bfin_write16(PORTGIO_MASKB_SET, val)
864#define bfin_read_PORTGIO_MASKB_TOGGLE() bfin_read16(PORTGIO_MASKB_TOGGLE)
865#define bfin_write_PORTGIO_MASKB_TOGGLE(val) bfin_write16(PORTGIO_MASKB_TOGGLE, val)
866#define bfin_read_PORTGIO_DIR() bfin_read16(PORTGIO_DIR)
867#define bfin_write_PORTGIO_DIR(val) bfin_write16(PORTGIO_DIR, val)
868#define bfin_read_PORTGIO_POLAR() bfin_read16(PORTGIO_POLAR)
869#define bfin_write_PORTGIO_POLAR(val) bfin_write16(PORTGIO_POLAR, val)
870#define bfin_read_PORTGIO_EDGE() bfin_read16(PORTGIO_EDGE)
871#define bfin_write_PORTGIO_EDGE(val) bfin_write16(PORTGIO_EDGE, val)
872#define bfin_read_PORTGIO_BOTH() bfin_read16(PORTGIO_BOTH)
873#define bfin_write_PORTGIO_BOTH(val) bfin_write16(PORTGIO_BOTH, val)
874#define bfin_read_PORTGIO_INEN() bfin_read16(PORTGIO_INEN)
875#define bfin_write_PORTGIO_INEN(val) bfin_write16(PORTGIO_INEN, val)
876
877
878/* General Purpose I/O Port H (0xFFC01700 - 0xFFC017FF) */
879#define bfin_read_PORTHIO() bfin_read16(PORTHIO)
880#define bfin_write_PORTHIO(val) bfin_write16(PORTHIO, val)
881#define bfin_read_PORTHIO_CLEAR() bfin_read16(PORTHIO_CLEAR)
882#define bfin_write_PORTHIO_CLEAR(val) bfin_write16(PORTHIO_CLEAR, val)
883#define bfin_read_PORTHIO_SET() bfin_read16(PORTHIO_SET)
884#define bfin_write_PORTHIO_SET(val) bfin_write16(PORTHIO_SET, val)
885#define bfin_read_PORTHIO_TOGGLE() bfin_read16(PORTHIO_TOGGLE)
886#define bfin_write_PORTHIO_TOGGLE(val) bfin_write16(PORTHIO_TOGGLE, val)
887#define bfin_read_PORTHIO_MASKA() bfin_read16(PORTHIO_MASKA)
888#define bfin_write_PORTHIO_MASKA(val) bfin_write16(PORTHIO_MASKA, val)
889#define bfin_read_PORTHIO_MASKA_CLEAR() bfin_read16(PORTHIO_MASKA_CLEAR)
890#define bfin_write_PORTHIO_MASKA_CLEAR(val) bfin_write16(PORTHIO_MASKA_CLEAR, val)
891#define bfin_read_PORTHIO_MASKA_SET() bfin_read16(PORTHIO_MASKA_SET)
892#define bfin_write_PORTHIO_MASKA_SET(val) bfin_write16(PORTHIO_MASKA_SET, val)
893#define bfin_read_PORTHIO_MASKA_TOGGLE() bfin_read16(PORTHIO_MASKA_TOGGLE)
894#define bfin_write_PORTHIO_MASKA_TOGGLE(val) bfin_write16(PORTHIO_MASKA_TOGGLE, val)
895#define bfin_read_PORTHIO_MASKB() bfin_read16(PORTHIO_MASKB)
896#define bfin_write_PORTHIO_MASKB(val) bfin_write16(PORTHIO_MASKB, val)
897#define bfin_read_PORTHIO_MASKB_CLEAR() bfin_read16(PORTHIO_MASKB_CLEAR)
898#define bfin_write_PORTHIO_MASKB_CLEAR(val) bfin_write16(PORTHIO_MASKB_CLEAR, val)
899#define bfin_read_PORTHIO_MASKB_SET() bfin_read16(PORTHIO_MASKB_SET)
900#define bfin_write_PORTHIO_MASKB_SET(val) bfin_write16(PORTHIO_MASKB_SET, val)
901#define bfin_read_PORTHIO_MASKB_TOGGLE() bfin_read16(PORTHIO_MASKB_TOGGLE)
902#define bfin_write_PORTHIO_MASKB_TOGGLE(val) bfin_write16(PORTHIO_MASKB_TOGGLE, val)
903#define bfin_read_PORTHIO_DIR() bfin_read16(PORTHIO_DIR)
904#define bfin_write_PORTHIO_DIR(val) bfin_write16(PORTHIO_DIR, val)
905#define bfin_read_PORTHIO_POLAR() bfin_read16(PORTHIO_POLAR)
906#define bfin_write_PORTHIO_POLAR(val) bfin_write16(PORTHIO_POLAR, val)
907#define bfin_read_PORTHIO_EDGE() bfin_read16(PORTHIO_EDGE)
908#define bfin_write_PORTHIO_EDGE(val) bfin_write16(PORTHIO_EDGE, val)
909#define bfin_read_PORTHIO_BOTH() bfin_read16(PORTHIO_BOTH)
910#define bfin_write_PORTHIO_BOTH(val) bfin_write16(PORTHIO_BOTH, val)
911#define bfin_read_PORTHIO_INEN() bfin_read16(PORTHIO_INEN)
912#define bfin_write_PORTHIO_INEN(val) bfin_write16(PORTHIO_INEN, val)
913
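/*
 * Editor's sketch, not part of the original header: arming a
 * rising-edge interrupt on a Port H pin through the interrupt-A mask.
 * The MASKA_SET/MASKA_CLEAR pair is write-to-set/write-to-clear, so no
 * read-modify-write is needed there; pin_mask is hypothetical.
 */
static void porth_irq_arm(unsigned short pin_mask)
{
	bfin_write_PORTHIO_DIR(bfin_read_PORTHIO_DIR() & ~pin_mask);	/* pin as input */
	bfin_write_PORTHIO_INEN(bfin_read_PORTHIO_INEN() | pin_mask);	/* input buffer on */
	bfin_write_PORTHIO_EDGE(bfin_read_PORTHIO_EDGE() | pin_mask);	/* edge sensitive */
	bfin_write_PORTHIO_POLAR(bfin_read_PORTHIO_POLAR() & ~pin_mask);/* rising edge */
	bfin_write_PORTHIO_MASKA_SET(pin_mask);				/* unmask on channel A */
}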
914
915/* UART1 Controller (0xFFC02000 - 0xFFC020FF) */
916#define bfin_read_UART1_THR() bfin_read16(UART1_THR)
917#define bfin_write_UART1_THR(val) bfin_write16(UART1_THR, val)
918#define bfin_read_UART1_RBR() bfin_read16(UART1_RBR)
919#define bfin_write_UART1_RBR(val) bfin_write16(UART1_RBR, val)
920#define bfin_read_UART1_DLL() bfin_read16(UART1_DLL)
921#define bfin_write_UART1_DLL(val) bfin_write16(UART1_DLL, val)
922#define bfin_read_UART1_IER() bfin_read16(UART1_IER)
923#define bfin_write_UART1_IER(val) bfin_write16(UART1_IER, val)
924#define bfin_read_UART1_DLH() bfin_read16(UART1_DLH)
925#define bfin_write_UART1_DLH(val) bfin_write16(UART1_DLH, val)
926#define bfin_read_UART1_IIR() bfin_read16(UART1_IIR)
927#define bfin_write_UART1_IIR(val) bfin_write16(UART1_IIR, val)
928#define bfin_read_UART1_LCR() bfin_read16(UART1_LCR)
929#define bfin_write_UART1_LCR(val) bfin_write16(UART1_LCR, val)
930#define bfin_read_UART1_MCR() bfin_read16(UART1_MCR)
931#define bfin_write_UART1_MCR(val) bfin_write16(UART1_MCR, val)
932#define bfin_read_UART1_LSR() bfin_read16(UART1_LSR)
933#define bfin_write_UART1_LSR(val) bfin_write16(UART1_LSR, val)
934#define bfin_read_UART1_MSR() bfin_read16(UART1_MSR)
935#define bfin_write_UART1_MSR(val) bfin_write16(UART1_MSR, val)
936#define bfin_read_UART1_SCR() bfin_read16(UART1_SCR)
937#define bfin_write_UART1_SCR(val) bfin_write16(UART1_SCR, val)
938#define bfin_read_UART1_GCTL() bfin_read16(UART1_GCTL)
939#define bfin_write_UART1_GCTL(val) bfin_write16(UART1_GCTL, val)
940
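/*
 * Editor's sketch, not part of the original header: programming the
 * UART1 baud divisor. DLL/DLH share MMR addresses with THR/RBR/IER and
 * are reached by setting the DLAB bit in LCR; DLAB is assumed to come
 * from the UART bit definitions, and the divisor is assumed to be
 * precomputed as SCLK / (16 * baud).
 */
static void uart1_set_divisor(unsigned short divisor)
{
	unsigned short lcr = bfin_read_UART1_LCR();

	bfin_write_UART1_LCR(lcr | DLAB);		/* expose divisor latches */
	bfin_write_UART1_DLL(divisor & 0xff);
	bfin_write_UART1_DLH((divisor >> 8) & 0xff);
	bfin_write_UART1_LCR(lcr & ~DLAB);		/* back to data registers */
}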
941/* CAN register set from cdefBF534.h omitted (the ADSP-BF51x has no CAN controller) */
942
943/* Pin Control Registers (0xFFC03200 - 0xFFC032FF) */
944#define bfin_read_PORTF_FER() bfin_read16(PORTF_FER)
945#define bfin_write_PORTF_FER(val) bfin_write16(PORTF_FER, val)
946#define bfin_read_PORTG_FER() bfin_read16(PORTG_FER)
947#define bfin_write_PORTG_FER(val) bfin_write16(PORTG_FER, val)
948#define bfin_read_PORTH_FER() bfin_read16(PORTH_FER)
949#define bfin_write_PORTH_FER(val) bfin_write16(PORTH_FER, val)
950#define bfin_read_PORT_MUX() bfin_read16(PORT_MUX)
951#define bfin_write_PORT_MUX(val) bfin_write16(PORT_MUX, val)
952
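/*
 * Editor's sketch, not part of the original header: a bit set in a
 * PORTx_FER register hands that pin to the peripheral function
 * selected via PORT_MUX; a cleared bit leaves it as GPIO. pin_mask is
 * hypothetical.
 */
static void portf_enable_peripheral(unsigned short pin_mask)
{
	bfin_write_PORTF_FER(bfin_read_PORTF_FER() | pin_mask);
}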
953
954/* Handshake MDMA Registers (0xFFC03300 - 0xFFC033FF) */
955#define bfin_read_HMDMA0_CONTROL() bfin_read16(HMDMA0_CONTROL)
956#define bfin_write_HMDMA0_CONTROL(val) bfin_write16(HMDMA0_CONTROL, val)
957#define bfin_read_HMDMA0_ECINIT() bfin_read16(HMDMA0_ECINIT)
958#define bfin_write_HMDMA0_ECINIT(val) bfin_write16(HMDMA0_ECINIT, val)
959#define bfin_read_HMDMA0_BCINIT() bfin_read16(HMDMA0_BCINIT)
960#define bfin_write_HMDMA0_BCINIT(val) bfin_write16(HMDMA0_BCINIT, val)
961#define bfin_read_HMDMA0_ECURGENT() bfin_read16(HMDMA0_ECURGENT)
962#define bfin_write_HMDMA0_ECURGENT(val) bfin_write16(HMDMA0_ECURGENT, val)
963#define bfin_read_HMDMA0_ECOVERFLOW() bfin_read16(HMDMA0_ECOVERFLOW)
964#define bfin_write_HMDMA0_ECOVERFLOW(val) bfin_write16(HMDMA0_ECOVERFLOW, val)
965#define bfin_read_HMDMA0_ECOUNT() bfin_read16(HMDMA0_ECOUNT)
966#define bfin_write_HMDMA0_ECOUNT(val) bfin_write16(HMDMA0_ECOUNT, val)
967#define bfin_read_HMDMA0_BCOUNT() bfin_read16(HMDMA0_BCOUNT)
968#define bfin_write_HMDMA0_BCOUNT(val) bfin_write16(HMDMA0_BCOUNT, val)
969
970#define bfin_read_HMDMA1_CONTROL() bfin_read16(HMDMA1_CONTROL)
971#define bfin_write_HMDMA1_CONTROL(val) bfin_write16(HMDMA1_CONTROL, val)
972#define bfin_read_HMDMA1_ECINIT() bfin_read16(HMDMA1_ECINIT)
973#define bfin_write_HMDMA1_ECINIT(val) bfin_write16(HMDMA1_ECINIT, val)
974#define bfin_read_HMDMA1_BCINIT() bfin_read16(HMDMA1_BCINIT)
975#define bfin_write_HMDMA1_BCINIT(val) bfin_write16(HMDMA1_BCINIT, val)
976#define bfin_read_HMDMA1_ECURGENT() bfin_read16(HMDMA1_ECURGENT)
977#define bfin_write_HMDMA1_ECURGENT(val) bfin_write16(HMDMA1_ECURGENT, val)
978#define bfin_read_HMDMA1_ECOVERFLOW() bfin_read16(HMDMA1_ECOVERFLOW)
979#define bfin_write_HMDMA1_ECOVERFLOW(val) bfin_write16(HMDMA1_ECOVERFLOW, val)
980#define bfin_read_HMDMA1_ECOUNT() bfin_read16(HMDMA1_ECOUNT)
981#define bfin_write_HMDMA1_ECOUNT(val) bfin_write16(HMDMA1_ECOUNT, val)
982#define bfin_read_HMDMA1_BCOUNT() bfin_read16(HMDMA1_BCOUNT)
983#define bfin_write_HMDMA1_BCOUNT(val) bfin_write16(HMDMA1_BCOUNT, val)
984
985/* ==== end from cdefBF534.h ==== */
986
987/* GPIO Pin mux (0xFFC03210 - 0xFFC03288) */
988
989#define bfin_read_PORTF_MUX() bfin_read16(PORTF_MUX)
990#define bfin_write_PORTF_MUX(val) bfin_write16(PORTF_MUX, val)
991#define bfin_read_PORTG_MUX() bfin_read16(PORTG_MUX)
992#define bfin_write_PORTG_MUX(val) bfin_write16(PORTG_MUX, val)
993#define bfin_read_PORTH_MUX() bfin_read16(PORTH_MUX)
994#define bfin_write_PORTH_MUX(val) bfin_write16(PORTH_MUX, val)
995
996#define bfin_read_PORTF_DRIVE() bfin_read16(PORTF_DRIVE)
997#define bfin_write_PORTF_DRIVE(val) bfin_write16(PORTF_DRIVE, val)
998#define bfin_read_PORTG_DRIVE() bfin_read16(PORTG_DRIVE)
999#define bfin_write_PORTG_DRIVE(val) bfin_write16(PORTG_DRIVE, val)
1000#define bfin_read_PORTH_DRIVE() bfin_read16(PORTH_DRIVE)
1001#define bfin_write_PORTH_DRIVE(val) bfin_write16(PORTH_DRIVE, val)
1002#define bfin_read_PORTF_SLEW() bfin_read16(PORTF_SLEW)
1003#define bfin_write_PORTF_SLEW(val) bfin_write16(PORTF_SLEW, val)
1004#define bfin_read_PORTG_SLEW() bfin_read16(PORTG_SLEW)
1005#define bfin_write_PORTG_SLEW(val) bfin_write16(PORTG_SLEW, val)
1006#define bfin_read_PORTH_SLEW() bfin_read16(PORTH_SLEW)
1007#define bfin_write_PORTH_SLEW(val) bfin_write16(PORTH_SLEW, val)
1008#define bfin_read_PORTF_HYSTERISIS() bfin_read16(PORTF_HYSTERISIS)
1009#define bfin_write_PORTF_HYSTERISIS(val) bfin_write16(PORTF_HYSTERISIS, val)
1010#define bfin_read_PORTG_HYSTERISIS() bfin_read16(PORTG_HYSTERISIS)
1011#define bfin_write_PORTG_HYSTERISIS(val) bfin_write16(PORTG_HYSTERISIS, val)
1012#define bfin_read_PORTH_HYSTERISIS() bfin_read16(PORTH_HYSTERISIS)
1013#define bfin_write_PORTH_HYSTERISIS(val) bfin_write16(PORTH_HYSTERISIS, val)
1014#define bfin_read_MISCPORT_DRIVE() bfin_read16(MISCPORT_DRIVE)
1015#define bfin_write_MISCPORT_DRIVE(val) bfin_write16(MISCPORT_DRIVE, val)
1016#define bfin_read_MISCPORT_SLEW() bfin_read16(MISCPORT_SLEW)
1017#define bfin_write_MISCPORT_SLEW(val) bfin_write16(MISCPORT_SLEW, val)
1018#define bfin_read_MISCPORT_HYSTERISIS() bfin_read16(MISCPORT_HYSTERISIS)
1019#define bfin_write_MISCPORT_HYSTERISIS(val) bfin_write16(MISCPORT_HYSTERISIS, val)
1020
1021/* HOST Port Registers */
1022
1023#define bfin_read_HOST_CONTROL() bfin_read16(HOST_CONTROL)
1024#define bfin_write_HOST_CONTROL(val) bfin_write16(HOST_CONTROL, val)
1025#define bfin_read_HOST_STATUS() bfin_read16(HOST_STATUS)
1026#define bfin_write_HOST_STATUS(val) bfin_write16(HOST_STATUS, val)
1027#define bfin_read_HOST_TIMEOUT() bfin_read16(HOST_TIMEOUT)
1028#define bfin_write_HOST_TIMEOUT(val) bfin_write16(HOST_TIMEOUT, val)
1029
1030/* Counter Registers */
1031
1032#define bfin_read_CNT_CONFIG() bfin_read16(CNT_CONFIG)
1033#define bfin_write_CNT_CONFIG(val) bfin_write16(CNT_CONFIG, val)
1034#define bfin_read_CNT_IMASK() bfin_read16(CNT_IMASK)
1035#define bfin_write_CNT_IMASK(val) bfin_write16(CNT_IMASK, val)
1036#define bfin_read_CNT_STATUS() bfin_read16(CNT_STATUS)
1037#define bfin_write_CNT_STATUS(val) bfin_write16(CNT_STATUS, val)
1038#define bfin_read_CNT_COMMAND() bfin_read16(CNT_COMMAND)
1039#define bfin_write_CNT_COMMAND(val) bfin_write16(CNT_COMMAND, val)
1040#define bfin_read_CNT_DEBOUNCE() bfin_read16(CNT_DEBOUNCE)
1041#define bfin_write_CNT_DEBOUNCE(val) bfin_write16(CNT_DEBOUNCE, val)
1042#define bfin_read_CNT_COUNTER() bfin_read32(CNT_COUNTER)
1043#define bfin_write_CNT_COUNTER(val) bfin_write32(CNT_COUNTER, val)
1044#define bfin_read_CNT_MAX() bfin_read32(CNT_MAX)
1045#define bfin_write_CNT_MAX(val) bfin_write32(CNT_MAX, val)
1046#define bfin_read_CNT_MIN() bfin_read32(CNT_MIN)
1047#define bfin_write_CNT_MIN(val) bfin_write32(CNT_MIN, val)
1048
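/*
 * Editor's note, not part of the original header: CNT_COUNTER (like
 * CNT_MAX/CNT_MIN) is 32 bits wide, hence the bfin_read32()/
 * bfin_write32() behind its accessors, while the surrounding control
 * registers are 16 bits. A position snapshot is therefore one read:
 */
static int cnt_position(void)
{
	return (int)bfin_read_CNT_COUNTER();	/* signed rotary-counter position */
}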
1049/* Security Registers */
1050
1051#define bfin_read_SECURE_SYSSWT() bfin_read32(SECURE_SYSSWT)
1052#define bfin_write_SECURE_SYSSWT(val) bfin_write32(SECURE_SYSSWT, val)
1053#define bfin_read_SECURE_CONTROL() bfin_read16(SECURE_CONTROL)
1054#define bfin_write_SECURE_CONTROL(val) bfin_write16(SECURE_CONTROL, val)
1055#define bfin_read_SECURE_STATUS() bfin_read16(SECURE_STATUS)
1056#define bfin_write_SECURE_STATUS(val) bfin_write16(SECURE_STATUS, val)
1057
1058/* These need to be last due to the cdef/linux inter-dependencies */
1059#include <asm/irq.h>
1060
1061#endif /* _CDEF_BF52X_H */
diff --git a/arch/blackfin/mach-bf518/include/mach/defBF512.h b/arch/blackfin/mach-bf518/include/mach/defBF512.h
index 9b505bb0cb2d..27285823fb25 100644
--- a/arch/blackfin/mach-bf518/include/mach/defBF512.h
+++ b/arch/blackfin/mach-bf518/include/mach/defBF512.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008 Analog Devices Inc.
+ * Copyright 2008-2010 Analog Devices Inc.
  *
  * Licensed under the ADI BSD license or the GPL-2 (or later)
  */
@@ -7,12 +7,1388 @@
 #ifndef _DEF_BF512_H
 #define _DEF_BF512_H
 
-/* Include all Core registers and bit definitions */
-#include <asm/def_LPBlackfin.h>
-
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF512 */
-
-/* Include defBF51x_base.h for the set of #defines that are common to all ADSP-BF51x processors */
-#include "defBF51x_base.h"
+/* ************************************************************** */
+/* SYSTEM & MMR ADDRESS DEFINITIONS COMMON TO ALL ADSP-BF51x */
+/* ************************************************************** */
+
+/* Clock and System Control (0xFFC00000 - 0xFFC000FF) */
+#define PLL_CTL 0xFFC00000 /* PLL Control Register */
+#define PLL_DIV 0xFFC00004 /* PLL Divide Register */
+#define VR_CTL 0xFFC00008 /* Voltage Regulator Control Register */
+#define PLL_STAT 0xFFC0000C /* PLL Status Register */
+#define PLL_LOCKCNT 0xFFC00010 /* PLL Lock Count Register */
+#define CHIPID 0xFFC00014 /* Device ID Register */
+
+/* System Interrupt Controller (0xFFC00100 - 0xFFC001FF) */
+#define SWRST 0xFFC00100 /* Software Reset Register */
24#define SYSCR 0xFFC00104 /* System Configuration Register */
25#define SIC_RVECT 0xFFC00108 /* Interrupt Reset Vector Address Register */
26
27#define SIC_IMASK0 0xFFC0010C /* Interrupt Mask Register */
28#define SIC_IAR0 0xFFC00110 /* Interrupt Assignment Register 0 */
29#define SIC_IAR1 0xFFC00114 /* Interrupt Assignment Register 1 */
30#define SIC_IAR2 0xFFC00118 /* Interrupt Assignment Register 2 */
31#define SIC_IAR3 0xFFC0011C /* Interrupt Assignment Register 3 */
32#define SIC_ISR0 0xFFC00120 /* Interrupt Status Register */
33#define SIC_IWR0 0xFFC00124 /* Interrupt Wakeup Register */
34
35/* SIC Additions to ADSP-BF51x (0xFFC0014C - 0xFFC00162) */
36#define SIC_IMASK1 0xFFC0014C /* Interrupt Mask Register 1 */
37#define SIC_IAR4 0xFFC00150 /* Interrupt Assignment Register 4 */
38#define SIC_IAR5 0xFFC00154 /* Interrupt Assignment Register 5 */
39#define SIC_IAR6 0xFFC00158 /* Interrupt Assignment Register 6 */
40#define SIC_IAR7 0xFFC0015C /* Interrupt Assignment Register 7 */
41#define SIC_ISR1 0xFFC00160 /* Interrupt Status Register 1 */
42#define SIC_IWR1 0xFFC00164 /* Interrupt Wakeup Register 1 */
43
44
45/* Watchdog Timer (0xFFC00200 - 0xFFC002FF) */
46#define WDOG_CTL 0xFFC00200 /* Watchdog Control Register */
47#define WDOG_CNT 0xFFC00204 /* Watchdog Count Register */
48#define WDOG_STAT 0xFFC00208 /* Watchdog Status Register */
49
50
51/* Real Time Clock (0xFFC00300 - 0xFFC003FF) */
52#define RTC_STAT 0xFFC00300 /* RTC Status Register */
53#define RTC_ICTL 0xFFC00304 /* RTC Interrupt Control Register */
54#define RTC_ISTAT 0xFFC00308 /* RTC Interrupt Status Register */
55#define RTC_SWCNT 0xFFC0030C /* RTC Stopwatch Count Register */
56#define RTC_ALARM 0xFFC00310 /* RTC Alarm Time Register */
57#define RTC_FAST 0xFFC00314 /* RTC Prescaler Enable Register */
58#define RTC_PREN 0xFFC00314 /* RTC Prescaler Enable Alternate Macro */
59
60
61/* UART0 Controller (0xFFC00400 - 0xFFC004FF) */
62#define UART0_THR 0xFFC00400 /* Transmit Holding register */
63#define UART0_RBR 0xFFC00400 /* Receive Buffer register */
64#define UART0_DLL 0xFFC00400 /* Divisor Latch (Low-Byte) */
65#define UART0_IER 0xFFC00404 /* Interrupt Enable Register */
66#define UART0_DLH 0xFFC00404 /* Divisor Latch (High-Byte) */
67#define UART0_IIR 0xFFC00408 /* Interrupt Identification Register */
68#define UART0_LCR 0xFFC0040C /* Line Control Register */
69#define UART0_MCR 0xFFC00410 /* Modem Control Register */
70#define UART0_LSR 0xFFC00414 /* Line Status Register */
71#define UART0_MSR 0xFFC00418 /* Modem Status Register */
72#define UART0_SCR 0xFFC0041C /* SCR Scratch Register */
73#define UART0_GCTL 0xFFC00424 /* Global Control Register */
74
75/* SPI0 Controller (0xFFC00500 - 0xFFC005FF) */
76#define SPI0_REGBASE 0xFFC00500
77#define SPI0_CTL 0xFFC00500 /* SPI Control Register */
78#define SPI0_FLG 0xFFC00504 /* SPI Flag register */
79#define SPI0_STAT 0xFFC00508 /* SPI Status register */
80#define SPI0_TDBR 0xFFC0050C /* SPI Transmit Data Buffer Register */
81#define SPI0_RDBR 0xFFC00510 /* SPI Receive Data Buffer Register */
82#define SPI0_BAUD 0xFFC00514 /* SPI Baud rate Register */
83#define SPI0_SHADOW 0xFFC00518 /* SPI_RDBR Shadow Register */
84
85/* SPI1 Controller (0xFFC03400 - 0xFFC034FF) */
86#define SPI1_REGBASE 0xFFC03400
87#define SPI1_CTL 0xFFC03400 /* SPI Control Register */
88#define SPI1_FLG 0xFFC03404 /* SPI Flag register */
89#define SPI1_STAT 0xFFC03408 /* SPI Status register */
90#define SPI1_TDBR 0xFFC0340C /* SPI Transmit Data Buffer Register */
91#define SPI1_RDBR 0xFFC03410 /* SPI Receive Data Buffer Register */
92#define SPI1_BAUD 0xFFC03414 /* SPI Baud rate Register */
93#define SPI1_SHADOW 0xFFC03418 /* SPI_RDBR Shadow Register */
94
95/* TIMER0-7 Registers (0xFFC00600 - 0xFFC006FF) */
96#define TIMER0_CONFIG 0xFFC00600 /* Timer 0 Configuration Register */
97#define TIMER0_COUNTER 0xFFC00604 /* Timer 0 Counter Register */
98#define TIMER0_PERIOD 0xFFC00608 /* Timer 0 Period Register */
99#define TIMER0_WIDTH 0xFFC0060C /* Timer 0 Width Register */
100
101#define TIMER1_CONFIG 0xFFC00610 /* Timer 1 Configuration Register */
102#define TIMER1_COUNTER 0xFFC00614 /* Timer 1 Counter Register */
103#define TIMER1_PERIOD 0xFFC00618 /* Timer 1 Period Register */
104#define TIMER1_WIDTH 0xFFC0061C /* Timer 1 Width Register */
105
106#define TIMER2_CONFIG 0xFFC00620 /* Timer 2 Configuration Register */
107#define TIMER2_COUNTER 0xFFC00624 /* Timer 2 Counter Register */
108#define TIMER2_PERIOD 0xFFC00628 /* Timer 2 Period Register */
109#define TIMER2_WIDTH 0xFFC0062C /* Timer 2 Width Register */
110
111#define TIMER3_CONFIG 0xFFC00630 /* Timer 3 Configuration Register */
112#define TIMER3_COUNTER 0xFFC00634 /* Timer 3 Counter Register */
113#define TIMER3_PERIOD 0xFFC00638 /* Timer 3 Period Register */
114#define TIMER3_WIDTH 0xFFC0063C /* Timer 3 Width Register */
115
116#define TIMER4_CONFIG 0xFFC00640 /* Timer 4 Configuration Register */
117#define TIMER4_COUNTER 0xFFC00644 /* Timer 4 Counter Register */
118#define TIMER4_PERIOD 0xFFC00648 /* Timer 4 Period Register */
119#define TIMER4_WIDTH 0xFFC0064C /* Timer 4 Width Register */
120
121#define TIMER5_CONFIG 0xFFC00650 /* Timer 5 Configuration Register */
122#define TIMER5_COUNTER 0xFFC00654 /* Timer 5 Counter Register */
123#define TIMER5_PERIOD 0xFFC00658 /* Timer 5 Period Register */
124#define TIMER5_WIDTH 0xFFC0065C /* Timer 5 Width Register */
125
126#define TIMER6_CONFIG 0xFFC00660 /* Timer 6 Configuration Register */
127#define TIMER6_COUNTER 0xFFC00664 /* Timer 6 Counter Register */
128#define TIMER6_PERIOD 0xFFC00668 /* Timer 6 Period Register */
129#define TIMER6_WIDTH 0xFFC0066C /* Timer 6 Width Register */
130
131#define TIMER7_CONFIG 0xFFC00670 /* Timer 7 Configuration Register */
132#define TIMER7_COUNTER 0xFFC00674 /* Timer 7 Counter Register */
133#define TIMER7_PERIOD 0xFFC00678 /* Timer 7 Period Register */
134#define TIMER7_WIDTH 0xFFC0067C /* Timer 7 Width Register */
135
136#define TIMER_ENABLE 0xFFC00680 /* Timer Enable Register */
137#define TIMER_DISABLE 0xFFC00684 /* Timer Disable Register */
138#define TIMER_STATUS 0xFFC00688 /* Timer Status Register */
139
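/*
 * Editor's note, not part of the original patch: the eight timer
 * register blocks sit 0x10 bytes apart (0xFFC00600, 0xFFC00610, ...),
 * so per-timer addresses can also be derived arithmetically; this
 * helper macro is hypothetical.
 */
#define TIMERx_CONFIG(n)	(TIMER0_CONFIG + (n) * 0x10)	/* n = 0..7 */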
140/* General Purpose I/O Port F (0xFFC00700 - 0xFFC007FF) */
141#define PORTFIO 0xFFC00700 /* Port F I/O Pin State Specify Register */
142#define PORTFIO_CLEAR 0xFFC00704 /* Port F I/O Peripheral Interrupt Clear Register */
143#define PORTFIO_SET 0xFFC00708 /* Port F I/O Peripheral Interrupt Set Register */
144#define PORTFIO_TOGGLE 0xFFC0070C /* Port F I/O Pin State Toggle Register */
145#define PORTFIO_MASKA 0xFFC00710 /* Port F I/O Mask State Specify Interrupt A Register */
146#define PORTFIO_MASKA_CLEAR 0xFFC00714 /* Port F I/O Mask Disable Interrupt A Register */
147#define PORTFIO_MASKA_SET 0xFFC00718 /* Port F I/O Mask Enable Interrupt A Register */
148#define PORTFIO_MASKA_TOGGLE 0xFFC0071C /* Port F I/O Mask Toggle Enable Interrupt A Register */
149#define PORTFIO_MASKB 0xFFC00720 /* Port F I/O Mask State Specify Interrupt B Register */
150#define PORTFIO_MASKB_CLEAR 0xFFC00724 /* Port F I/O Mask Disable Interrupt B Register */
151#define PORTFIO_MASKB_SET 0xFFC00728 /* Port F I/O Mask Enable Interrupt B Register */
152#define PORTFIO_MASKB_TOGGLE 0xFFC0072C /* Port F I/O Mask Toggle Enable Interrupt B Register */
153#define PORTFIO_DIR 0xFFC00730 /* Port F I/O Direction Register */
154#define PORTFIO_POLAR 0xFFC00734 /* Port F I/O Source Polarity Register */
155#define PORTFIO_EDGE 0xFFC00738 /* Port F I/O Source Sensitivity Register */
156#define PORTFIO_BOTH 0xFFC0073C /* Port F I/O Set on BOTH Edges Register */
157#define PORTFIO_INEN 0xFFC00740 /* Port F I/O Input Enable Register */
158
159/* SPORT0 Controller (0xFFC00800 - 0xFFC008FF) */
160#define SPORT0_TCR1 0xFFC00800 /* SPORT0 Transmit Configuration 1 Register */
161#define SPORT0_TCR2 0xFFC00804 /* SPORT0 Transmit Configuration 2 Register */
162#define SPORT0_TCLKDIV 0xFFC00808 /* SPORT0 Transmit Clock Divider */
163#define SPORT0_TFSDIV 0xFFC0080C /* SPORT0 Transmit Frame Sync Divider */
164#define SPORT0_TX 0xFFC00810 /* SPORT0 TX Data Register */
165#define SPORT0_RX 0xFFC00818 /* SPORT0 RX Data Register */
166#define SPORT0_RCR1 0xFFC00820 /* SPORT0 Receive Configuration 1 Register */
167#define SPORT0_RCR2 0xFFC00824 /* SPORT0 Receive Configuration 2 Register */
168#define SPORT0_RCLKDIV 0xFFC00828 /* SPORT0 Receive Clock Divider */
169#define SPORT0_RFSDIV 0xFFC0082C /* SPORT0 Receive Frame Sync Divider */
170#define SPORT0_STAT 0xFFC00830 /* SPORT0 Status Register */
171#define SPORT0_CHNL 0xFFC00834 /* SPORT0 Current Channel Register */
172#define SPORT0_MCMC1 0xFFC00838 /* SPORT0 Multi-Channel Configuration Register 1 */
173#define SPORT0_MCMC2 0xFFC0083C /* SPORT0 Multi-Channel Configuration Register 2 */
174#define SPORT0_MTCS0 0xFFC00840 /* SPORT0 Multi-Channel Transmit Select Register 0 */
175#define SPORT0_MTCS1 0xFFC00844 /* SPORT0 Multi-Channel Transmit Select Register 1 */
176#define SPORT0_MTCS2 0xFFC00848 /* SPORT0 Multi-Channel Transmit Select Register 2 */
177#define SPORT0_MTCS3 0xFFC0084C /* SPORT0 Multi-Channel Transmit Select Register 3 */
178#define SPORT0_MRCS0 0xFFC00850 /* SPORT0 Multi-Channel Receive Select Register 0 */
179#define SPORT0_MRCS1 0xFFC00854 /* SPORT0 Multi-Channel Receive Select Register 1 */
180#define SPORT0_MRCS2 0xFFC00858 /* SPORT0 Multi-Channel Receive Select Register 2 */
181#define SPORT0_MRCS3 0xFFC0085C /* SPORT0 Multi-Channel Receive Select Register 3 */
182
183/* SPORT1 Controller (0xFFC00900 - 0xFFC009FF) */
184#define SPORT1_TCR1 0xFFC00900 /* SPORT1 Transmit Configuration 1 Register */
185#define SPORT1_TCR2 0xFFC00904 /* SPORT1 Transmit Configuration 2 Register */
186#define SPORT1_TCLKDIV 0xFFC00908 /* SPORT1 Transmit Clock Divider */
187#define SPORT1_TFSDIV 0xFFC0090C /* SPORT1 Transmit Frame Sync Divider */
188#define SPORT1_TX 0xFFC00910 /* SPORT1 TX Data Register */
189#define SPORT1_RX 0xFFC00918 /* SPORT1 RX Data Register */
190#define SPORT1_RCR1 0xFFC00920 /* SPORT1 Receive Configuration 1 Register */
191#define SPORT1_RCR2 0xFFC00924 /* SPORT1 Receive Configuration 2 Register */
192#define SPORT1_RCLKDIV 0xFFC00928 /* SPORT1 Receive Clock Divider */
193#define SPORT1_RFSDIV 0xFFC0092C /* SPORT1 Receive Frame Sync Divider */
194#define SPORT1_STAT 0xFFC00930 /* SPORT1 Status Register */
195#define SPORT1_CHNL 0xFFC00934 /* SPORT1 Current Channel Register */
196#define SPORT1_MCMC1 0xFFC00938 /* SPORT1 Multi-Channel Configuration Register 1 */
197#define SPORT1_MCMC2 0xFFC0093C /* SPORT1 Multi-Channel Configuration Register 2 */
198#define SPORT1_MTCS0 0xFFC00940 /* SPORT1 Multi-Channel Transmit Select Register 0 */
199#define SPORT1_MTCS1 0xFFC00944 /* SPORT1 Multi-Channel Transmit Select Register 1 */
200#define SPORT1_MTCS2 0xFFC00948 /* SPORT1 Multi-Channel Transmit Select Register 2 */
201#define SPORT1_MTCS3 0xFFC0094C /* SPORT1 Multi-Channel Transmit Select Register 3 */
202#define SPORT1_MRCS0 0xFFC00950 /* SPORT1 Multi-Channel Receive Select Register 0 */
203#define SPORT1_MRCS1 0xFFC00954 /* SPORT1 Multi-Channel Receive Select Register 1 */
204#define SPORT1_MRCS2 0xFFC00958 /* SPORT1 Multi-Channel Receive Select Register 2 */
205#define SPORT1_MRCS3 0xFFC0095C /* SPORT1 Multi-Channel Receive Select Register 3 */
206
207/* External Bus Interface Unit (0xFFC00A00 - 0xFFC00AFF) */
208#define EBIU_AMGCTL 0xFFC00A00 /* Asynchronous Memory Global Control Register */
209#define EBIU_AMBCTL0 0xFFC00A04 /* Asynchronous Memory Bank Control Register 0 */
210#define EBIU_AMBCTL1 0xFFC00A08 /* Asynchronous Memory Bank Control Register 1 */
211#define EBIU_SDGCTL 0xFFC00A10 /* SDRAM Global Control Register */
212#define EBIU_SDBCTL 0xFFC00A14 /* SDRAM Bank Control Register */
213#define EBIU_SDRRC 0xFFC00A18 /* SDRAM Refresh Rate Control Register */
214#define EBIU_SDSTAT 0xFFC00A1C /* SDRAM Status Register */
215
216/* DMA Traffic Control Registers */
217#define DMAC_TC_PER 0xFFC00B0C /* Traffic Control Periods Register */
218#define DMAC_TC_CNT 0xFFC00B10 /* Traffic Control Current Counts Register */
219
220/* DMA Controller (0xFFC00C00 - 0xFFC00FFF) */
221#define DMA0_NEXT_DESC_PTR 0xFFC00C00 /* DMA Channel 0 Next Descriptor Pointer Register */
222#define DMA0_START_ADDR 0xFFC00C04 /* DMA Channel 0 Start Address Register */
223#define DMA0_CONFIG 0xFFC00C08 /* DMA Channel 0 Configuration Register */
224#define DMA0_X_COUNT 0xFFC00C10 /* DMA Channel 0 X Count Register */
225#define DMA0_X_MODIFY 0xFFC00C14 /* DMA Channel 0 X Modify Register */
226#define DMA0_Y_COUNT 0xFFC00C18 /* DMA Channel 0 Y Count Register */
227#define DMA0_Y_MODIFY 0xFFC00C1C /* DMA Channel 0 Y Modify Register */
228#define DMA0_CURR_DESC_PTR 0xFFC00C20 /* DMA Channel 0 Current Descriptor Pointer Register */
229#define DMA0_CURR_ADDR 0xFFC00C24 /* DMA Channel 0 Current Address Register */
230#define DMA0_IRQ_STATUS 0xFFC00C28 /* DMA Channel 0 Interrupt/Status Register */
231#define DMA0_PERIPHERAL_MAP 0xFFC00C2C /* DMA Channel 0 Peripheral Map Register */
232#define DMA0_CURR_X_COUNT 0xFFC00C30 /* DMA Channel 0 Current X Count Register */
233#define DMA0_CURR_Y_COUNT 0xFFC00C38 /* DMA Channel 0 Current Y Count Register */
234
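/*
 * Editor's note, not part of the original patch: the peripheral DMA
 * channel blocks repeat every 0x40 bytes (DMA0 at 0xFFC00C00, DMA1 at
 * 0xFFC00C40, ...), so a channel's register address can also be
 * computed; this helper macro is hypothetical.
 */
#define DMAx_NEXT_DESC_PTR(n)	(DMA0_NEXT_DESC_PTR + (n) * 0x40)	/* n = 0..11 */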
235#define DMA1_NEXT_DESC_PTR 0xFFC00C40 /* DMA Channel 1 Next Descriptor Pointer Register */
236#define DMA1_START_ADDR 0xFFC00C44 /* DMA Channel 1 Start Address Register */
237#define DMA1_CONFIG 0xFFC00C48 /* DMA Channel 1 Configuration Register */
238#define DMA1_X_COUNT 0xFFC00C50 /* DMA Channel 1 X Count Register */
239#define DMA1_X_MODIFY 0xFFC00C54 /* DMA Channel 1 X Modify Register */
240#define DMA1_Y_COUNT 0xFFC00C58 /* DMA Channel 1 Y Count Register */
241#define DMA1_Y_MODIFY 0xFFC00C5C /* DMA Channel 1 Y Modify Register */
242#define DMA1_CURR_DESC_PTR 0xFFC00C60 /* DMA Channel 1 Current Descriptor Pointer Register */
243#define DMA1_CURR_ADDR 0xFFC00C64 /* DMA Channel 1 Current Address Register */
244#define DMA1_IRQ_STATUS 0xFFC00C68 /* DMA Channel 1 Interrupt/Status Register */
245#define DMA1_PERIPHERAL_MAP 0xFFC00C6C /* DMA Channel 1 Peripheral Map Register */
246#define DMA1_CURR_X_COUNT 0xFFC00C70 /* DMA Channel 1 Current X Count Register */
247#define DMA1_CURR_Y_COUNT 0xFFC00C78 /* DMA Channel 1 Current Y Count Register */
248
249#define DMA2_NEXT_DESC_PTR 0xFFC00C80 /* DMA Channel 2 Next Descriptor Pointer Register */
250#define DMA2_START_ADDR 0xFFC00C84 /* DMA Channel 2 Start Address Register */
251#define DMA2_CONFIG 0xFFC00C88 /* DMA Channel 2 Configuration Register */
252#define DMA2_X_COUNT 0xFFC00C90 /* DMA Channel 2 X Count Register */
253#define DMA2_X_MODIFY 0xFFC00C94 /* DMA Channel 2 X Modify Register */
254#define DMA2_Y_COUNT 0xFFC00C98 /* DMA Channel 2 Y Count Register */
255#define DMA2_Y_MODIFY 0xFFC00C9C /* DMA Channel 2 Y Modify Register */
256#define DMA2_CURR_DESC_PTR 0xFFC00CA0 /* DMA Channel 2 Current Descriptor Pointer Register */
257#define DMA2_CURR_ADDR 0xFFC00CA4 /* DMA Channel 2 Current Address Register */
258#define DMA2_IRQ_STATUS 0xFFC00CA8 /* DMA Channel 2 Interrupt/Status Register */
259#define DMA2_PERIPHERAL_MAP 0xFFC00CAC /* DMA Channel 2 Peripheral Map Register */
260#define DMA2_CURR_X_COUNT 0xFFC00CB0 /* DMA Channel 2 Current X Count Register */
261#define DMA2_CURR_Y_COUNT 0xFFC00CB8 /* DMA Channel 2 Current Y Count Register */
262
263#define DMA3_NEXT_DESC_PTR 0xFFC00CC0 /* DMA Channel 3 Next Descriptor Pointer Register */
264#define DMA3_START_ADDR 0xFFC00CC4 /* DMA Channel 3 Start Address Register */
265#define DMA3_CONFIG 0xFFC00CC8 /* DMA Channel 3 Configuration Register */
266#define DMA3_X_COUNT 0xFFC00CD0 /* DMA Channel 3 X Count Register */
267#define DMA3_X_MODIFY 0xFFC00CD4 /* DMA Channel 3 X Modify Register */
268#define DMA3_Y_COUNT 0xFFC00CD8 /* DMA Channel 3 Y Count Register */
269#define DMA3_Y_MODIFY 0xFFC00CDC /* DMA Channel 3 Y Modify Register */
270#define DMA3_CURR_DESC_PTR 0xFFC00CE0 /* DMA Channel 3 Current Descriptor Pointer Register */
271#define DMA3_CURR_ADDR 0xFFC00CE4 /* DMA Channel 3 Current Address Register */
272#define DMA3_IRQ_STATUS 0xFFC00CE8 /* DMA Channel 3 Interrupt/Status Register */
273#define DMA3_PERIPHERAL_MAP 0xFFC00CEC /* DMA Channel 3 Peripheral Map Register */
274#define DMA3_CURR_X_COUNT 0xFFC00CF0 /* DMA Channel 3 Current X Count Register */
275#define DMA3_CURR_Y_COUNT 0xFFC00CF8 /* DMA Channel 3 Current Y Count Register */
276
277#define DMA4_NEXT_DESC_PTR 0xFFC00D00 /* DMA Channel 4 Next Descriptor Pointer Register */
278#define DMA4_START_ADDR 0xFFC00D04 /* DMA Channel 4 Start Address Register */
279#define DMA4_CONFIG 0xFFC00D08 /* DMA Channel 4 Configuration Register */
280#define DMA4_X_COUNT 0xFFC00D10 /* DMA Channel 4 X Count Register */
281#define DMA4_X_MODIFY 0xFFC00D14 /* DMA Channel 4 X Modify Register */
#define DMA4_Y_COUNT 0xFFC00D18 /* DMA Channel 4 Y Count Register */
#define DMA4_Y_MODIFY 0xFFC00D1C /* DMA Channel 4 Y Modify Register */
#define DMA4_CURR_DESC_PTR 0xFFC00D20 /* DMA Channel 4 Current Descriptor Pointer Register */
#define DMA4_CURR_ADDR 0xFFC00D24 /* DMA Channel 4 Current Address Register */
#define DMA4_IRQ_STATUS 0xFFC00D28 /* DMA Channel 4 Interrupt/Status Register */
#define DMA4_PERIPHERAL_MAP 0xFFC00D2C /* DMA Channel 4 Peripheral Map Register */
#define DMA4_CURR_X_COUNT 0xFFC00D30 /* DMA Channel 4 Current X Count Register */
#define DMA4_CURR_Y_COUNT 0xFFC00D38 /* DMA Channel 4 Current Y Count Register */

#define DMA5_NEXT_DESC_PTR 0xFFC00D40 /* DMA Channel 5 Next Descriptor Pointer Register */
#define DMA5_START_ADDR 0xFFC00D44 /* DMA Channel 5 Start Address Register */
#define DMA5_CONFIG 0xFFC00D48 /* DMA Channel 5 Configuration Register */
#define DMA5_X_COUNT 0xFFC00D50 /* DMA Channel 5 X Count Register */
#define DMA5_X_MODIFY 0xFFC00D54 /* DMA Channel 5 X Modify Register */
#define DMA5_Y_COUNT 0xFFC00D58 /* DMA Channel 5 Y Count Register */
#define DMA5_Y_MODIFY 0xFFC00D5C /* DMA Channel 5 Y Modify Register */
#define DMA5_CURR_DESC_PTR 0xFFC00D60 /* DMA Channel 5 Current Descriptor Pointer Register */
#define DMA5_CURR_ADDR 0xFFC00D64 /* DMA Channel 5 Current Address Register */
#define DMA5_IRQ_STATUS 0xFFC00D68 /* DMA Channel 5 Interrupt/Status Register */
#define DMA5_PERIPHERAL_MAP 0xFFC00D6C /* DMA Channel 5 Peripheral Map Register */
#define DMA5_CURR_X_COUNT 0xFFC00D70 /* DMA Channel 5 Current X Count Register */
#define DMA5_CURR_Y_COUNT 0xFFC00D78 /* DMA Channel 5 Current Y Count Register */

#define DMA6_NEXT_DESC_PTR 0xFFC00D80 /* DMA Channel 6 Next Descriptor Pointer Register */
#define DMA6_START_ADDR 0xFFC00D84 /* DMA Channel 6 Start Address Register */
#define DMA6_CONFIG 0xFFC00D88 /* DMA Channel 6 Configuration Register */
#define DMA6_X_COUNT 0xFFC00D90 /* DMA Channel 6 X Count Register */
#define DMA6_X_MODIFY 0xFFC00D94 /* DMA Channel 6 X Modify Register */
#define DMA6_Y_COUNT 0xFFC00D98 /* DMA Channel 6 Y Count Register */
#define DMA6_Y_MODIFY 0xFFC00D9C /* DMA Channel 6 Y Modify Register */
#define DMA6_CURR_DESC_PTR 0xFFC00DA0 /* DMA Channel 6 Current Descriptor Pointer Register */
#define DMA6_CURR_ADDR 0xFFC00DA4 /* DMA Channel 6 Current Address Register */
#define DMA6_IRQ_STATUS 0xFFC00DA8 /* DMA Channel 6 Interrupt/Status Register */
#define DMA6_PERIPHERAL_MAP 0xFFC00DAC /* DMA Channel 6 Peripheral Map Register */
#define DMA6_CURR_X_COUNT 0xFFC00DB0 /* DMA Channel 6 Current X Count Register */
#define DMA6_CURR_Y_COUNT 0xFFC00DB8 /* DMA Channel 6 Current Y Count Register */

#define DMA7_NEXT_DESC_PTR 0xFFC00DC0 /* DMA Channel 7 Next Descriptor Pointer Register */
#define DMA7_START_ADDR 0xFFC00DC4 /* DMA Channel 7 Start Address Register */
#define DMA7_CONFIG 0xFFC00DC8 /* DMA Channel 7 Configuration Register */
#define DMA7_X_COUNT 0xFFC00DD0 /* DMA Channel 7 X Count Register */
#define DMA7_X_MODIFY 0xFFC00DD4 /* DMA Channel 7 X Modify Register */
#define DMA7_Y_COUNT 0xFFC00DD8 /* DMA Channel 7 Y Count Register */
#define DMA7_Y_MODIFY 0xFFC00DDC /* DMA Channel 7 Y Modify Register */
#define DMA7_CURR_DESC_PTR 0xFFC00DE0 /* DMA Channel 7 Current Descriptor Pointer Register */
#define DMA7_CURR_ADDR 0xFFC00DE4 /* DMA Channel 7 Current Address Register */
#define DMA7_IRQ_STATUS 0xFFC00DE8 /* DMA Channel 7 Interrupt/Status Register */
#define DMA7_PERIPHERAL_MAP 0xFFC00DEC /* DMA Channel 7 Peripheral Map Register */
#define DMA7_CURR_X_COUNT 0xFFC00DF0 /* DMA Channel 7 Current X Count Register */
#define DMA7_CURR_Y_COUNT 0xFFC00DF8 /* DMA Channel 7 Current Y Count Register */

#define DMA8_NEXT_DESC_PTR 0xFFC00E00 /* DMA Channel 8 Next Descriptor Pointer Register */
#define DMA8_START_ADDR 0xFFC00E04 /* DMA Channel 8 Start Address Register */
#define DMA8_CONFIG 0xFFC00E08 /* DMA Channel 8 Configuration Register */
#define DMA8_X_COUNT 0xFFC00E10 /* DMA Channel 8 X Count Register */
#define DMA8_X_MODIFY 0xFFC00E14 /* DMA Channel 8 X Modify Register */
#define DMA8_Y_COUNT 0xFFC00E18 /* DMA Channel 8 Y Count Register */
#define DMA8_Y_MODIFY 0xFFC00E1C /* DMA Channel 8 Y Modify Register */
#define DMA8_CURR_DESC_PTR 0xFFC00E20 /* DMA Channel 8 Current Descriptor Pointer Register */
#define DMA8_CURR_ADDR 0xFFC00E24 /* DMA Channel 8 Current Address Register */
#define DMA8_IRQ_STATUS 0xFFC00E28 /* DMA Channel 8 Interrupt/Status Register */
#define DMA8_PERIPHERAL_MAP 0xFFC00E2C /* DMA Channel 8 Peripheral Map Register */
#define DMA8_CURR_X_COUNT 0xFFC00E30 /* DMA Channel 8 Current X Count Register */
#define DMA8_CURR_Y_COUNT 0xFFC00E38 /* DMA Channel 8 Current Y Count Register */

#define DMA9_NEXT_DESC_PTR 0xFFC00E40 /* DMA Channel 9 Next Descriptor Pointer Register */
#define DMA9_START_ADDR 0xFFC00E44 /* DMA Channel 9 Start Address Register */
#define DMA9_CONFIG 0xFFC00E48 /* DMA Channel 9 Configuration Register */
#define DMA9_X_COUNT 0xFFC00E50 /* DMA Channel 9 X Count Register */
#define DMA9_X_MODIFY 0xFFC00E54 /* DMA Channel 9 X Modify Register */
#define DMA9_Y_COUNT 0xFFC00E58 /* DMA Channel 9 Y Count Register */
#define DMA9_Y_MODIFY 0xFFC00E5C /* DMA Channel 9 Y Modify Register */
#define DMA9_CURR_DESC_PTR 0xFFC00E60 /* DMA Channel 9 Current Descriptor Pointer Register */
#define DMA9_CURR_ADDR 0xFFC00E64 /* DMA Channel 9 Current Address Register */
#define DMA9_IRQ_STATUS 0xFFC00E68 /* DMA Channel 9 Interrupt/Status Register */
#define DMA9_PERIPHERAL_MAP 0xFFC00E6C /* DMA Channel 9 Peripheral Map Register */
#define DMA9_CURR_X_COUNT 0xFFC00E70 /* DMA Channel 9 Current X Count Register */
#define DMA9_CURR_Y_COUNT 0xFFC00E78 /* DMA Channel 9 Current Y Count Register */

#define DMA10_NEXT_DESC_PTR 0xFFC00E80 /* DMA Channel 10 Next Descriptor Pointer Register */
#define DMA10_START_ADDR 0xFFC00E84 /* DMA Channel 10 Start Address Register */
#define DMA10_CONFIG 0xFFC00E88 /* DMA Channel 10 Configuration Register */
#define DMA10_X_COUNT 0xFFC00E90 /* DMA Channel 10 X Count Register */
#define DMA10_X_MODIFY 0xFFC00E94 /* DMA Channel 10 X Modify Register */
#define DMA10_Y_COUNT 0xFFC00E98 /* DMA Channel 10 Y Count Register */
#define DMA10_Y_MODIFY 0xFFC00E9C /* DMA Channel 10 Y Modify Register */
#define DMA10_CURR_DESC_PTR 0xFFC00EA0 /* DMA Channel 10 Current Descriptor Pointer Register */
#define DMA10_CURR_ADDR 0xFFC00EA4 /* DMA Channel 10 Current Address Register */
#define DMA10_IRQ_STATUS 0xFFC00EA8 /* DMA Channel 10 Interrupt/Status Register */
#define DMA10_PERIPHERAL_MAP 0xFFC00EAC /* DMA Channel 10 Peripheral Map Register */
#define DMA10_CURR_X_COUNT 0xFFC00EB0 /* DMA Channel 10 Current X Count Register */
#define DMA10_CURR_Y_COUNT 0xFFC00EB8 /* DMA Channel 10 Current Y Count Register */

#define DMA11_NEXT_DESC_PTR 0xFFC00EC0 /* DMA Channel 11 Next Descriptor Pointer Register */
#define DMA11_START_ADDR 0xFFC00EC4 /* DMA Channel 11 Start Address Register */
#define DMA11_CONFIG 0xFFC00EC8 /* DMA Channel 11 Configuration Register */
#define DMA11_X_COUNT 0xFFC00ED0 /* DMA Channel 11 X Count Register */
#define DMA11_X_MODIFY 0xFFC00ED4 /* DMA Channel 11 X Modify Register */
#define DMA11_Y_COUNT 0xFFC00ED8 /* DMA Channel 11 Y Count Register */
#define DMA11_Y_MODIFY 0xFFC00EDC /* DMA Channel 11 Y Modify Register */
#define DMA11_CURR_DESC_PTR 0xFFC00EE0 /* DMA Channel 11 Current Descriptor Pointer Register */
#define DMA11_CURR_ADDR 0xFFC00EE4 /* DMA Channel 11 Current Address Register */
#define DMA11_IRQ_STATUS 0xFFC00EE8 /* DMA Channel 11 Interrupt/Status Register */
#define DMA11_PERIPHERAL_MAP 0xFFC00EEC /* DMA Channel 11 Peripheral Map Register */
#define DMA11_CURR_X_COUNT 0xFFC00EF0 /* DMA Channel 11 Current X Count Register */
#define DMA11_CURR_Y_COUNT 0xFFC00EF8 /* DMA Channel 11 Current Y Count Register */

#define MDMA_D0_NEXT_DESC_PTR 0xFFC00F00 /* MemDMA Stream 0 Destination Next Descriptor Pointer Register */
#define MDMA_D0_START_ADDR 0xFFC00F04 /* MemDMA Stream 0 Destination Start Address Register */
#define MDMA_D0_CONFIG 0xFFC00F08 /* MemDMA Stream 0 Destination Configuration Register */
#define MDMA_D0_X_COUNT 0xFFC00F10 /* MemDMA Stream 0 Destination X Count Register */
#define MDMA_D0_X_MODIFY 0xFFC00F14 /* MemDMA Stream 0 Destination X Modify Register */
#define MDMA_D0_Y_COUNT 0xFFC00F18 /* MemDMA Stream 0 Destination Y Count Register */
#define MDMA_D0_Y_MODIFY 0xFFC00F1C /* MemDMA Stream 0 Destination Y Modify Register */
#define MDMA_D0_CURR_DESC_PTR 0xFFC00F20 /* MemDMA Stream 0 Destination Current Descriptor Pointer Register */
#define MDMA_D0_CURR_ADDR 0xFFC00F24 /* MemDMA Stream 0 Destination Current Address Register */
#define MDMA_D0_IRQ_STATUS 0xFFC00F28 /* MemDMA Stream 0 Destination Interrupt/Status Register */
#define MDMA_D0_PERIPHERAL_MAP 0xFFC00F2C /* MemDMA Stream 0 Destination Peripheral Map Register */
#define MDMA_D0_CURR_X_COUNT 0xFFC00F30 /* MemDMA Stream 0 Destination Current X Count Register */
#define MDMA_D0_CURR_Y_COUNT 0xFFC00F38 /* MemDMA Stream 0 Destination Current Y Count Register */

#define MDMA_S0_NEXT_DESC_PTR 0xFFC00F40 /* MemDMA Stream 0 Source Next Descriptor Pointer Register */
#define MDMA_S0_START_ADDR 0xFFC00F44 /* MemDMA Stream 0 Source Start Address Register */
#define MDMA_S0_CONFIG 0xFFC00F48 /* MemDMA Stream 0 Source Configuration Register */
#define MDMA_S0_X_COUNT 0xFFC00F50 /* MemDMA Stream 0 Source X Count Register */
#define MDMA_S0_X_MODIFY 0xFFC00F54 /* MemDMA Stream 0 Source X Modify Register */
#define MDMA_S0_Y_COUNT 0xFFC00F58 /* MemDMA Stream 0 Source Y Count Register */
#define MDMA_S0_Y_MODIFY 0xFFC00F5C /* MemDMA Stream 0 Source Y Modify Register */
#define MDMA_S0_CURR_DESC_PTR 0xFFC00F60 /* MemDMA Stream 0 Source Current Descriptor Pointer Register */
#define MDMA_S0_CURR_ADDR 0xFFC00F64 /* MemDMA Stream 0 Source Current Address Register */
#define MDMA_S0_IRQ_STATUS 0xFFC00F68 /* MemDMA Stream 0 Source Interrupt/Status Register */
#define MDMA_S0_PERIPHERAL_MAP 0xFFC00F6C /* MemDMA Stream 0 Source Peripheral Map Register */
#define MDMA_S0_CURR_X_COUNT 0xFFC00F70 /* MemDMA Stream 0 Source Current X Count Register */
#define MDMA_S0_CURR_Y_COUNT 0xFFC00F78 /* MemDMA Stream 0 Source Current Y Count Register */

#define MDMA_D1_NEXT_DESC_PTR 0xFFC00F80 /* MemDMA Stream 1 Destination Next Descriptor Pointer Register */
#define MDMA_D1_START_ADDR 0xFFC00F84 /* MemDMA Stream 1 Destination Start Address Register */
#define MDMA_D1_CONFIG 0xFFC00F88 /* MemDMA Stream 1 Destination Configuration Register */
#define MDMA_D1_X_COUNT 0xFFC00F90 /* MemDMA Stream 1 Destination X Count Register */
#define MDMA_D1_X_MODIFY 0xFFC00F94 /* MemDMA Stream 1 Destination X Modify Register */
#define MDMA_D1_Y_COUNT 0xFFC00F98 /* MemDMA Stream 1 Destination Y Count Register */
#define MDMA_D1_Y_MODIFY 0xFFC00F9C /* MemDMA Stream 1 Destination Y Modify Register */
#define MDMA_D1_CURR_DESC_PTR 0xFFC00FA0 /* MemDMA Stream 1 Destination Current Descriptor Pointer Register */
#define MDMA_D1_CURR_ADDR 0xFFC00FA4 /* MemDMA Stream 1 Destination Current Address Register */
#define MDMA_D1_IRQ_STATUS 0xFFC00FA8 /* MemDMA Stream 1 Destination Interrupt/Status Register */
#define MDMA_D1_PERIPHERAL_MAP 0xFFC00FAC /* MemDMA Stream 1 Destination Peripheral Map Register */
#define MDMA_D1_CURR_X_COUNT 0xFFC00FB0 /* MemDMA Stream 1 Destination Current X Count Register */
#define MDMA_D1_CURR_Y_COUNT 0xFFC00FB8 /* MemDMA Stream 1 Destination Current Y Count Register */

#define MDMA_S1_NEXT_DESC_PTR 0xFFC00FC0 /* MemDMA Stream 1 Source Next Descriptor Pointer Register */
#define MDMA_S1_START_ADDR 0xFFC00FC4 /* MemDMA Stream 1 Source Start Address Register */
#define MDMA_S1_CONFIG 0xFFC00FC8 /* MemDMA Stream 1 Source Configuration Register */
#define MDMA_S1_X_COUNT 0xFFC00FD0 /* MemDMA Stream 1 Source X Count Register */
#define MDMA_S1_X_MODIFY 0xFFC00FD4 /* MemDMA Stream 1 Source X Modify Register */
#define MDMA_S1_Y_COUNT 0xFFC00FD8 /* MemDMA Stream 1 Source Y Count Register */
#define MDMA_S1_Y_MODIFY 0xFFC00FDC /* MemDMA Stream 1 Source Y Modify Register */
#define MDMA_S1_CURR_DESC_PTR 0xFFC00FE0 /* MemDMA Stream 1 Source Current Descriptor Pointer Register */
#define MDMA_S1_CURR_ADDR 0xFFC00FE4 /* MemDMA Stream 1 Source Current Address Register */
#define MDMA_S1_IRQ_STATUS 0xFFC00FE8 /* MemDMA Stream 1 Source Interrupt/Status Register */
#define MDMA_S1_PERIPHERAL_MAP 0xFFC00FEC /* MemDMA Stream 1 Source Peripheral Map Register */
#define MDMA_S1_CURR_X_COUNT 0xFFC00FF0 /* MemDMA Stream 1 Source Current X Count Register */
#define MDMA_S1_CURR_Y_COUNT 0xFFC00FF8 /* MemDMA Stream 1 Source Current Y Count Register */


/* Parallel Peripheral Interface (0xFFC01000 - 0xFFC010FF) */
#define PPI_CONTROL 0xFFC01000 /* PPI Control Register */
#define PPI_STATUS 0xFFC01004 /* PPI Status Register */
#define PPI_COUNT 0xFFC01008 /* PPI Transfer Count Register */
#define PPI_DELAY 0xFFC0100C /* PPI Delay Count Register */
#define PPI_FRAME 0xFFC01010 /* PPI Frame Length Register */


/* Two-Wire Interface (0xFFC01400 - 0xFFC014FF) */
#define TWI0_REGBASE 0xFFC01400
#define TWI0_CLKDIV 0xFFC01400 /* Serial Clock Divider Register */
#define TWI0_CONTROL 0xFFC01404 /* TWI Control Register */
#define TWI0_SLAVE_CTL 0xFFC01408 /* Slave Mode Control Register */
#define TWI0_SLAVE_STAT 0xFFC0140C /* Slave Mode Status Register */
#define TWI0_SLAVE_ADDR 0xFFC01410 /* Slave Mode Address Register */
#define TWI0_MASTER_CTL 0xFFC01414 /* Master Mode Control Register */
#define TWI0_MASTER_STAT 0xFFC01418 /* Master Mode Status Register */
#define TWI0_MASTER_ADDR 0xFFC0141C /* Master Mode Address Register */
#define TWI0_INT_STAT 0xFFC01420 /* TWI Interrupt Status Register */
#define TWI0_INT_MASK 0xFFC01424 /* TWI Master Interrupt Mask Register */
#define TWI0_FIFO_CTL 0xFFC01428 /* FIFO Control Register */
#define TWI0_FIFO_STAT 0xFFC0142C /* FIFO Status Register */
#define TWI0_XMT_DATA8 0xFFC01480 /* FIFO Transmit Data Single Byte Register */
#define TWI0_XMT_DATA16 0xFFC01484 /* FIFO Transmit Data Double Byte Register */
#define TWI0_RCV_DATA8 0xFFC01488 /* FIFO Receive Data Single Byte Register */
#define TWI0_RCV_DATA16 0xFFC0148C /* FIFO Receive Data Double Byte Register */


/* General Purpose I/O Port G (0xFFC01500 - 0xFFC015FF) */
#define PORTGIO 0xFFC01500 /* Port G I/O Pin State Specify Register */
#define PORTGIO_CLEAR 0xFFC01504 /* Port G I/O Peripheral Interrupt Clear Register */
#define PORTGIO_SET 0xFFC01508 /* Port G I/O Peripheral Interrupt Set Register */
#define PORTGIO_TOGGLE 0xFFC0150C /* Port G I/O Pin State Toggle Register */
#define PORTGIO_MASKA 0xFFC01510 /* Port G I/O Mask State Specify Interrupt A Register */
#define PORTGIO_MASKA_CLEAR 0xFFC01514 /* Port G I/O Mask Disable Interrupt A Register */
#define PORTGIO_MASKA_SET 0xFFC01518 /* Port G I/O Mask Enable Interrupt A Register */
#define PORTGIO_MASKA_TOGGLE 0xFFC0151C /* Port G I/O Mask Toggle Enable Interrupt A Register */
#define PORTGIO_MASKB 0xFFC01520 /* Port G I/O Mask State Specify Interrupt B Register */
#define PORTGIO_MASKB_CLEAR 0xFFC01524 /* Port G I/O Mask Disable Interrupt B Register */
#define PORTGIO_MASKB_SET 0xFFC01528 /* Port G I/O Mask Enable Interrupt B Register */
#define PORTGIO_MASKB_TOGGLE 0xFFC0152C /* Port G I/O Mask Toggle Enable Interrupt B Register */
#define PORTGIO_DIR 0xFFC01530 /* Port G I/O Direction Register */
#define PORTGIO_POLAR 0xFFC01534 /* Port G I/O Source Polarity Register */
#define PORTGIO_EDGE 0xFFC01538 /* Port G I/O Source Sensitivity Register */
#define PORTGIO_BOTH 0xFFC0153C /* Port G I/O Set on BOTH Edges Register */
#define PORTGIO_INEN 0xFFC01540 /* Port G I/O Input Enable Register */


/* General Purpose I/O Port H (0xFFC01700 - 0xFFC017FF) */
#define PORTHIO 0xFFC01700 /* Port H I/O Pin State Specify Register */
#define PORTHIO_CLEAR 0xFFC01704 /* Port H I/O Peripheral Interrupt Clear Register */
#define PORTHIO_SET 0xFFC01708 /* Port H I/O Peripheral Interrupt Set Register */
#define PORTHIO_TOGGLE 0xFFC0170C /* Port H I/O Pin State Toggle Register */
#define PORTHIO_MASKA 0xFFC01710 /* Port H I/O Mask State Specify Interrupt A Register */
#define PORTHIO_MASKA_CLEAR 0xFFC01714 /* Port H I/O Mask Disable Interrupt A Register */
#define PORTHIO_MASKA_SET 0xFFC01718 /* Port H I/O Mask Enable Interrupt A Register */
#define PORTHIO_MASKA_TOGGLE 0xFFC0171C /* Port H I/O Mask Toggle Enable Interrupt A Register */
#define PORTHIO_MASKB 0xFFC01720 /* Port H I/O Mask State Specify Interrupt B Register */
#define PORTHIO_MASKB_CLEAR 0xFFC01724 /* Port H I/O Mask Disable Interrupt B Register */
#define PORTHIO_MASKB_SET 0xFFC01728 /* Port H I/O Mask Enable Interrupt B Register */
#define PORTHIO_MASKB_TOGGLE 0xFFC0172C /* Port H I/O Mask Toggle Enable Interrupt B Register */
#define PORTHIO_DIR 0xFFC01730 /* Port H I/O Direction Register */
#define PORTHIO_POLAR 0xFFC01734 /* Port H I/O Source Polarity Register */
#define PORTHIO_EDGE 0xFFC01738 /* Port H I/O Source Sensitivity Register */
#define PORTHIO_BOTH 0xFFC0173C /* Port H I/O Set on BOTH Edges Register */
#define PORTHIO_INEN 0xFFC01740 /* Port H I/O Input Enable Register */


/* UART1 Controller (0xFFC02000 - 0xFFC020FF) */
#define UART1_THR 0xFFC02000 /* Transmit Holding register */
#define UART1_RBR 0xFFC02000 /* Receive Buffer register */
#define UART1_DLL 0xFFC02000 /* Divisor Latch (Low-Byte) */
#define UART1_IER 0xFFC02004 /* Interrupt Enable Register */
#define UART1_DLH 0xFFC02004 /* Divisor Latch (High-Byte) */
#define UART1_IIR 0xFFC02008 /* Interrupt Identification Register */
#define UART1_LCR 0xFFC0200C /* Line Control Register */
#define UART1_MCR 0xFFC02010 /* Modem Control Register */
#define UART1_LSR 0xFFC02014 /* Line Status Register */
#define UART1_MSR 0xFFC02018 /* Modem Status Register */
#define UART1_SCR 0xFFC0201C /* SCR Scratch Register */
#define UART1_GCTL 0xFFC02024 /* Global Control Register */


/* Pin Control Registers (0xFFC03200 - 0xFFC032FF) */
#define PORTF_FER 0xFFC03200 /* Port F Function Enable Register (Alternate/Flag*) */
#define PORTG_FER 0xFFC03204 /* Port G Function Enable Register (Alternate/Flag*) */
#define PORTH_FER 0xFFC03208 /* Port H Function Enable Register (Alternate/Flag*) */
#define BFIN_PORT_MUX 0xFFC0320C /* Port Multiplexer Control Register */


/* Handshake MDMA Registers (0xFFC03300 - 0xFFC033FF) */
#define HMDMA0_CONTROL 0xFFC03300 /* Handshake MDMA0 Control Register */
#define HMDMA0_ECINIT 0xFFC03304 /* HMDMA0 Initial Edge Count Register */
#define HMDMA0_BCINIT 0xFFC03308 /* HMDMA0 Initial Block Count Register */
#define HMDMA0_ECURGENT 0xFFC0330C /* HMDMA0 Urgent Edge Count Threshold Register */
#define HMDMA0_ECOVERFLOW 0xFFC03310 /* HMDMA0 Edge Count Overflow Interrupt Register */
#define HMDMA0_ECOUNT 0xFFC03314 /* HMDMA0 Current Edge Count Register */
#define HMDMA0_BCOUNT 0xFFC03318 /* HMDMA0 Current Block Count Register */

#define HMDMA1_CONTROL 0xFFC03340 /* Handshake MDMA1 Control Register */
#define HMDMA1_ECINIT 0xFFC03344 /* HMDMA1 Initial Edge Count Register */
#define HMDMA1_BCINIT 0xFFC03348 /* HMDMA1 Initial Block Count Register */
#define HMDMA1_ECURGENT 0xFFC0334C /* HMDMA1 Urgent Edge Count Threshold Register */
#define HMDMA1_ECOVERFLOW 0xFFC03350 /* HMDMA1 Edge Count Overflow Interrupt Register */
#define HMDMA1_ECOUNT 0xFFC03354 /* HMDMA1 Current Edge Count Register */
#define HMDMA1_BCOUNT 0xFFC03358 /* HMDMA1 Current Block Count Register */

/* GPIO PIN mux (0xFFC03210 - 0xFFC03288) */
#define PORTF_MUX 0xFFC03210 /* Port F mux control */
#define PORTG_MUX 0xFFC03214 /* Port G mux control */
#define PORTH_MUX 0xFFC03218 /* Port H mux control */
#define PORTF_DRIVE 0xFFC03220 /* Port F drive strength control */
#define PORTG_DRIVE 0xFFC03224 /* Port G drive strength control */
#define PORTH_DRIVE 0xFFC03228 /* Port H drive strength control */
#define PORTF_SLEW 0xFFC03230 /* Port F slew control */
#define PORTG_SLEW 0xFFC03234 /* Port G slew control */
#define PORTH_SLEW 0xFFC03238 /* Port H slew control */
#define PORTF_HYSTERISIS 0xFFC03240 /* Port F Schmitt trigger control */
#define PORTG_HYSTERISIS 0xFFC03244 /* Port G Schmitt trigger control */
#define PORTH_HYSTERISIS 0xFFC03248 /* Port H Schmitt trigger control */
#define MISCPORT_DRIVE 0xFFC03280 /* Misc Port drive strength control */
#define MISCPORT_SLEW 0xFFC03284 /* Misc Port slew control */
#define MISCPORT_HYSTERISIS 0xFFC03288 /* Misc Port Schmitt trigger control */


/***********************************************************************************
** System MMR Register Bits And Macros
**
** Disclaimer: All macros are intended to make C and Assembly code more readable.
** Use these macros carefully, as any that do left shifts for field
** depositing will result in the lower order bits being destroyed. Any
** macro that shifts left to properly position the bit-field should be
** used as part of an OR to initialize a register and NOT as a dynamic
** modifier UNLESS the lower order bits are saved and ORed back in when
** the macro is used.
*************************************************************************************/
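
/*
** Usage sketch (illustrative, not part of the original header): depositing a
** bit-field with a left-shifting macro safely.  Per the disclaimer above, a
** shifted macro such as P1_IVG() (defined below) should either initialize the
** whole register or be ORed back together with the preserved low-order bits.
** The *pSIC_IAR0 pointer macro is assumed to come from the matching cdef
** header.
**
**   unsigned long iar = *pSIC_IAR0;      // save the current assignments
**   iar &= ~(0xF << 0x4);                // clear the peripheral #1 field
**   iar |= P1_IVG(10);                   // deposit the new IVG number
**   *pSIC_IAR0 = iar;                    // write back, other fields intact
*/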

/* CHIPID Masks */
#define CHIPID_VERSION 0xF0000000
#define CHIPID_FAMILY 0x0FFFF000
#define CHIPID_MANUFACTURE 0x00000FFE

/* SWRST Masks */
#define SYSTEM_RESET 0x0007 /* Initiates A System Software Reset */
#define DOUBLE_FAULT 0x0008 /* Core Double Fault Causes Reset */
#define RESET_DOUBLE 0x2000 /* SW Reset Generated By Core Double-Fault */
#define RESET_WDOG 0x4000 /* SW Reset Generated By Watchdog Timer */
#define RESET_SOFTWARE 0x8000 /* SW Reset Occurred Since Last Read Of SWRST */

/* SYSCR Masks */
#define BMODE 0x0007 /* Boot Mode - Latched During HW Reset From Mode Pins */
#define NOBOOT 0x0010 /* Execute From L1 or ASYNC Bank 0 When BMODE = 0 */


/* ************* SYSTEM INTERRUPT CONTROLLER MASKS *************************************/
/* Peripheral Masks For SIC_ISR, SIC_IWR, SIC_IMASK */

#if 0
#define IRQ_PLL_WAKEUP 0x00000001 /* PLL Wakeup Interrupt */

#define IRQ_ERROR1 0x00000002 /* Error Interrupt (DMA, DMARx Block, DMARx Overflow) */
#define IRQ_ERROR2 0x00000004 /* Error Interrupt (CAN, Ethernet, SPORTx, PPI, SPI, UARTx) */
#define IRQ_RTC 0x00000008 /* Real Time Clock Interrupt */
#define IRQ_DMA0 0x00000010 /* DMA Channel 0 (PPI) Interrupt */
#define IRQ_DMA3 0x00000020 /* DMA Channel 3 (SPORT0 RX) Interrupt */
#define IRQ_DMA4 0x00000040 /* DMA Channel 4 (SPORT0 TX) Interrupt */
#define IRQ_DMA5 0x00000080 /* DMA Channel 5 (SPORT1 RX) Interrupt */

#define IRQ_DMA6 0x00000100 /* DMA Channel 6 (SPORT1 TX) Interrupt */
#define IRQ_TWI 0x00000200 /* TWI Interrupt */
#define IRQ_DMA7 0x00000400 /* DMA Channel 7 (SPI) Interrupt */
#define IRQ_DMA8 0x00000800 /* DMA Channel 8 (UART0 RX) Interrupt */
#define IRQ_DMA9 0x00001000 /* DMA Channel 9 (UART0 TX) Interrupt */
#define IRQ_DMA10 0x00002000 /* DMA Channel 10 (UART1 RX) Interrupt */
#define IRQ_DMA11 0x00004000 /* DMA Channel 11 (UART1 TX) Interrupt */
#define IRQ_CAN_RX 0x00008000 /* CAN Receive Interrupt */

#define IRQ_CAN_TX 0x00010000 /* CAN Transmit Interrupt */
#define IRQ_DMA1 0x00020000 /* DMA Channel 1 (Ethernet RX) Interrupt */
#define IRQ_PFA_PORTH 0x00020000 /* PF Port H (PF47:32) Interrupt A */
#define IRQ_DMA2 0x00040000 /* DMA Channel 2 (Ethernet TX) Interrupt */
#define IRQ_PFB_PORTH 0x00040000 /* PF Port H (PF47:32) Interrupt B */
#define IRQ_TIMER0 0x00080000 /* Timer 0 Interrupt */
#define IRQ_TIMER1 0x00100000 /* Timer 1 Interrupt */
#define IRQ_TIMER2 0x00200000 /* Timer 2 Interrupt */
#define IRQ_TIMER3 0x00400000 /* Timer 3 Interrupt */
#define IRQ_TIMER4 0x00800000 /* Timer 4 Interrupt */

#define IRQ_TIMER5 0x01000000 /* Timer 5 Interrupt */
#define IRQ_TIMER6 0x02000000 /* Timer 6 Interrupt */
#define IRQ_TIMER7 0x04000000 /* Timer 7 Interrupt */
#define IRQ_PFA_PORTFG 0x08000000 /* PF Ports F&G (PF31:0) Interrupt A */
#define IRQ_PFB_PORTF 0x80000000 /* PF Port F (PF15:0) Interrupt B */
#define IRQ_DMA12 0x20000000 /* DMA Channels 12 (MDMA1 Source) RX Interrupt */
#define IRQ_DMA13 0x20000000 /* DMA Channels 13 (MDMA1 Destination) TX Interrupt */
#define IRQ_DMA14 0x40000000 /* DMA Channels 14 (MDMA0 Source) RX Interrupt */
#define IRQ_DMA15 0x40000000 /* DMA Channels 15 (MDMA0 Destination) TX Interrupt */
#define IRQ_WDOG 0x80000000 /* Software Watchdog Timer Interrupt */
#define IRQ_PFB_PORTG 0x10000000 /* PF Port G (PF31:16) Interrupt B */
#endif

/* SIC_IAR0 Macros */
#define P0_IVG(x) (((x)&0xF)-7) /* Peripheral #0 assigned IVG #x */
#define P1_IVG(x) ((((x)&0xF)-7) << 0x4) /* Peripheral #1 assigned IVG #x */
#define P2_IVG(x) ((((x)&0xF)-7) << 0x8) /* Peripheral #2 assigned IVG #x */
#define P3_IVG(x) ((((x)&0xF)-7) << 0xC) /* Peripheral #3 assigned IVG #x */
#define P4_IVG(x) ((((x)&0xF)-7) << 0x10) /* Peripheral #4 assigned IVG #x */
#define P5_IVG(x) ((((x)&0xF)-7) << 0x14) /* Peripheral #5 assigned IVG #x */
#define P6_IVG(x) ((((x)&0xF)-7) << 0x18) /* Peripheral #6 assigned IVG #x */
#define P7_IVG(x) ((((x)&0xF)-7) << 0x1C) /* Peripheral #7 assigned IVG #x */

/* SIC_IAR1 Macros */
#define P8_IVG(x) (((x)&0xF)-7) /* Peripheral #8 assigned IVG #x */
#define P9_IVG(x) ((((x)&0xF)-7) << 0x4) /* Peripheral #9 assigned IVG #x */
#define P10_IVG(x) ((((x)&0xF)-7) << 0x8) /* Peripheral #10 assigned IVG #x */
#define P11_IVG(x) ((((x)&0xF)-7) << 0xC) /* Peripheral #11 assigned IVG #x */
#define P12_IVG(x) ((((x)&0xF)-7) << 0x10) /* Peripheral #12 assigned IVG #x */
#define P13_IVG(x) ((((x)&0xF)-7) << 0x14) /* Peripheral #13 assigned IVG #x */
#define P14_IVG(x) ((((x)&0xF)-7) << 0x18) /* Peripheral #14 assigned IVG #x */
#define P15_IVG(x) ((((x)&0xF)-7) << 0x1C) /* Peripheral #15 assigned IVG #x */

/* SIC_IAR2 Macros */
#define P16_IVG(x) (((x)&0xF)-7) /* Peripheral #16 assigned IVG #x */
#define P17_IVG(x) ((((x)&0xF)-7) << 0x4) /* Peripheral #17 assigned IVG #x */
#define P18_IVG(x) ((((x)&0xF)-7) << 0x8) /* Peripheral #18 assigned IVG #x */
#define P19_IVG(x) ((((x)&0xF)-7) << 0xC) /* Peripheral #19 assigned IVG #x */
#define P20_IVG(x) ((((x)&0xF)-7) << 0x10) /* Peripheral #20 assigned IVG #x */
#define P21_IVG(x) ((((x)&0xF)-7) << 0x14) /* Peripheral #21 assigned IVG #x */
#define P22_IVG(x) ((((x)&0xF)-7) << 0x18) /* Peripheral #22 assigned IVG #x */
#define P23_IVG(x) ((((x)&0xF)-7) << 0x1C) /* Peripheral #23 assigned IVG #x */

/* SIC_IAR3 Macros */
#define P24_IVG(x) (((x)&0xF)-7) /* Peripheral #24 assigned IVG #x */
#define P25_IVG(x) ((((x)&0xF)-7) << 0x4) /* Peripheral #25 assigned IVG #x */
#define P26_IVG(x) ((((x)&0xF)-7) << 0x8) /* Peripheral #26 assigned IVG #x */
#define P27_IVG(x) ((((x)&0xF)-7) << 0xC) /* Peripheral #27 assigned IVG #x */
#define P28_IVG(x) ((((x)&0xF)-7) << 0x10) /* Peripheral #28 assigned IVG #x */
#define P29_IVG(x) ((((x)&0xF)-7) << 0x14) /* Peripheral #29 assigned IVG #x */
#define P30_IVG(x) ((((x)&0xF)-7) << 0x18) /* Peripheral #30 assigned IVG #x */
#define P31_IVG(x) ((((x)&0xF)-7) << 0x1C) /* Peripheral #31 assigned IVG #x */


/* SIC_IMASK Masks */
#define SIC_UNMASK_ALL 0x00000000 /* Unmask all peripheral interrupts */
#define SIC_MASK_ALL 0xFFFFFFFF /* Mask all peripheral interrupts */
#define SIC_MASK(x) (1 << ((x)&0x1F)) /* Mask Peripheral #x interrupt */
#define SIC_UNMASK(x) (0xFFFFFFFF ^ (1 << ((x)&0x1F))) /* Unmask Peripheral #x interrupt */

/* SIC_IWR Masks */
#define IWR_DISABLE_ALL 0x00000000 /* Wakeup Disable all peripherals */
#define IWR_ENABLE_ALL 0xFFFFFFFF /* Wakeup Enable all peripherals */
#define IWR_ENABLE(x) (1 << ((x)&0x1F)) /* Wakeup Enable Peripheral #x */
#define IWR_DISABLE(x) (0xFFFFFFFF ^ (1 << ((x)&0x1F))) /* Wakeup Disable Peripheral #x */
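
/*
** Usage sketch (illustrative): masking and unmasking a single peripheral
** interrupt and enabling it as a wakeup source.  SIC_MASK() and SIC_UNMASK()
** are meant for read-modify-write sequences; the *pSIC_IMASK and *pSIC_IWR
** pointer macros are assumed to come from the matching cdef header.
**
**   *pSIC_IMASK |= SIC_MASK(10);      // mask peripheral #10
**   *pSIC_IMASK &= SIC_UNMASK(10);    // unmask it again
**   *pSIC_IWR   |= IWR_ENABLE(10);    // let peripheral #10 wake the core
*/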

/* **************** GENERAL PURPOSE TIMER MASKS **********************/
/* TIMER_ENABLE Masks */
#define TIMEN0 0x0001 /* Enable Timer 0 */
#define TIMEN1 0x0002 /* Enable Timer 1 */
#define TIMEN2 0x0004 /* Enable Timer 2 */
#define TIMEN3 0x0008 /* Enable Timer 3 */
#define TIMEN4 0x0010 /* Enable Timer 4 */
#define TIMEN5 0x0020 /* Enable Timer 5 */
#define TIMEN6 0x0040 /* Enable Timer 6 */
#define TIMEN7 0x0080 /* Enable Timer 7 */

/* TIMER_DISABLE Masks */
#define TIMDIS0 TIMEN0 /* Disable Timer 0 */
#define TIMDIS1 TIMEN1 /* Disable Timer 1 */
#define TIMDIS2 TIMEN2 /* Disable Timer 2 */
#define TIMDIS3 TIMEN3 /* Disable Timer 3 */
#define TIMDIS4 TIMEN4 /* Disable Timer 4 */
#define TIMDIS5 TIMEN5 /* Disable Timer 5 */
#define TIMDIS6 TIMEN6 /* Disable Timer 6 */
#define TIMDIS7 TIMEN7 /* Disable Timer 7 */

/* TIMER_STATUS Masks */
#define TIMIL0 0x00000001 /* Timer 0 Interrupt */
#define TIMIL1 0x00000002 /* Timer 1 Interrupt */
#define TIMIL2 0x00000004 /* Timer 2 Interrupt */
#define TIMIL3 0x00000008 /* Timer 3 Interrupt */
#define TOVF_ERR0 0x00000010 /* Timer 0 Counter Overflow */
#define TOVF_ERR1 0x00000020 /* Timer 1 Counter Overflow */
#define TOVF_ERR2 0x00000040 /* Timer 2 Counter Overflow */
#define TOVF_ERR3 0x00000080 /* Timer 3 Counter Overflow */
#define TRUN0 0x00001000 /* Timer 0 Slave Enable Status */
#define TRUN1 0x00002000 /* Timer 1 Slave Enable Status */
#define TRUN2 0x00004000 /* Timer 2 Slave Enable Status */
#define TRUN3 0x00008000 /* Timer 3 Slave Enable Status */
#define TIMIL4 0x00010000 /* Timer 4 Interrupt */
#define TIMIL5 0x00020000 /* Timer 5 Interrupt */
#define TIMIL6 0x00040000 /* Timer 6 Interrupt */
#define TIMIL7 0x00080000 /* Timer 7 Interrupt */
#define TOVF_ERR4 0x00100000 /* Timer 4 Counter Overflow */
#define TOVF_ERR5 0x00200000 /* Timer 5 Counter Overflow */
#define TOVF_ERR6 0x00400000 /* Timer 6 Counter Overflow */
#define TOVF_ERR7 0x00800000 /* Timer 7 Counter Overflow */
#define TRUN4 0x10000000 /* Timer 4 Slave Enable Status */
#define TRUN5 0x20000000 /* Timer 5 Slave Enable Status */
#define TRUN6 0x40000000 /* Timer 6 Slave Enable Status */
#define TRUN7 0x80000000 /* Timer 7 Slave Enable Status */

/* Alternate Deprecated Macros Provided For Backwards Code Compatibility */
#define TOVL_ERR0 TOVF_ERR0
#define TOVL_ERR1 TOVF_ERR1
#define TOVL_ERR2 TOVF_ERR2
#define TOVL_ERR3 TOVF_ERR3
#define TOVL_ERR4 TOVF_ERR4
#define TOVL_ERR5 TOVF_ERR5
#define TOVL_ERR6 TOVF_ERR6
#define TOVL_ERR7 TOVF_ERR7

/* TIMERx_CONFIG Masks */
#define PWM_OUT 0x0001 /* Pulse-Width Modulation Output Mode */
#define WDTH_CAP 0x0002 /* Width Capture Input Mode */
#define EXT_CLK 0x0003 /* External Clock Mode */
#define PULSE_HI 0x0004 /* Action Pulse (Positive/Negative*) */
#define PERIOD_CNT 0x0008 /* Period Count */
#define IRQ_ENA 0x0010 /* Interrupt Request Enable */
#define TIN_SEL 0x0020 /* Timer Input Select */
#define OUT_DIS 0x0040 /* Output Pad Disable */
#define CLK_SEL 0x0080 /* Timer Clock Select */
#define TOGGLE_HI 0x0100 /* PWM_OUT PULSE_HI Toggle Mode */
#define EMU_RUN 0x0200 /* Emulation Behavior Select */
#define ERR_TYP 0xC000 /* Error Type */
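
/*
** Usage sketch (illustrative): configuring timer 0 for continuous PWM output
** and then starting it.  The pointer macros (*pTIMER0_CONFIG, *pTIMER0_PERIOD,
** *pTIMER0_WIDTH, *pTIMER_ENABLE) are assumed to come from the matching cdef
** header; the period and width values are arbitrary examples.
**
**   *pTIMER0_CONFIG = PWM_OUT | PULSE_HI | PERIOD_CNT;  // PWM, positive pulse
**   *pTIMER0_PERIOD = 1000;                             // period in clocks
**   *pTIMER0_WIDTH  = 250;                              // 25% duty cycle
**   *pTIMER_ENABLE  = TIMEN0;                           // start timer 0
*/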

/* ********************* ASYNCHRONOUS MEMORY CONTROLLER MASKS *************************/
/* EBIU_AMGCTL Masks */
#define AMCKEN 0x0001 /* Enable CLKOUT */
#define AMBEN_NONE 0x0000 /* All Banks Disabled */
#define AMBEN_B0 0x0002 /* Enable Async Memory Bank 0 only */
#define AMBEN_B0_B1 0x0004 /* Enable Async Memory Banks 0 & 1 only */
#define AMBEN_B0_B1_B2 0x0006 /* Enable Async Memory Banks 0, 1, and 2 */
#define AMBEN_ALL 0x0008 /* Enable Async Memory Banks (all) 0, 1, 2, and 3 */
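
/*
** Usage sketch (illustrative): the AMBEN_* settings are a multi-bit encoding,
** not independent flags, so exactly one of them is chosen and AMCKEN ORed in
** if CLKOUT is wanted.  The *pEBIU_AMGCTL pointer macro is assumed from the
** matching cdef header.
**
**   *pEBIU_AMGCTL = AMBEN_ALL | AMCKEN;   // all four async banks + CLKOUT
*/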

/* EBIU_AMBCTL0 Masks */
#define B0RDYEN 0x00000001 /* Bank 0 (B0) RDY Enable */
#define B0RDYPOL 0x00000002 /* B0 RDY Active High */
#define B0TT_1 0x00000004 /* B0 Transition Time (Read to Write) = 1 cycle */
#define B0TT_2 0x00000008 /* B0 Transition Time (Read to Write) = 2 cycles */
#define B0TT_3 0x0000000C /* B0 Transition Time (Read to Write) = 3 cycles */
#define B0TT_4 0x00000000 /* B0 Transition Time (Read to Write) = 4 cycles */
#define B0ST_1 0x00000010 /* B0 Setup Time (AOE to Read/Write) = 1 cycle */
#define B0ST_2 0x00000020 /* B0 Setup Time (AOE to Read/Write) = 2 cycles */
#define B0ST_3 0x00000030 /* B0 Setup Time (AOE to Read/Write) = 3 cycles */
#define B0ST_4 0x00000000 /* B0 Setup Time (AOE to Read/Write) = 4 cycles */
#define B0HT_1 0x00000040 /* B0 Hold Time (~Read/Write to ~AOE) = 1 cycle */
#define B0HT_2 0x00000080 /* B0 Hold Time (~Read/Write to ~AOE) = 2 cycles */
#define B0HT_3 0x000000C0 /* B0 Hold Time (~Read/Write to ~AOE) = 3 cycles */
#define B0HT_0 0x00000000 /* B0 Hold Time (~Read/Write to ~AOE) = 0 cycles */
#define B0RAT_1 0x00000100 /* B0 Read Access Time = 1 cycle */
#define B0RAT_2 0x00000200 /* B0 Read Access Time = 2 cycles */
#define B0RAT_3 0x00000300 /* B0 Read Access Time = 3 cycles */
#define B0RAT_4 0x00000400 /* B0 Read Access Time = 4 cycles */
#define B0RAT_5 0x00000500 /* B0 Read Access Time = 5 cycles */
#define B0RAT_6 0x00000600 /* B0 Read Access Time = 6 cycles */
#define B0RAT_7 0x00000700 /* B0 Read Access Time = 7 cycles */
#define B0RAT_8 0x00000800 /* B0 Read Access Time = 8 cycles */
#define B0RAT_9 0x00000900 /* B0 Read Access Time = 9 cycles */
#define B0RAT_10 0x00000A00 /* B0 Read Access Time = 10 cycles */
#define B0RAT_11 0x00000B00 /* B0 Read Access Time = 11 cycles */
#define B0RAT_12 0x00000C00 /* B0 Read Access Time = 12 cycles */
#define B0RAT_13 0x00000D00 /* B0 Read Access Time = 13 cycles */
#define B0RAT_14 0x00000E00 /* B0 Read Access Time = 14 cycles */
#define B0RAT_15 0x00000F00 /* B0 Read Access Time = 15 cycles */
#define B0WAT_1 0x00001000 /* B0 Write Access Time = 1 cycle */
#define B0WAT_2 0x00002000 /* B0 Write Access Time = 2 cycles */
#define B0WAT_3 0x00003000 /* B0 Write Access Time = 3 cycles */
#define B0WAT_4 0x00004000 /* B0 Write Access Time = 4 cycles */
#define B0WAT_5 0x00005000 /* B0 Write Access Time = 5 cycles */
#define B0WAT_6 0x00006000 /* B0 Write Access Time = 6 cycles */
#define B0WAT_7 0x00007000 /* B0 Write Access Time = 7 cycles */
#define B0WAT_8 0x00008000 /* B0 Write Access Time = 8 cycles */
#define B0WAT_9 0x00009000 /* B0 Write Access Time = 9 cycles */
#define B0WAT_10 0x0000A000 /* B0 Write Access Time = 10 cycles */
#define B0WAT_11 0x0000B000 /* B0 Write Access Time = 11 cycles */
#define B0WAT_12 0x0000C000 /* B0 Write Access Time = 12 cycles */
#define B0WAT_13 0x0000D000 /* B0 Write Access Time = 13 cycles */
#define B0WAT_14 0x0000E000 /* B0 Write Access Time = 14 cycles */
#define B0WAT_15 0x0000F000 /* B0 Write Access Time = 15 cycles */

#define B1RDYEN 0x00010000 /* Bank 1 (B1) RDY Enable */
#define B1RDYPOL 0x00020000 /* B1 RDY Active High */
#define B1TT_1 0x00040000 /* B1 Transition Time (Read to Write) = 1 cycle */
#define B1TT_2 0x00080000 /* B1 Transition Time (Read to Write) = 2 cycles */
#define B1TT_3 0x000C0000 /* B1 Transition Time (Read to Write) = 3 cycles */
#define B1TT_4 0x00000000 /* B1 Transition Time (Read to Write) = 4 cycles */
#define B1ST_1 0x00100000 /* B1 Setup Time (AOE to Read/Write) = 1 cycle */
#define B1ST_2 0x00200000 /* B1 Setup Time (AOE to Read/Write) = 2 cycles */
#define B1ST_3 0x00300000 /* B1 Setup Time (AOE to Read/Write) = 3 cycles */
#define B1ST_4 0x00000000 /* B1 Setup Time (AOE to Read/Write) = 4 cycles */
#define B1HT_1 0x00400000 /* B1 Hold Time (~Read/Write to ~AOE) = 1 cycle */
#define B1HT_2 0x00800000 /* B1 Hold Time (~Read/Write to ~AOE) = 2 cycles */
#define B1HT_3 0x00C00000 /* B1 Hold Time (~Read/Write to ~AOE) = 3 cycles */
#define B1HT_0 0x00000000 /* B1 Hold Time (~Read/Write to ~AOE) = 0 cycles */
#define B1RAT_1 0x01000000 /* B1 Read Access Time = 1 cycle */
#define B1RAT_2 0x02000000 /* B1 Read Access Time = 2 cycles */
#define B1RAT_3 0x03000000 /* B1 Read Access Time = 3 cycles */
#define B1RAT_4 0x04000000 /* B1 Read Access Time = 4 cycles */
#define B1RAT_5 0x05000000 /* B1 Read Access Time = 5 cycles */
#define B1RAT_6 0x06000000 /* B1 Read Access Time = 6 cycles */
#define B1RAT_7 0x07000000 /* B1 Read Access Time = 7 cycles */
#define B1RAT_8 0x08000000 /* B1 Read Access Time = 8 cycles */
#define B1RAT_9 0x09000000 /* B1 Read Access Time = 9 cycles */
#define B1RAT_10 0x0A000000 /* B1 Read Access Time = 10 cycles */
#define B1RAT_11 0x0B000000 /* B1 Read Access Time = 11 cycles */
#define B1RAT_12 0x0C000000 /* B1 Read Access Time = 12 cycles */
#define B1RAT_13 0x0D000000 /* B1 Read Access Time = 13 cycles */
#define B1RAT_14 0x0E000000 /* B1 Read Access Time = 14 cycles */
#define B1RAT_15 0x0F000000 /* B1 Read Access Time = 15 cycles */
#define B1WAT_1 0x10000000 /* B1 Write Access Time = 1 cycle */
#define B1WAT_2 0x20000000 /* B1 Write Access Time = 2 cycles */
#define B1WAT_3 0x30000000 /* B1 Write Access Time = 3 cycles */
#define B1WAT_4 0x40000000 /* B1 Write Access Time = 4 cycles */
#define B1WAT_5 0x50000000 /* B1 Write Access Time = 5 cycles */
#define B1WAT_6 0x60000000 /* B1 Write Access Time = 6 cycles */
#define B1WAT_7 0x70000000 /* B1 Write Access Time = 7 cycles */
#define B1WAT_8 0x80000000 /* B1 Write Access Time = 8 cycles */
#define B1WAT_9 0x90000000 /* B1 Write Access Time = 9 cycles */
#define B1WAT_10 0xA0000000 /* B1 Write Access Time = 10 cycles */
#define B1WAT_11 0xB0000000 /* B1 Write Access Time = 11 cycles */
#define B1WAT_12 0xC0000000 /* B1 Write Access Time = 12 cycles */
#define B1WAT_13 0xD0000000 /* B1 Write Access Time = 13 cycles */
#define B1WAT_14 0xE0000000 /* B1 Write Access Time = 14 cycles */
#define B1WAT_15 0xF0000000 /* B1 Write Access Time = 15 cycles */

/* EBIU_AMBCTL1 Masks */
#define B2RDYEN 0x00000001 /* Bank 2 (B2) RDY Enable */
#define B2RDYPOL 0x00000002 /* B2 RDY Active High */
#define B2TT_1 0x00000004 /* B2 Transition Time (Read to Write) = 1 cycle */
#define B2TT_2 0x00000008 /* B2 Transition Time (Read to Write) = 2 cycles */
#define B2TT_3 0x0000000C /* B2 Transition Time (Read to Write) = 3 cycles */
#define B2TT_4 0x00000000 /* B2 Transition Time (Read to Write) = 4 cycles */
#define B2ST_1 0x00000010 /* B2 Setup Time (AOE to Read/Write) = 1 cycle */
#define B2ST_2 0x00000020 /* B2 Setup Time (AOE to Read/Write) = 2 cycles */
#define B2ST_3 0x00000030 /* B2 Setup Time (AOE to Read/Write) = 3 cycles */
#define B2ST_4 0x00000000 /* B2 Setup Time (AOE to Read/Write) = 4 cycles */
#define B2HT_1 0x00000040 /* B2 Hold Time (~Read/Write to ~AOE) = 1 cycle */
#define B2HT_2 0x00000080 /* B2 Hold Time (~Read/Write to ~AOE) = 2 cycles */
#define B2HT_3 0x000000C0 /* B2 Hold Time (~Read/Write to ~AOE) = 3 cycles */
#define B2HT_0 0x00000000 /* B2 Hold Time (~Read/Write to ~AOE) = 0 cycles */
#define B2RAT_1 0x00000100 /* B2 Read Access Time = 1 cycle */
#define B2RAT_2 0x00000200 /* B2 Read Access Time = 2 cycles */
#define B2RAT_3 0x00000300 /* B2 Read Access Time = 3 cycles */
#define B2RAT_4 0x00000400 /* B2 Read Access Time = 4 cycles */
#define B2RAT_5 0x00000500 /* B2 Read Access Time = 5 cycles */
#define B2RAT_6 0x00000600 /* B2 Read Access Time = 6 cycles */
#define B2RAT_7 0x00000700 /* B2 Read Access Time = 7 cycles */
#define B2RAT_8 0x00000800 /* B2 Read Access Time = 8 cycles */
#define B2RAT_9 0x00000900 /* B2 Read Access Time = 9 cycles */
#define B2RAT_10 0x00000A00 /* B2 Read Access Time = 10 cycles */
#define B2RAT_11 0x00000B00 /* B2 Read Access Time = 11 cycles */
#define B2RAT_12 0x00000C00 /* B2 Read Access Time = 12 cycles */
#define B2RAT_13 0x00000D00 /* B2 Read Access Time = 13 cycles */
#define B2RAT_14 0x00000E00 /* B2 Read Access Time = 14 cycles */
#define B2RAT_15 0x00000F00 /* B2 Read Access Time = 15 cycles */
#define B2WAT_1 0x00001000 /* B2 Write Access Time = 1 cycle */
#define B2WAT_2 0x00002000 /* B2 Write Access Time = 2 cycles */
#define B2WAT_3 0x00003000 /* B2 Write Access Time = 3 cycles */
#define B2WAT_4 0x00004000 /* B2 Write Access Time = 4 cycles */
#define B2WAT_5 0x00005000 /* B2 Write Access Time = 5 cycles */
#define B2WAT_6 0x00006000 /* B2 Write Access Time = 6 cycles */
#define B2WAT_7 0x00007000 /* B2 Write Access Time = 7 cycles */
#define B2WAT_8 0x00008000 /* B2 Write Access Time = 8 cycles */
#define B2WAT_9 0x00009000 /* B2 Write Access Time = 9 cycles */
#define B2WAT_10 0x0000A000 /* B2 Write Access Time = 10 cycles */
#define B2WAT_11 0x0000B000 /* B2 Write Access Time = 11 cycles */
#define B2WAT_12 0x0000C000 /* B2 Write Access Time = 12 cycles */
#define B2WAT_13 0x0000D000 /* B2 Write Access Time = 13 cycles */
#define B2WAT_14 0x0000E000 /* B2 Write Access Time = 14 cycles */
#define B2WAT_15 0x0000F000 /* B2 Write Access Time = 15 cycles */

#define B3RDYEN 0x00010000 /* Bank 3 (B3) RDY Enable */
#define B3RDYPOL 0x00020000 /* B3 RDY Active High */
#define B3TT_1 0x00040000 /* B3 Transition Time (Read to Write) = 1 cycle */
#define B3TT_2 0x00080000 /* B3 Transition Time (Read to Write) = 2 cycles */
#define B3TT_3 0x000C0000 /* B3 Transition Time (Read to Write) = 3 cycles */
#define B3TT_4 0x00000000 /* B3 Transition Time (Read to Write) = 4 cycles */
#define B3ST_1 0x00100000 /* B3 Setup Time (AOE to Read/Write) = 1 cycle */
#define B3ST_2 0x00200000 /* B3 Setup Time (AOE to Read/Write) = 2 cycles */
#define B3ST_3 0x00300000 /* B3 Setup Time (AOE to Read/Write) = 3 cycles */
#define B3ST_4 0x00000000 /* B3 Setup Time (AOE to Read/Write) = 4 cycles */
#define B3HT_1 0x00400000 /* B3 Hold Time (~Read/Write to ~AOE) = 1 cycle */
#define B3HT_2 0x00800000 /* B3 Hold Time (~Read/Write to ~AOE) = 2 cycles */
#define B3HT_3 0x00C00000 /* B3 Hold Time (~Read/Write to ~AOE) = 3 cycles */
#define B3HT_0 0x00000000 /* B3 Hold Time (~Read/Write to ~AOE) = 0 cycles */
#define B3RAT_1 0x01000000 /* B3 Read Access Time = 1 cycle */
#define B3RAT_2 0x02000000 /* B3 Read Access Time = 2 cycles */
#define B3RAT_3 0x03000000 /* B3 Read Access Time = 3 cycles */
#define B3RAT_4 0x04000000 /* B3 Read Access Time = 4 cycles */
#define B3RAT_5 0x05000000 /* B3 Read Access Time = 5 cycles */
#define B3RAT_6 0x06000000 /* B3 Read Access Time = 6 cycles */
#define B3RAT_7 0x07000000 /* B3 Read Access Time = 7 cycles */
#define B3RAT_8 0x08000000 /* B3 Read Access Time = 8 cycles */
#define B3RAT_9 0x09000000 /* B3 Read Access Time = 9 cycles */
#define B3RAT_10 0x0A000000 /* B3 Read Access Time = 10 cycles */
#define B3RAT_11 0x0B000000 /* B3 Read Access Time = 11 cycles */
#define B3RAT_12 0x0C000000 /* B3 Read Access Time = 12 cycles */
#define B3RAT_13 0x0D000000 /* B3 Read Access Time = 13 cycles */
#define B3RAT_14 0x0E000000 /* B3 Read Access Time = 14 cycles */
#define B3RAT_15 0x0F000000 /* B3 Read Access Time = 15 cycles */
#define B3WAT_1 0x10000000 /* B3 Write Access Time = 1 cycle */
#define B3WAT_2 0x20000000 /* B3 Write Access Time = 2 cycles */
#define B3WAT_3 0x30000000 /* B3 Write Access Time = 3 cycles */
#define B3WAT_4 0x40000000 /* B3 Write Access Time = 4 cycles */
#define B3WAT_5 0x50000000 /* B3 Write Access Time = 5 cycles */
#define B3WAT_6 0x60000000 /* B3 Write Access Time = 6 cycles */
#define B3WAT_7 0x70000000 /* B3 Write Access Time = 7 cycles */
#define B3WAT_8 0x80000000 /* B3 Write Access Time = 8 cycles */
#define B3WAT_9 0x90000000 /* B3 Write Access Time = 9 cycles */
#define B3WAT_10 0xA0000000 /* B3 Write Access Time = 10 cycles */
#define B3WAT_11 0xB0000000 /* B3 Write Access Time = 11 cycles */
#define B3WAT_12 0xC0000000 /* B3 Write Access Time = 12 cycles */
#define B3WAT_13 0xD0000000 /* B3 Write Access Time = 13 cycles */
#define B3WAT_14 0xE0000000 /* B3 Write Access Time = 14 cycles */
#define B3WAT_15 0xF0000000 /* B3 Write Access Time = 15 cycles */


/* ********************** SDRAM CONTROLLER MASKS **********************************************/
/* EBIU_SDGCTL Masks */
#define SCTLE 0x00000001 /* Enable SDRAM Signals */
#define CL_2 0x00000008 /* SDRAM CAS Latency = 2 cycles */
#define CL_3 0x0000000C /* SDRAM CAS Latency = 3 cycles */
#define PASR_ALL 0x00000000 /* All 4 SDRAM Banks Refreshed In Self-Refresh */
#define PASR_B0_B1 0x00000010 /* SDRAM Banks 0 and 1 Are Refreshed In Self-Refresh */
#define PASR_B0 0x00000020 /* Only SDRAM Bank 0 Is Refreshed In Self-Refresh */
#define TRAS_1 0x00000040 /* SDRAM tRAS = 1 cycle */
#define TRAS_2 0x00000080 /* SDRAM tRAS = 2 cycles */
#define TRAS_3 0x000000C0 /* SDRAM tRAS = 3 cycles */
#define TRAS_4 0x00000100 /* SDRAM tRAS = 4 cycles */
#define TRAS_5 0x00000140 /* SDRAM tRAS = 5 cycles */
#define TRAS_6 0x00000180 /* SDRAM tRAS = 6 cycles */
#define TRAS_7 0x000001C0 /* SDRAM tRAS = 7 cycles */
#define TRAS_8 0x00000200 /* SDRAM tRAS = 8 cycles */
#define TRAS_9 0x00000240 /* SDRAM tRAS = 9 cycles */
#define TRAS_10 0x00000280 /* SDRAM tRAS = 10 cycles */
#define TRAS_11 0x000002C0 /* SDRAM tRAS = 11 cycles */
#define TRAS_12 0x00000300 /* SDRAM tRAS = 12 cycles */
#define TRAS_13 0x00000340 /* SDRAM tRAS = 13 cycles */
#define TRAS_14 0x00000380 /* SDRAM tRAS = 14 cycles */
#define TRAS_15 0x000003C0 /* SDRAM tRAS = 15 cycles */
#define TRP_1 0x00000800 /* SDRAM tRP = 1 cycle */
#define TRP_2 0x00001000 /* SDRAM tRP = 2 cycles */
#define TRP_3 0x00001800 /* SDRAM tRP = 3 cycles */
#define TRP_4 0x00002000 /* SDRAM tRP = 4 cycles */
#define TRP_5 0x00002800 /* SDRAM tRP = 5 cycles */
#define TRP_6 0x00003000 /* SDRAM tRP = 6 cycles */
#define TRP_7 0x00003800 /* SDRAM tRP = 7 cycles */
#define TRCD_1 0x00008000 /* SDRAM tRCD = 1 cycle */
#define TRCD_2 0x00010000 /* SDRAM tRCD = 2 cycles */
#define TRCD_3 0x00018000 /* SDRAM tRCD = 3 cycles */
#define TRCD_4 0x00020000 /* SDRAM tRCD = 4 cycles */
#define TRCD_5 0x00028000 /* SDRAM tRCD = 5 cycles */
#define TRCD_6 0x00030000 /* SDRAM tRCD = 6 cycles */
#define TRCD_7 0x00038000 /* SDRAM tRCD = 7 cycles */
#define TWR_1 0x00080000 /* SDRAM tWR = 1 cycle */
#define TWR_2 0x00100000 /* SDRAM tWR = 2 cycles */
#define TWR_3 0x00180000 /* SDRAM tWR = 3 cycles */
#define PUPSD 0x00200000 /* Power-Up Start Delay (15 SCLK Cycles Delay) */
#define PSM 0x00400000 /* Power-Up Sequence (Mode Register Before/After* Refresh) */
#define PSS 0x00800000 /* Enable Power-Up Sequence on Next SDRAM Access */
#define SRFS 0x01000000 /* Enable SDRAM Self-Refresh Mode */
#define EBUFE 0x02000000 /* Enable External Buffering Timing */
#define FBBRW 0x04000000 /* Enable Fast Back-To-Back Read To Write */
#define EMREN 0x10000000 /* Extended Mode Register Enable */
#define TCSR 0x20000000 /* Temp-Compensated Self-Refresh Value (85/45* Deg C) */
#define CDDBG 0x40000000 /* Tristate SDRAM Controls During Bus Grant */

/* EBIU_SDBCTL Masks */
#define EBE 0x0001 /* Enable SDRAM External Bank */
#define EBSZ_16 0x0000 /* SDRAM External Bank Size = 16MB */
#define EBSZ_32 0x0002 /* SDRAM External Bank Size = 32MB */
#define EBSZ_64 0x0004 /* SDRAM External Bank Size = 64MB */
#define EBSZ_128 0x0006 /* SDRAM External Bank Size = 128MB */
#define EBSZ_256 0x0008 /* SDRAM External Bank Size = 256MB */
#define EBSZ_512 0x000A /* SDRAM External Bank Size = 512MB */
#define EBCAW_8 0x0000 /* SDRAM External Bank Column Address Width = 8 Bits */
#define EBCAW_9 0x0010 /* SDRAM External Bank Column Address Width = 9 Bits */
#define EBCAW_10 0x0020 /* SDRAM External Bank Column Address Width = 10 Bits */
#define EBCAW_11 0x0030 /* SDRAM External Bank Column Address Width = 11 Bits */
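
/*
** Usage sketch (illustrative): describing a single enabled 64MB SDRAM bank
** with a 10-bit column address.  EBSZ_* and EBCAW_* are mutually exclusive
** field encodings, so exactly one of each is ORed in.  The *pEBIU_SDBCTL
** pointer macro is assumed from the matching cdef header; the sizes shown
** are example values only.
**
**   *pEBIU_SDBCTL = EBE | EBSZ_64 | EBCAW_10;
*/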

/* EBIU_SDSTAT Masks */
#define SDCI 0x0001 /* SDRAM Controller Idle */
#define SDSRA 0x0002 /* SDRAM Self-Refresh Active */
#define SDPUA 0x0004 /* SDRAM Power-Up Active */
#define SDRS 0x0008 /* SDRAM Will Power-Up On Next Access */
#define SDEASE 0x0010 /* SDRAM EAB Sticky Error Status */
#define BGSTAT 0x0020 /* Bus Grant Status */


/* ************************** DMA CONTROLLER MASKS ********************************/

/* DMAx_PERIPHERAL_MAP, MDMA_yy_PERIPHERAL_MAP Masks */
#define CTYPE 0x0040 /* DMA Channel Type Indicator (Memory/Peripheral*) */
#define PMAP 0xF000 /* Peripheral Mapped To This Channel */
#define PMAP_PPI 0x0000 /* PPI Port DMA */
#define PMAP_EMACRX 0x1000 /* Ethernet Receive DMA */
#define PMAP_EMACTX 0x2000 /* Ethernet Transmit DMA */
#define PMAP_SPORT0RX 0x3000 /* SPORT0 Receive DMA */
#define PMAP_SPORT0TX 0x4000 /* SPORT0 Transmit DMA */
#define PMAP_SPORT1RX 0x5000 /* SPORT1 Receive DMA */
#define PMAP_SPORT1TX 0x6000 /* SPORT1 Transmit DMA */
#define PMAP_SPI 0x7000 /* SPI Port DMA */
#define PMAP_UART0RX 0x8000 /* UART0 Port Receive DMA */
#define PMAP_UART0TX 0x9000 /* UART0 Port Transmit DMA */
#define PMAP_UART1RX 0xA000 /* UART1 Port Receive DMA */
#define PMAP_UART1TX 0xB000 /* UART1 Port Transmit DMA */
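
/*
** Usage sketch (illustrative): the PMAP field identifies which peripheral a
** DMA channel is currently bound to, so a driver can verify the mapping
** before servicing the channel.  The *pDMA8_PERIPHERAL_MAP pointer macro is
** assumed from the matching cdef header.
**
**   if ((*pDMA8_PERIPHERAL_MAP & PMAP) == PMAP_UART0RX) {
**           // channel 8 is routed to UART0 receive, as expected
**   }
*/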

/* ************ PARALLEL PERIPHERAL INTERFACE (PPI) MASKS *************/
/* PPI_CONTROL Masks */
#define PORT_EN 0x0001 /* PPI Port Enable */
#define PORT_DIR 0x0002 /* PPI Port Direction */
#define XFR_TYPE 0x000C /* PPI Transfer Type */
#define PORT_CFG 0x0030 /* PPI Port Configuration */
#define FLD_SEL 0x0040 /* PPI Active Field Select */
#define PACK_EN 0x0080 /* PPI Packing Mode */
#define DMA32 0x0100 /* PPI 32-bit DMA Enable */
#define SKIP_EN 0x0200 /* PPI Skip Element Enable */
#define SKIP_EO 0x0400 /* PPI Skip Even/Odd Elements */
#define DLEN_8 0x0000 /* Data Length = 8 Bits */
#define DLEN_10 0x0800 /* Data Length = 10 Bits */
#define DLEN_11 0x1000 /* Data Length = 11 Bits */
#define DLEN_12 0x1800 /* Data Length = 12 Bits */
#define DLEN_13 0x2000 /* Data Length = 13 Bits */
#define DLEN_14 0x2800 /* Data Length = 14 Bits */
#define DLEN_15 0x3000 /* Data Length = 15 Bits */
#define DLEN_16 0x3800 /* Data Length = 16 Bits */
#define DLENGTH 0x3800 /* PPI Data Length */
#define POLC 0x4000 /* PPI Clock Polarity */
#define POLS 0x8000 /* PPI Frame Sync Polarity */
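
/*
** Usage sketch (illustrative): enabling the PPI for 8-bit receive with
** packing.  The DLEN_* values are encodings of the DLENGTH field, so only
** one may be used at a time.  The *pPPI_CONTROL pointer macro is assumed
** from the matching cdef header.
**
**   *pPPI_CONTROL = PACK_EN | DLEN_8;    // configure first...
**   *pPPI_CONTROL |= PORT_EN;            // ...then enable the port
*/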

/* PPI_STATUS Masks */
#define FLD 0x0400 /* Field Indicator */
#define FT_ERR 0x0800 /* Frame Track Error */
#define OVR 0x1000 /* FIFO Overflow Error */
#define UNDR 0x2000 /* FIFO Underrun Error */
#define ERR_DET 0x4000 /* Error Detected Indicator */
#define ERR_NCOR 0x8000 /* Error Not Corrected Indicator */


/* ******************** TWO-WIRE INTERFACE (TWI) MASKS ***********************/
/* TWI_CLKDIV Macros (Use: *pTWI_CLKDIV = CLKLOW(x)|CLKHI(y); ) */
#define CLKLOW(x) ((x) & 0xFF) /* Periods Clock Is Held Low */
#define CLKHI(y) (((y)&0xFF)<<0x8) /* Periods Before New Clock Low */
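
/*
** Usage sketch (illustrative): with the internal time reference prescaled to
** 10MHz (see PRESCALE below), a 100kHz SCL needs 100 reference periods per
** cycle; an even 50/50 split gives a square clock.  The *pTWI0_CLKDIV
** pointer macro is assumed from the matching cdef header.
**
**   *pTWI0_CLKDIV = CLKLOW(50) | CLKHI(50);   // ~100kHz SCL, 50% duty
*/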

/* TWI_PRESCALE Masks */
#define PRESCALE 0x007F /* SCLKs Per Internal Time Reference (10MHz) */
#define TWI_ENA 0x0080 /* TWI Enable */
#define SCCB 0x0200 /* SCCB Compatibility Enable */

/* TWI_SLAVE_CTL Masks */
#define SEN 0x0001 /* Slave Enable */
#define SADD_LEN 0x0002 /* Slave Address Length */
#define STDVAL 0x0004 /* Slave Transmit Data Valid */
#define NAK 0x0008 /* NAK/ACK* Generated At Conclusion Of Transfer */
#define GEN 0x0010 /* General Call Address Matching Enabled */

/* TWI_SLAVE_STAT Masks */
#define SDIR 0x0001 /* Slave Transfer Direction (Transmit/Receive*) */
#define GCALL 0x0002 /* General Call Indicator */

/* TWI_MASTER_CTL Masks */
#define MEN 0x0001 /* Master Mode Enable */
#define MADD_LEN 0x0002 /* Master Address Length */
#define MDIR 0x0004 /* Master Transmit Direction (RX/TX*) */
#define FAST 0x0008 /* Use Fast Mode Timing Specs */
#define STOP 0x0010 /* Issue Stop Condition */
#define RSTART 0x0020 /* Repeat Start or Stop* At End Of Transfer */
#define DCNT 0x3FC0 /* Data Bytes To Transfer */
#define SDAOVR 0x4000 /* Serial Data Override */
#define SCLOVR 0x8000 /* Serial Clock Override */

/* TWI_MASTER_STAT Masks */
#define MPROG 0x0001 /* Master Transfer In Progress */
#define LOSTARB 0x0002 /* Lost Arbitration Indicator (Xfer Aborted) */
#define ANAK 0x0004 /* Address Not Acknowledged */
#define DNAK 0x0008 /* Data Not Acknowledged */
#define BUFRDERR 0x0010 /* Buffer Read Error */
#define BUFWRERR 0x0020 /* Buffer Write Error */
#define SDASEN 0x0040 /* Serial Data Sense */
#define SCLSEN 0x0080 /* Serial Clock Sense */
#define BUSBUSY 0x0100 /* Bus Busy Indicator */

/* TWI_INT_STAT and TWI_INT_MASK Masks */
#define SINIT 0x0001 /* Slave Transfer Initiated */
#define SCOMP 0x0002 /* Slave Transfer Complete */
#define SERR 0x0004 /* Slave Transfer Error */
#define SOVF 0x0008 /* Slave Overflow */
#define MCOMP 0x0010 /* Master Transfer Complete */
#define MERR 0x0020 /* Master Transfer Error */
#define XMTSERV 0x0040 /* Transmit FIFO Service */
#define RCVSERV 0x0080 /* Receive FIFO Service */

/* TWI_FIFO_CTL Masks */
#define XMTFLUSH 0x0001 /* Transmit Buffer Flush */
#define RCVFLUSH 0x0002 /* Receive Buffer Flush */
#define XMTINTLEN 0x0004 /* Transmit Buffer Interrupt Length */
#define RCVINTLEN 0x0008 /* Receive Buffer Interrupt Length */

/* TWI_FIFO_STAT Masks */
#define XMTSTAT 0x0003 /* Transmit FIFO Status */
#define XMT_EMPTY 0x0000 /* Transmit FIFO Empty */
#define XMT_HALF 0x0001 /* Transmit FIFO Has 1 Byte To Write */
#define XMT_FULL 0x0003 /* Transmit FIFO Full (2 Bytes To Write) */

#define RCVSTAT 0x000C /* Receive FIFO Status */
#define RCV_EMPTY 0x0000 /* Receive FIFO Empty */
#define RCV_HALF 0x0004 /* Receive FIFO Has 1 Byte To Read */
#define RCV_FULL 0x000C /* Receive FIFO Full (2 Bytes To Read) */


/* ******************* PIN CONTROL REGISTER MASKS ************************/
/* PORT_MUX Masks */
#define PJSE 0x0001 /* Port J SPI/SPORT Enable */
#define PJSE_SPORT 0x0000 /* Enable TFS0/DT0PRI */
#define PJSE_SPI 0x0001 /* Enable SPI_SSEL3:2 */

#define PJCE(x) (((x)&0x3)<<1) /* Port J CAN/SPI/SPORT Enable */
#define PJCE_SPORT 0x0000 /* Enable DR0SEC/DT0SEC */
#define PJCE_CAN 0x0002 /* Enable CAN RX/TX */
#define PJCE_SPI 0x0004 /* Enable SPI_SSEL7 */

#define PFDE 0x0008 /* Port F DMA Request Enable */
#define PFDE_UART 0x0000 /* Enable UART0 RX/TX */
#define PFDE_DMA 0x0008 /* Enable DMAR1:0 */

#define PFTE 0x0010 /* Port F Timer Enable */
#define PFTE_UART 0x0000 /* Enable UART1 RX/TX */
#define PFTE_TIMER 0x0010 /* Enable TMR7:6 */

#define PFS6E 0x0020 /* Port F SPI SSEL 6 Enable */
#define PFS6E_TIMER 0x0000 /* Enable TMR5 */
#define PFS6E_SPI 0x0020 /* Enable SPI_SSEL6 */

#define PFS5E 0x0040 /* Port F SPI SSEL 5 Enable */
#define PFS5E_TIMER 0x0000 /* Enable TMR4 */
#define PFS5E_SPI 0x0040 /* Enable SPI_SSEL5 */

#define PFS4E 0x0080 /* Port F SPI SSEL 4 Enable */
#define PFS4E_TIMER 0x0000 /* Enable TMR3 */
#define PFS4E_SPI 0x0080 /* Enable SPI_SSEL4 */

#define PFFE 0x0100 /* Port F PPI Frame Sync Enable */
#define PFFE_TIMER 0x0000 /* Enable TMR2 */
#define PFFE_PPI 0x0100 /* Enable PPI FS3 */

#define PGSE 0x0200 /* Port G SPORT1 Secondary Enable */
#define PGSE_PPI 0x0000 /* Enable PPI D9:8 */
#define PGSE_SPORT 0x0200 /* Enable DR1SEC/DT1SEC */

#define PGRE 0x0400 /* Port G SPORT1 Receive Enable */
#define PGRE_PPI 0x0000 /* Enable PPI D12:10 */
#define PGRE_SPORT 0x0400 /* Enable DR1PRI/RFS1/RSCLK1 */

#define PGTE 0x0800 /* Port G SPORT1 Transmit Enable */
#define PGTE_PPI 0x0000 /* Enable PPI D15:13 */
#define PGTE_SPORT 0x0800 /* Enable DT1PRI/TFS1/TSCLK1 */
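
/*
** Usage sketch (illustrative): each PORT_MUX bit selects between the two
** peripherals that share a pin group, so the register is usually composed
** in one shot from the mutually exclusive values above.  The *pBFIN_PORT_MUX
** pointer macro is assumed from the matching cdef header; the selection
** below is an arbitrary example.
**
**   *pBFIN_PORT_MUX = PJSE_SPI | PJCE_CAN | PFTE_TIMER | PGSE_SPORT;
*/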


/* ****************** HANDSHAKE DMA (HMDMA) MASKS *********************/
/* HMDMAx_CONTROL Masks */
#define HMDMAEN 0x0001 /* Enable Handshake DMA 0/1 */
#define REP 0x0002 /* HMDMA Request Polarity */
#define UTE 0x0004 /* Urgency Threshold Enable */
#define OIE 0x0010 /* Overflow Interrupt Enable */
#define BDIE 0x0020 /* Block Done Interrupt Enable */
#define MBDI 0x0040 /* Mask Block Done IRQ If Pending ECNT */
#define DRQ 0x0300 /* HMDMA Request Type */
#define DRQ_NONE 0x0000 /* No Request */
#define DRQ_SINGLE 0x0100 /* Channels Request Single */
#define DRQ_MULTI 0x0200 /* Channels Request Multi (Default) */
#define DRQ_URGENT 0x0300 /* Channels Request Multi Urgent */
#define RBC 0x1000 /* Reload BCNT With IBCNT */
#define PS 0x2000 /* HMDMA Pin Status */
#define OI 0x4000 /* Overflow Interrupt Generated */
#define BDI 0x8000 /* Block Done Interrupt Generated */

/* entry addresses of the user-callable Boot ROM functions */

#define _BOOTROM_RESET 0xEF000000
#define _BOOTROM_FINAL_INIT 0xEF000002
#define _BOOTROM_DO_MEMORY_DMA 0xEF000006
#define _BOOTROM_BOOT_DXE_FLASH 0xEF000008
#define _BOOTROM_BOOT_DXE_SPI 0xEF00000A
#define _BOOTROM_BOOT_DXE_TWI 0xEF00000C
#define _BOOTROM_GET_DXE_ADDRESS_FLASH 0xEF000010
#define _BOOTROM_GET_DXE_ADDRESS_SPI 0xEF000012
#define _BOOTROM_GET_DXE_ADDRESS_TWI 0xEF000014
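
/*
** Usage sketch (illustrative): the Boot ROM entries above are fixed code
** addresses, so they are typically invoked by casting the address to a
** function pointer.  The void(void) signature shown is an assumption for
** illustration only; consult the Boot ROM documentation for the real
** calling convention of each entry.
**
**   void (*bootrom_reset)(void) = (void (*)(void))_BOOTROM_RESET;
**   bootrom_reset();   // restart through the Boot ROM reset entry
*/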

/* Alternate Deprecated Macros Provided For Backwards Code Compatibility */
#define PGDE_UART PFDE_UART
#define PGDE_DMA PFDE_DMA
#define CKELOW SCKELOW

/* HOST Port Registers */

#define HOST_CONTROL 0xffc03400 /* HOST Control Register */
#define HOST_STATUS 0xffc03404 /* HOST Status Register */
#define HOST_TIMEOUT 0xffc03408 /* HOST Acknowledge Mode Timeout Register */

/* Counter Registers */

#define CNT_CONFIG 0xffc03500 /* Configuration Register */
#define CNT_IMASK 0xffc03504 /* Interrupt Mask Register */
#define CNT_STATUS 0xffc03508 /* Status Register */
#define CNT_COMMAND 0xffc0350c /* Command Register */
#define CNT_DEBOUNCE 0xffc03510 /* Debounce Register */
#define CNT_COUNTER 0xffc03514 /* Counter Register */
#define CNT_MAX 0xffc03518 /* Maximal Count Register */
#define CNT_MIN 0xffc0351c /* Minimal Count Register */

/* OTP/FUSE Registers */

#define OTP_CONTROL 0xffc03600 /* OTP/Fuse Control Register */
#define OTP_BEN 0xffc03604 /* OTP/Fuse Byte Enable */
#define OTP_STATUS 0xffc03608 /* OTP/Fuse Status */
#define OTP_TIMING 0xffc0360c /* OTP/Fuse Access Timing */

/* Security Registers */

#define SECURE_SYSSWT 0xffc03620 /* Secure System Switches */
#define SECURE_CONTROL 0xffc03624 /* Secure Control */
#define SECURE_STATUS 0xffc03628 /* Secure Status */

/* OTP Read/Write Data Buffer Registers */
1271
1272#define OTP_DATA0 0xffc03680 /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
1273#define OTP_DATA1 0xffc03684 /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
1274#define OTP_DATA2 0xffc03688 /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
1275#define OTP_DATA3 0xffc0368c /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
1276
1277/* Motor Control PWM Registers */
1278
1279#define PWM_CTRL 0xffc03700 /* PWM Control Register */
1280#define PWM_STAT 0xffc03704 /* PWM Status Register */
1281#define PWM_TM 0xffc03708 /* PWM Period Register */
1282#define PWM_DT 0xffc0370c /* PWM Dead Time Register */
1283#define PWM_GATE 0xffc03710 /* PWM Chopping Control */
1284#define PWM_CHA 0xffc03714 /* PWM Channel A Duty Control */
1285#define PWM_CHB 0xffc03718 /* PWM Channel B Duty Control */
1286#define PWM_CHC 0xffc0371c /* PWM Channel C Duty Control */
1287#define PWM_SEG 0xffc03720 /* PWM Crossover and Output Enable */
1288#define PWM_SYNCWT 0xffc03724 /* PWM Sync Pulse Width Control */
1289#define PWM_CHAL 0xffc03728 /* PWM Channel AL Duty Control (SR mode only) */
1290#define PWM_CHBL 0xffc0372c /* PWM Channel BL Duty Control (SR mode only) */
1291#define PWM_CHCL 0xffc03730 /* PWM Channel CL Duty Control (SR mode only) */
1292#define PWM_LSI 0xffc03734 /* PWM Low Side Invert (SR mode only) */
1293#define PWM_STAT2 0xffc03738 /* PWM Status Register 2 */
1294
1295
1296/* ********************************************************** */
1297/* SINGLE BIT MACRO PAIRS (bit mask and negated one) */
1298/* and MULTI BIT READ MACROS */
1299/* ********************************************************** */
1300
1301/* Bit masks for HOST_CONTROL */
1302
1303#define HOST_CNTR_HOST_EN 0x1 /* Host Enable */
1304#define HOST_CNTR_nHOST_EN 0x0
1305#define HOST_CNTR_HOST_END 0x2 /* Host Endianness */
1306#define HOST_CNTR_nHOST_END 0x0
1307#define HOST_CNTR_DATA_SIZE 0x4 /* Data Size */
1308#define HOST_CNTR_nDATA_SIZE 0x0
1309#define HOST_CNTR_HOST_RST 0x8 /* Host Reset */
1310#define HOST_CNTR_nHOST_RST 0x0
1311#define HOST_CNTR_HRDY_OVR 0x20 /* Host Ready Override */
1312#define HOST_CNTR_nHRDY_OVR 0x0
1313#define HOST_CNTR_INT_MODE 0x40 /* Interrupt Mode */
1314#define HOST_CNTR_nINT_MODE 0x0
1315#define HOST_CNTR_BT_EN 0x80 /* Bus Timeout Enable */
1316#define HOST_CNTR_nBT_EN 0x0
1317#define HOST_CNTR_EHW 0x100 /* Enable Host Write */
1318#define HOST_CNTR_nEHW 0x0
1319#define HOST_CNTR_EHR 0x200 /* Enable Host Read */
1320#define HOST_CNTR_nEHR 0x0
1321#define HOST_CNTR_BDR 0x400 /* Burst DMA Requests */
1322#define HOST_CNTR_nBDR 0x0
1323
1324/* Bit masks for HOST_STATUS */
1325
1326#define HOST_STAT_READY 0x1 /* DMA Ready */
1327#define HOST_STAT_nREADY 0x0
1328#define HOST_STAT_FIFOFULL 0x2 /* FIFO Full */
1329#define HOST_STAT_nFIFOFULL 0x0
1330#define HOST_STAT_FIFOEMPTY 0x4 /* FIFO Empty */
1331#define HOST_STAT_nFIFOEMPTY 0x0
1332#define HOST_STAT_COMPLETE 0x8 /* DMA Complete */
1333#define HOST_STAT_nCOMPLETE 0x0
1334#define HOST_STAT_HSHK 0x10 /* Host Handshake */
1335#define HOST_STAT_nHSHK 0x0
1336#define HOST_STAT_TIMEOUT 0x20 /* Host Timeout */
1337#define HOST_STAT_nTIMEOUT 0x0
1338#define HOST_STAT_HIRQ 0x40 /* Host Interrupt Request */
1339#define HOST_STAT_nHIRQ 0x0
1340#define HOST_STAT_ALLOW_CNFG 0x80 /* Allow New Configuration */
1341#define HOST_STAT_nALLOW_CNFG 0x0
1342#define HOST_STAT_DMA_DIR 0x100 /* DMA Direction */
1343#define HOST_STAT_nDMA_DIR 0x0
1344#define HOST_STAT_BTE 0x200 /* Bus Timeout Enabled */
1345#define HOST_STAT_nBTE 0x0
1346#define HOST_STAT_HOSTRD_DONE 0x8000 /* Host Read Completion Interrupt */
1347#define HOST_STAT_nHOSTRD_DONE 0x0
1348
1349/* Bit masks for HOST_TIMEOUT */
1350
1351#define HOST_COUNT_TIMEOUT 0x7ff /* Host Timeout count */
1352
1353/* Bit masks for SECURE_SYSSWT */
1354
1355#define EMUDABL 0x1 /* Emulation Disable. */
1356#define nEMUDABL 0x0
1357#define RSTDABL 0x2 /* Reset Disable */
1358#define nRSTDABL 0x0
1359#define L1IDABL 0x1c /* L1 Instruction Memory Disable. */
1360#define L1DADABL 0xe0 /* L1 Data Bank A Memory Disable. */
1361#define L1DBDABL 0x700 /* L1 Data Bank B Memory Disable. */
1362#define DMA0OVR 0x800 /* DMA0 Memory Access Override */
1363#define nDMA0OVR 0x0
1364#define DMA1OVR 0x1000 /* DMA1 Memory Access Override */
1365#define nDMA1OVR 0x0
1366#define EMUOVR 0x4000 /* Emulation Override */
1367#define nEMUOVR 0x0
1368#define OTPSEN 0x8000 /* OTP Secrets Enable. */
1369#define nOTPSEN 0x0
1370#define L2DABL 0x70000 /* L2 Memory Disable. */
1371
1372/* Bit masks for SECURE_CONTROL */
1373
1374#define SECURE0 0x1 /* SECURE 0 */
1375#define nSECURE0 0x0
1376#define SECURE1 0x2 /* SECURE 1 */
1377#define nSECURE1 0x0
1378#define SECURE2 0x4 /* SECURE 2 */
1379#define nSECURE2 0x0
1380#define SECURE3 0x8 /* SECURE 3 */
1381#define nSECURE3 0x0
1382
1383/* Bit masks for SECURE_STATUS */
1384
1385#define SECMODE 0x3 /* Secured Mode Control State */
1386#define NMI 0x4 /* Non Maskable Interrupt */
1387#define nNMI 0x0
1388#define AFVALID 0x8 /* Authentication Firmware Valid */
1389#define nAFVALID 0x0
1390#define AFEXIT 0x10 /* Authentication Firmware Exit */
1391#define nAFEXIT 0x0
1392#define SECSTAT 0xe0 /* Secure Status */
1393
1394#endif /* _DEF_BF512_H */
diff --git a/arch/blackfin/mach-bf518/include/mach/defBF51x_base.h b/arch/blackfin/mach-bf518/include/mach/defBF51x_base.h
deleted file mode 100644
index 5f84913dcd91..000000000000
--- a/arch/blackfin/mach-bf518/include/mach/defBF51x_base.h
+++ /dev/null
@@ -1,1495 +0,0 @@
1/*
2 * Copyright 2008 Analog Devices Inc.
3 *
4 * Licensed under the ADI BSD license or the GPL-2 (or later)
5 */
6
7#ifndef _DEF_BF51X_H
8#define _DEF_BF51X_H
9
10
11/* ************************************************************** */
12/* SYSTEM & MMR ADDRESS DEFINITIONS COMMON TO ALL ADSP-BF51x */
13/* ************************************************************** */
14
15/* Clock and System Control (0xFFC00000 - 0xFFC000FF) */
16#define PLL_CTL 0xFFC00000 /* PLL Control Register */
17#define PLL_DIV 0xFFC00004 /* PLL Divide Register */
18#define VR_CTL 0xFFC00008 /* Voltage Regulator Control Register */
19#define PLL_STAT 0xFFC0000C /* PLL Status Register */
20#define PLL_LOCKCNT 0xFFC00010 /* PLL Lock Count Register */
21#define CHIPID 0xFFC00014 /* Device ID Register */
22
23/* System Interrupt Controller (0xFFC00100 - 0xFFC001FF) */
24#define SWRST 0xFFC00100 /* Software Reset Register */
25#define SYSCR 0xFFC00104 /* System Configuration Register */
26#define SIC_RVECT 0xFFC00108 /* Interrupt Reset Vector Address Register */
27
28#define SIC_IMASK0 0xFFC0010C /* Interrupt Mask Register */
29#define SIC_IAR0 0xFFC00110 /* Interrupt Assignment Register 0 */
30#define SIC_IAR1 0xFFC00114 /* Interrupt Assignment Register 1 */
31#define SIC_IAR2 0xFFC00118 /* Interrupt Assignment Register 2 */
32#define SIC_IAR3 0xFFC0011C /* Interrupt Assignment Register 3 */
33#define SIC_ISR0 0xFFC00120 /* Interrupt Status Register */
34#define SIC_IWR0 0xFFC00124 /* Interrupt Wakeup Register */
35
36/* SIC Additions to ADSP-BF51x (0xFFC0014C - 0xFFC00162) */
37#define SIC_IMASK1 0xFFC0014C /* Interrupt Mask Register 1 */
38#define SIC_IAR4 0xFFC00150 /* Interrupt Assignment Register 4 */
39#define SIC_IAR5 0xFFC00154 /* Interrupt Assignment Register 5 */
40#define SIC_IAR6 0xFFC00158 /* Interrupt Assignment Register 6 */
41#define SIC_IAR7 0xFFC0015C /* Interrupt Assignment Register 7 */
42#define SIC_ISR1 0xFFC00160 /* Interrupt Status Register 1 */
43#define SIC_IWR1 0xFFC00164 /* Interrupt Wakeup Register 1 */
44
45
46/* Watchdog Timer (0xFFC00200 - 0xFFC002FF) */
47#define WDOG_CTL 0xFFC00200 /* Watchdog Control Register */
48#define WDOG_CNT 0xFFC00204 /* Watchdog Count Register */
49#define WDOG_STAT 0xFFC00208 /* Watchdog Status Register */
50
51
52/* Real Time Clock (0xFFC00300 - 0xFFC003FF) */
53#define RTC_STAT 0xFFC00300 /* RTC Status Register */
54#define RTC_ICTL 0xFFC00304 /* RTC Interrupt Control Register */
55#define RTC_ISTAT 0xFFC00308 /* RTC Interrupt Status Register */
56#define RTC_SWCNT 0xFFC0030C /* RTC Stopwatch Count Register */
57#define RTC_ALARM 0xFFC00310 /* RTC Alarm Time Register */
58#define RTC_FAST 0xFFC00314 /* RTC Prescaler Enable Register */
59#define RTC_PREN 0xFFC00314 /* RTC Prescaler Enable Alternate Macro */
60
61
62/* UART0 Controller (0xFFC00400 - 0xFFC004FF) */
63#define UART0_THR 0xFFC00400 /* Transmit Holding register */
64#define UART0_RBR 0xFFC00400 /* Receive Buffer register */
65#define UART0_DLL 0xFFC00400 /* Divisor Latch (Low-Byte) */
66#define UART0_IER 0xFFC00404 /* Interrupt Enable Register */
67#define UART0_DLH 0xFFC00404 /* Divisor Latch (High-Byte) */
68#define UART0_IIR 0xFFC00408 /* Interrupt Identification Register */
69#define UART0_LCR 0xFFC0040C /* Line Control Register */
70#define UART0_MCR 0xFFC00410 /* Modem Control Register */
71#define UART0_LSR 0xFFC00414 /* Line Status Register */
72#define UART0_MSR 0xFFC00418 /* Modem Status Register */
73#define UART0_SCR 0xFFC0041C /* SCR Scratch Register */
74#define UART0_GCTL 0xFFC00424 /* Global Control Register */
75
76/* SPI0 Controller (0xFFC00500 - 0xFFC005FF) */
77#define SPI0_REGBASE 0xFFC00500
78#define SPI0_CTL 0xFFC00500 /* SPI Control Register */
79#define SPI0_FLG 0xFFC00504 /* SPI Flag register */
80#define SPI0_STAT 0xFFC00508 /* SPI Status register */
81#define SPI0_TDBR 0xFFC0050C /* SPI Transmit Data Buffer Register */
82#define SPI0_RDBR 0xFFC00510 /* SPI Receive Data Buffer Register */
83#define SPI0_BAUD 0xFFC00514 /* SPI Baud rate Register */
84#define SPI0_SHADOW 0xFFC00518 /* SPI_RDBR Shadow Register */
85
86/* SPI1 Controller (0xFFC03400 - 0xFFC034FF) */
87#define SPI1_REGBASE 0xFFC03400
88#define SPI1_CTL 0xFFC03400 /* SPI Control Register */
89#define SPI1_FLG 0xFFC03404 /* SPI Flag register */
90#define SPI1_STAT 0xFFC03408 /* SPI Status register */
91#define SPI1_TDBR 0xFFC0340C /* SPI Transmit Data Buffer Register */
92#define SPI1_RDBR 0xFFC03410 /* SPI Receive Data Buffer Register */
93#define SPI1_BAUD 0xFFC03414 /* SPI Baud rate Register */
94#define SPI1_SHADOW 0xFFC03418 /* SPI_RDBR Shadow Register */
95
96/* TIMER0-7 Registers (0xFFC00600 - 0xFFC006FF) */
97#define TIMER0_CONFIG 0xFFC00600 /* Timer 0 Configuration Register */
98#define TIMER0_COUNTER 0xFFC00604 /* Timer 0 Counter Register */
99#define TIMER0_PERIOD 0xFFC00608 /* Timer 0 Period Register */
100#define TIMER0_WIDTH 0xFFC0060C /* Timer 0 Width Register */
101
102#define TIMER1_CONFIG 0xFFC00610 /* Timer 1 Configuration Register */
103#define TIMER1_COUNTER 0xFFC00614 /* Timer 1 Counter Register */
104#define TIMER1_PERIOD 0xFFC00618 /* Timer 1 Period Register */
105#define TIMER1_WIDTH 0xFFC0061C /* Timer 1 Width Register */
106
107#define TIMER2_CONFIG 0xFFC00620 /* Timer 2 Configuration Register */
108#define TIMER2_COUNTER 0xFFC00624 /* Timer 2 Counter Register */
109#define TIMER2_PERIOD 0xFFC00628 /* Timer 2 Period Register */
110#define TIMER2_WIDTH 0xFFC0062C /* Timer 2 Width Register */
111
112#define TIMER3_CONFIG 0xFFC00630 /* Timer 3 Configuration Register */
113#define TIMER3_COUNTER 0xFFC00634 /* Timer 3 Counter Register */
114#define TIMER3_PERIOD 0xFFC00638 /* Timer 3 Period Register */
115#define TIMER3_WIDTH 0xFFC0063C /* Timer 3 Width Register */
116
117#define TIMER4_CONFIG 0xFFC00640 /* Timer 4 Configuration Register */
118#define TIMER4_COUNTER 0xFFC00644 /* Timer 4 Counter Register */
119#define TIMER4_PERIOD 0xFFC00648 /* Timer 4 Period Register */
120#define TIMER4_WIDTH 0xFFC0064C /* Timer 4 Width Register */
121
122#define TIMER5_CONFIG 0xFFC00650 /* Timer 5 Configuration Register */
123#define TIMER5_COUNTER 0xFFC00654 /* Timer 5 Counter Register */
124#define TIMER5_PERIOD 0xFFC00658 /* Timer 5 Period Register */
125#define TIMER5_WIDTH 0xFFC0065C /* Timer 5 Width Register */
126
127#define TIMER6_CONFIG 0xFFC00660 /* Timer 6 Configuration Register */
128#define TIMER6_COUNTER 0xFFC00664 /* Timer 6 Counter Register */
129#define TIMER6_PERIOD 0xFFC00668 /* Timer 6 Period Register */
130#define TIMER6_WIDTH 0xFFC0066C /* Timer 6 Width Register */
131
132#define TIMER7_CONFIG 0xFFC00670 /* Timer 7 Configuration Register */
133#define TIMER7_COUNTER 0xFFC00674 /* Timer 7 Counter Register */
134#define TIMER7_PERIOD 0xFFC00678 /* Timer 7 Period Register */
135#define TIMER7_WIDTH 0xFFC0067C /* Timer 7 Width Register */
136
137#define TIMER_ENABLE 0xFFC00680 /* Timer Enable Register */
138#define TIMER_DISABLE 0xFFC00684 /* Timer Disable Register */
139#define TIMER_STATUS 0xFFC00688 /* Timer Status Register */
140
141/* General Purpose I/O Port F (0xFFC00700 - 0xFFC007FF) */
142#define PORTFIO 0xFFC00700 /* Port F I/O Pin State Specify Register */
143#define PORTFIO_CLEAR 0xFFC00704 /* Port F I/O Peripheral Interrupt Clear Register */
144#define PORTFIO_SET 0xFFC00708 /* Port F I/O Peripheral Interrupt Set Register */
145#define PORTFIO_TOGGLE 0xFFC0070C /* Port F I/O Pin State Toggle Register */
146#define PORTFIO_MASKA 0xFFC00710 /* Port F I/O Mask State Specify Interrupt A Register */
147#define PORTFIO_MASKA_CLEAR 0xFFC00714 /* Port F I/O Mask Disable Interrupt A Register */
148#define PORTFIO_MASKA_SET 0xFFC00718 /* Port F I/O Mask Enable Interrupt A Register */
149#define PORTFIO_MASKA_TOGGLE 0xFFC0071C /* Port F I/O Mask Toggle Enable Interrupt A Register */
150#define PORTFIO_MASKB 0xFFC00720 /* Port F I/O Mask State Specify Interrupt B Register */
151#define PORTFIO_MASKB_CLEAR 0xFFC00724 /* Port F I/O Mask Disable Interrupt B Register */
152#define PORTFIO_MASKB_SET 0xFFC00728 /* Port F I/O Mask Enable Interrupt B Register */
153#define PORTFIO_MASKB_TOGGLE 0xFFC0072C /* Port F I/O Mask Toggle Enable Interrupt B Register */
154#define PORTFIO_DIR 0xFFC00730 /* Port F I/O Direction Register */
155#define PORTFIO_POLAR 0xFFC00734 /* Port F I/O Source Polarity Register */
156#define PORTFIO_EDGE 0xFFC00738 /* Port F I/O Source Sensitivity Register */
157#define PORTFIO_BOTH 0xFFC0073C /* Port F I/O Set on BOTH Edges Register */
158#define PORTFIO_INEN 0xFFC00740 /* Port F I/O Input Enable Register */
159
160/* SPORT0 Controller (0xFFC00800 - 0xFFC008FF) */
161#define SPORT0_TCR1 0xFFC00800 /* SPORT0 Transmit Configuration 1 Register */
162#define SPORT0_TCR2 0xFFC00804 /* SPORT0 Transmit Configuration 2 Register */
163#define SPORT0_TCLKDIV 0xFFC00808 /* SPORT0 Transmit Clock Divider */
164#define SPORT0_TFSDIV 0xFFC0080C /* SPORT0 Transmit Frame Sync Divider */
165#define SPORT0_TX 0xFFC00810 /* SPORT0 TX Data Register */
166#define SPORT0_RX 0xFFC00818 /* SPORT0 RX Data Register */
167#define SPORT0_RCR1 0xFFC00820 /* SPORT0 Receive Configuration 1 Register */
168#define SPORT0_RCR2 0xFFC00824 /* SPORT0 Receive Configuration 2 Register */
169#define SPORT0_RCLKDIV 0xFFC00828 /* SPORT0 Receive Clock Divider */
170#define SPORT0_RFSDIV 0xFFC0082C /* SPORT0 Receive Frame Sync Divider */
171#define SPORT0_STAT 0xFFC00830 /* SPORT0 Status Register */
172#define SPORT0_CHNL 0xFFC00834 /* SPORT0 Current Channel Register */
173#define SPORT0_MCMC1 0xFFC00838 /* SPORT0 Multi-Channel Configuration Register 1 */
174#define SPORT0_MCMC2 0xFFC0083C /* SPORT0 Multi-Channel Configuration Register 2 */
175#define SPORT0_MTCS0 0xFFC00840 /* SPORT0 Multi-Channel Transmit Select Register 0 */
176#define SPORT0_MTCS1 0xFFC00844 /* SPORT0 Multi-Channel Transmit Select Register 1 */
177#define SPORT0_MTCS2 0xFFC00848 /* SPORT0 Multi-Channel Transmit Select Register 2 */
178#define SPORT0_MTCS3 0xFFC0084C /* SPORT0 Multi-Channel Transmit Select Register 3 */
179#define SPORT0_MRCS0 0xFFC00850 /* SPORT0 Multi-Channel Receive Select Register 0 */
180#define SPORT0_MRCS1 0xFFC00854 /* SPORT0 Multi-Channel Receive Select Register 1 */
181#define SPORT0_MRCS2 0xFFC00858 /* SPORT0 Multi-Channel Receive Select Register 2 */
182#define SPORT0_MRCS3 0xFFC0085C /* SPORT0 Multi-Channel Receive Select Register 3 */
183
184/* SPORT1 Controller (0xFFC00900 - 0xFFC009FF) */
185#define SPORT1_TCR1 0xFFC00900 /* SPORT1 Transmit Configuration 1 Register */
186#define SPORT1_TCR2 0xFFC00904 /* SPORT1 Transmit Configuration 2 Register */
187#define SPORT1_TCLKDIV 0xFFC00908 /* SPORT1 Transmit Clock Divider */
188#define SPORT1_TFSDIV 0xFFC0090C /* SPORT1 Transmit Frame Sync Divider */
189#define SPORT1_TX 0xFFC00910 /* SPORT1 TX Data Register */
190#define SPORT1_RX 0xFFC00918 /* SPORT1 RX Data Register */
191#define SPORT1_RCR1 0xFFC00920 /* SPORT1 Receive Configuration 1 Register */
192#define SPORT1_RCR2 0xFFC00924 /* SPORT1 Receive Configuration 2 Register */
193#define SPORT1_RCLKDIV 0xFFC00928 /* SPORT1 Receive Clock Divider */
194#define SPORT1_RFSDIV 0xFFC0092C /* SPORT1 Receive Frame Sync Divider */
195#define SPORT1_STAT 0xFFC00930 /* SPORT1 Status Register */
196#define SPORT1_CHNL 0xFFC00934 /* SPORT1 Current Channel Register */
197#define SPORT1_MCMC1 0xFFC00938 /* SPORT1 Multi-Channel Configuration Register 1 */
198#define SPORT1_MCMC2 0xFFC0093C /* SPORT1 Multi-Channel Configuration Register 2 */
199#define SPORT1_MTCS0 0xFFC00940 /* SPORT1 Multi-Channel Transmit Select Register 0 */
200#define SPORT1_MTCS1 0xFFC00944 /* SPORT1 Multi-Channel Transmit Select Register 1 */
201#define SPORT1_MTCS2 0xFFC00948 /* SPORT1 Multi-Channel Transmit Select Register 2 */
202#define SPORT1_MTCS3 0xFFC0094C /* SPORT1 Multi-Channel Transmit Select Register 3 */
203#define SPORT1_MRCS0 0xFFC00950 /* SPORT1 Multi-Channel Receive Select Register 0 */
204#define SPORT1_MRCS1 0xFFC00954 /* SPORT1 Multi-Channel Receive Select Register 1 */
205#define SPORT1_MRCS2 0xFFC00958 /* SPORT1 Multi-Channel Receive Select Register 2 */
206#define SPORT1_MRCS3 0xFFC0095C /* SPORT1 Multi-Channel Receive Select Register 3 */
207
208/* External Bus Interface Unit (0xFFC00A00 - 0xFFC00AFF) */
209#define EBIU_AMGCTL 0xFFC00A00 /* Asynchronous Memory Global Control Register */
210#define EBIU_AMBCTL0 0xFFC00A04 /* Asynchronous Memory Bank Control Register 0 */
211#define EBIU_AMBCTL1 0xFFC00A08 /* Asynchronous Memory Bank Control Register 1 */
212#define EBIU_SDGCTL 0xFFC00A10 /* SDRAM Global Control Register */
213#define EBIU_SDBCTL 0xFFC00A14 /* SDRAM Bank Control Register */
214#define EBIU_SDRRC 0xFFC00A18 /* SDRAM Refresh Rate Control Register */
215#define EBIU_SDSTAT 0xFFC00A1C /* SDRAM Status Register */
216
217/* DMA Traffic Control Registers */
218#define DMA_TC_PER 0xFFC00B0C /* Traffic Control Periods Register */
219#define DMA_TC_CNT 0xFFC00B10 /* Traffic Control Current Counts Register */
220
221/* Alternate deprecated register names (below) provided for backwards code compatibility */
222#define DMA_TCPER 0xFFC00B0C /* Traffic Control Periods Register */
223#define DMA_TCCNT 0xFFC00B10 /* Traffic Control Current Counts Register */
224
225/* DMA Controller (0xFFC00C00 - 0xFFC00FFF) */
226#define DMA0_NEXT_DESC_PTR 0xFFC00C00 /* DMA Channel 0 Next Descriptor Pointer Register */
227#define DMA0_START_ADDR 0xFFC00C04 /* DMA Channel 0 Start Address Register */
228#define DMA0_CONFIG 0xFFC00C08 /* DMA Channel 0 Configuration Register */
229#define DMA0_X_COUNT 0xFFC00C10 /* DMA Channel 0 X Count Register */
230#define DMA0_X_MODIFY 0xFFC00C14 /* DMA Channel 0 X Modify Register */
231#define DMA0_Y_COUNT 0xFFC00C18 /* DMA Channel 0 Y Count Register */
232#define DMA0_Y_MODIFY 0xFFC00C1C /* DMA Channel 0 Y Modify Register */
233#define DMA0_CURR_DESC_PTR 0xFFC00C20 /* DMA Channel 0 Current Descriptor Pointer Register */
234#define DMA0_CURR_ADDR 0xFFC00C24 /* DMA Channel 0 Current Address Register */
235#define DMA0_IRQ_STATUS 0xFFC00C28 /* DMA Channel 0 Interrupt/Status Register */
236#define DMA0_PERIPHERAL_MAP 0xFFC00C2C /* DMA Channel 0 Peripheral Map Register */
237#define DMA0_CURR_X_COUNT 0xFFC00C30 /* DMA Channel 0 Current X Count Register */
238#define DMA0_CURR_Y_COUNT 0xFFC00C38 /* DMA Channel 0 Current Y Count Register */
239
240#define DMA1_NEXT_DESC_PTR 0xFFC00C40 /* DMA Channel 1 Next Descriptor Pointer Register */
241#define DMA1_START_ADDR 0xFFC00C44 /* DMA Channel 1 Start Address Register */
242#define DMA1_CONFIG 0xFFC00C48 /* DMA Channel 1 Configuration Register */
243#define DMA1_X_COUNT 0xFFC00C50 /* DMA Channel 1 X Count Register */
244#define DMA1_X_MODIFY 0xFFC00C54 /* DMA Channel 1 X Modify Register */
245#define DMA1_Y_COUNT 0xFFC00C58 /* DMA Channel 1 Y Count Register */
246#define DMA1_Y_MODIFY 0xFFC00C5C /* DMA Channel 1 Y Modify Register */
247#define DMA1_CURR_DESC_PTR 0xFFC00C60 /* DMA Channel 1 Current Descriptor Pointer Register */
248#define DMA1_CURR_ADDR 0xFFC00C64 /* DMA Channel 1 Current Address Register */
249#define DMA1_IRQ_STATUS 0xFFC00C68 /* DMA Channel 1 Interrupt/Status Register */
250#define DMA1_PERIPHERAL_MAP 0xFFC00C6C /* DMA Channel 1 Peripheral Map Register */
251#define DMA1_CURR_X_COUNT 0xFFC00C70 /* DMA Channel 1 Current X Count Register */
252#define DMA1_CURR_Y_COUNT 0xFFC00C78 /* DMA Channel 1 Current Y Count Register */
253
254#define DMA2_NEXT_DESC_PTR 0xFFC00C80 /* DMA Channel 2 Next Descriptor Pointer Register */
255#define DMA2_START_ADDR 0xFFC00C84 /* DMA Channel 2 Start Address Register */
256#define DMA2_CONFIG 0xFFC00C88 /* DMA Channel 2 Configuration Register */
257#define DMA2_X_COUNT 0xFFC00C90 /* DMA Channel 2 X Count Register */
258#define DMA2_X_MODIFY 0xFFC00C94 /* DMA Channel 2 X Modify Register */
259#define DMA2_Y_COUNT 0xFFC00C98 /* DMA Channel 2 Y Count Register */
260#define DMA2_Y_MODIFY 0xFFC00C9C /* DMA Channel 2 Y Modify Register */
261#define DMA2_CURR_DESC_PTR 0xFFC00CA0 /* DMA Channel 2 Current Descriptor Pointer Register */
262#define DMA2_CURR_ADDR 0xFFC00CA4 /* DMA Channel 2 Current Address Register */
263#define DMA2_IRQ_STATUS 0xFFC00CA8 /* DMA Channel 2 Interrupt/Status Register */
264#define DMA2_PERIPHERAL_MAP 0xFFC00CAC /* DMA Channel 2 Peripheral Map Register */
265#define DMA2_CURR_X_COUNT 0xFFC00CB0 /* DMA Channel 2 Current X Count Register */
266#define DMA2_CURR_Y_COUNT 0xFFC00CB8 /* DMA Channel 2 Current Y Count Register */
267
268#define DMA3_NEXT_DESC_PTR 0xFFC00CC0 /* DMA Channel 3 Next Descriptor Pointer Register */
269#define DMA3_START_ADDR 0xFFC00CC4 /* DMA Channel 3 Start Address Register */
270#define DMA3_CONFIG 0xFFC00CC8 /* DMA Channel 3 Configuration Register */
271#define DMA3_X_COUNT 0xFFC00CD0 /* DMA Channel 3 X Count Register */
272#define DMA3_X_MODIFY 0xFFC00CD4 /* DMA Channel 3 X Modify Register */
273#define DMA3_Y_COUNT 0xFFC00CD8 /* DMA Channel 3 Y Count Register */
274#define DMA3_Y_MODIFY 0xFFC00CDC /* DMA Channel 3 Y Modify Register */
275#define DMA3_CURR_DESC_PTR 0xFFC00CE0 /* DMA Channel 3 Current Descriptor Pointer Register */
276#define DMA3_CURR_ADDR 0xFFC00CE4 /* DMA Channel 3 Current Address Register */
277#define DMA3_IRQ_STATUS 0xFFC00CE8 /* DMA Channel 3 Interrupt/Status Register */
278#define DMA3_PERIPHERAL_MAP 0xFFC00CEC /* DMA Channel 3 Peripheral Map Register */
279#define DMA3_CURR_X_COUNT 0xFFC00CF0 /* DMA Channel 3 Current X Count Register */
280#define DMA3_CURR_Y_COUNT 0xFFC00CF8 /* DMA Channel 3 Current Y Count Register */
281
282#define DMA4_NEXT_DESC_PTR 0xFFC00D00 /* DMA Channel 4 Next Descriptor Pointer Register */
283#define DMA4_START_ADDR 0xFFC00D04 /* DMA Channel 4 Start Address Register */
284#define DMA4_CONFIG 0xFFC00D08 /* DMA Channel 4 Configuration Register */
285#define DMA4_X_COUNT 0xFFC00D10 /* DMA Channel 4 X Count Register */
286#define DMA4_X_MODIFY 0xFFC00D14 /* DMA Channel 4 X Modify Register */
287#define DMA4_Y_COUNT 0xFFC00D18 /* DMA Channel 4 Y Count Register */
288#define DMA4_Y_MODIFY 0xFFC00D1C /* DMA Channel 4 Y Modify Register */
289#define DMA4_CURR_DESC_PTR 0xFFC00D20 /* DMA Channel 4 Current Descriptor Pointer Register */
290#define DMA4_CURR_ADDR 0xFFC00D24 /* DMA Channel 4 Current Address Register */
291#define DMA4_IRQ_STATUS 0xFFC00D28 /* DMA Channel 4 Interrupt/Status Register */
292#define DMA4_PERIPHERAL_MAP 0xFFC00D2C /* DMA Channel 4 Peripheral Map Register */
293#define DMA4_CURR_X_COUNT 0xFFC00D30 /* DMA Channel 4 Current X Count Register */
294#define DMA4_CURR_Y_COUNT 0xFFC00D38 /* DMA Channel 4 Current Y Count Register */
295
296#define DMA5_NEXT_DESC_PTR 0xFFC00D40 /* DMA Channel 5 Next Descriptor Pointer Register */
297#define DMA5_START_ADDR 0xFFC00D44 /* DMA Channel 5 Start Address Register */
298#define DMA5_CONFIG 0xFFC00D48 /* DMA Channel 5 Configuration Register */
299#define DMA5_X_COUNT 0xFFC00D50 /* DMA Channel 5 X Count Register */
300#define DMA5_X_MODIFY 0xFFC00D54 /* DMA Channel 5 X Modify Register */
301#define DMA5_Y_COUNT 0xFFC00D58 /* DMA Channel 5 Y Count Register */
302#define DMA5_Y_MODIFY 0xFFC00D5C /* DMA Channel 5 Y Modify Register */
303#define DMA5_CURR_DESC_PTR 0xFFC00D60 /* DMA Channel 5 Current Descriptor Pointer Register */
304#define DMA5_CURR_ADDR 0xFFC00D64 /* DMA Channel 5 Current Address Register */
305#define DMA5_IRQ_STATUS 0xFFC00D68 /* DMA Channel 5 Interrupt/Status Register */
306#define DMA5_PERIPHERAL_MAP 0xFFC00D6C /* DMA Channel 5 Peripheral Map Register */
307#define DMA5_CURR_X_COUNT 0xFFC00D70 /* DMA Channel 5 Current X Count Register */
308#define DMA5_CURR_Y_COUNT 0xFFC00D78 /* DMA Channel 5 Current Y Count Register */
309
310#define DMA6_NEXT_DESC_PTR 0xFFC00D80 /* DMA Channel 6 Next Descriptor Pointer Register */
311#define DMA6_START_ADDR 0xFFC00D84 /* DMA Channel 6 Start Address Register */
312#define DMA6_CONFIG 0xFFC00D88 /* DMA Channel 6 Configuration Register */
313#define DMA6_X_COUNT 0xFFC00D90 /* DMA Channel 6 X Count Register */
314#define DMA6_X_MODIFY 0xFFC00D94 /* DMA Channel 6 X Modify Register */
315#define DMA6_Y_COUNT 0xFFC00D98 /* DMA Channel 6 Y Count Register */
316#define DMA6_Y_MODIFY 0xFFC00D9C /* DMA Channel 6 Y Modify Register */
317#define DMA6_CURR_DESC_PTR 0xFFC00DA0 /* DMA Channel 6 Current Descriptor Pointer Register */
318#define DMA6_CURR_ADDR 0xFFC00DA4 /* DMA Channel 6 Current Address Register */
319#define DMA6_IRQ_STATUS 0xFFC00DA8 /* DMA Channel 6 Interrupt/Status Register */
320#define DMA6_PERIPHERAL_MAP 0xFFC00DAC /* DMA Channel 6 Peripheral Map Register */
321#define DMA6_CURR_X_COUNT 0xFFC00DB0 /* DMA Channel 6 Current X Count Register */
322#define DMA6_CURR_Y_COUNT 0xFFC00DB8 /* DMA Channel 6 Current Y Count Register */
323
324#define DMA7_NEXT_DESC_PTR 0xFFC00DC0 /* DMA Channel 7 Next Descriptor Pointer Register */
325#define DMA7_START_ADDR 0xFFC00DC4 /* DMA Channel 7 Start Address Register */
326#define DMA7_CONFIG 0xFFC00DC8 /* DMA Channel 7 Configuration Register */
327#define DMA7_X_COUNT 0xFFC00DD0 /* DMA Channel 7 X Count Register */
328#define DMA7_X_MODIFY 0xFFC00DD4 /* DMA Channel 7 X Modify Register */
329#define DMA7_Y_COUNT 0xFFC00DD8 /* DMA Channel 7 Y Count Register */
330#define DMA7_Y_MODIFY 0xFFC00DDC /* DMA Channel 7 Y Modify Register */
331#define DMA7_CURR_DESC_PTR 0xFFC00DE0 /* DMA Channel 7 Current Descriptor Pointer Register */
332#define DMA7_CURR_ADDR 0xFFC00DE4 /* DMA Channel 7 Current Address Register */
333#define DMA7_IRQ_STATUS 0xFFC00DE8 /* DMA Channel 7 Interrupt/Status Register */
334#define DMA7_PERIPHERAL_MAP 0xFFC00DEC /* DMA Channel 7 Peripheral Map Register */
335#define DMA7_CURR_X_COUNT 0xFFC00DF0 /* DMA Channel 7 Current X Count Register */
336#define DMA7_CURR_Y_COUNT 0xFFC00DF8 /* DMA Channel 7 Current Y Count Register */
337
338#define DMA8_NEXT_DESC_PTR 0xFFC00E00 /* DMA Channel 8 Next Descriptor Pointer Register */
339#define DMA8_START_ADDR 0xFFC00E04 /* DMA Channel 8 Start Address Register */
340#define DMA8_CONFIG 0xFFC00E08 /* DMA Channel 8 Configuration Register */
341#define DMA8_X_COUNT 0xFFC00E10 /* DMA Channel 8 X Count Register */
342#define DMA8_X_MODIFY 0xFFC00E14 /* DMA Channel 8 X Modify Register */
343#define DMA8_Y_COUNT 0xFFC00E18 /* DMA Channel 8 Y Count Register */
344#define DMA8_Y_MODIFY 0xFFC00E1C /* DMA Channel 8 Y Modify Register */
345#define DMA8_CURR_DESC_PTR 0xFFC00E20 /* DMA Channel 8 Current Descriptor Pointer Register */
346#define DMA8_CURR_ADDR 0xFFC00E24 /* DMA Channel 8 Current Address Register */
347#define DMA8_IRQ_STATUS 0xFFC00E28 /* DMA Channel 8 Interrupt/Status Register */
348#define DMA8_PERIPHERAL_MAP 0xFFC00E2C /* DMA Channel 8 Peripheral Map Register */
349#define DMA8_CURR_X_COUNT 0xFFC00E30 /* DMA Channel 8 Current X Count Register */
350#define DMA8_CURR_Y_COUNT 0xFFC00E38 /* DMA Channel 8 Current Y Count Register */
351
352#define DMA9_NEXT_DESC_PTR 0xFFC00E40 /* DMA Channel 9 Next Descriptor Pointer Register */
353#define DMA9_START_ADDR 0xFFC00E44 /* DMA Channel 9 Start Address Register */
354#define DMA9_CONFIG 0xFFC00E48 /* DMA Channel 9 Configuration Register */
355#define DMA9_X_COUNT 0xFFC00E50 /* DMA Channel 9 X Count Register */
356#define DMA9_X_MODIFY 0xFFC00E54 /* DMA Channel 9 X Modify Register */
357#define DMA9_Y_COUNT 0xFFC00E58 /* DMA Channel 9 Y Count Register */
358#define DMA9_Y_MODIFY 0xFFC00E5C /* DMA Channel 9 Y Modify Register */
359#define DMA9_CURR_DESC_PTR 0xFFC00E60 /* DMA Channel 9 Current Descriptor Pointer Register */
360#define DMA9_CURR_ADDR 0xFFC00E64 /* DMA Channel 9 Current Address Register */
361#define DMA9_IRQ_STATUS 0xFFC00E68 /* DMA Channel 9 Interrupt/Status Register */
362#define DMA9_PERIPHERAL_MAP 0xFFC00E6C /* DMA Channel 9 Peripheral Map Register */
363#define DMA9_CURR_X_COUNT 0xFFC00E70 /* DMA Channel 9 Current X Count Register */
364#define DMA9_CURR_Y_COUNT 0xFFC00E78 /* DMA Channel 9 Current Y Count Register */
365
366#define DMA10_NEXT_DESC_PTR 0xFFC00E80 /* DMA Channel 10 Next Descriptor Pointer Register */
367#define DMA10_START_ADDR 0xFFC00E84 /* DMA Channel 10 Start Address Register */
368#define DMA10_CONFIG 0xFFC00E88 /* DMA Channel 10 Configuration Register */
369#define DMA10_X_COUNT 0xFFC00E90 /* DMA Channel 10 X Count Register */
370#define DMA10_X_MODIFY 0xFFC00E94 /* DMA Channel 10 X Modify Register */
371#define DMA10_Y_COUNT 0xFFC00E98 /* DMA Channel 10 Y Count Register */
372#define DMA10_Y_MODIFY 0xFFC00E9C /* DMA Channel 10 Y Modify Register */
373#define DMA10_CURR_DESC_PTR 0xFFC00EA0 /* DMA Channel 10 Current Descriptor Pointer Register */
374#define DMA10_CURR_ADDR 0xFFC00EA4 /* DMA Channel 10 Current Address Register */
375#define DMA10_IRQ_STATUS 0xFFC00EA8 /* DMA Channel 10 Interrupt/Status Register */
376#define DMA10_PERIPHERAL_MAP 0xFFC00EAC /* DMA Channel 10 Peripheral Map Register */
377#define DMA10_CURR_X_COUNT 0xFFC00EB0 /* DMA Channel 10 Current X Count Register */
378#define DMA10_CURR_Y_COUNT 0xFFC00EB8 /* DMA Channel 10 Current Y Count Register */
379
380#define DMA11_NEXT_DESC_PTR 0xFFC00EC0 /* DMA Channel 11 Next Descriptor Pointer Register */
381#define DMA11_START_ADDR 0xFFC00EC4 /* DMA Channel 11 Start Address Register */
382#define DMA11_CONFIG 0xFFC00EC8 /* DMA Channel 11 Configuration Register */
383#define DMA11_X_COUNT 0xFFC00ED0 /* DMA Channel 11 X Count Register */
384#define DMA11_X_MODIFY 0xFFC00ED4 /* DMA Channel 11 X Modify Register */
385#define DMA11_Y_COUNT 0xFFC00ED8 /* DMA Channel 11 Y Count Register */
386#define DMA11_Y_MODIFY 0xFFC00EDC /* DMA Channel 11 Y Modify Register */
387#define DMA11_CURR_DESC_PTR 0xFFC00EE0 /* DMA Channel 11 Current Descriptor Pointer Register */
388#define DMA11_CURR_ADDR 0xFFC00EE4 /* DMA Channel 11 Current Address Register */
389#define DMA11_IRQ_STATUS 0xFFC00EE8 /* DMA Channel 11 Interrupt/Status Register */
390#define DMA11_PERIPHERAL_MAP 0xFFC00EEC /* DMA Channel 11 Peripheral Map Register */
391#define DMA11_CURR_X_COUNT 0xFFC00EF0 /* DMA Channel 11 Current X Count Register */
392#define DMA11_CURR_Y_COUNT 0xFFC00EF8 /* DMA Channel 11 Current Y Count Register */
393
394#define MDMA_D0_NEXT_DESC_PTR 0xFFC00F00 /* MemDMA Stream 0 Destination Next Descriptor Pointer Register */
395#define MDMA_D0_START_ADDR 0xFFC00F04 /* MemDMA Stream 0 Destination Start Address Register */
396#define MDMA_D0_CONFIG 0xFFC00F08 /* MemDMA Stream 0 Destination Configuration Register */
397#define MDMA_D0_X_COUNT 0xFFC00F10 /* MemDMA Stream 0 Destination X Count Register */
398#define MDMA_D0_X_MODIFY 0xFFC00F14 /* MemDMA Stream 0 Destination X Modify Register */
399#define MDMA_D0_Y_COUNT 0xFFC00F18 /* MemDMA Stream 0 Destination Y Count Register */
400#define MDMA_D0_Y_MODIFY 0xFFC00F1C /* MemDMA Stream 0 Destination Y Modify Register */
401#define MDMA_D0_CURR_DESC_PTR 0xFFC00F20 /* MemDMA Stream 0 Destination Current Descriptor Pointer Register */
402#define MDMA_D0_CURR_ADDR 0xFFC00F24 /* MemDMA Stream 0 Destination Current Address Register */
403#define MDMA_D0_IRQ_STATUS 0xFFC00F28 /* MemDMA Stream 0 Destination Interrupt/Status Register */
404#define MDMA_D0_PERIPHERAL_MAP 0xFFC00F2C /* MemDMA Stream 0 Destination Peripheral Map Register */
405#define MDMA_D0_CURR_X_COUNT 0xFFC00F30 /* MemDMA Stream 0 Destination Current X Count Register */
406#define MDMA_D0_CURR_Y_COUNT 0xFFC00F38 /* MemDMA Stream 0 Destination Current Y Count Register */
407
408#define MDMA_S0_NEXT_DESC_PTR 0xFFC00F40 /* MemDMA Stream 0 Source Next Descriptor Pointer Register */
409#define MDMA_S0_START_ADDR 0xFFC00F44 /* MemDMA Stream 0 Source Start Address Register */
410#define MDMA_S0_CONFIG 0xFFC00F48 /* MemDMA Stream 0 Source Configuration Register */
411#define MDMA_S0_X_COUNT 0xFFC00F50 /* MemDMA Stream 0 Source X Count Register */
412#define MDMA_S0_X_MODIFY 0xFFC00F54 /* MemDMA Stream 0 Source X Modify Register */
413#define MDMA_S0_Y_COUNT 0xFFC00F58 /* MemDMA Stream 0 Source Y Count Register */
414#define MDMA_S0_Y_MODIFY 0xFFC00F5C /* MemDMA Stream 0 Source Y Modify Register */
415#define MDMA_S0_CURR_DESC_PTR 0xFFC00F60 /* MemDMA Stream 0 Source Current Descriptor Pointer Register */
416#define MDMA_S0_CURR_ADDR 0xFFC00F64 /* MemDMA Stream 0 Source Current Address Register */
417#define MDMA_S0_IRQ_STATUS 0xFFC00F68 /* MemDMA Stream 0 Source Interrupt/Status Register */
418#define MDMA_S0_PERIPHERAL_MAP 0xFFC00F6C /* MemDMA Stream 0 Source Peripheral Map Register */
419#define MDMA_S0_CURR_X_COUNT 0xFFC00F70 /* MemDMA Stream 0 Source Current X Count Register */
420#define MDMA_S0_CURR_Y_COUNT 0xFFC00F78 /* MemDMA Stream 0 Source Current Y Count Register */
421
422#define MDMA_D1_NEXT_DESC_PTR 0xFFC00F80 /* MemDMA Stream 1 Destination Next Descriptor Pointer Register */
423#define MDMA_D1_START_ADDR 0xFFC00F84 /* MemDMA Stream 1 Destination Start Address Register */
424#define MDMA_D1_CONFIG 0xFFC00F88 /* MemDMA Stream 1 Destination Configuration Register */
425#define MDMA_D1_X_COUNT 0xFFC00F90 /* MemDMA Stream 1 Destination X Count Register */
426#define MDMA_D1_X_MODIFY 0xFFC00F94 /* MemDMA Stream 1 Destination X Modify Register */
427#define MDMA_D1_Y_COUNT 0xFFC00F98 /* MemDMA Stream 1 Destination Y Count Register */
428#define MDMA_D1_Y_MODIFY 0xFFC00F9C /* MemDMA Stream 1 Destination Y Modify Register */
429#define MDMA_D1_CURR_DESC_PTR 0xFFC00FA0 /* MemDMA Stream 1 Destination Current Descriptor Pointer Register */
430#define MDMA_D1_CURR_ADDR 0xFFC00FA4 /* MemDMA Stream 1 Destination Current Address Register */
431#define MDMA_D1_IRQ_STATUS 0xFFC00FA8 /* MemDMA Stream 1 Destination Interrupt/Status Register */
432#define MDMA_D1_PERIPHERAL_MAP 0xFFC00FAC /* MemDMA Stream 1 Destination Peripheral Map Register */
433#define MDMA_D1_CURR_X_COUNT 0xFFC00FB0 /* MemDMA Stream 1 Destination Current X Count Register */
434#define MDMA_D1_CURR_Y_COUNT 0xFFC00FB8 /* MemDMA Stream 1 Destination Current Y Count Register */
435
436#define MDMA_S1_NEXT_DESC_PTR 0xFFC00FC0 /* MemDMA Stream 1 Source Next Descriptor Pointer Register */
437#define MDMA_S1_START_ADDR 0xFFC00FC4 /* MemDMA Stream 1 Source Start Address Register */
438#define MDMA_S1_CONFIG 0xFFC00FC8 /* MemDMA Stream 1 Source Configuration Register */
439#define MDMA_S1_X_COUNT 0xFFC00FD0 /* MemDMA Stream 1 Source X Count Register */
440#define MDMA_S1_X_MODIFY 0xFFC00FD4 /* MemDMA Stream 1 Source X Modify Register */
441#define MDMA_S1_Y_COUNT 0xFFC00FD8 /* MemDMA Stream 1 Source Y Count Register */
442#define MDMA_S1_Y_MODIFY 0xFFC00FDC /* MemDMA Stream 1 Source Y Modify Register */
443#define MDMA_S1_CURR_DESC_PTR 0xFFC00FE0 /* MemDMA Stream 1 Source Current Descriptor Pointer Register */
444#define MDMA_S1_CURR_ADDR 0xFFC00FE4 /* MemDMA Stream 1 Source Current Address Register */
445#define MDMA_S1_IRQ_STATUS 0xFFC00FE8 /* MemDMA Stream 1 Source Interrupt/Status Register */
446#define MDMA_S1_PERIPHERAL_MAP 0xFFC00FEC /* MemDMA Stream 1 Source Peripheral Map Register */
447#define MDMA_S1_CURR_X_COUNT 0xFFC00FF0 /* MemDMA Stream 1 Source Current X Count Register */
448#define MDMA_S1_CURR_Y_COUNT 0xFFC00FF8 /* MemDMA Stream 1 Source Current Y Count Register */
449
450
451/* Parallel Peripheral Interface (0xFFC01000 - 0xFFC010FF) */
452#define PPI_CONTROL 0xFFC01000 /* PPI Control Register */
453#define PPI_STATUS 0xFFC01004 /* PPI Status Register */
454#define PPI_COUNT 0xFFC01008 /* PPI Transfer Count Register */
455#define PPI_DELAY 0xFFC0100C /* PPI Delay Count Register */
456#define PPI_FRAME 0xFFC01010 /* PPI Frame Length Register */
457
458
459/* Two-Wire Interface (0xFFC01400 - 0xFFC014FF) */
460#define TWI0_REGBASE 0xFFC01400
461#define TWI0_CLKDIV 0xFFC01400 /* Serial Clock Divider Register */
462#define TWI0_CONTROL 0xFFC01404 /* TWI Control Register */
463#define TWI0_SLAVE_CTL 0xFFC01408 /* Slave Mode Control Register */
464#define TWI0_SLAVE_STAT 0xFFC0140C /* Slave Mode Status Register */
465#define TWI0_SLAVE_ADDR 0xFFC01410 /* Slave Mode Address Register */
466#define TWI0_MASTER_CTL 0xFFC01414 /* Master Mode Control Register */
467#define TWI0_MASTER_STAT 0xFFC01418 /* Master Mode Status Register */
468#define TWI0_MASTER_ADDR 0xFFC0141C /* Master Mode Address Register */
469#define TWI0_INT_STAT 0xFFC01420 /* TWI Interrupt Status Register */
470#define TWI0_INT_MASK 0xFFC01424 /* TWI Master Interrupt Mask Register */
471#define TWI0_FIFO_CTL 0xFFC01428 /* FIFO Control Register */
472#define TWI0_FIFO_STAT 0xFFC0142C /* FIFO Status Register */
473#define TWI0_XMT_DATA8 0xFFC01480 /* FIFO Transmit Data Single Byte Register */
474#define TWI0_XMT_DATA16 0xFFC01484 /* FIFO Transmit Data Double Byte Register */
475#define TWI0_RCV_DATA8 0xFFC01488 /* FIFO Receive Data Single Byte Register */
476#define TWI0_RCV_DATA16 0xFFC0148C /* FIFO Receive Data Double Byte Register */
477
478
479/* General Purpose I/O Port G (0xFFC01500 - 0xFFC015FF) */
480#define PORTGIO 0xFFC01500 /* Port G I/O Pin State Specify Register */
481#define PORTGIO_CLEAR 0xFFC01504 /* Port G I/O Peripheral Interrupt Clear Register */
482#define PORTGIO_SET 0xFFC01508 /* Port G I/O Peripheral Interrupt Set Register */
483#define PORTGIO_TOGGLE 0xFFC0150C /* Port G I/O Pin State Toggle Register */
484#define PORTGIO_MASKA 0xFFC01510 /* Port G I/O Mask State Specify Interrupt A Register */
485#define PORTGIO_MASKA_CLEAR 0xFFC01514 /* Port G I/O Mask Disable Interrupt A Register */
486#define PORTGIO_MASKA_SET 0xFFC01518 /* Port G I/O Mask Enable Interrupt A Register */
487#define PORTGIO_MASKA_TOGGLE 0xFFC0151C /* Port G I/O Mask Toggle Enable Interrupt A Register */
488#define PORTGIO_MASKB 0xFFC01520 /* Port G I/O Mask State Specify Interrupt B Register */
489#define PORTGIO_MASKB_CLEAR 0xFFC01524 /* Port G I/O Mask Disable Interrupt B Register */
490#define PORTGIO_MASKB_SET 0xFFC01528 /* Port G I/O Mask Enable Interrupt B Register */
491#define PORTGIO_MASKB_TOGGLE 0xFFC0152C /* Port G I/O Mask Toggle Enable Interrupt B Register */
492#define PORTGIO_DIR 0xFFC01530 /* Port G I/O Direction Register */
493#define PORTGIO_POLAR 0xFFC01534 /* Port G I/O Source Polarity Register */
494#define PORTGIO_EDGE 0xFFC01538 /* Port G I/O Source Sensitivity Register */
495#define PORTGIO_BOTH 0xFFC0153C /* Port G I/O Set on BOTH Edges Register */
496#define PORTGIO_INEN 0xFFC01540 /* Port G I/O Input Enable Register */
497
498
499/* General Purpose I/O Port H (0xFFC01700 - 0xFFC017FF) */
500#define PORTHIO 0xFFC01700 /* Port H I/O Pin State Specify Register */
501#define PORTHIO_CLEAR 0xFFC01704 /* Port H I/O Peripheral Interrupt Clear Register */
502#define PORTHIO_SET 0xFFC01708 /* Port H I/O Peripheral Interrupt Set Register */
503#define PORTHIO_TOGGLE 0xFFC0170C /* Port H I/O Pin State Toggle Register */
504#define PORTHIO_MASKA 0xFFC01710 /* Port H I/O Mask State Specify Interrupt A Register */
505#define PORTHIO_MASKA_CLEAR 0xFFC01714 /* Port H I/O Mask Disable Interrupt A Register */
506#define PORTHIO_MASKA_SET 0xFFC01718 /* Port H I/O Mask Enable Interrupt A Register */
507#define PORTHIO_MASKA_TOGGLE 0xFFC0171C /* Port H I/O Mask Toggle Enable Interrupt A Register */
508#define PORTHIO_MASKB 0xFFC01720 /* Port H I/O Mask State Specify Interrupt B Register */
509#define PORTHIO_MASKB_CLEAR 0xFFC01724 /* Port H I/O Mask Disable Interrupt B Register */
510#define PORTHIO_MASKB_SET 0xFFC01728 /* Port H I/O Mask Enable Interrupt B Register */
511#define PORTHIO_MASKB_TOGGLE 0xFFC0172C /* Port H I/O Mask Toggle Enable Interrupt B Register */
512#define PORTHIO_DIR 0xFFC01730 /* Port H I/O Direction Register */
513#define PORTHIO_POLAR 0xFFC01734 /* Port H I/O Source Polarity Register */
514#define PORTHIO_EDGE 0xFFC01738 /* Port H I/O Source Sensitivity Register */
515#define PORTHIO_BOTH 0xFFC0173C /* Port H I/O Set on BOTH Edges Register */
516#define PORTHIO_INEN 0xFFC01740 /* Port H I/O Input Enable Register */
517
518
519/* UART1 Controller (0xFFC02000 - 0xFFC020FF) */
520#define UART1_THR 0xFFC02000 /* Transmit Holding register */
521#define UART1_RBR 0xFFC02000 /* Receive Buffer register */
522#define UART1_DLL 0xFFC02000 /* Divisor Latch (Low-Byte) */
523#define UART1_IER 0xFFC02004 /* Interrupt Enable Register */
524#define UART1_DLH 0xFFC02004 /* Divisor Latch (High-Byte) */
525#define UART1_IIR 0xFFC02008 /* Interrupt Identification Register */
526#define UART1_LCR 0xFFC0200C /* Line Control Register */
527#define UART1_MCR 0xFFC02010 /* Modem Control Register */
528#define UART1_LSR 0xFFC02014 /* Line Status Register */
529#define UART1_MSR 0xFFC02018 /* Modem Status Register */
530#define UART1_SCR 0xFFC0201C /* SCR Scratch Register */
531#define UART1_GCTL 0xFFC02024 /* Global Control Register */
532
533
534/* Pin Control Registers (0xFFC03200 - 0xFFC032FF) */
535#define PORTF_FER 0xFFC03200 /* Port F Function Enable Register (Alternate/Flag*) */
536#define PORTG_FER 0xFFC03204 /* Port G Function Enable Register (Alternate/Flag*) */
537#define PORTH_FER 0xFFC03208 /* Port H Function Enable Register (Alternate/Flag*) */
538#define BFIN_PORT_MUX 0xFFC0320C /* Port Multiplexer Control Register */
539
540
541/* Handshake MDMA Registers (0xFFC03300 - 0xFFC033FF) */
542#define HMDMA0_CONTROL 0xFFC03300 /* Handshake MDMA0 Control Register */
543#define HMDMA0_ECINIT 0xFFC03304 /* HMDMA0 Initial Edge Count Register */
544#define HMDMA0_BCINIT 0xFFC03308 /* HMDMA0 Initial Block Count Register */
545#define HMDMA0_ECURGENT 0xFFC0330C /* HMDMA0 Urgent Edge Count Threshold Register */
546#define HMDMA0_ECOVERFLOW 0xFFC03310 /* HMDMA0 Edge Count Overflow Interrupt Register */
547#define HMDMA0_ECOUNT 0xFFC03314 /* HMDMA0 Current Edge Count Register */
548#define HMDMA0_BCOUNT 0xFFC03318 /* HMDMA0 Current Block Count Register */
549
550#define HMDMA1_CONTROL 0xFFC03340 /* Handshake MDMA1 Control Register */
551#define HMDMA1_ECINIT 0xFFC03344 /* HMDMA1 Initial Edge Count Register */
552#define HMDMA1_BCINIT 0xFFC03348 /* HMDMA1 Initial Block Count Register */
553#define HMDMA1_ECURGENT 0xFFC0334C /* HMDMA1 Urgent Edge Count Threshold Register */
554#define HMDMA1_ECOVERFLOW 0xFFC03350 /* HMDMA1 Edge Count Overflow Interrupt Register */
555#define HMDMA1_ECOUNT 0xFFC03354 /* HMDMA1 Current Edge Count Register */
556#define HMDMA1_BCOUNT 0xFFC03358 /* HMDMA1 Current Block Count Register */
557
558
559/* GPIO PIN mux (0xFFC03210 - 0xFFC03288) */
560#define PORTF_MUX 0xFFC03210 /* Port F mux control */
561#define PORTG_MUX 0xFFC03214 /* Port G mux control */
562#define PORTH_MUX 0xFFC03218 /* Port H mux control */
563#define PORTF_DRIVE 0xFFC03220 /* Port F drive strength control */
564#define PORTG_DRIVE 0xFFC03224 /* Port G drive strength control */
565#define PORTH_DRIVE 0xFFC03228 /* Port H drive strength control */
566#define PORTF_SLEW 0xFFC03230 /* Port F slew control */
567#define PORTG_SLEW 0xFFC03234 /* Port G slew control */
568#define PORTH_SLEW 0xFFC03238 /* Port H slew control */
569#define PORTF_HYSTERISIS 0xFFC03240 /* Port F Schmitt trigger control */
570#define PORTG_HYSTERISIS 0xFFC03244 /* Port G Schmitt trigger control */
571#define PORTH_HYSTERISIS 0xFFC03248 /* Port H Schmitt trigger control */
572#define MISCPORT_DRIVE 0xFFC03280 /* Misc Port drive strength control */
573#define MISCPORT_SLEW 0xFFC03284 /* Misc Port slew control */
574#define MISCPORT_HYSTERISIS 0xFFC03288 /* Misc Port Schmitt trigger control */
575
576
577/***********************************************************************************
578** System MMR Register Bits And Macros
579**
580** Disclaimer: All macros are intended to make C and Assembly code more readable.
581** Use these macros carefully, as any that do left shifts for field
582** depositing will result in the lower order bits being destroyed. Any
583** macro that shifts left to properly position the bit-field should be
584** used as part of an OR to initialize a register and NOT as a dynamic
585** modifier UNLESS the lower order bits are saved and ORed back in when
586** the macro is used.
587*************************************************************************************/
588
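In practice the rule above means a field-depositing macro such as the Px_IVG() helpers below is safe in a one-shot OR initialization, but a dynamic update must save and restore the neighboring fields. A sketch, assuming the bfin_read32()/bfin_write32() accessors:

	/* One-shot init: build the whole register from ORed fields. */
	bfin_write32(SIC_IAR0, P0_IVG(7) | P1_IVG(7) | P2_IVG(8) | P3_IVG(9) |
			       P4_IVG(9) | P5_IVG(10) | P6_IVG(10) | P7_IVG(11));

	/* Dynamic update: preserve the other 4-bit IVG fields. */
	u32 iar = bfin_read32(SIC_IAR0);
	iar = (iar & ~(0xFUL << 0x8)) | P2_IVG(12);	/* retarget peripheral #2 */
	bfin_write32(SIC_IAR0, iar);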
589/* CHIPID Masks */
590#define CHIPID_VERSION 0xF0000000
591#define CHIPID_FAMILY 0x0FFFF000
592#define CHIPID_MANUFACTURE 0x00000FFE
593
594/* SWRST Masks */
595#define SYSTEM_RESET 0x0007 /* Initiates A System Software Reset */
596#define DOUBLE_FAULT 0x0008 /* Core Double Fault Causes Reset */
597#define RESET_DOUBLE 0x2000 /* SW Reset Generated By Core Double-Fault */
598#define RESET_WDOG 0x4000 /* SW Reset Generated By Watchdog Timer */
599#define RESET_SOFTWARE 0x8000 /* SW Reset Occurred Since Last Read Of SWRST */
600
601/* SYSCR Masks */
602#define BMODE 0x0007 /* Boot Mode - Latched During HW Reset From Mode Pins */
603#define NOBOOT 0x0010 /* Execute From L1 or ASYNC Bank 0 When BMODE = 0 */
604
605
606/* ************* SYSTEM INTERRUPT CONTROLLER MASKS *************************************/
607/* Peripheral Masks For SIC_ISR, SIC_IWR, SIC_IMASK */
608
609#if 0
610#define IRQ_PLL_WAKEUP 0x00000001 /* PLL Wakeup Interrupt */
611
612#define IRQ_ERROR1 0x00000002 /* Error Interrupt (DMA, DMARx Block, DMARx Overflow) */
613#define IRQ_ERROR2 0x00000004 /* Error Interrupt (CAN, Ethernet, SPORTx, PPI, SPI, UARTx) */
614#define IRQ_RTC 0x00000008 /* Real Time Clock Interrupt */
615#define IRQ_DMA0 0x00000010 /* DMA Channel 0 (PPI) Interrupt */
616#define IRQ_DMA3 0x00000020 /* DMA Channel 3 (SPORT0 RX) Interrupt */
617#define IRQ_DMA4 0x00000040 /* DMA Channel 4 (SPORT0 TX) Interrupt */
618#define IRQ_DMA5 0x00000080 /* DMA Channel 5 (SPORT1 RX) Interrupt */
619
620#define IRQ_DMA6 0x00000100 /* DMA Channel 6 (SPORT1 TX) Interrupt */
621#define IRQ_TWI 0x00000200 /* TWI Interrupt */
622#define IRQ_DMA7 0x00000400 /* DMA Channel 7 (SPI) Interrupt */
623#define IRQ_DMA8 0x00000800 /* DMA Channel 8 (UART0 RX) Interrupt */
624#define IRQ_DMA9 0x00001000 /* DMA Channel 9 (UART0 TX) Interrupt */
625#define IRQ_DMA10 0x00002000 /* DMA Channel 10 (UART1 RX) Interrupt */
626#define IRQ_DMA11 0x00004000 /* DMA Channel 11 (UART1 TX) Interrupt */
627#define IRQ_CAN_RX 0x00008000 /* CAN Receive Interrupt */
628
629#define IRQ_CAN_TX 0x00010000 /* CAN Transmit Interrupt */
630#define IRQ_DMA1 0x00020000 /* DMA Channel 1 (Ethernet RX) Interrupt */
631#define IRQ_PFA_PORTH 0x00020000 /* PF Port H (PF47:32) Interrupt A */
632#define IRQ_DMA2 0x00040000 /* DMA Channel 2 (Ethernet TX) Interrupt */
633#define IRQ_PFB_PORTH 0x00040000 /* PF Port H (PF47:32) Interrupt B */
634#define IRQ_TIMER0 0x00080000 /* Timer 0 Interrupt */
635#define IRQ_TIMER1 0x00100000 /* Timer 1 Interrupt */
636#define IRQ_TIMER2 0x00200000 /* Timer 2 Interrupt */
637#define IRQ_TIMER3 0x00400000 /* Timer 3 Interrupt */
638#define IRQ_TIMER4 0x00800000 /* Timer 4 Interrupt */
639
640#define IRQ_TIMER5 0x01000000 /* Timer 5 Interrupt */
641#define IRQ_TIMER6 0x02000000 /* Timer 6 Interrupt */
642#define IRQ_TIMER7 0x04000000 /* Timer 7 Interrupt */
643#define IRQ_PFA_PORTFG 0x08000000 /* PF Ports F&G (PF31:0) Interrupt A */
644#define IRQ_PFB_PORTF 0x80000000 /* PF Port F (PF15:0) Interrupt B */
645#define IRQ_DMA12 0x20000000 /* DMA Channels 12 (MDMA1 Source) RX Interrupt */
646#define IRQ_DMA13 0x20000000 /* DMA Channels 13 (MDMA1 Destination) TX Interrupt */
647#define IRQ_DMA14 0x40000000 /* DMA Channels 14 (MDMA0 Source) RX Interrupt */
648#define IRQ_DMA15 0x40000000 /* DMA Channels 15 (MDMA0 Destination) TX Interrupt */
649#define IRQ_WDOG 0x80000000 /* Software Watchdog Timer Interrupt */
650#define IRQ_PFB_PORTG 0x10000000 /* PF Port G (PF31:16) Interrupt B */
651#endif
652
653/* SIC_IAR0 Macros */
654#define P0_IVG(x) (((x)&0xF)-7) /* Peripheral #0 assigned IVG #x */
655#define P1_IVG(x) (((x)&0xF)-7) << 0x4 /* Peripheral #1 assigned IVG #x */
656#define P2_IVG(x) (((x)&0xF)-7) << 0x8 /* Peripheral #2 assigned IVG #x */
657#define P3_IVG(x) (((x)&0xF)-7) << 0xC /* Peripheral #3 assigned IVG #x */
658#define P4_IVG(x) (((x)&0xF)-7) << 0x10 /* Peripheral #4 assigned IVG #x */
659#define P5_IVG(x) (((x)&0xF)-7) << 0x14 /* Peripheral #5 assigned IVG #x */
660#define P6_IVG(x) (((x)&0xF)-7) << 0x18 /* Peripheral #6 assigned IVG #x */
661#define P7_IVG(x) (((x)&0xF)-7) << 0x1C /* Peripheral #7 assigned IVG #x */
662
663/* SIC_IAR1 Macros */
664#define P8_IVG(x) (((x)&0xF)-7) /* Peripheral #8 assigned IVG #x */
665#define P9_IVG(x) (((x)&0xF)-7) << 0x4 /* Peripheral #9 assigned IVG #x */
666#define P10_IVG(x) (((x)&0xF)-7) << 0x8 /* Peripheral #10 assigned IVG #x */
667#define P11_IVG(x) (((x)&0xF)-7) << 0xC /* Peripheral #11 assigned IVG #x */
668#define P12_IVG(x) (((x)&0xF)-7) << 0x10 /* Peripheral #12 assigned IVG #x */
669#define P13_IVG(x) (((x)&0xF)-7) << 0x14 /* Peripheral #13 assigned IVG #x */
670#define P14_IVG(x) (((x)&0xF)-7) << 0x18 /* Peripheral #14 assigned IVG #x */
671#define P15_IVG(x) (((x)&0xF)-7) << 0x1C /* Peripheral #15 assigned IVG #x */
672
673/* SIC_IAR2 Macros */
674#define P16_IVG(x) (((x)&0xF)-7) /* Peripheral #16 assigned IVG #x */
675#define P17_IVG(x) (((x)&0xF)-7) << 0x4 /* Peripheral #17 assigned IVG #x */
676#define P18_IVG(x) (((x)&0xF)-7) << 0x8 /* Peripheral #18 assigned IVG #x */
677#define P19_IVG(x) (((x)&0xF)-7) << 0xC /* Peripheral #19 assigned IVG #x */
678#define P20_IVG(x) (((x)&0xF)-7) << 0x10 /* Peripheral #20 assigned IVG #x */
679#define P21_IVG(x) (((x)&0xF)-7) << 0x14 /* Peripheral #21 assigned IVG #x */
680#define P22_IVG(x) (((x)&0xF)-7) << 0x18 /* Peripheral #22 assigned IVG #x */
681#define P23_IVG(x) (((x)&0xF)-7) << 0x1C /* Peripheral #23 assigned IVG #x */
682
683/* SIC_IAR3 Macros */
684#define P24_IVG(x) (((x)&0xF)-7) /* Peripheral #24 assigned IVG #x */
685#define P25_IVG(x) (((x)&0xF)-7) << 0x4 /* Peripheral #25 assigned IVG #x */
686#define P26_IVG(x) (((x)&0xF)-7) << 0x8 /* Peripheral #26 assigned IVG #x */
687#define P27_IVG(x) (((x)&0xF)-7) << 0xC /* Peripheral #27 assigned IVG #x */
688#define P28_IVG(x) (((x)&0xF)-7) << 0x10 /* Peripheral #28 assigned IVG #x */
689#define P29_IVG(x) (((x)&0xF)-7) << 0x14 /* Peripheral #29 assigned IVG #x */
690#define P30_IVG(x) (((x)&0xF)-7) << 0x18 /* Peripheral #30 assigned IVG #x */
691#define P31_IVG(x) (((x)&0xF)-7) << 0x1C /* Peripheral #31 assigned IVG #x */
692
693
694/* SIC_IMASK Masks */
695#define SIC_UNMASK_ALL 0x00000000 /* Unmask all peripheral interrupts */
696#define SIC_MASK_ALL 0xFFFFFFFF /* Mask all peripheral interrupts */
697#define SIC_MASK(x) (1 << ((x)&0x1F)) /* Mask Peripheral #x interrupt */
698#define SIC_UNMASK(x) (0xFFFFFFFF ^ (1 << ((x)&0x1F))) /* Unmask Peripheral #x interrupt */
699
700/* SIC_IWR Masks */
701#define IWR_DISABLE_ALL 0x00000000 /* Wakeup Disable all peripherals */
702#define IWR_ENABLE_ALL 0xFFFFFFFF /* Wakeup Enable all peripherals */
703#define IWR_ENABLE(x) (1 << ((x)&0x1F)) /* Wakeup Enable Peripheral #x */
704#define IWR_DISABLE(x) (0xFFFFFFFF ^ (1 << ((x)&0x1F))) /* Wakeup Disable Peripheral #x */
705
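These helpers are meant for read-modify-write sequences on the 32-bit SIC registers; note that IWR_DISABLE() yields an all-ones value with one bit cleared, so it is applied with AND rather than OR. A sketch, again assuming the usual accessors:

	/* Let peripheral #10 wake the core, then revoke #24's wakeup. */
	bfin_write32(SIC_IWR0, bfin_read32(SIC_IWR0) | IWR_ENABLE(10));
	bfin_write32(SIC_IWR0, bfin_read32(SIC_IWR0) & IWR_DISABLE(24));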
706
707/* ************** UART CONTROLLER MASKS *************************/
708/* UARTx_LCR Masks */
709#define WLS(x) (((x)-5) & 0x03) /* Word Length Select */
710#define STB 0x04 /* Stop Bits */
711#define PEN 0x08 /* Parity Enable */
712#define EPS 0x10 /* Even Parity Select */
713#define STP 0x20 /* Stick Parity */
714#define SB 0x40 /* Set Break */
715#define DLAB 0x80 /* Divisor Latch Access */
716
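A typical use of these masks is an 8N1 line setup: DLAB is raised while the divisor latches are loaded, then cleared so THR/RBR become visible again. A hedged sketch, assuming bfin_write16() and a precomputed divisor:

	u16 divisor = 71;	/* placeholder: SCLK / (16 * baud rate) */

	bfin_write16(UART1_LCR, DLAB);		/* expose UART1_DLL/UART1_DLH */
	bfin_write16(UART1_DLL, divisor & 0xFF);
	bfin_write16(UART1_DLH, divisor >> 8);
	bfin_write16(UART1_LCR, WLS(8));	/* 8 data bits, 1 stop, no parity */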
717/* UARTx_MCR Mask */
718#define LOOP_ENA 0x10 /* Loopback Mode Enable */
719#define LOOP_ENA_P 0x04
720
721/* UARTx_LSR Masks */
722#define DR 0x01 /* Data Ready */
723#define OE 0x02 /* Overrun Error */
724#define PE 0x04 /* Parity Error */
725#define FE 0x08 /* Framing Error */
726#define BI 0x10 /* Break Interrupt */
727#define THRE 0x20 /* THR Empty */
728#define TEMT 0x40 /* TSR and UART_THR Empty */
729
730/* UARTx_IER Masks */
731#define ERBFI 0x01 /* Enable Receive Buffer Full Interrupt */
732#define ETBEI 0x02 /* Enable Transmit Buffer Empty Interrupt */
733#define ELSI 0x04 /* Enable RX Status Interrupt */
734
735/* UARTx_IIR Masks */
736#define NINT 0x01 /* Pending Interrupt */
737#define IIR_TX_READY 0x02 /* UART_THR empty */
738#define IIR_RX_READY 0x04 /* Receive data ready */
739#define IIR_LINE_CHANGE 0x06 /* Receive line status */
740#define IIR_STATUS 0x06 /* Highest Priority Pending Interrupt */
741
742/* UARTx_GCTL Masks */
743#define UCEN 0x01 /* Enable UARTx Clocks */
744#define IREN 0x02 /* Enable IrDA Mode */
745#define TPOLC 0x04 /* IrDA TX Polarity Change */
746#define RPOLC 0x08 /* IrDA RX Polarity Change */
747#define FPE 0x10 /* Force Parity Error On Transmit */
748#define FFE 0x20 /* Force Framing Error On Transmit */
749
750
751/* **************** GENERAL PURPOSE TIMER MASKS **********************/
752/* TIMER_ENABLE Masks */
753#define TIMEN0 0x0001 /* Enable Timer 0 */
754#define TIMEN1 0x0002 /* Enable Timer 1 */
755#define TIMEN2 0x0004 /* Enable Timer 2 */
756#define TIMEN3 0x0008 /* Enable Timer 3 */
757#define TIMEN4 0x0010 /* Enable Timer 4 */
758#define TIMEN5 0x0020 /* Enable Timer 5 */
759#define TIMEN6 0x0040 /* Enable Timer 6 */
760#define TIMEN7 0x0080 /* Enable Timer 7 */
761
762/* TIMER_DISABLE Masks */
763#define TIMDIS0 TIMEN0 /* Disable Timer 0 */
764#define TIMDIS1 TIMEN1 /* Disable Timer 1 */
765#define TIMDIS2 TIMEN2 /* Disable Timer 2 */
766#define TIMDIS3 TIMEN3 /* Disable Timer 3 */
767#define TIMDIS4 TIMEN4 /* Disable Timer 4 */
768#define TIMDIS5 TIMEN5 /* Disable Timer 5 */
769#define TIMDIS6 TIMEN6 /* Disable Timer 6 */
770#define TIMDIS7 TIMEN7 /* Disable Timer 7 */
771
772/* TIMER_STATUS Masks */
773#define TIMIL0 0x00000001 /* Timer 0 Interrupt */
774#define TIMIL1 0x00000002 /* Timer 1 Interrupt */
775#define TIMIL2 0x00000004 /* Timer 2 Interrupt */
776#define TIMIL3 0x00000008 /* Timer 3 Interrupt */
777#define TOVF_ERR0 0x00000010 /* Timer 0 Counter Overflow */
778#define TOVF_ERR1 0x00000020 /* Timer 1 Counter Overflow */
779#define TOVF_ERR2 0x00000040 /* Timer 2 Counter Overflow */
780#define TOVF_ERR3 0x00000080 /* Timer 3 Counter Overflow */
781#define TRUN0 0x00001000 /* Timer 0 Slave Enable Status */
782#define TRUN1 0x00002000 /* Timer 1 Slave Enable Status */
783#define TRUN2 0x00004000 /* Timer 2 Slave Enable Status */
784#define TRUN3 0x00008000 /* Timer 3 Slave Enable Status */
785#define TIMIL4 0x00010000 /* Timer 4 Interrupt */
786#define TIMIL5 0x00020000 /* Timer 5 Interrupt */
787#define TIMIL6 0x00040000 /* Timer 6 Interrupt */
788#define TIMIL7 0x00080000 /* Timer 7 Interrupt */
789#define TOVF_ERR4 0x00100000 /* Timer 4 Counter Overflow */
790#define TOVF_ERR5 0x00200000 /* Timer 5 Counter Overflow */
791#define TOVF_ERR6 0x00400000 /* Timer 6 Counter Overflow */
792#define TOVF_ERR7 0x00800000 /* Timer 7 Counter Overflow */
793#define TRUN4 0x10000000 /* Timer 4 Slave Enable Status */
794#define TRUN5 0x20000000 /* Timer 5 Slave Enable Status */
795#define TRUN6 0x40000000 /* Timer 6 Slave Enable Status */
796#define TRUN7 0x80000000 /* Timer 7 Slave Enable Status */
797
798/* Alternate Deprecated Macros Provided For Backwards Code Compatibility */
799#define TOVL_ERR0 TOVF_ERR0
800#define TOVL_ERR1 TOVF_ERR1
801#define TOVL_ERR2 TOVF_ERR2
802#define TOVL_ERR3 TOVF_ERR3
803#define TOVL_ERR4 TOVF_ERR4
804#define TOVL_ERR5 TOVF_ERR5
805#define TOVL_ERR6 TOVF_ERR6
806#define TOVL_ERR7 TOVF_ERR7
807
808/* TIMERx_CONFIG Masks */
809#define PWM_OUT 0x0001 /* Pulse-Width Modulation Output Mode */
810#define WDTH_CAP 0x0002 /* Width Capture Input Mode */
811#define EXT_CLK 0x0003 /* External Clock Mode */
812#define PULSE_HI 0x0004 /* Action Pulse (Positive/Negative*) */
813#define PERIOD_CNT 0x0008 /* Period Count */
814#define IRQ_ENA 0x0010 /* Interrupt Request Enable */
815#define TIN_SEL 0x0020 /* Timer Input Select */
816#define OUT_DIS 0x0040 /* Output Pad Disable */
817#define CLK_SEL 0x0080 /* Timer Clock Select */
818#define TOGGLE_HI 0x0100 /* PWM_OUT PULSE_HI Toggle Mode */
819#define EMU_RUN 0x0200 /* Emulation Behavior Select */
820#define ERR_TYP 0xC000 /* Error Type */
821
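/*
 * Illustrative sketch only (not part of the original header): PWM on
 * Timer 0 using the enable/status/config masks above. The accessor names
 * and the PERIOD/WIDTH values are assumptions, not board settings.
 */
#if 0
	bfin_write_TIMER0_CONFIG(PWM_OUT | PERIOD_CNT | PULSE_HI | IRQ_ENA);
	bfin_write_TIMER0_PERIOD(0x1000);	/* period, in selected clocks */
	bfin_write_TIMER0_WIDTH(0x0800);	/* 50% duty */
	bfin_write_TIMER_ENABLE(TIMEN0);
	while (!(bfin_read_TIMER_STATUS() & TIMIL0))
		continue;			/* wait for first period IRQ latch */
	bfin_write_TIMER_STATUS(TIMIL0);	/* status bits are write-1-to-clear */
	bfin_write_TIMER_DISABLE(TIMDIS0);
#endif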
822
823/* ****************** GPIO PORTS F, G, H MASKS ***********************/
824/* General Purpose IO (0xFFC00700 - 0xFFC007FF) Masks */
825/* Port F Masks */
826#define PF0 0x0001
827#define PF1 0x0002
828#define PF2 0x0004
829#define PF3 0x0008
830#define PF4 0x0010
831#define PF5 0x0020
832#define PF6 0x0040
833#define PF7 0x0080
834#define PF8 0x0100
835#define PF9 0x0200
836#define PF10 0x0400
837#define PF11 0x0800
838#define PF12 0x1000
839#define PF13 0x2000
840#define PF14 0x4000
841#define PF15 0x8000
842
843/* Port G Masks */
844#define PG0 0x0001
845#define PG1 0x0002
846#define PG2 0x0004
847#define PG3 0x0008
848#define PG4 0x0010
849#define PG5 0x0020
850#define PG6 0x0040
851#define PG7 0x0080
852#define PG8 0x0100
853#define PG9 0x0200
854#define PG10 0x0400
855#define PG11 0x0800
856#define PG12 0x1000
857#define PG13 0x2000
858#define PG14 0x4000
859#define PG15 0x8000
860
861/* Port H Masks */
862#define PH0 0x0001
863#define PH1 0x0002
864#define PH2 0x0004
865#define PH3 0x0008
866#define PH4 0x0010
867#define PH5 0x0020
868#define PH6 0x0040
869#define PH7 0x0080
870
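/*
 * Illustrative sketch only (not part of the original header): the PFx,
 * PGx, and PHx values are plain bit masks, so they OR together into the
 * GPIO MMRs. The PORTFIO accessor names are assumed from the cdef headers.
 */
#if 0
	bfin_write_PORTFIO_DIR(bfin_read_PORTFIO_DIR() | PF5 | PF6);
	bfin_write_PORTFIO_SET(PF5);		/* drive PF5 high */
	bfin_write_PORTFIO_CLEAR(PF6);		/* drive PF6 low */
#endif
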
871/* ********************* ASYNCHRONOUS MEMORY CONTROLLER MASKS *************************/
872/* EBIU_AMGCTL Masks */
873#define AMCKEN 0x0001 /* Enable CLKOUT */
874#define AMBEN_NONE 0x0000 /* All Banks Disabled */
875#define AMBEN_B0 0x0002 /* Enable Async Memory Bank 0 only */
876#define AMBEN_B0_B1 0x0004 /* Enable Async Memory Banks 0 & 1 only */
877#define AMBEN_B0_B1_B2 0x0006 /* Enable Async Memory Banks 0, 1, and 2 */
878#define AMBEN_ALL 0x0008 /* Enable Async Memory Banks (all) 0, 1, 2, and 3 */
879
880/* EBIU_AMBCTL0 Masks */
881#define B0RDYEN 0x00000001 /* Bank 0 (B0) RDY Enable */
882#define B0RDYPOL 0x00000002 /* B0 RDY Active High */
883#define B0TT_1 0x00000004 /* B0 Transition Time (Read to Write) = 1 cycle */
884#define B0TT_2 0x00000008 /* B0 Transition Time (Read to Write) = 2 cycles */
885#define B0TT_3 0x0000000C /* B0 Transition Time (Read to Write) = 3 cycles */
886#define B0TT_4 0x00000000 /* B0 Transition Time (Read to Write) = 4 cycles */
887#define B0ST_1 0x00000010 /* B0 Setup Time (AOE to Read/Write) = 1 cycle */
888#define B0ST_2 0x00000020 /* B0 Setup Time (AOE to Read/Write) = 2 cycles */
889#define B0ST_3 0x00000030 /* B0 Setup Time (AOE to Read/Write) = 3 cycles */
890#define B0ST_4 0x00000000 /* B0 Setup Time (AOE to Read/Write) = 4 cycles */
891#define B0HT_1 0x00000040 /* B0 Hold Time (~Read/Write to ~AOE) = 1 cycle */
892#define B0HT_2 0x00000080 /* B0 Hold Time (~Read/Write to ~AOE) = 2 cycles */
893#define B0HT_3 0x000000C0 /* B0 Hold Time (~Read/Write to ~AOE) = 3 cycles */
894#define B0HT_0 0x00000000 /* B0 Hold Time (~Read/Write to ~AOE) = 0 cycles */
895#define B0RAT_1 0x00000100 /* B0 Read Access Time = 1 cycle */
896#define B0RAT_2 0x00000200 /* B0 Read Access Time = 2 cycles */
897#define B0RAT_3 0x00000300 /* B0 Read Access Time = 3 cycles */
898#define B0RAT_4 0x00000400 /* B0 Read Access Time = 4 cycles */
899#define B0RAT_5 0x00000500 /* B0 Read Access Time = 5 cycles */
900#define B0RAT_6 0x00000600 /* B0 Read Access Time = 6 cycles */
901#define B0RAT_7 0x00000700 /* B0 Read Access Time = 7 cycles */
902#define B0RAT_8 0x00000800 /* B0 Read Access Time = 8 cycles */
903#define B0RAT_9 0x00000900 /* B0 Read Access Time = 9 cycles */
904#define B0RAT_10 0x00000A00 /* B0 Read Access Time = 10 cycles */
905#define B0RAT_11 0x00000B00 /* B0 Read Access Time = 11 cycles */
906#define B0RAT_12 0x00000C00 /* B0 Read Access Time = 12 cycles */
907#define B0RAT_13 0x00000D00 /* B0 Read Access Time = 13 cycles */
908#define B0RAT_14 0x00000E00 /* B0 Read Access Time = 14 cycles */
909#define B0RAT_15 0x00000F00 /* B0 Read Access Time = 15 cycles */
910#define B0WAT_1 0x00001000 /* B0 Write Access Time = 1 cycle */
911#define B0WAT_2 0x00002000 /* B0 Write Access Time = 2 cycles */
912#define B0WAT_3 0x00003000 /* B0 Write Access Time = 3 cycles */
913#define B0WAT_4 0x00004000 /* B0 Write Access Time = 4 cycles */
914#define B0WAT_5 0x00005000 /* B0 Write Access Time = 5 cycles */
915#define B0WAT_6 0x00006000 /* B0 Write Access Time = 6 cycles */
916#define B0WAT_7 0x00007000 /* B0 Write Access Time = 7 cycles */
917#define B0WAT_8 0x00008000 /* B0 Write Access Time = 8 cycles */
918#define B0WAT_9 0x00009000 /* B0 Write Access Time = 9 cycles */
919#define B0WAT_10 0x0000A000 /* B0 Write Access Time = 10 cycles */
920#define B0WAT_11 0x0000B000 /* B0 Write Access Time = 11 cycles */
921#define B0WAT_12 0x0000C000 /* B0 Write Access Time = 12 cycles */
922#define B0WAT_13 0x0000D000 /* B0 Write Access Time = 13 cycles */
923#define B0WAT_14 0x0000E000 /* B0 Write Access Time = 14 cycles */
924#define B0WAT_15 0x0000F000 /* B0 Write Access Time = 15 cycles */
925
926#define B1RDYEN 0x00010000 /* Bank 1 (B1) RDY Enable */
927#define B1RDYPOL 0x00020000 /* B1 RDY Active High */
928#define B1TT_1 0x00040000 /* B1 Transition Time (Read to Write) = 1 cycle */
929#define B1TT_2 0x00080000 /* B1 Transition Time (Read to Write) = 2 cycles */
930#define B1TT_3 0x000C0000 /* B1 Transition Time (Read to Write) = 3 cycles */
931#define B1TT_4 0x00000000 /* B1 Transition Time (Read to Write) = 4 cycles */
932#define B1ST_1 0x00100000 /* B1 Setup Time (AOE to Read/Write) = 1 cycle */
933#define B1ST_2 0x00200000 /* B1 Setup Time (AOE to Read/Write) = 2 cycles */
934#define B1ST_3 0x00300000 /* B1 Setup Time (AOE to Read/Write) = 3 cycles */
935#define B1ST_4 0x00000000 /* B1 Setup Time (AOE to Read/Write) = 4 cycles */
936#define B1HT_1 0x00400000 /* B1 Hold Time (~Read/Write to ~AOE) = 1 cycle */
937#define B1HT_2 0x00800000 /* B1 Hold Time (~Read/Write to ~AOE) = 2 cycles */
938#define B1HT_3 0x00C00000 /* B1 Hold Time (~Read/Write to ~AOE) = 3 cycles */
939#define B1HT_0 0x00000000 /* B1 Hold Time (~Read/Write to ~AOE) = 0 cycles */
940#define B1RAT_1 0x01000000 /* B1 Read Access Time = 1 cycle */
941#define B1RAT_2 0x02000000 /* B1 Read Access Time = 2 cycles */
942#define B1RAT_3 0x03000000 /* B1 Read Access Time = 3 cycles */
943#define B1RAT_4 0x04000000 /* B1 Read Access Time = 4 cycles */
944#define B1RAT_5 0x05000000 /* B1 Read Access Time = 5 cycles */
945#define B1RAT_6 0x06000000 /* B1 Read Access Time = 6 cycles */
946#define B1RAT_7 0x07000000 /* B1 Read Access Time = 7 cycles */
947#define B1RAT_8 0x08000000 /* B1 Read Access Time = 8 cycles */
948#define B1RAT_9 0x09000000 /* B1 Read Access Time = 9 cycles */
949#define B1RAT_10 0x0A000000 /* B1 Read Access Time = 10 cycles */
950#define B1RAT_11 0x0B000000 /* B1 Read Access Time = 11 cycles */
951#define B1RAT_12 0x0C000000 /* B1 Read Access Time = 12 cycles */
952#define B1RAT_13 0x0D000000 /* B1 Read Access Time = 13 cycles */
953#define B1RAT_14 0x0E000000 /* B1 Read Access Time = 14 cycles */
954#define B1RAT_15 0x0F000000 /* B1 Read Access Time = 15 cycles */
955#define B1WAT_1 0x10000000 /* B1 Write Access Time = 1 cycle */
956#define B1WAT_2 0x20000000 /* B1 Write Access Time = 2 cycles */
957#define B1WAT_3 0x30000000 /* B1 Write Access Time = 3 cycles */
958#define B1WAT_4 0x40000000 /* B1 Write Access Time = 4 cycles */
959#define B1WAT_5 0x50000000 /* B1 Write Access Time = 5 cycles */
960#define B1WAT_6 0x60000000 /* B1 Write Access Time = 6 cycles */
961#define B1WAT_7 0x70000000 /* B1 Write Access Time = 7 cycles */
962#define B1WAT_8 0x80000000 /* B1 Write Access Time = 8 cycles */
963#define B1WAT_9 0x90000000 /* B1 Write Access Time = 9 cycles */
964#define B1WAT_10 0xA0000000 /* B1 Write Access Time = 10 cycles */
965#define B1WAT_11 0xB0000000 /* B1 Write Access Time = 11 cycles */
966#define B1WAT_12 0xC0000000 /* B1 Write Access Time = 12 cycles */
967#define B1WAT_13 0xD0000000 /* B1 Write Access Time = 13 cycles */
968#define B1WAT_14 0xE0000000 /* B1 Write Access Time = 14 cycles */
969#define B1WAT_15 0xF0000000 /* B1 Write Access Time = 15 cycles */
970
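/*
 * Illustrative sketch only (not part of the original header): AMGCTL is
 * bit-coded but the AMBCTL fields are value-coded, so pick exactly one
 * setting per field. Accessor names assumed; timings are placeholders.
 */
#if 0
	bfin_write_EBIU_AMGCTL(AMBEN_ALL | AMCKEN);
	bfin_write_EBIU_AMBCTL0(B0RDYEN | B0TT_1 | B0ST_2 | B0HT_1 |
				B0RAT_8 | B0WAT_8);
#endif
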
971/* EBIU_AMBCTL1 Masks */
972#define B2RDYEN 0x00000001 /* Bank 2 (B2) RDY Enable */
973#define B2RDYPOL 0x00000002 /* B2 RDY Active High */
974#define B2TT_1 0x00000004 /* B2 Transition Time (Read to Write) = 1 cycle */
975#define B2TT_2 0x00000008 /* B2 Transition Time (Read to Write) = 2 cycles */
976#define B2TT_3 0x0000000C /* B2 Transition Time (Read to Write) = 3 cycles */
977#define B2TT_4 0x00000000 /* B2 Transition Time (Read to Write) = 4 cycles */
978#define B2ST_1 0x00000010 /* B2 Setup Time (AOE to Read/Write) = 1 cycle */
979#define B2ST_2 0x00000020 /* B2 Setup Time (AOE to Read/Write) = 2 cycles */
980#define B2ST_3 0x00000030 /* B2 Setup Time (AOE to Read/Write) = 3 cycles */
981#define B2ST_4 0x00000000 /* B2 Setup Time (AOE to Read/Write) = 4 cycles */
982#define B2HT_1 0x00000040 /* B2 Hold Time (~Read/Write to ~AOE) = 1 cycle */
983#define B2HT_2 0x00000080 /* B2 Hold Time (~Read/Write to ~AOE) = 2 cycles */
984#define B2HT_3 0x000000C0 /* B2 Hold Time (~Read/Write to ~AOE) = 3 cycles */
985#define B2HT_0 0x00000000 /* B2 Hold Time (~Read/Write to ~AOE) = 0 cycles */
986#define B2RAT_1 0x00000100 /* B2 Read Access Time = 1 cycle */
987#define B2RAT_2 0x00000200 /* B2 Read Access Time = 2 cycles */
988#define B2RAT_3 0x00000300 /* B2 Read Access Time = 3 cycles */
989#define B2RAT_4 0x00000400 /* B2 Read Access Time = 4 cycles */
990#define B2RAT_5 0x00000500 /* B2 Read Access Time = 5 cycles */
991#define B2RAT_6 0x00000600 /* B2 Read Access Time = 6 cycles */
992#define B2RAT_7 0x00000700 /* B2 Read Access Time = 7 cycles */
993#define B2RAT_8 0x00000800 /* B2 Read Access Time = 8 cycles */
994#define B2RAT_9 0x00000900 /* B2 Read Access Time = 9 cycles */
995#define B2RAT_10 0x00000A00 /* B2 Read Access Time = 10 cycles */
996#define B2RAT_11 0x00000B00 /* B2 Read Access Time = 11 cycles */
997#define B2RAT_12 0x00000C00 /* B2 Read Access Time = 12 cycles */
998#define B2RAT_13 0x00000D00 /* B2 Read Access Time = 13 cycles */
999#define B2RAT_14 0x00000E00 /* B2 Read Access Time = 14 cycles */
1000#define B2RAT_15 0x00000F00 /* B2 Read Access Time = 15 cycles */
1001#define B2WAT_1 0x00001000 /* B2 Write Access Time = 1 cycle */
1002#define B2WAT_2 0x00002000 /* B2 Write Access Time = 2 cycles */
1003#define B2WAT_3 0x00003000 /* B2 Write Access Time = 3 cycles */
1004#define B2WAT_4 0x00004000 /* B2 Write Access Time = 4 cycles */
1005#define B2WAT_5 0x00005000 /* B2 Write Access Time = 5 cycles */
1006#define B2WAT_6 0x00006000 /* B2 Write Access Time = 6 cycles */
1007#define B2WAT_7 0x00007000 /* B2 Write Access Time = 7 cycles */
1008#define B2WAT_8 0x00008000 /* B2 Write Access Time = 8 cycles */
1009#define B2WAT_9 0x00009000 /* B2 Write Access Time = 9 cycles */
1010#define B2WAT_10 0x0000A000 /* B2 Write Access Time = 10 cycles */
1011#define B2WAT_11 0x0000B000 /* B2 Write Access Time = 11 cycles */
1012#define B2WAT_12 0x0000C000 /* B2 Write Access Time = 12 cycles */
1013#define B2WAT_13 0x0000D000 /* B2 Write Access Time = 13 cycles */
1014#define B2WAT_14 0x0000E000 /* B2 Write Access Time = 14 cycles */
1015#define B2WAT_15 0x0000F000 /* B2 Write Access Time = 15 cycles */
1016
1017#define B3RDYEN 0x00010000 /* Bank 3 (B3) RDY Enable */
1018#define B3RDYPOL 0x00020000 /* B3 RDY Active High */
1019#define B3TT_1 0x00040000 /* B3 Transition Time (Read to Write) = 1 cycle */
1020#define B3TT_2 0x00080000 /* B3 Transition Time (Read to Write) = 2 cycles */
1021#define B3TT_3 0x000C0000 /* B3 Transition Time (Read to Write) = 3 cycles */
1022#define B3TT_4 0x00000000 /* B3 Transition Time (Read to Write) = 4 cycles */
1023#define B3ST_1 0x00100000 /* B3 Setup Time (AOE to Read/Write) = 1 cycle */
1024#define B3ST_2 0x00200000 /* B3 Setup Time (AOE to Read/Write) = 2 cycles */
1025#define B3ST_3 0x00300000 /* B3 Setup Time (AOE to Read/Write) = 3 cycles */
1026#define B3ST_4 0x00000000 /* B3 Setup Time (AOE to Read/Write) = 4 cycles */
1027#define B3HT_1 0x00400000 /* B3 Hold Time (~Read/Write to ~AOE) = 1 cycle */
1028#define B3HT_2 0x00800000 /* B3 Hold Time (~Read/Write to ~AOE) = 2 cycles */
1029#define B3HT_3 0x00C00000 /* B3 Hold Time (~Read/Write to ~AOE) = 3 cycles */
1030#define B3HT_0 0x00000000 /* B3 Hold Time (~Read/Write to ~AOE) = 0 cycles */
1031#define B3RAT_1 0x01000000 /* B3 Read Access Time = 1 cycle */
1032#define B3RAT_2 0x02000000 /* B3 Read Access Time = 2 cycles */
1033#define B3RAT_3 0x03000000 /* B3 Read Access Time = 3 cycles */
1034#define B3RAT_4 0x04000000 /* B3 Read Access Time = 4 cycles */
1035#define B3RAT_5 0x05000000 /* B3 Read Access Time = 5 cycles */
1036#define B3RAT_6 0x06000000 /* B3 Read Access Time = 6 cycles */
1037#define B3RAT_7 0x07000000 /* B3 Read Access Time = 7 cycles */
1038#define B3RAT_8 0x08000000 /* B3 Read Access Time = 8 cycles */
1039#define B3RAT_9 0x09000000 /* B3 Read Access Time = 9 cycles */
1040#define B3RAT_10 0x0A000000 /* B3 Read Access Time = 10 cycles */
1041#define B3RAT_11 0x0B000000 /* B3 Read Access Time = 11 cycles */
1042#define B3RAT_12 0x0C000000 /* B3 Read Access Time = 12 cycles */
1043#define B3RAT_13 0x0D000000 /* B3 Read Access Time = 13 cycles */
1044#define B3RAT_14 0x0E000000 /* B3 Read Access Time = 14 cycles */
1045#define B3RAT_15 0x0F000000 /* B3 Read Access Time = 15 cycles */
1046#define B3WAT_1 0x10000000 /* B3 Write Access Time = 1 cycle */
1047#define B3WAT_2 0x20000000 /* B3 Write Access Time = 2 cycles */
1048#define B3WAT_3 0x30000000 /* B3 Write Access Time = 3 cycles */
1049#define B3WAT_4 0x40000000 /* B3 Write Access Time = 4 cycles */
1050#define B3WAT_5 0x50000000 /* B3 Write Access Time = 5 cycles */
1051#define B3WAT_6 0x60000000 /* B3 Write Access Time = 6 cycles */
1052#define B3WAT_7 0x70000000 /* B3 Write Access Time = 7 cycles */
1053#define B3WAT_8 0x80000000 /* B3 Write Access Time = 8 cycles */
1054#define B3WAT_9 0x90000000 /* B3 Write Access Time = 9 cycles */
1055#define B3WAT_10 0xA0000000 /* B3 Write Access Time = 10 cycles */
1056#define B3WAT_11 0xB0000000 /* B3 Write Access Time = 11 cycles */
1057#define B3WAT_12 0xC0000000 /* B3 Write Access Time = 12 cycles */
1058#define B3WAT_13 0xD0000000 /* B3 Write Access Time = 13 cycles */
1059#define B3WAT_14 0xE0000000 /* B3 Write Access Time = 14 cycles */
1060#define B3WAT_15 0xF0000000 /* B3 Write Access Time = 15 cycles */
1061
1062
1063/* ********************** SDRAM CONTROLLER MASKS **********************************************/
1064/* EBIU_SDGCTL Masks */
1065#define SCTLE 0x00000001 /* Enable SDRAM Signals */
1066#define CL_2 0x00000008 /* SDRAM CAS Latency = 2 cycles */
1067#define CL_3 0x0000000C /* SDRAM CAS Latency = 3 cycles */
1068#define PASR_ALL 0x00000000 /* All 4 SDRAM Banks Refreshed In Self-Refresh */
1069#define PASR_B0_B1 0x00000010 /* SDRAM Banks 0 and 1 Are Refreshed In Self-Refresh */
1070#define PASR_B0 0x00000020 /* Only SDRAM Bank 0 Is Refreshed In Self-Refresh */
1071#define TRAS_1 0x00000040 /* SDRAM tRAS = 1 cycle */
1072#define TRAS_2 0x00000080 /* SDRAM tRAS = 2 cycles */
1073#define TRAS_3 0x000000C0 /* SDRAM tRAS = 3 cycles */
1074#define TRAS_4 0x00000100 /* SDRAM tRAS = 4 cycles */
1075#define TRAS_5 0x00000140 /* SDRAM tRAS = 5 cycles */
1076#define TRAS_6 0x00000180 /* SDRAM tRAS = 6 cycles */
1077#define TRAS_7 0x000001C0 /* SDRAM tRAS = 7 cycles */
1078#define TRAS_8 0x00000200 /* SDRAM tRAS = 8 cycles */
1079#define TRAS_9 0x00000240 /* SDRAM tRAS = 9 cycles */
1080#define TRAS_10 0x00000280 /* SDRAM tRAS = 10 cycles */
1081#define TRAS_11 0x000002C0 /* SDRAM tRAS = 11 cycles */
1082#define TRAS_12 0x00000300 /* SDRAM tRAS = 12 cycles */
1083#define TRAS_13 0x00000340 /* SDRAM tRAS = 13 cycles */
1084#define TRAS_14 0x00000380 /* SDRAM tRAS = 14 cycles */
1085#define TRAS_15 0x000003C0 /* SDRAM tRAS = 15 cycles */
1086#define TRP_1 0x00000800 /* SDRAM tRP = 1 cycle */
1087#define TRP_2 0x00001000 /* SDRAM tRP = 2 cycles */
1088#define TRP_3 0x00001800 /* SDRAM tRP = 3 cycles */
1089#define TRP_4 0x00002000 /* SDRAM tRP = 4 cycles */
1090#define TRP_5 0x00002800 /* SDRAM tRP = 5 cycles */
1091#define TRP_6 0x00003000 /* SDRAM tRP = 6 cycles */
1092#define TRP_7 0x00003800 /* SDRAM tRP = 7 cycles */
1093#define TRCD_1 0x00008000 /* SDRAM tRCD = 1 cycle */
1094#define TRCD_2 0x00010000 /* SDRAM tRCD = 2 cycles */
1095#define TRCD_3 0x00018000 /* SDRAM tRCD = 3 cycles */
1096#define TRCD_4 0x00020000 /* SDRAM tRCD = 4 cycles */
1097#define TRCD_5 0x00028000 /* SDRAM tRCD = 5 cycles */
1098#define TRCD_6 0x00030000 /* SDRAM tRCD = 6 cycles */
1099#define TRCD_7 0x00038000 /* SDRAM tRCD = 7 cycles */
1100#define TWR_1 0x00080000 /* SDRAM tWR = 1 cycle */
1101#define TWR_2 0x00100000 /* SDRAM tWR = 2 cycles */
1102#define TWR_3 0x00180000 /* SDRAM tWR = 3 cycles */
1103#define PUPSD 0x00200000 /* Power-Up Start Delay (15 SCLK Cycles Delay) */
1104#define PSM 0x00400000 /* Power-Up Sequence (Mode Register Before/After* Refresh) */
1105#define PSS 0x00800000 /* Enable Power-Up Sequence on Next SDRAM Access */
1106#define SRFS 0x01000000 /* Enable SDRAM Self-Refresh Mode */
1107#define EBUFE 0x02000000 /* Enable External Buffering Timing */
1108#define FBBRW 0x04000000 /* Enable Fast Back-To-Back Read To Write */
1109#define EMREN 0x10000000 /* Extended Mode Register Enable */
1110#define TCSR 0x20000000 /* Temp-Compensated Self-Refresh Value (85/45* Deg C) */
1111#define CDDBG 0x40000000 /* Tristate SDRAM Controls During Bus Grant */
1112
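/*
 * Illustrative sketch only (not part of the original header): composing
 * an EBIU_SDGCTL value. CL/TRAS/TRP/TRCD/TWR are value-coded fields, one
 * setting each; the numbers below are placeholders, not validated timing.
 */
#if 0
	bfin_write_EBIU_SDGCTL(SCTLE | CL_3 | TRAS_6 | TRP_3 | TRCD_3 | TWR_2);
#endif
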
1113/* EBIU_SDBCTL Masks */
1114#define EBE 0x0001 /* Enable SDRAM External Bank */
1115#define EBSZ_16 0x0000 /* SDRAM External Bank Size = 16MB */
1116#define EBSZ_32 0x0002 /* SDRAM External Bank Size = 32MB */
1117#define EBSZ_64 0x0004 /* SDRAM External Bank Size = 64MB */
1118#define EBSZ_128 0x0006 /* SDRAM External Bank Size = 128MB */
1119#define EBSZ_256 0x0008 /* SDRAM External Bank Size = 256MB */
1120#define EBSZ_512 0x000A /* SDRAM External Bank Size = 512MB */
1121#define EBCAW_8 0x0000 /* SDRAM External Bank Column Address Width = 8 Bits */
1122#define EBCAW_9 0x0010 /* SDRAM External Bank Column Address Width = 9 Bits */
1123#define EBCAW_10 0x0020 /* SDRAM External Bank Column Address Width = 10 Bits */
1124#define EBCAW_11 0x0030 /* SDRAM External Bank Column Address Width = 11 Bits */
1125
1126/* EBIU_SDSTAT Masks */
1127#define SDCI 0x0001 /* SDRAM Controller Idle */
1128#define SDSRA 0x0002 /* SDRAM Self-Refresh Active */
1129#define SDPUA 0x0004 /* SDRAM Power-Up Active */
1130#define SDRS 0x0008 /* SDRAM Will Power-Up On Next Access */
1131#define SDEASE 0x0010 /* SDRAM EAB Sticky Error Status */
1132#define BGSTAT 0x0020 /* Bus Grant Status */
1133
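/*
 * Illustrative sketch only (not part of the original header): describing
 * the external bank and polling EBIU_SDSTAT before reprogramming the
 * controller. Accessor names assumed from the cdef headers.
 */
#if 0
	bfin_write_EBIU_SDBCTL(EBE | EBSZ_64 | EBCAW_10);
	while (!(bfin_read_EBIU_SDSTAT() & SDCI))
		continue;	/* wait for the controller to go idle */
#endif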
1134
1135/* ************************** DMA CONTROLLER MASKS ********************************/
1136
1137/* DMAx_PERIPHERAL_MAP, MDMA_yy_PERIPHERAL_MAP Masks */
1138#define CTYPE 0x0040 /* DMA Channel Type Indicator (Memory/Peripheral*) */
1139#define PMAP 0xF000 /* Peripheral Mapped To This Channel */
1140#define PMAP_PPI 0x0000 /* PPI Port DMA */
1141#define PMAP_EMACRX 0x1000 /* Ethernet Receive DMA */
1142#define PMAP_EMACTX 0x2000 /* Ethernet Transmit DMA */
1143#define PMAP_SPORT0RX 0x3000 /* SPORT0 Receive DMA */
1144#define PMAP_SPORT0TX 0x4000 /* SPORT0 Transmit DMA */
1145#define PMAP_SPORT1RX 0x5000 /* SPORT1 Receive DMA */
1146#define PMAP_SPORT1TX 0x6000 /* SPORT1 Transmit DMA */
1147#define PMAP_SPI 0x7000 /* SPI Port DMA */
1148#define PMAP_UART0RX 0x8000 /* UART0 Port Receive DMA */
1149#define PMAP_UART0TX 0x9000 /* UART0 Port Transmit DMA */
1150#define PMAP_UART1RX 0xA000 /* UART1 Port Receive DMA */
1151#define PMAP_UART1TX 0xB000 /* UART1 Port Transmit DMA */
1152
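/*
 * Illustrative sketch only (not part of the original header): PMAP is a
 * 4-bit field, so mask it out before comparing; channel 1 is arbitrary.
 */
#if 0
	if ((bfin_read_DMA1_PERIPHERAL_MAP() & PMAP) == PMAP_EMACRX)
		;	/* DMA channel 1 is routed to Ethernet RX */
#endif
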
1153/* ************ PARALLEL PERIPHERAL INTERFACE (PPI) MASKS *************/
1154/* PPI_CONTROL Masks */
1155#define PORT_EN 0x0001 /* PPI Port Enable */
1156#define PORT_DIR 0x0002 /* PPI Port Direction */
1157#define XFR_TYPE 0x000C /* PPI Transfer Type */
1158#define PORT_CFG 0x0030 /* PPI Port Configuration */
1159#define FLD_SEL 0x0040 /* PPI Active Field Select */
1160#define PACK_EN 0x0080 /* PPI Packing Mode */
1161#define DMA32 0x0100 /* PPI 32-bit DMA Enable */
1162#define SKIP_EN 0x0200 /* PPI Skip Element Enable */
1163#define SKIP_EO 0x0400 /* PPI Skip Even/Odd Elements */
1164#define DLEN_8 0x0000 /* Data Length = 8 Bits */
1165#define DLEN_10 0x0800 /* Data Length = 10 Bits */
1166#define DLEN_11 0x1000 /* Data Length = 11 Bits */
1167#define DLEN_12 0x1800 /* Data Length = 12 Bits */
1168#define DLEN_13 0x2000 /* Data Length = 13 Bits */
1169#define DLEN_14 0x2800 /* Data Length = 14 Bits */
1170#define DLEN_15 0x3000 /* Data Length = 15 Bits */
1171#define DLEN_16 0x3800 /* Data Length = 16 Bits */
1172#define DLENGTH 0x3800 /* PPI Data Length */
1173#define POLC 0x4000 /* PPI Clock Polarity */
1174#define POLS 0x8000 /* PPI Frame Sync Polarity */
1175
1176/* PPI_STATUS Masks */
1177#define FLD 0x0400 /* Field Indicator */
1178#define FT_ERR 0x0800 /* Frame Track Error */
1179#define OVR 0x1000 /* FIFO Overflow Error */
1180#define UNDR 0x2000 /* FIFO Underrun Error */
1181#define ERR_DET 0x4000 /* Error Detected Indicator */
1182#define ERR_NCOR 0x8000 /* Error Not Corrected Indicator */
1183
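/*
 * Illustrative sketch only (not part of the original header): a receive,
 * 8-bit, packed PPI setup; the sticky status bits are write-1-to-clear,
 * so flush them before enabling the port.
 */
#if 0
	bfin_write_PPI_STATUS(-1);		/* clear latched errors */
	bfin_write_PPI_CONTROL(DLEN_8 | PACK_EN);
	bfin_write_PPI_CONTROL(bfin_read_PPI_CONTROL() | PORT_EN);
#endif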
1184
1185/* ******************** TWO-WIRE INTERFACE (TWI) MASKS ***********************/
1186/* TWI_CLKDIV Macros (Use: *pTWI_CLKDIV = CLKLOW(x)|CLKHI(y); ) */
1187#define CLKLOW(x) ((x) & 0xFF) /* Periods Clock Is Held Low */
1188#define CLKHI(y) (((y)&0xFF)<<0x8) /* Periods Before New Clock Low */
1189
1190/* TWI_PRESCALE Masks */
1191#define PRESCALE 0x007F /* SCLKs Per Internal Time Reference (10MHz) */
1192#define TWI_ENA 0x0080 /* TWI Enable */
1193#define SCCB 0x0200 /* SCCB Compatibility Enable */
1194
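/*
 * Illustrative sketch only, following the CLKLOW/CLKHI usage note above.
 * The PRESCALE value of 10 assumes a 100MHz SCLK (the field must yield
 * the 10MHz internal reference), and 50/50 gives roughly 100kHz SCL. The
 * pTWI_* pointer names and the control register name are assumptions.
 */
#if 0
	*pTWI_CONTROL = TWI_ENA | 10;		/* SCLK/10 = 10MHz reference */
	*pTWI_CLKDIV = CLKLOW(50) | CLKHI(50);	/* ~100kHz SCL */
#endif
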
1195/* TWI_SLAVE_CTL Masks */
1196#define SEN 0x0001 /* Slave Enable */
1197#define SADD_LEN 0x0002 /* Slave Address Length */
1198#define STDVAL 0x0004 /* Slave Transmit Data Valid */
1199#define NAK 0x0008 /* NAK/ACK* Generated At Conclusion Of Transfer */
1200#define GEN 0x0010 /* General Call Address Matching Enabled */
1201
1202/* TWI_SLAVE_STAT Masks */
1203#define SDIR 0x0001 /* Slave Transfer Direction (Transmit/Receive*) */
1204#define GCALL 0x0002 /* General Call Indicator */
1205
1206/* TWI_MASTER_CTL Masks */
1207#define MEN 0x0001 /* Master Mode Enable */
1208#define MADD_LEN 0x0002 /* Master Address Length */
1209#define MDIR 0x0004 /* Master Transmit Direction (RX/TX*) */
1210#define FAST 0x0008 /* Use Fast Mode Timing Specs */
1211#define STOP 0x0010 /* Issue Stop Condition */
1212#define RSTART 0x0020 /* Repeat Start or Stop* At End Of Transfer */
1213#define DCNT 0x3FC0 /* Data Bytes To Transfer */
1214#define SDAOVR 0x4000 /* Serial Data Override */
1215#define SCLOVR 0x8000 /* Serial Clock Override */
1216
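/*
 * Illustrative sketch only (not part of the original header): starting a
 * two-byte master-mode write. DCNT occupies bits 6-13 (mask 0x3FC0), so
 * the byte count is shifted into place; MDIR=0 selects transmit. The
 * pTWI_MASTER_ADDR/pTWI_XMT_DATA8 pointer names are assumed conventions.
 */
#if 0
	*pTWI_MASTER_ADDR = 0x50;		/* 7-bit slave address */
	*pTWI_XMT_DATA8 = 0x12;			/* preload first byte */
	*pTWI_MASTER_CTL = (2 << 6) | MEN;	/* DCNT=2, kick off transfer */
#endif
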
1217/* TWI_MASTER_STAT Masks */
1218#define MPROG 0x0001 /* Master Transfer In Progress */
1219#define LOSTARB 0x0002 /* Lost Arbitration Indicator (Xfer Aborted) */
1220#define ANAK 0x0004 /* Address Not Acknowledged */
1221#define DNAK 0x0008 /* Data Not Acknowledged */
1222#define BUFRDERR 0x0010 /* Buffer Read Error */
1223#define BUFWRERR 0x0020 /* Buffer Write Error */
1224#define SDASEN 0x0040 /* Serial Data Sense */
1225#define SCLSEN 0x0080 /* Serial Clock Sense */
1226#define BUSBUSY 0x0100 /* Bus Busy Indicator */
1227
1228/* TWI_INT_SRC and TWI_INT_ENABLE Masks */
1229#define SINIT 0x0001 /* Slave Transfer Initiated */
1230#define SCOMP 0x0002 /* Slave Transfer Complete */
1231#define SERR 0x0004 /* Slave Transfer Error */
1232#define SOVF 0x0008 /* Slave Overflow */
1233#define MCOMP 0x0010 /* Master Transfer Complete */
1234#define MERR 0x0020 /* Master Transfer Error */
1235#define XMTSERV 0x0040 /* Transmit FIFO Service */
1236#define RCVSERV 0x0080 /* Receive FIFO Service */
1237
1238/* TWI_FIFO_CTRL Masks */
1239#define XMTFLUSH 0x0001 /* Transmit Buffer Flush */
1240#define RCVFLUSH 0x0002 /* Receive Buffer Flush */
1241#define XMTINTLEN 0x0004 /* Transmit Buffer Interrupt Length */
1242#define RCVINTLEN 0x0008 /* Receive Buffer Interrupt Length */
1243
1244/* TWI_FIFO_STAT Masks */
1245#define XMTSTAT 0x0003 /* Transmit FIFO Status */
1246#define XMT_EMPTY 0x0000 /* Transmit FIFO Empty */
1247#define XMT_HALF 0x0001 /* Transmit FIFO Has 1 Byte To Write */
1248#define XMT_FULL 0x0003 /* Transmit FIFO Full (2 Bytes To Write) */
1249
1250#define RCVSTAT 0x000C /* Receive FIFO Status */
1251#define RCV_EMPTY 0x0000 /* Receive FIFO Empty */
1252#define RCV_HALF 0x0004 /* Receive FIFO Has 1 Byte To Read */
1253#define RCV_FULL 0x000C /* Receive FIFO Full (2 Bytes To Read) */
1254
1255
1256/* ******************* PIN CONTROL REGISTER MASKS ************************/
1257/* PORT_MUX Masks */
1258#define PJSE 0x0001 /* Port J SPI/SPORT Enable */
1259#define PJSE_SPORT 0x0000 /* Enable TFS0/DT0PRI */
1260#define PJSE_SPI 0x0001 /* Enable SPI_SSEL3:2 */
1261
1262#define PJCE(x) (((x)&0x3)<<1) /* Port J CAN/SPI/SPORT Enable */
1263#define PJCE_SPORT 0x0000 /* Enable DR0SEC/DT0SEC */
1264#define PJCE_CAN 0x0002 /* Enable CAN RX/TX */
1265#define PJCE_SPI 0x0004 /* Enable SPI_SSEL7 */
1266
1267#define PFDE 0x0008 /* Port F DMA Request Enable */
1268#define PFDE_UART 0x0000 /* Enable UART0 RX/TX */
1269#define PFDE_DMA 0x0008 /* Enable DMAR1:0 */
1270
1271#define PFTE 0x0010 /* Port F Timer Enable */
1272#define PFTE_UART 0x0000 /* Enable UART1 RX/TX */
1273#define PFTE_TIMER 0x0010 /* Enable TMR7:6 */
1274
1275#define PFS6E 0x0020 /* Port F SPI SSEL 6 Enable */
1276#define PFS6E_TIMER 0x0000 /* Enable TMR5 */
1277#define PFS6E_SPI 0x0020 /* Enable SPI_SSEL6 */
1278
1279#define PFS5E 0x0040 /* Port F SPI SSEL 5 Enable */
1280#define PFS5E_TIMER 0x0000 /* Enable TMR4 */
1281#define PFS5E_SPI 0x0040 /* Enable SPI_SSEL5 */
1282
1283#define PFS4E 0x0080 /* Port F SPI SSEL 4 Enable */
1284#define PFS4E_TIMER 0x0000 /* Enable TMR3 */
1285#define PFS4E_SPI 0x0080 /* Enable SPI_SSEL4 */
1286
1287#define PFFE 0x0100 /* Port F PPI Frame Sync Enable */
1288#define PFFE_TIMER 0x0000 /* Enable TMR2 */
1289#define PFFE_PPI 0x0100 /* Enable PPI FS3 */
1290
1291#define PGSE 0x0200 /* Port G SPORT1 Secondary Enable */
1292#define PGSE_PPI 0x0000 /* Enable PPI D9:8 */
1293#define PGSE_SPORT 0x0200 /* Enable DR1SEC/DT1SEC */
1294
1295#define PGRE 0x0400 /* Port G SPORT1 Receive Enable */
1296#define PGRE_PPI 0x0000 /* Enable PPI D12:10 */
1297#define PGRE_SPORT 0x0400 /* Enable DR1PRI/RFS1/RSCLK1 */
1298
1299#define PGTE 0x0800 /* Port G SPORT1 Transmit Enable */
1300#define PGTE_PPI 0x0000 /* Enable PPI D15:13 */
1301#define PGTE_SPORT 0x0800 /* Enable DT1PRI/TFS1/TSCLK1 */
1302
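/*
 * Illustrative sketch only (not part of the original header): each
 * PORT_MUX field selects one of two pin functions, so clear the field
 * before OR-ing in the choice. The pPORT_MUX naming is assumed as above.
 */
#if 0
	*pPORT_MUX = (*pPORT_MUX & ~PJCE(3)) | PJCE_SPI;  /* PJ pins -> SPI_SSEL7 */
#endif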
1303
1304/* ****************** HANDSHAKE DMA (HDMA) MASKS *********************/
1305/* HDMAx_CTL Masks */
1306#define HMDMAEN 0x0001 /* Enable Handshake DMA 0/1 */
1307#define REP 0x0002 /* HDMA Request Polarity */
1308#define UTE 0x0004 /* Urgency Threshold Enable */
1309#define OIE 0x0010 /* Overflow Interrupt Enable */
1310#define BDIE 0x0020 /* Block Done Interrupt Enable */
1311#define MBDI 0x0040 /* Mask Block Done IRQ If Pending ECNT */
1312#define DRQ 0x0300 /* HDMA Request Type */
1313#define DRQ_NONE 0x0000 /* No Request */
1314#define DRQ_SINGLE 0x0100 /* Channels Request Single */
1315#define DRQ_MULTI 0x0200 /* Channels Request Multi (Default) */
1316#define DRQ_URGENT 0x0300 /* Channels Request Multi Urgent */
1317#define RBC 0x1000 /* Reload BCNT With IBCNT */
1318#define PS 0x2000 /* HDMA Pin Status */
1319#define OI 0x4000 /* Overflow Interrupt Generated */
1320#define BDI 0x8000 /* Block Done Interrupt Generated */
1321
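/*
 * Illustrative sketch only (not part of the original header): arming
 * handshake DMA 0 for single requests with the block-done interrupt on.
 * The pHMDMA0_CONTROL register name is an assumption.
 */
#if 0
	*pHMDMA0_CONTROL = HMDMAEN | DRQ_SINGLE | BDIE;
#endif
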
1322/* entry addresses of the user-callable Boot ROM functions */
1323
1324#define _BOOTROM_RESET 0xEF000000
1325#define _BOOTROM_FINAL_INIT 0xEF000002
1326#define _BOOTROM_DO_MEMORY_DMA 0xEF000006
1327#define _BOOTROM_BOOT_DXE_FLASH 0xEF000008
1328#define _BOOTROM_BOOT_DXE_SPI 0xEF00000A
1329#define _BOOTROM_BOOT_DXE_TWI 0xEF00000C
1330#define _BOOTROM_GET_DXE_ADDRESS_FLASH 0xEF000010
1331#define _BOOTROM_GET_DXE_ADDRESS_SPI 0xEF000012
1332#define _BOOTROM_GET_DXE_ADDRESS_TWI 0xEF000014
1333
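/*
 * Illustrative sketch only (not part of the original header): the Boot
 * ROM entries above are code addresses, invoked through casted function
 * pointers. The void/void prototype below fits _BOOTROM_RESET; the other
 * entries take arguments documented in the Boot ROM reference.
 */
#if 0
	void (*bootrom_reset)(void) = (void (*)(void))_BOOTROM_RESET;
	bootrom_reset();	/* does not return */
#endif
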
1334/* Alternate Deprecated Macros Provided For Backwards Code Compatibility */
1335#define PGDE_UART PFDE_UART
1336#define PGDE_DMA PFDE_DMA
1337#define CKELOW SCKELOW
1338
1339/* HOST Port Registers */
1340
1341#define HOST_CONTROL 0xffc03400 /* HOST Control Register */
1342#define HOST_STATUS 0xffc03404 /* HOST Status Register */
1343#define HOST_TIMEOUT 0xffc03408 /* HOST Acknowledge Mode Timeout Register */
1344
1345/* Counter Registers */
1346
1347#define CNT_CONFIG 0xffc03500 /* Configuration Register */
1348#define CNT_IMASK 0xffc03504 /* Interrupt Mask Register */
1349#define CNT_STATUS 0xffc03508 /* Status Register */
1350#define CNT_COMMAND 0xffc0350c /* Command Register */
1351#define CNT_DEBOUNCE 0xffc03510 /* Debounce Register */
1352#define CNT_COUNTER 0xffc03514 /* Counter Register */
1353#define CNT_MAX 0xffc03518 /* Maximal Count Register */
1354#define CNT_MIN 0xffc0351c /* Minimal Count Register */
1355
1356/* OTP/FUSE Registers */
1357
1358#define OTP_CONTROL 0xffc03600 /* OTP/Fuse Control Register */
1359#define OTP_BEN 0xffc03604 /* OTP/Fuse Byte Enable */
1360#define OTP_STATUS 0xffc03608 /* OTP/Fuse Status */
1361#define OTP_TIMING 0xffc0360c /* OTP/Fuse Access Timing */
1362
1363/* Security Registers */
1364
1365#define SECURE_SYSSWT 0xffc03620 /* Secure System Switches */
1366#define SECURE_CONTROL 0xffc03624 /* Secure Control */
1367#define SECURE_STATUS 0xffc03628 /* Secure Status */
1368
1369/* OTP Read/Write Data Buffer Registers */
1370
1371#define OTP_DATA0 0xffc03680 /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
1372#define OTP_DATA1 0xffc03684 /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
1373#define OTP_DATA2 0xffc03688 /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
1374#define OTP_DATA3 0xffc0368c /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
1375
1376/* Motor Control PWM Registers */
1377
1378#define PWM_CTRL 0xffc03700 /* PWM Control Register */
1379#define PWM_STAT 0xffc03704 /* PWM Status Register */
1380#define PWM_TM 0xffc03708 /* PWM Period Register */
1381#define PWM_DT 0xffc0370c /* PWM Dead Time Register */
1382#define PWM_GATE 0xffc03710 /* PWM Chopping Control */
1383#define PWM_CHA 0xffc03714 /* PWM Channel A Duty Control */
1384#define PWM_CHB 0xffc03718 /* PWM Channel B Duty Control */
1385#define PWM_CHC 0xffc0371c /* PWM Channel C Duty Control */
1386#define PWM_SEG 0xffc03720 /* PWM Crossover and Output Enable */
1387#define PWM_SYNCWT 0xffc03724 /* PWM Sync Pulse Width Control */
1388#define PWM_CHAL 0xffc03728 /* PWM Channel AL Duty Control (SR mode only) */
1389#define PWM_CHBL 0xffc0372c /* PWM Channel BL Duty Control (SR mode only) */
1390#define PWM_CHCL 0xffc03730 /* PWM Channel CL Duty Control (SR mode only) */
1391#define PWM_LSI 0xffc03734 /* PWM Low Side Invert (SR mode only) */
1392#define PWM_STAT2 0xffc03738 /* PWM Status Register 2 */
1393
1394
1395/* ********************************************************** */
1396/* SINGLE BIT MACRO PAIRS (bit mask and negated one) */
1397/* and MULTI BIT READ MACROS */
1398/* ********************************************************** */
1399
1400/* Bit masks for HOST_CONTROL */
1401
1402#define HOST_CNTR_HOST_EN 0x1 /* Host Enable */
1403#define HOST_CNTR_nHOST_EN 0x0
1404#define HOST_CNTR_HOST_END 0x2 /* Host Endianness */
1405#define HOST_CNTR_nHOST_END 0x0
1406#define HOST_CNTR_DATA_SIZE 0x4 /* Data Size */
1407#define HOST_CNTR_nDATA_SIZE 0x0
1408#define HOST_CNTR_HOST_RST 0x8 /* Host Reset */
1409#define HOST_CNTR_nHOST_RST 0x0
1410#define HOST_CNTR_HRDY_OVR 0x20 /* Host Ready Override */
1411#define HOST_CNTR_nHRDY_OVR 0x0
1412#define HOST_CNTR_INT_MODE 0x40 /* Interrupt Mode */
1413#define HOST_CNTR_nINT_MODE 0x0
1414#define HOST_CNTR_BT_EN 0x80 /* Bus Timeout Enable */
1415#define HOST_CNTR_nBT_EN 0x0
1416#define HOST_CNTR_EHW 0x100 /* Enable Host Write */
1417#define HOST_CNTR_nEHW 0x0
1418#define HOST_CNTR_EHR 0x200 /* Enable Host Read */
1419#define HOST_CNTR_nEHR 0x0
1420#define HOST_CNTR_BDR 0x400 /* Burst DMA Requests */
1421#define HOST_CNTR_nBDR 0x0
1422
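/*
 * Illustrative sketch only (not part of the original header): enabling
 * the host port for reads and writes. The n-prefixed zero values above
 * exist so register images can be built by naming every field explicitly.
 * The bfin_write_HOST_CONTROL() accessor name is an assumption.
 */
#if 0
	bfin_write_HOST_CONTROL(HOST_CNTR_HOST_EN | HOST_CNTR_EHW |
				HOST_CNTR_EHR | HOST_CNTR_INT_MODE);
#endif
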
1423/* Bit masks for HOST_STATUS */
1424
1425#define HOST_STAT_READY 0x1 /* DMA Ready */
1426#define HOST_STAT_nREADY 0x0
1427#define HOST_STAT_FIFOFULL 0x2 /* FIFO Full */
1428#define HOST_STAT_nFIFOFULL 0x0
1429#define HOST_STAT_FIFOEMPTY 0x4 /* FIFO Empty */
1430#define HOST_STAT_nFIFOEMPTY 0x0
1431#define HOST_STAT_COMPLETE 0x8 /* DMA Complete */
1432#define HOST_STAT_nCOMPLETE 0x0
1433#define HOST_STAT_HSHK 0x10 /* Host Handshake */
1434#define HOST_STAT_nHSHK 0x0
1435#define HOST_STAT_TIMEOUT 0x20 /* Host Timeout */
1436#define HOST_STAT_nTIMEOUT 0x0
1437#define HOST_STAT_HIRQ 0x40 /* Host Interrupt Request */
1438#define HOST_STAT_nHIRQ 0x0
1439#define HOST_STAT_ALLOW_CNFG 0x80 /* Allow New Configuration */
1440#define HOST_STAT_nALLOW_CNFG 0x0
1441#define HOST_STAT_DMA_DIR 0x100 /* DMA Direction */
1442#define HOST_STAT_nDMA_DIR 0x0
1443#define HOST_STAT_BTE 0x200 /* Bus Timeout Enabled */
1444#define HOST_STAT_nBTE 0x0
1445#define HOST_STAT_HOSTRD_DONE 0x8000 /* Host Read Completion Interrupt */
1446#define HOST_STAT_nHOSTRD_DONE 0x0
1447
1448/* Bit masks for HOST_TIMEOUT */
1449
1450#define HOST_COUNT_TIMEOUT 0x7ff /* Host Timeout count */
1451
1452/* Bit masks for SECURE_SYSSWT */
1453
1454#define EMUDABL 0x1 /* Emulation Disable. */
1455#define nEMUDABL 0x0
1456#define RSTDABL 0x2 /* Reset Disable */
1457#define nRSTDABL 0x0
1458#define L1IDABL 0x1c /* L1 Instruction Memory Disable. */
1459#define L1DADABL 0xe0 /* L1 Data Bank A Memory Disable. */
1460#define L1DBDABL 0x700 /* L1 Data Bank B Memory Disable. */
1461#define DMA0OVR 0x800 /* DMA0 Memory Access Override */
1462#define nDMA0OVR 0x0
1463#define DMA1OVR 0x1000 /* DMA1 Memory Access Override */
1464#define nDMA1OVR 0x0
1465#define EMUOVR 0x4000 /* Emulation Override */
1466#define nEMUOVR 0x0
1467#define OTPSEN 0x8000 /* OTP Secrets Enable. */
1468#define nOTPSEN 0x0
1469#define L2DABL 0x70000 /* L2 Memory Disable. */
1470
1471/* Bit masks for SECURE_CONTROL */
1472
1473#define SECURE0 0x1 /* SECURE 0 */
1474#define nSECURE0 0x0
1475#define SECURE1 0x2 /* SECURE 1 */
1476#define nSECURE1 0x0
1477#define SECURE2 0x4 /* SECURE 2 */
1478#define nSECURE2 0x0
1479#define SECURE3 0x8 /* SECURE 3 */
1480#define nSECURE3 0x0
1481
1482/* Bit masks for SECURE_STATUS */
1483
1484#define SECMODE 0x3 /* Secured Mode Control State */
1485#define NMI 0x4 /* Non Maskable Interrupt */
1486#define nNMI 0x0
1487#define AFVALID 0x8 /* Authentication Firmware Valid */
1488#define nAFVALID 0x0
1489#define AFEXIT 0x10 /* Authentication Firmware Exit */
1490#define nAFEXIT 0x0
1491#define SECSTAT 0xe0 /* Secure Status */
1492
1493
1494
1495#endif /* _DEF_BF51X_H */
diff --git a/arch/blackfin/mach-bf518/include/mach/gpio.h b/arch/blackfin/mach-bf518/include/mach/gpio.h
index 9af6ce0f6321..b480705bfc2e 100644
--- a/arch/blackfin/mach-bf518/include/mach/gpio.h
+++ b/arch/blackfin/mach-bf518/include/mach/gpio.h
@@ -55,4 +55,8 @@
 #define PORT_G GPIO_PG0
 #define PORT_H GPIO_PH0
 
+#include <mach-common/ports-f.h>
+#include <mach-common/ports-g.h>
+#include <mach-common/ports-h.h>
+
 #endif /* _MACH_GPIO_H_ */
diff --git a/arch/blackfin/mach-bf518/include/mach/pll.h b/arch/blackfin/mach-bf518/include/mach/pll.h
index d5502988896b..94cca674d835 100644
--- a/arch/blackfin/mach-bf518/include/mach/pll.h
+++ b/arch/blackfin/mach-bf518/include/mach/pll.h
@@ -1,63 +1 @@
-/*
- * Copyright 2008 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later
- */
-
-#ifndef _MACH_PLL_H
-#define _MACH_PLL_H
-
-#include <asm/blackfin.h>
-#include <asm/irqflags.h>
-
-/* Writing to PLL_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_PLL_CTL(unsigned int val)
-{
-	unsigned long flags, iwr0, iwr1;
-
-	if (val == bfin_read_PLL_CTL())
-		return;
-
-	flags = hard_local_irq_save();
-	/* Enable the PLL Wakeup bit in SIC IWR */
-	iwr0 = bfin_read32(SIC_IWR0);
-	iwr1 = bfin_read32(SIC_IWR1);
-	/* Only allow PPL Wakeup) */
-	bfin_write32(SIC_IWR0, IWR_ENABLE(0));
-	bfin_write32(SIC_IWR1, 0);
-
-	bfin_write16(PLL_CTL, val);
-	SSYNC();
-	asm("IDLE;");
-
-	bfin_write32(SIC_IWR0, iwr0);
-	bfin_write32(SIC_IWR1, iwr1);
-	hard_local_irq_restore(flags);
-}
-
-/* Writing to VR_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_VR_CTL(unsigned int val)
-{
-	unsigned long flags, iwr0, iwr1;
-
-	if (val == bfin_read_VR_CTL())
-		return;
-
-	flags = hard_local_irq_save();
-	/* Enable the PLL Wakeup bit in SIC IWR */
-	iwr0 = bfin_read32(SIC_IWR0);
-	iwr1 = bfin_read32(SIC_IWR1);
-	/* Only allow PPL Wakeup) */
-	bfin_write32(SIC_IWR0, IWR_ENABLE(0));
-	bfin_write32(SIC_IWR1, 0);
-
-	bfin_write16(VR_CTL, val);
-	SSYNC();
-	asm("IDLE;");
-
-	bfin_write32(SIC_IWR0, iwr0);
-	bfin_write32(SIC_IWR1, iwr1);
-	hard_local_irq_restore(flags);
-}
-
-#endif /* _MACH_PLL_H */
+#include <mach-common/pll.h>
diff --git a/arch/blackfin/mach-bf527/boards/ad7160eval.c b/arch/blackfin/mach-bf527/boards/ad7160eval.c
index 52295fff5577..ccab4c689dc3 100644
--- a/arch/blackfin/mach-bf527/boards/ad7160eval.c
+++ b/arch/blackfin/mach-bf527/boards/ad7160eval.c
@@ -67,6 +67,7 @@ static struct musb_hdrc_config musb_config = {
 	 * if it is the case.
 	 */
 	.gpio_vrsel_active = 1,
+	.clkin = 24,		/* musb CLKIN in MHZ */
 };
 
 static struct musb_hdrc_platform_data musb_plat = {
@@ -419,7 +420,7 @@ static struct resource bfin_uart0_resources[] = {
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
@@ -474,7 +475,7 @@ static struct resource bfin_uart1_resources[] = {
 #endif
 };
 
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
 	P_UART1_TX, P_UART1_RX, 0
 };
 
@@ -627,9 +628,9 @@ static struct resource bfin_sport0_uart_resources[] = {
 	},
 };
 
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
 	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
-	P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
 };
 
 static struct platform_device bfin_sport0_uart_device = {
@@ -661,9 +662,9 @@ static struct resource bfin_sport1_uart_resources[] = {
 	},
 };
 
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
 	P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
-	P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+	P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
 };
 
 static struct platform_device bfin_sport1_uart_device = {
diff --git a/arch/blackfin/mach-bf527/boards/cm_bf527.c b/arch/blackfin/mach-bf527/boards/cm_bf527.c
index 50533edc3994..c9d6dc88f0e6 100644
--- a/arch/blackfin/mach-bf527/boards/cm_bf527.c
+++ b/arch/blackfin/mach-bf527/boards/cm_bf527.c
@@ -104,6 +104,7 @@ static struct musb_hdrc_config musb_config = {
 	 * if it is the case.
 	 */
 	.gpio_vrsel_active = 1,
+	.clkin = 24,		/* musb CLKIN in MHZ */
 };
 
 static struct musb_hdrc_platform_data musb_plat = {
@@ -614,7 +615,7 @@ static struct resource bfin_uart0_resources[] = {
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
@@ -669,7 +670,7 @@ static struct resource bfin_uart1_resources[] = {
 #endif
 };
 
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
 	P_UART1_TX, P_UART1_RX, 0
 };
 
@@ -801,9 +802,9 @@ static struct resource bfin_sport0_uart_resources[] = {
 	},
 };
 
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
 	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
-	P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
 };
 
 static struct platform_device bfin_sport0_uart_device = {
@@ -835,9 +836,9 @@ static struct resource bfin_sport1_uart_resources[] = {
 	},
 };
 
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
 	P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
-	P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+	P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
 };
 
 static struct platform_device bfin_sport1_uart_device = {
diff --git a/arch/blackfin/mach-bf527/boards/ezbrd.c b/arch/blackfin/mach-bf527/boards/ezbrd.c
index d06177b5fe22..b7101aa6e3aa 100644
--- a/arch/blackfin/mach-bf527/boards/ezbrd.c
+++ b/arch/blackfin/mach-bf527/boards/ezbrd.c
@@ -68,6 +68,7 @@ static struct musb_hdrc_config musb_config = {
 	 * if it is the case.
 	 */
 	.gpio_vrsel_active = 1,
+	.clkin = 24,		/* musb CLKIN in MHZ */
 };
 
 static struct musb_hdrc_platform_data musb_plat = {
@@ -499,7 +500,7 @@ static struct resource bfin_uart0_resources[] = {
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
@@ -554,7 +555,7 @@ static struct resource bfin_uart1_resources[] = {
 #endif
 };
 
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
 	P_UART1_TX, P_UART1_RX, 0
 };
 
@@ -681,9 +682,9 @@ static struct resource bfin_sport0_uart_resources[] = {
 	},
 };
 
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
 	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
-	P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
 };
 
 static struct platform_device bfin_sport0_uart_device = {
@@ -715,9 +716,9 @@ static struct resource bfin_sport1_uart_resources[] = {
 	},
 };
 
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
 	P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
-	P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+	P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
 };
 
 static struct platform_device bfin_sport1_uart_device = {
diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c
index 35a88a5a5013..2cd2ff6f3043 100644
--- a/arch/blackfin/mach-bf527/boards/ezkit.c
+++ b/arch/blackfin/mach-bf527/boards/ezkit.c
@@ -108,6 +108,7 @@ static struct musb_hdrc_config musb_config = {
 	 * if it is the case.
 	 */
 	.gpio_vrsel_active = 1,
+	.clkin = 24,		/* musb CLKIN in MHZ */
 };
 
 static struct musb_hdrc_platform_data musb_plat = {
@@ -708,7 +709,7 @@ static struct resource bfin_uart0_resources[] = {
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
@@ -763,7 +764,7 @@ static struct resource bfin_uart1_resources[] = {
 #endif
 };
 
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
 	P_UART1_TX, P_UART1_RX, 0
 };
 
@@ -962,6 +963,11 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
 		I2C_BOARD_INFO("ad5252", 0x2f),
 	},
 #endif
+#if defined(CONFIG_SND_SOC_ADAU1373) || defined(CONFIG_SND_SOC_ADAU1373_MODULE)
+	{
+		I2C_BOARD_INFO("adau1373", 0x1A),
+	},
+#endif
 };
 
 #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
@@ -984,9 +990,9 @@ static struct resource bfin_sport0_uart_resources[] = {
 	},
 };
 
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
 	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
-	P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
 };
 
 static struct platform_device bfin_sport0_uart_device = {
@@ -1018,9 +1024,9 @@ static struct resource bfin_sport1_uart_resources[] = {
 	},
 };
 
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
 	P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
-	P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+	P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
 };
 
 static struct platform_device bfin_sport1_uart_device = {
diff --git a/arch/blackfin/mach-bf527/boards/tll6527m.c b/arch/blackfin/mach-bf527/boards/tll6527m.c
index 130861bd2589..18d303dd5627 100644
--- a/arch/blackfin/mach-bf527/boards/tll6527m.c
+++ b/arch/blackfin/mach-bf527/boards/tll6527m.c
@@ -193,7 +193,7 @@ static unsigned gpio_addr_inputs[] = {
 	GPIO_PG1, GPIO_PH9, GPIO_PH10
 };
 
-static struct gpio_decoder_platfrom_data spi_decoded_cs = {
+static struct gpio_decoder_platform_data spi_decoded_cs = {
 	.base = EXP_GPIO_SPISEL_BASE,
 	.input_addrs = gpio_addr_inputs,
 	.nr_input_addrs = ARRAY_SIZE(gpio_addr_inputs),
@@ -586,7 +586,7 @@ static struct resource bfin_uart0_resources[] = {
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
@@ -642,7 +642,7 @@ static struct resource bfin_uart1_resources[] = {
 #endif
 };
 
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
 	P_UART1_TX, P_UART1_RX, 0
 };
 
@@ -799,9 +799,9 @@ static struct resource bfin_sport0_uart_resources[] = {
 	},
 };
 
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
 	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
-	P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
 };
 
 static struct platform_device bfin_sport0_uart_device = {
@@ -834,9 +834,9 @@ static struct resource bfin_sport1_uart_resources[] = {
 	},
 };
 
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
 	P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
-	P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+	P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
 };
 
 static struct platform_device bfin_sport1_uart_device = {
diff --git a/arch/blackfin/mach-bf527/dma.c b/arch/blackfin/mach-bf527/dma.c
index 7bc7577d6c4f..1fabdefea73a 100644
--- a/arch/blackfin/mach-bf527/dma.c
+++ b/arch/blackfin/mach-bf527/dma.c
@@ -11,7 +11,7 @@
 #include <asm/blackfin.h>
 #include <asm/dma.h>
 
-struct dma_register *dma_io_base_addr[MAX_DMA_CHANNELS] = {
+struct dma_register * const dma_io_base_addr[MAX_DMA_CHANNELS] = {
 	(struct dma_register *) DMA0_NEXT_DESC_PTR,
 	(struct dma_register *) DMA1_NEXT_DESC_PTR,
 	(struct dma_register *) DMA2_NEXT_DESC_PTR,
diff --git a/arch/blackfin/mach-bf527/include/mach/bfin_serial.h b/arch/blackfin/mach-bf527/include/mach/bfin_serial.h
new file mode 100644
index 000000000000..00c603fe8218
--- /dev/null
+++ b/arch/blackfin/mach-bf527/include/mach/bfin_serial.h
@@ -0,0 +1,14 @@
+/*
+ * mach/bfin_serial.h - Blackfin UART/Serial definitions
+ *
+ * Copyright 2006-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __BFIN_MACH_SERIAL_H__
+#define __BFIN_MACH_SERIAL_H__
+
+#define BFIN_UART_NR_PORTS 2
+
+#endif
diff --git a/arch/blackfin/mach-bf527/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf527/include/mach/bfin_serial_5xx.h
index c1d55b878b45..960e08919def 100644
--- a/arch/blackfin/mach-bf527/include/mach/bfin_serial_5xx.h
+++ b/arch/blackfin/mach-bf527/include/mach/bfin_serial_5xx.h
@@ -4,36 +4,9 @@
  * Licensed under the GPL-2 or later
  */
 
-#include <linux/serial.h>
 #include <asm/dma.h>
 #include <asm/portmux.h>
 
-#define UART_GET_CHAR(uart) bfin_read16(((uart)->port.membase + OFFSET_RBR))
-#define UART_GET_DLL(uart) bfin_read16(((uart)->port.membase + OFFSET_DLL))
-#define UART_GET_IER(uart) bfin_read16(((uart)->port.membase + OFFSET_IER))
-#define UART_GET_DLH(uart) bfin_read16(((uart)->port.membase + OFFSET_DLH))
-#define UART_GET_IIR(uart) bfin_read16(((uart)->port.membase + OFFSET_IIR))
-#define UART_GET_LCR(uart) bfin_read16(((uart)->port.membase + OFFSET_LCR))
-#define UART_GET_GCTL(uart) bfin_read16(((uart)->port.membase + OFFSET_GCTL))
-
-#define UART_PUT_CHAR(uart, v) bfin_write16(((uart)->port.membase + OFFSET_THR), v)
-#define UART_PUT_DLL(uart, v) bfin_write16(((uart)->port.membase + OFFSET_DLL), v)
-#define UART_PUT_IER(uart, v) bfin_write16(((uart)->port.membase + OFFSET_IER), v)
-#define UART_SET_IER(uart, v) UART_PUT_IER(uart, UART_GET_IER(uart) | (v))
-#define UART_CLEAR_IER(uart, v) UART_PUT_IER(uart, UART_GET_IER(uart) & ~(v))
-#define UART_PUT_DLH(uart, v) bfin_write16(((uart)->port.membase + OFFSET_DLH), v)
-#define UART_PUT_LCR(uart, v) bfin_write16(((uart)->port.membase + OFFSET_LCR), v)
-#define UART_PUT_GCTL(uart, v) bfin_write16(((uart)->port.membase + OFFSET_GCTL), v)
-
-#define UART_SET_DLAB(uart) do { UART_PUT_LCR(uart, UART_GET_LCR(uart) | DLAB); SSYNC(); } while (0)
-#define UART_CLEAR_DLAB(uart) do { UART_PUT_LCR(uart, UART_GET_LCR(uart) & ~DLAB); SSYNC(); } while (0)
-
-#define UART_GET_CTS(x) gpio_get_value(x->cts_pin)
-#define UART_DISABLE_RTS(x) gpio_set_value(x->rts_pin, 1)
-#define UART_ENABLE_RTS(x) gpio_set_value(x->rts_pin, 0)
-#define UART_ENABLE_INTS(x, v) UART_PUT_IER(x, v)
-#define UART_DISABLE_INTS(x) UART_PUT_IER(x, 0)
-
 #if defined(CONFIG_BFIN_UART0_CTSRTS) || defined(CONFIG_BFIN_UART1_CTSRTS)
 # define CONFIG_SERIAL_BFIN_CTSRTS
 
@@ -54,50 +27,6 @@
 # endif
 #endif
 
-#define BFIN_UART_TX_FIFO_SIZE 2
-
-/*
- * The pin configuration is different from schematic
- */
-struct bfin_serial_port {
-	struct uart_port port;
-	unsigned int old_status;
-	int status_irq;
-	unsigned int lsr;
-#ifdef CONFIG_SERIAL_BFIN_DMA
-	int tx_done;
-	int tx_count;
-	struct circ_buf rx_dma_buf;
-	struct timer_list rx_dma_timer;
-	int rx_dma_nrows;
-	unsigned int tx_dma_channel;
-	unsigned int rx_dma_channel;
-	struct work_struct tx_dma_workqueue;
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
-	struct timer_list cts_timer;
-	int cts_pin;
-	int rts_pin;
-#endif
-};
-
-/* The hardware clears the LSR bits upon read, so we need to cache
- * some of the more fun bits in software so they don't get lost
- * when checking the LSR in other code paths (TX).
- */
-static inline unsigned int UART_GET_LSR(struct bfin_serial_port *uart)
-{
-	unsigned int lsr = bfin_read16(uart->port.membase + OFFSET_LSR);
-	uart->lsr |= (lsr & (BI|FE|PE|OE));
-	return lsr | uart->lsr;
-}
-
-static inline void UART_CLEAR_LSR(struct bfin_serial_port *uart)
-{
-	uart->lsr = 0;
-	bfin_write16(uart->port.membase + OFFSET_LSR, -1);
-}
-
 struct bfin_serial_res {
 	unsigned long uart_base_addr;
 	int uart_irq;
@@ -146,3 +75,5 @@ struct bfin_serial_res bfin_serial_resource[] = {
 };
 
 #define DRIVER_NAME "bfin-uart"
+
+#include <asm/bfin_serial.h>
diff --git a/arch/blackfin/mach-bf527/include/mach/blackfin.h b/arch/blackfin/mach-bf527/include/mach/blackfin.h
index f714c5de3073..e1d279274487 100644
--- a/arch/blackfin/mach-bf527/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf527/include/mach/blackfin.h
@@ -1,49 +1,37 @@
 /*
- * Copyright 2007-2009 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
  *
- * Licensed under the GPL-2 or later
+ * Licensed under the GPL-2 or later.
  */
 
 #ifndef _MACH_BLACKFIN_H_
 #define _MACH_BLACKFIN_H_
 
 #include "bf527.h"
-#include "defBF522.h"
 #include "anomaly.h"
 
-#if defined(CONFIG_BF527) || defined(CONFIG_BF526)
-#include "defBF527.h"
+#include <asm/def_LPBlackfin.h>
+#if defined(CONFIG_BF523) || defined(CONFIG_BF522)
+# include "defBF522.h"
 #endif
-
 #if defined(CONFIG_BF525) || defined(CONFIG_BF524)
-#include "defBF525.h"
+# include "defBF525.h"
 #endif
-
-#if !defined(__ASSEMBLY__)
-#include "cdefBF522.h"
-
 #if defined(CONFIG_BF527) || defined(CONFIG_BF526)
-#include "cdefBF527.h"
+# include "defBF527.h"
 #endif
 
-#if defined(CONFIG_BF525) || defined(CONFIG_BF524)
-#include "cdefBF525.h"
-#endif
+#if !defined(__ASSEMBLY__)
+# include <asm/cdef_LPBlackfin.h>
+# if defined(CONFIG_BF523) || defined(CONFIG_BF522)
+#  include "cdefBF522.h"
+# endif
+# if defined(CONFIG_BF525) || defined(CONFIG_BF524)
+#  include "cdefBF525.h"
+# endif
+# if defined(CONFIG_BF527) || defined(CONFIG_BF526)
+#  include "cdefBF527.h"
+# endif
 #endif
 
-#define BFIN_UART_NR_PORTS 2
-
-#define OFFSET_THR 0x00 /* Transmit Holding register */
-#define OFFSET_RBR 0x00 /* Receive Buffer register */
-#define OFFSET_DLL 0x00 /* Divisor Latch (Low-Byte) */
-#define OFFSET_IER 0x04 /* Interrupt Enable Register */
-#define OFFSET_DLH 0x04 /* Divisor Latch (High-Byte) */
-#define OFFSET_IIR 0x08 /* Interrupt Identification Register */
-#define OFFSET_LCR 0x0C /* Line Control Register */
-#define OFFSET_MCR 0x10 /* Modem Control Register */
-#define OFFSET_LSR 0x14 /* Line Status Register */
-#define OFFSET_MSR 0x18 /* Modem Status Register */
-#define OFFSET_SCR 0x1C /* SCR Scratch Register */
-#define OFFSET_GCTL 0x24 /* Global Control Register */
-
 #endif
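
Before the next file: the rewritten cdefBF522.h below adds banked accessors such as bfin_read_SIC_IMASK(x), which resolve to SIC_IMASK0 + (x << 6), i.e. successive banks sit 0x40 bytes apart (consistent with the "SIC Additions to ADSP-BF52x (0xFFC0014C - 0xFFC00162)" comment in the new text). A small self-checking sketch of that addressing follows; both base addresses are assumptions for the example.

/*
 * Sketch of the banked-MMR addressing used by bfin_read_SIC_IMASK(x)
 * and friends below: bank x lives (x << 6) == x * 0x40 bytes above
 * bank 0.  Both addresses here are assumptions for the example.
 */
#include <assert.h>
#include <stdio.h>

#define SIC_IMASK0 0xFFC0010CUL	/* assumed bank-0 address */
#define SIC_IMASK1 0xFFC0014CUL	/* per the SIC Additions comment below */

static unsigned long sic_imask_addr(unsigned int bank)
{
	return SIC_IMASK0 + ((unsigned long)bank << 6);
}

int main(void)
{
	assert(sic_imask_addr(0) == SIC_IMASK0);
	assert(sic_imask_addr(1) == SIC_IMASK1);	/* banks are 0x40 apart */
	printf("bank stride: %#lx bytes\n", sic_imask_addr(1) - sic_imask_addr(0));
	return 0;
}
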
diff --git a/arch/blackfin/mach-bf527/include/mach/cdefBF522.h b/arch/blackfin/mach-bf527/include/mach/cdefBF522.h
index 1079af8c7aef..618dfcdfa91a 100644
--- a/arch/blackfin/mach-bf527/include/mach/cdefBF522.h
+++ b/arch/blackfin/mach-bf527/include/mach/cdefBF522.h
@@ -1,21 +1,1095 @@
1/* 1/*
2 * Copyright 2007-2008 Analog Devices Inc. 2 * Copyright 2007-2010 Analog Devices Inc.
3 * 3 *
4 * Licensed under the GPL-2 or later 4 * Licensed under the GPL-2 or later.
5 */ 5 */
6 6
7#ifndef _CDEF_BF522_H 7#ifndef _CDEF_BF522_H
8#define _CDEF_BF522_H 8#define _CDEF_BF522_H
9 9
10/* include all Core registers and bit definitions */ 10/* Clock and System Control (0xFFC00000 - 0xFFC000FF) */
11#include "defBF522.h" 11#define bfin_read_PLL_CTL() bfin_read16(PLL_CTL)
12#define bfin_read_PLL_DIV() bfin_read16(PLL_DIV)
13#define bfin_write_PLL_DIV(val) bfin_write16(PLL_DIV, val)
14#define bfin_read_VR_CTL() bfin_read16(VR_CTL)
15#define bfin_read_PLL_STAT() bfin_read16(PLL_STAT)
16#define bfin_write_PLL_STAT(val) bfin_write16(PLL_STAT, val)
17#define bfin_read_PLL_LOCKCNT() bfin_read16(PLL_LOCKCNT)
18#define bfin_write_PLL_LOCKCNT(val) bfin_write16(PLL_LOCKCNT, val)
19#define bfin_read_CHIPID() bfin_read32(CHIPID)
20#define bfin_write_CHIPID(val) bfin_write32(CHIPID, val)
12 21
13/* include core specific register pointer definitions */
14#include <asm/cdef_LPBlackfin.h>
15 22
16/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF522 */ 23/* System Interrupt Controller (0xFFC00100 - 0xFFC001FF) */
24#define bfin_read_SWRST() bfin_read16(SWRST)
25#define bfin_write_SWRST(val) bfin_write16(SWRST, val)
26#define bfin_read_SYSCR() bfin_read16(SYSCR)
27#define bfin_write_SYSCR(val) bfin_write16(SYSCR, val)
17 28
18/* include cdefBF52x_base.h for the set of #defines that are common to all ADSP-BF52x processors */ 29#define bfin_read_SIC_RVECT() bfin_read32(SIC_RVECT)
19#include "cdefBF52x_base.h" 30#define bfin_write_SIC_RVECT(val) bfin_write32(SIC_RVECT, val)
31#define bfin_read_SIC_IMASK0() bfin_read32(SIC_IMASK0)
32#define bfin_write_SIC_IMASK0(val) bfin_write32(SIC_IMASK0, val)
33#define bfin_read_SIC_IMASK(x) bfin_read32(SIC_IMASK0 + (x << 6))
34#define bfin_write_SIC_IMASK(x, val) bfin_write32((SIC_IMASK0 + (x << 6)), val)
35
36#define bfin_read_SIC_IAR0() bfin_read32(SIC_IAR0)
37#define bfin_write_SIC_IAR0(val) bfin_write32(SIC_IAR0, val)
38#define bfin_read_SIC_IAR1() bfin_read32(SIC_IAR1)
39#define bfin_write_SIC_IAR1(val) bfin_write32(SIC_IAR1, val)
40#define bfin_read_SIC_IAR2() bfin_read32(SIC_IAR2)
41#define bfin_write_SIC_IAR2(val) bfin_write32(SIC_IAR2, val)
42#define bfin_read_SIC_IAR3() bfin_read32(SIC_IAR3)
43#define bfin_write_SIC_IAR3(val) bfin_write32(SIC_IAR3, val)
44
45#define bfin_read_SIC_ISR0() bfin_read32(SIC_ISR0)
46#define bfin_write_SIC_ISR0(val) bfin_write32(SIC_ISR0, val)
47#define bfin_read_SIC_ISR(x) bfin_read32(SIC_ISR0 + (x << 6))
48#define bfin_write_SIC_ISR(x, val) bfin_write32((SIC_ISR0 + (x << 6)), val)
49
50#define bfin_read_SIC_IWR0() bfin_read32(SIC_IWR0)
51#define bfin_write_SIC_IWR0(val) bfin_write32(SIC_IWR0, val)
52#define bfin_read_SIC_IWR(x) bfin_read32(SIC_IWR0 + (x << 6))
53#define bfin_write_SIC_IWR(x, val) bfin_write32((SIC_IWR0 + (x << 6)), val)
54
55/* SIC Additions to ADSP-BF52x (0xFFC0014C - 0xFFC00162) */
56
57#define bfin_read_SIC_IMASK1() bfin_read32(SIC_IMASK1)
58#define bfin_write_SIC_IMASK1(val) bfin_write32(SIC_IMASK1, val)
59#define bfin_read_SIC_IAR4() bfin_read32(SIC_IAR4)
60#define bfin_write_SIC_IAR4(val) bfin_write32(SIC_IAR4, val)
61#define bfin_read_SIC_IAR5() bfin_read32(SIC_IAR5)
62#define bfin_write_SIC_IAR5(val) bfin_write32(SIC_IAR5, val)
63#define bfin_read_SIC_IAR6() bfin_read32(SIC_IAR6)
64#define bfin_write_SIC_IAR6(val) bfin_write32(SIC_IAR6, val)
65#define bfin_read_SIC_IAR7() bfin_read32(SIC_IAR7)
66#define bfin_write_SIC_IAR7(val) bfin_write32(SIC_IAR7, val)
67#define bfin_read_SIC_ISR1() bfin_read32(SIC_ISR1)
68#define bfin_write_SIC_ISR1(val) bfin_write32(SIC_ISR1, val)
69#define bfin_read_SIC_IWR1() bfin_read32(SIC_IWR1)
70#define bfin_write_SIC_IWR1(val) bfin_write32(SIC_IWR1, val)
71
72/* Watchdog Timer (0xFFC00200 - 0xFFC002FF) */
73#define bfin_read_WDOG_CTL() bfin_read16(WDOG_CTL)
74#define bfin_write_WDOG_CTL(val) bfin_write16(WDOG_CTL, val)
75#define bfin_read_WDOG_CNT() bfin_read32(WDOG_CNT)
76#define bfin_write_WDOG_CNT(val) bfin_write32(WDOG_CNT, val)
77#define bfin_read_WDOG_STAT() bfin_read32(WDOG_STAT)
78#define bfin_write_WDOG_STAT(val) bfin_write32(WDOG_STAT, val)
79
80
81/* Real Time Clock (0xFFC00300 - 0xFFC003FF) */
82#define bfin_read_RTC_STAT() bfin_read32(RTC_STAT)
83#define bfin_write_RTC_STAT(val) bfin_write32(RTC_STAT, val)
84#define bfin_read_RTC_ICTL() bfin_read16(RTC_ICTL)
85#define bfin_write_RTC_ICTL(val) bfin_write16(RTC_ICTL, val)
86#define bfin_read_RTC_ISTAT() bfin_read16(RTC_ISTAT)
87#define bfin_write_RTC_ISTAT(val) bfin_write16(RTC_ISTAT, val)
88#define bfin_read_RTC_SWCNT() bfin_read16(RTC_SWCNT)
89#define bfin_write_RTC_SWCNT(val) bfin_write16(RTC_SWCNT, val)
90#define bfin_read_RTC_ALARM() bfin_read32(RTC_ALARM)
91#define bfin_write_RTC_ALARM(val) bfin_write32(RTC_ALARM, val)
92#define bfin_read_RTC_FAST() bfin_read16(RTC_FAST)
93#define bfin_write_RTC_FAST(val) bfin_write16(RTC_FAST, val)
94#define bfin_read_RTC_PREN() bfin_read16(RTC_PREN)
95#define bfin_write_RTC_PREN(val) bfin_write16(RTC_PREN, val)
96
97
98/* UART0 Controller (0xFFC00400 - 0xFFC004FF) */
99#define bfin_read_UART0_THR() bfin_read16(UART0_THR)
100#define bfin_write_UART0_THR(val) bfin_write16(UART0_THR, val)
101#define bfin_read_UART0_RBR() bfin_read16(UART0_RBR)
102#define bfin_write_UART0_RBR(val) bfin_write16(UART0_RBR, val)
103#define bfin_read_UART0_DLL() bfin_read16(UART0_DLL)
104#define bfin_write_UART0_DLL(val) bfin_write16(UART0_DLL, val)
105#define bfin_read_UART0_IER() bfin_read16(UART0_IER)
106#define bfin_write_UART0_IER(val) bfin_write16(UART0_IER, val)
107#define bfin_read_UART0_DLH() bfin_read16(UART0_DLH)
108#define bfin_write_UART0_DLH(val) bfin_write16(UART0_DLH, val)
109#define bfin_read_UART0_IIR() bfin_read16(UART0_IIR)
110#define bfin_write_UART0_IIR(val) bfin_write16(UART0_IIR, val)
111#define bfin_read_UART0_LCR() bfin_read16(UART0_LCR)
112#define bfin_write_UART0_LCR(val) bfin_write16(UART0_LCR, val)
113#define bfin_read_UART0_MCR() bfin_read16(UART0_MCR)
114#define bfin_write_UART0_MCR(val) bfin_write16(UART0_MCR, val)
115#define bfin_read_UART0_LSR() bfin_read16(UART0_LSR)
116#define bfin_write_UART0_LSR(val) bfin_write16(UART0_LSR, val)
117#define bfin_read_UART0_MSR() bfin_read16(UART0_MSR)
118#define bfin_write_UART0_MSR(val) bfin_write16(UART0_MSR, val)
119#define bfin_read_UART0_SCR() bfin_read16(UART0_SCR)
120#define bfin_write_UART0_SCR(val) bfin_write16(UART0_SCR, val)
121#define bfin_read_UART0_GCTL() bfin_read16(UART0_GCTL)
122#define bfin_write_UART0_GCTL(val) bfin_write16(UART0_GCTL, val)
123
124
125/* SPI Controller (0xFFC00500 - 0xFFC005FF) */
126#define bfin_read_SPI_CTL() bfin_read16(SPI_CTL)
127#define bfin_write_SPI_CTL(val) bfin_write16(SPI_CTL, val)
128#define bfin_read_SPI_FLG() bfin_read16(SPI_FLG)
129#define bfin_write_SPI_FLG(val) bfin_write16(SPI_FLG, val)
130#define bfin_read_SPI_STAT() bfin_read16(SPI_STAT)
131#define bfin_write_SPI_STAT(val) bfin_write16(SPI_STAT, val)
132#define bfin_read_SPI_TDBR() bfin_read16(SPI_TDBR)
133#define bfin_write_SPI_TDBR(val) bfin_write16(SPI_TDBR, val)
134#define bfin_read_SPI_RDBR() bfin_read16(SPI_RDBR)
135#define bfin_write_SPI_RDBR(val) bfin_write16(SPI_RDBR, val)
136#define bfin_read_SPI_BAUD() bfin_read16(SPI_BAUD)
137#define bfin_write_SPI_BAUD(val) bfin_write16(SPI_BAUD, val)
138#define bfin_read_SPI_SHADOW() bfin_read16(SPI_SHADOW)
139#define bfin_write_SPI_SHADOW(val) bfin_write16(SPI_SHADOW, val)
140
141
142/* TIMER0-7 Registers (0xFFC00600 - 0xFFC006FF) */
143#define bfin_read_TIMER0_CONFIG() bfin_read16(TIMER0_CONFIG)
144#define bfin_write_TIMER0_CONFIG(val) bfin_write16(TIMER0_CONFIG, val)
145#define bfin_read_TIMER0_COUNTER() bfin_read32(TIMER0_COUNTER)
146#define bfin_write_TIMER0_COUNTER(val) bfin_write32(TIMER0_COUNTER, val)
147#define bfin_read_TIMER0_PERIOD() bfin_read32(TIMER0_PERIOD)
148#define bfin_write_TIMER0_PERIOD(val) bfin_write32(TIMER0_PERIOD, val)
149#define bfin_read_TIMER0_WIDTH() bfin_read32(TIMER0_WIDTH)
150#define bfin_write_TIMER0_WIDTH(val) bfin_write32(TIMER0_WIDTH, val)
151
152#define bfin_read_TIMER1_CONFIG() bfin_read16(TIMER1_CONFIG)
153#define bfin_write_TIMER1_CONFIG(val) bfin_write16(TIMER1_CONFIG, val)
154#define bfin_read_TIMER1_COUNTER() bfin_read32(TIMER1_COUNTER)
155#define bfin_write_TIMER1_COUNTER(val) bfin_write32(TIMER1_COUNTER, val)
156#define bfin_read_TIMER1_PERIOD() bfin_read32(TIMER1_PERIOD)
157#define bfin_write_TIMER1_PERIOD(val) bfin_write32(TIMER1_PERIOD, val)
158#define bfin_read_TIMER1_WIDTH() bfin_read32(TIMER1_WIDTH)
159#define bfin_write_TIMER1_WIDTH(val) bfin_write32(TIMER1_WIDTH, val)
160
161#define bfin_read_TIMER2_CONFIG() bfin_read16(TIMER2_CONFIG)
162#define bfin_write_TIMER2_CONFIG(val) bfin_write16(TIMER2_CONFIG, val)
163#define bfin_read_TIMER2_COUNTER() bfin_read32(TIMER2_COUNTER)
164#define bfin_write_TIMER2_COUNTER(val) bfin_write32(TIMER2_COUNTER, val)
165#define bfin_read_TIMER2_PERIOD() bfin_read32(TIMER2_PERIOD)
166#define bfin_write_TIMER2_PERIOD(val) bfin_write32(TIMER2_PERIOD, val)
167#define bfin_read_TIMER2_WIDTH() bfin_read32(TIMER2_WIDTH)
168#define bfin_write_TIMER2_WIDTH(val) bfin_write32(TIMER2_WIDTH, val)
169
170#define bfin_read_TIMER3_CONFIG() bfin_read16(TIMER3_CONFIG)
171#define bfin_write_TIMER3_CONFIG(val) bfin_write16(TIMER3_CONFIG, val)
172#define bfin_read_TIMER3_COUNTER() bfin_read32(TIMER3_COUNTER)
173#define bfin_write_TIMER3_COUNTER(val) bfin_write32(TIMER3_COUNTER, val)
174#define bfin_read_TIMER3_PERIOD() bfin_read32(TIMER3_PERIOD)
175#define bfin_write_TIMER3_PERIOD(val) bfin_write32(TIMER3_PERIOD, val)
176#define bfin_read_TIMER3_WIDTH() bfin_read32(TIMER3_WIDTH)
177#define bfin_write_TIMER3_WIDTH(val) bfin_write32(TIMER3_WIDTH, val)
178
179#define bfin_read_TIMER4_CONFIG() bfin_read16(TIMER4_CONFIG)
180#define bfin_write_TIMER4_CONFIG(val) bfin_write16(TIMER4_CONFIG, val)
181#define bfin_read_TIMER4_COUNTER() bfin_read32(TIMER4_COUNTER)
182#define bfin_write_TIMER4_COUNTER(val) bfin_write32(TIMER4_COUNTER, val)
183#define bfin_read_TIMER4_PERIOD() bfin_read32(TIMER4_PERIOD)
184#define bfin_write_TIMER4_PERIOD(val) bfin_write32(TIMER4_PERIOD, val)
185#define bfin_read_TIMER4_WIDTH() bfin_read32(TIMER4_WIDTH)
186#define bfin_write_TIMER4_WIDTH(val) bfin_write32(TIMER4_WIDTH, val)
187
188#define bfin_read_TIMER5_CONFIG() bfin_read16(TIMER5_CONFIG)
189#define bfin_write_TIMER5_CONFIG(val) bfin_write16(TIMER5_CONFIG, val)
190#define bfin_read_TIMER5_COUNTER() bfin_read32(TIMER5_COUNTER)
191#define bfin_write_TIMER5_COUNTER(val) bfin_write32(TIMER5_COUNTER, val)
192#define bfin_read_TIMER5_PERIOD() bfin_read32(TIMER5_PERIOD)
193#define bfin_write_TIMER5_PERIOD(val) bfin_write32(TIMER5_PERIOD, val)
194#define bfin_read_TIMER5_WIDTH() bfin_read32(TIMER5_WIDTH)
195#define bfin_write_TIMER5_WIDTH(val) bfin_write32(TIMER5_WIDTH, val)
196
197#define bfin_read_TIMER6_CONFIG() bfin_read16(TIMER6_CONFIG)
198#define bfin_write_TIMER6_CONFIG(val) bfin_write16(TIMER6_CONFIG, val)
199#define bfin_read_TIMER6_COUNTER() bfin_read32(TIMER6_COUNTER)
200#define bfin_write_TIMER6_COUNTER(val) bfin_write32(TIMER6_COUNTER, val)
201#define bfin_read_TIMER6_PERIOD() bfin_read32(TIMER6_PERIOD)
202#define bfin_write_TIMER6_PERIOD(val) bfin_write32(TIMER6_PERIOD, val)
203#define bfin_read_TIMER6_WIDTH() bfin_read32(TIMER6_WIDTH)
204#define bfin_write_TIMER6_WIDTH(val) bfin_write32(TIMER6_WIDTH, val)
205
206#define bfin_read_TIMER7_CONFIG() bfin_read16(TIMER7_CONFIG)
207#define bfin_write_TIMER7_CONFIG(val) bfin_write16(TIMER7_CONFIG, val)
208#define bfin_read_TIMER7_COUNTER() bfin_read32(TIMER7_COUNTER)
209#define bfin_write_TIMER7_COUNTER(val) bfin_write32(TIMER7_COUNTER, val)
210#define bfin_read_TIMER7_PERIOD() bfin_read32(TIMER7_PERIOD)
211#define bfin_write_TIMER7_PERIOD(val) bfin_write32(TIMER7_PERIOD, val)
212#define bfin_read_TIMER7_WIDTH() bfin_read32(TIMER7_WIDTH)
213#define bfin_write_TIMER7_WIDTH(val) bfin_write32(TIMER7_WIDTH, val)
214
215#define bfin_read_TIMER_ENABLE() bfin_read16(TIMER_ENABLE)
216#define bfin_write_TIMER_ENABLE(val) bfin_write16(TIMER_ENABLE, val)
217#define bfin_read_TIMER_DISABLE() bfin_read16(TIMER_DISABLE)
218#define bfin_write_TIMER_DISABLE(val) bfin_write16(TIMER_DISABLE, val)
219#define bfin_read_TIMER_STATUS() bfin_read32(TIMER_STATUS)
220#define bfin_write_TIMER_STATUS(val) bfin_write32(TIMER_STATUS, val)
221
222
223/* General Purpose I/O Port F (0xFFC00700 - 0xFFC007FF) */
224#define bfin_read_PORTFIO() bfin_read16(PORTFIO)
225#define bfin_write_PORTFIO(val) bfin_write16(PORTFIO, val)
226#define bfin_read_PORTFIO_CLEAR() bfin_read16(PORTFIO_CLEAR)
227#define bfin_write_PORTFIO_CLEAR(val) bfin_write16(PORTFIO_CLEAR, val)
228#define bfin_read_PORTFIO_SET() bfin_read16(PORTFIO_SET)
229#define bfin_write_PORTFIO_SET(val) bfin_write16(PORTFIO_SET, val)
230#define bfin_read_PORTFIO_TOGGLE() bfin_read16(PORTFIO_TOGGLE)
231#define bfin_write_PORTFIO_TOGGLE(val) bfin_write16(PORTFIO_TOGGLE, val)
232#define bfin_read_PORTFIO_MASKA() bfin_read16(PORTFIO_MASKA)
233#define bfin_write_PORTFIO_MASKA(val) bfin_write16(PORTFIO_MASKA, val)
234#define bfin_read_PORTFIO_MASKA_CLEAR() bfin_read16(PORTFIO_MASKA_CLEAR)
235#define bfin_write_PORTFIO_MASKA_CLEAR(val) bfin_write16(PORTFIO_MASKA_CLEAR, val)
236#define bfin_read_PORTFIO_MASKA_SET() bfin_read16(PORTFIO_MASKA_SET)
237#define bfin_write_PORTFIO_MASKA_SET(val) bfin_write16(PORTFIO_MASKA_SET, val)
238#define bfin_read_PORTFIO_MASKA_TOGGLE() bfin_read16(PORTFIO_MASKA_TOGGLE)
239#define bfin_write_PORTFIO_MASKA_TOGGLE(val) bfin_write16(PORTFIO_MASKA_TOGGLE, val)
240#define bfin_read_PORTFIO_MASKB() bfin_read16(PORTFIO_MASKB)
241#define bfin_write_PORTFIO_MASKB(val) bfin_write16(PORTFIO_MASKB, val)
242#define bfin_read_PORTFIO_MASKB_CLEAR() bfin_read16(PORTFIO_MASKB_CLEAR)
243#define bfin_write_PORTFIO_MASKB_CLEAR(val) bfin_write16(PORTFIO_MASKB_CLEAR, val)
244#define bfin_read_PORTFIO_MASKB_SET() bfin_read16(PORTFIO_MASKB_SET)
245#define bfin_write_PORTFIO_MASKB_SET(val) bfin_write16(PORTFIO_MASKB_SET, val)
246#define bfin_read_PORTFIO_MASKB_TOGGLE() bfin_read16(PORTFIO_MASKB_TOGGLE)
247#define bfin_write_PORTFIO_MASKB_TOGGLE(val) bfin_write16(PORTFIO_MASKB_TOGGLE, val)
248#define bfin_read_PORTFIO_DIR() bfin_read16(PORTFIO_DIR)
249#define bfin_write_PORTFIO_DIR(val) bfin_write16(PORTFIO_DIR, val)
250#define bfin_read_PORTFIO_POLAR() bfin_read16(PORTFIO_POLAR)
251#define bfin_write_PORTFIO_POLAR(val) bfin_write16(PORTFIO_POLAR, val)
252#define bfin_read_PORTFIO_EDGE() bfin_read16(PORTFIO_EDGE)
253#define bfin_write_PORTFIO_EDGE(val) bfin_write16(PORTFIO_EDGE, val)
254#define bfin_read_PORTFIO_BOTH() bfin_read16(PORTFIO_BOTH)
255#define bfin_write_PORTFIO_BOTH(val) bfin_write16(PORTFIO_BOTH, val)
256#define bfin_read_PORTFIO_INEN() bfin_read16(PORTFIO_INEN)
257#define bfin_write_PORTFIO_INEN(val) bfin_write16(PORTFIO_INEN, val)
258
259
260/* SPORT0 Controller (0xFFC00800 - 0xFFC008FF) */
261#define bfin_read_SPORT0_TCR1() bfin_read16(SPORT0_TCR1)
262#define bfin_write_SPORT0_TCR1(val) bfin_write16(SPORT0_TCR1, val)
263#define bfin_read_SPORT0_TCR2() bfin_read16(SPORT0_TCR2)
264#define bfin_write_SPORT0_TCR2(val) bfin_write16(SPORT0_TCR2, val)
265#define bfin_read_SPORT0_TCLKDIV() bfin_read16(SPORT0_TCLKDIV)
266#define bfin_write_SPORT0_TCLKDIV(val) bfin_write16(SPORT0_TCLKDIV, val)
267#define bfin_read_SPORT0_TFSDIV() bfin_read16(SPORT0_TFSDIV)
268#define bfin_write_SPORT0_TFSDIV(val) bfin_write16(SPORT0_TFSDIV, val)
269#define bfin_read_SPORT0_TX() bfin_read32(SPORT0_TX)
270#define bfin_write_SPORT0_TX(val) bfin_write32(SPORT0_TX, val)
271#define bfin_read_SPORT0_RX() bfin_read32(SPORT0_RX)
272#define bfin_write_SPORT0_RX(val) bfin_write32(SPORT0_RX, val)
273#define bfin_read_SPORT0_TX32() bfin_read32(SPORT0_TX)
274#define bfin_write_SPORT0_TX32(val) bfin_write32(SPORT0_TX, val)
275#define bfin_read_SPORT0_RX32() bfin_read32(SPORT0_RX)
276#define bfin_write_SPORT0_RX32(val) bfin_write32(SPORT0_RX, val)
277#define bfin_read_SPORT0_TX16() bfin_read16(SPORT0_TX)
278#define bfin_write_SPORT0_TX16(val) bfin_write16(SPORT0_TX, val)
279#define bfin_read_SPORT0_RX16() bfin_read16(SPORT0_RX)
280#define bfin_write_SPORT0_RX16(val) bfin_write16(SPORT0_RX, val)
281#define bfin_read_SPORT0_RCR1() bfin_read16(SPORT0_RCR1)
282#define bfin_write_SPORT0_RCR1(val) bfin_write16(SPORT0_RCR1, val)
283#define bfin_read_SPORT0_RCR2() bfin_read16(SPORT0_RCR2)
284#define bfin_write_SPORT0_RCR2(val) bfin_write16(SPORT0_RCR2, val)
285#define bfin_read_SPORT0_RCLKDIV() bfin_read16(SPORT0_RCLKDIV)
286#define bfin_write_SPORT0_RCLKDIV(val) bfin_write16(SPORT0_RCLKDIV, val)
287#define bfin_read_SPORT0_RFSDIV() bfin_read16(SPORT0_RFSDIV)
288#define bfin_write_SPORT0_RFSDIV(val) bfin_write16(SPORT0_RFSDIV, val)
289#define bfin_read_SPORT0_STAT() bfin_read16(SPORT0_STAT)
290#define bfin_write_SPORT0_STAT(val) bfin_write16(SPORT0_STAT, val)
291#define bfin_read_SPORT0_CHNL() bfin_read16(SPORT0_CHNL)
292#define bfin_write_SPORT0_CHNL(val) bfin_write16(SPORT0_CHNL, val)
293#define bfin_read_SPORT0_MCMC1() bfin_read16(SPORT0_MCMC1)
294#define bfin_write_SPORT0_MCMC1(val) bfin_write16(SPORT0_MCMC1, val)
295#define bfin_read_SPORT0_MCMC2() bfin_read16(SPORT0_MCMC2)
296#define bfin_write_SPORT0_MCMC2(val) bfin_write16(SPORT0_MCMC2, val)
297#define bfin_read_SPORT0_MTCS0() bfin_read32(SPORT0_MTCS0)
298#define bfin_write_SPORT0_MTCS0(val) bfin_write32(SPORT0_MTCS0, val)
299#define bfin_read_SPORT0_MTCS1() bfin_read32(SPORT0_MTCS1)
300#define bfin_write_SPORT0_MTCS1(val) bfin_write32(SPORT0_MTCS1, val)
301#define bfin_read_SPORT0_MTCS2() bfin_read32(SPORT0_MTCS2)
302#define bfin_write_SPORT0_MTCS2(val) bfin_write32(SPORT0_MTCS2, val)
303#define bfin_read_SPORT0_MTCS3() bfin_read32(SPORT0_MTCS3)
304#define bfin_write_SPORT0_MTCS3(val) bfin_write32(SPORT0_MTCS3, val)
305#define bfin_read_SPORT0_MRCS0() bfin_read32(SPORT0_MRCS0)
306#define bfin_write_SPORT0_MRCS0(val) bfin_write32(SPORT0_MRCS0, val)
307#define bfin_read_SPORT0_MRCS1() bfin_read32(SPORT0_MRCS1)
308#define bfin_write_SPORT0_MRCS1(val) bfin_write32(SPORT0_MRCS1, val)
309#define bfin_read_SPORT0_MRCS2() bfin_read32(SPORT0_MRCS2)
310#define bfin_write_SPORT0_MRCS2(val) bfin_write32(SPORT0_MRCS2, val)
311#define bfin_read_SPORT0_MRCS3() bfin_read32(SPORT0_MRCS3)
312#define bfin_write_SPORT0_MRCS3(val) bfin_write32(SPORT0_MRCS3, val)
313
314
315/* SPORT1 Controller (0xFFC00900 - 0xFFC009FF) */
316#define bfin_read_SPORT1_TCR1() bfin_read16(SPORT1_TCR1)
317#define bfin_write_SPORT1_TCR1(val) bfin_write16(SPORT1_TCR1, val)
318#define bfin_read_SPORT1_TCR2() bfin_read16(SPORT1_TCR2)
319#define bfin_write_SPORT1_TCR2(val) bfin_write16(SPORT1_TCR2, val)
320#define bfin_read_SPORT1_TCLKDIV() bfin_read16(SPORT1_TCLKDIV)
321#define bfin_write_SPORT1_TCLKDIV(val) bfin_write16(SPORT1_TCLKDIV, val)
322#define bfin_read_SPORT1_TFSDIV() bfin_read16(SPORT1_TFSDIV)
323#define bfin_write_SPORT1_TFSDIV(val) bfin_write16(SPORT1_TFSDIV, val)
324#define bfin_read_SPORT1_TX() bfin_read32(SPORT1_TX)
325#define bfin_write_SPORT1_TX(val) bfin_write32(SPORT1_TX, val)
326#define bfin_read_SPORT1_RX() bfin_read32(SPORT1_RX)
327#define bfin_write_SPORT1_RX(val) bfin_write32(SPORT1_RX, val)
328#define bfin_read_SPORT1_TX32() bfin_read32(SPORT1_TX)
329#define bfin_write_SPORT1_TX32(val) bfin_write32(SPORT1_TX, val)
330#define bfin_read_SPORT1_RX32() bfin_read32(SPORT1_RX)
331#define bfin_write_SPORT1_RX32(val) bfin_write32(SPORT1_RX, val)
332#define bfin_read_SPORT1_TX16() bfin_read16(SPORT1_TX)
333#define bfin_write_SPORT1_TX16(val) bfin_write16(SPORT1_TX, val)
334#define bfin_read_SPORT1_RX16() bfin_read16(SPORT1_RX)
335#define bfin_write_SPORT1_RX16(val) bfin_write16(SPORT1_RX, val)
336#define bfin_read_SPORT1_RCR1() bfin_read16(SPORT1_RCR1)
337#define bfin_write_SPORT1_RCR1(val) bfin_write16(SPORT1_RCR1, val)
338#define bfin_read_SPORT1_RCR2() bfin_read16(SPORT1_RCR2)
339#define bfin_write_SPORT1_RCR2(val) bfin_write16(SPORT1_RCR2, val)
340#define bfin_read_SPORT1_RCLKDIV() bfin_read16(SPORT1_RCLKDIV)
341#define bfin_write_SPORT1_RCLKDIV(val) bfin_write16(SPORT1_RCLKDIV, val)
342#define bfin_read_SPORT1_RFSDIV() bfin_read16(SPORT1_RFSDIV)
343#define bfin_write_SPORT1_RFSDIV(val) bfin_write16(SPORT1_RFSDIV, val)
344#define bfin_read_SPORT1_STAT() bfin_read16(SPORT1_STAT)
345#define bfin_write_SPORT1_STAT(val) bfin_write16(SPORT1_STAT, val)
346#define bfin_read_SPORT1_CHNL() bfin_read16(SPORT1_CHNL)
347#define bfin_write_SPORT1_CHNL(val) bfin_write16(SPORT1_CHNL, val)
348#define bfin_read_SPORT1_MCMC1() bfin_read16(SPORT1_MCMC1)
349#define bfin_write_SPORT1_MCMC1(val) bfin_write16(SPORT1_MCMC1, val)
350#define bfin_read_SPORT1_MCMC2() bfin_read16(SPORT1_MCMC2)
351#define bfin_write_SPORT1_MCMC2(val) bfin_write16(SPORT1_MCMC2, val)
352#define bfin_read_SPORT1_MTCS0() bfin_read32(SPORT1_MTCS0)
353#define bfin_write_SPORT1_MTCS0(val) bfin_write32(SPORT1_MTCS0, val)
354#define bfin_read_SPORT1_MTCS1() bfin_read32(SPORT1_MTCS1)
355#define bfin_write_SPORT1_MTCS1(val) bfin_write32(SPORT1_MTCS1, val)
356#define bfin_read_SPORT1_MTCS2() bfin_read32(SPORT1_MTCS2)
357#define bfin_write_SPORT1_MTCS2(val) bfin_write32(SPORT1_MTCS2, val)
358#define bfin_read_SPORT1_MTCS3() bfin_read32(SPORT1_MTCS3)
359#define bfin_write_SPORT1_MTCS3(val) bfin_write32(SPORT1_MTCS3, val)
360#define bfin_read_SPORT1_MRCS0() bfin_read32(SPORT1_MRCS0)
361#define bfin_write_SPORT1_MRCS0(val) bfin_write32(SPORT1_MRCS0, val)
362#define bfin_read_SPORT1_MRCS1() bfin_read32(SPORT1_MRCS1)
363#define bfin_write_SPORT1_MRCS1(val) bfin_write32(SPORT1_MRCS1, val)
364#define bfin_read_SPORT1_MRCS2() bfin_read32(SPORT1_MRCS2)
365#define bfin_write_SPORT1_MRCS2(val) bfin_write32(SPORT1_MRCS2, val)
366#define bfin_read_SPORT1_MRCS3() bfin_read32(SPORT1_MRCS3)
367#define bfin_write_SPORT1_MRCS3(val) bfin_write32(SPORT1_MRCS3, val)
368
369
370/* External Bus Interface Unit (0xFFC00A00 - 0xFFC00AFF) */
371#define bfin_read_EBIU_AMGCTL() bfin_read16(EBIU_AMGCTL)
372#define bfin_write_EBIU_AMGCTL(val) bfin_write16(EBIU_AMGCTL, val)
373#define bfin_read_EBIU_AMBCTL0() bfin_read32(EBIU_AMBCTL0)
374#define bfin_write_EBIU_AMBCTL0(val) bfin_write32(EBIU_AMBCTL0, val)
375#define bfin_read_EBIU_AMBCTL1() bfin_read32(EBIU_AMBCTL1)
376#define bfin_write_EBIU_AMBCTL1(val) bfin_write32(EBIU_AMBCTL1, val)
377#define bfin_read_EBIU_SDGCTL() bfin_read32(EBIU_SDGCTL)
378#define bfin_write_EBIU_SDGCTL(val) bfin_write32(EBIU_SDGCTL, val)
379#define bfin_read_EBIU_SDBCTL() bfin_read16(EBIU_SDBCTL)
380#define bfin_write_EBIU_SDBCTL(val) bfin_write16(EBIU_SDBCTL, val)
381#define bfin_read_EBIU_SDRRC() bfin_read16(EBIU_SDRRC)
382#define bfin_write_EBIU_SDRRC(val) bfin_write16(EBIU_SDRRC, val)
383#define bfin_read_EBIU_SDSTAT() bfin_read16(EBIU_SDSTAT)
384#define bfin_write_EBIU_SDSTAT(val) bfin_write16(EBIU_SDSTAT, val)
385
386
387/* DMA Traffic Control Registers */
388#define bfin_read_DMAC_TC_PER() bfin_read16(DMAC_TC_PER)
389#define bfin_write_DMAC_TC_PER(val) bfin_write16(DMAC_TC_PER, val)
390#define bfin_read_DMAC_TC_CNT() bfin_read16(DMAC_TC_CNT)
391#define bfin_write_DMAC_TC_CNT(val) bfin_write16(DMAC_TC_CNT, val)
392
393/* DMA Controller */
394#define bfin_read_DMA0_CONFIG() bfin_read16(DMA0_CONFIG)
395#define bfin_write_DMA0_CONFIG(val) bfin_write16(DMA0_CONFIG, val)
396#define bfin_read_DMA0_NEXT_DESC_PTR() bfin_read32(DMA0_NEXT_DESC_PTR)
397#define bfin_write_DMA0_NEXT_DESC_PTR(val) bfin_write32(DMA0_NEXT_DESC_PTR, val)
398#define bfin_read_DMA0_START_ADDR() bfin_read32(DMA0_START_ADDR)
399#define bfin_write_DMA0_START_ADDR(val) bfin_write32(DMA0_START_ADDR, val)
400#define bfin_read_DMA0_X_COUNT() bfin_read16(DMA0_X_COUNT)
401#define bfin_write_DMA0_X_COUNT(val) bfin_write16(DMA0_X_COUNT, val)
402#define bfin_read_DMA0_Y_COUNT() bfin_read16(DMA0_Y_COUNT)
403#define bfin_write_DMA0_Y_COUNT(val) bfin_write16(DMA0_Y_COUNT, val)
404#define bfin_read_DMA0_X_MODIFY() bfin_read16(DMA0_X_MODIFY)
405#define bfin_write_DMA0_X_MODIFY(val) bfin_write16(DMA0_X_MODIFY, val)
406#define bfin_read_DMA0_Y_MODIFY() bfin_read16(DMA0_Y_MODIFY)
407#define bfin_write_DMA0_Y_MODIFY(val) bfin_write16(DMA0_Y_MODIFY, val)
408#define bfin_read_DMA0_CURR_DESC_PTR() bfin_read32(DMA0_CURR_DESC_PTR)
409#define bfin_write_DMA0_CURR_DESC_PTR(val) bfin_write32(DMA0_CURR_DESC_PTR, val)
410#define bfin_read_DMA0_CURR_ADDR() bfin_read32(DMA0_CURR_ADDR)
411#define bfin_write_DMA0_CURR_ADDR(val) bfin_write32(DMA0_CURR_ADDR, val)
412#define bfin_read_DMA0_CURR_X_COUNT() bfin_read16(DMA0_CURR_X_COUNT)
413#define bfin_write_DMA0_CURR_X_COUNT(val) bfin_write16(DMA0_CURR_X_COUNT, val)
414#define bfin_read_DMA0_CURR_Y_COUNT() bfin_read16(DMA0_CURR_Y_COUNT)
415#define bfin_write_DMA0_CURR_Y_COUNT(val) bfin_write16(DMA0_CURR_Y_COUNT, val)
416#define bfin_read_DMA0_IRQ_STATUS() bfin_read16(DMA0_IRQ_STATUS)
417#define bfin_write_DMA0_IRQ_STATUS(val) bfin_write16(DMA0_IRQ_STATUS, val)
418#define bfin_read_DMA0_PERIPHERAL_MAP() bfin_read16(DMA0_PERIPHERAL_MAP)
419#define bfin_write_DMA0_PERIPHERAL_MAP(val) bfin_write16(DMA0_PERIPHERAL_MAP, val)
420
421#define bfin_read_DMA1_CONFIG() bfin_read16(DMA1_CONFIG)
422#define bfin_write_DMA1_CONFIG(val) bfin_write16(DMA1_CONFIG, val)
423#define bfin_read_DMA1_NEXT_DESC_PTR() bfin_read32(DMA1_NEXT_DESC_PTR)
424#define bfin_write_DMA1_NEXT_DESC_PTR(val) bfin_write32(DMA1_NEXT_DESC_PTR, val)
425#define bfin_read_DMA1_START_ADDR() bfin_read32(DMA1_START_ADDR)
426#define bfin_write_DMA1_START_ADDR(val) bfin_write32(DMA1_START_ADDR, val)
427#define bfin_read_DMA1_X_COUNT() bfin_read16(DMA1_X_COUNT)
428#define bfin_write_DMA1_X_COUNT(val) bfin_write16(DMA1_X_COUNT, val)
429#define bfin_read_DMA1_Y_COUNT() bfin_read16(DMA1_Y_COUNT)
430#define bfin_write_DMA1_Y_COUNT(val) bfin_write16(DMA1_Y_COUNT, val)
431#define bfin_read_DMA1_X_MODIFY() bfin_read16(DMA1_X_MODIFY)
432#define bfin_write_DMA1_X_MODIFY(val) bfin_write16(DMA1_X_MODIFY, val)
433#define bfin_read_DMA1_Y_MODIFY() bfin_read16(DMA1_Y_MODIFY)
434#define bfin_write_DMA1_Y_MODIFY(val) bfin_write16(DMA1_Y_MODIFY, val)
435#define bfin_read_DMA1_CURR_DESC_PTR() bfin_read32(DMA1_CURR_DESC_PTR)
436#define bfin_write_DMA1_CURR_DESC_PTR(val) bfin_write32(DMA1_CURR_DESC_PTR, val)
437#define bfin_read_DMA1_CURR_ADDR() bfin_read32(DMA1_CURR_ADDR)
438#define bfin_write_DMA1_CURR_ADDR(val) bfin_write32(DMA1_CURR_ADDR, val)
439#define bfin_read_DMA1_CURR_X_COUNT() bfin_read16(DMA1_CURR_X_COUNT)
440#define bfin_write_DMA1_CURR_X_COUNT(val) bfin_write16(DMA1_CURR_X_COUNT, val)
441#define bfin_read_DMA1_CURR_Y_COUNT() bfin_read16(DMA1_CURR_Y_COUNT)
442#define bfin_write_DMA1_CURR_Y_COUNT(val) bfin_write16(DMA1_CURR_Y_COUNT, val)
443#define bfin_read_DMA1_IRQ_STATUS() bfin_read16(DMA1_IRQ_STATUS)
444#define bfin_write_DMA1_IRQ_STATUS(val) bfin_write16(DMA1_IRQ_STATUS, val)
445#define bfin_read_DMA1_PERIPHERAL_MAP() bfin_read16(DMA1_PERIPHERAL_MAP)
446#define bfin_write_DMA1_PERIPHERAL_MAP(val) bfin_write16(DMA1_PERIPHERAL_MAP, val)
447
448#define bfin_read_DMA2_CONFIG() bfin_read16(DMA2_CONFIG)
449#define bfin_write_DMA2_CONFIG(val) bfin_write16(DMA2_CONFIG, val)
450#define bfin_read_DMA2_NEXT_DESC_PTR() bfin_read32(DMA2_NEXT_DESC_PTR)
451#define bfin_write_DMA2_NEXT_DESC_PTR(val) bfin_write32(DMA2_NEXT_DESC_PTR, val)
452#define bfin_read_DMA2_START_ADDR() bfin_read32(DMA2_START_ADDR)
453#define bfin_write_DMA2_START_ADDR(val) bfin_write32(DMA2_START_ADDR, val)
454#define bfin_read_DMA2_X_COUNT() bfin_read16(DMA2_X_COUNT)
455#define bfin_write_DMA2_X_COUNT(val) bfin_write16(DMA2_X_COUNT, val)
456#define bfin_read_DMA2_Y_COUNT() bfin_read16(DMA2_Y_COUNT)
457#define bfin_write_DMA2_Y_COUNT(val) bfin_write16(DMA2_Y_COUNT, val)
458#define bfin_read_DMA2_X_MODIFY() bfin_read16(DMA2_X_MODIFY)
459#define bfin_write_DMA2_X_MODIFY(val) bfin_write16(DMA2_X_MODIFY, val)
460#define bfin_read_DMA2_Y_MODIFY() bfin_read16(DMA2_Y_MODIFY)
461#define bfin_write_DMA2_Y_MODIFY(val) bfin_write16(DMA2_Y_MODIFY, val)
462#define bfin_read_DMA2_CURR_DESC_PTR() bfin_read32(DMA2_CURR_DESC_PTR)
463#define bfin_write_DMA2_CURR_DESC_PTR(val) bfin_write32(DMA2_CURR_DESC_PTR, val)
464#define bfin_read_DMA2_CURR_ADDR() bfin_read32(DMA2_CURR_ADDR)
465#define bfin_write_DMA2_CURR_ADDR(val) bfin_write32(DMA2_CURR_ADDR, val)
466#define bfin_read_DMA2_CURR_X_COUNT() bfin_read16(DMA2_CURR_X_COUNT)
467#define bfin_write_DMA2_CURR_X_COUNT(val) bfin_write16(DMA2_CURR_X_COUNT, val)
468#define bfin_read_DMA2_CURR_Y_COUNT() bfin_read16(DMA2_CURR_Y_COUNT)
469#define bfin_write_DMA2_CURR_Y_COUNT(val) bfin_write16(DMA2_CURR_Y_COUNT, val)
470#define bfin_read_DMA2_IRQ_STATUS() bfin_read16(DMA2_IRQ_STATUS)
471#define bfin_write_DMA2_IRQ_STATUS(val) bfin_write16(DMA2_IRQ_STATUS, val)
472#define bfin_read_DMA2_PERIPHERAL_MAP() bfin_read16(DMA2_PERIPHERAL_MAP)
473#define bfin_write_DMA2_PERIPHERAL_MAP(val) bfin_write16(DMA2_PERIPHERAL_MAP, val)
474
475#define bfin_read_DMA3_CONFIG() bfin_read16(DMA3_CONFIG)
476#define bfin_write_DMA3_CONFIG(val) bfin_write16(DMA3_CONFIG, val)
477#define bfin_read_DMA3_NEXT_DESC_PTR() bfin_read32(DMA3_NEXT_DESC_PTR)
478#define bfin_write_DMA3_NEXT_DESC_PTR(val) bfin_write32(DMA3_NEXT_DESC_PTR, val)
479#define bfin_read_DMA3_START_ADDR() bfin_read32(DMA3_START_ADDR)
480#define bfin_write_DMA3_START_ADDR(val) bfin_write32(DMA3_START_ADDR, val)
481#define bfin_read_DMA3_X_COUNT() bfin_read16(DMA3_X_COUNT)
482#define bfin_write_DMA3_X_COUNT(val) bfin_write16(DMA3_X_COUNT, val)
483#define bfin_read_DMA3_Y_COUNT() bfin_read16(DMA3_Y_COUNT)
484#define bfin_write_DMA3_Y_COUNT(val) bfin_write16(DMA3_Y_COUNT, val)
485#define bfin_read_DMA3_X_MODIFY() bfin_read16(DMA3_X_MODIFY)
486#define bfin_write_DMA3_X_MODIFY(val) bfin_write16(DMA3_X_MODIFY, val)
487#define bfin_read_DMA3_Y_MODIFY() bfin_read16(DMA3_Y_MODIFY)
488#define bfin_write_DMA3_Y_MODIFY(val) bfin_write16(DMA3_Y_MODIFY, val)
489#define bfin_read_DMA3_CURR_DESC_PTR() bfin_read32(DMA3_CURR_DESC_PTR)
490#define bfin_write_DMA3_CURR_DESC_PTR(val) bfin_write32(DMA3_CURR_DESC_PTR, val)
491#define bfin_read_DMA3_CURR_ADDR() bfin_read32(DMA3_CURR_ADDR)
492#define bfin_write_DMA3_CURR_ADDR(val) bfin_write32(DMA3_CURR_ADDR, val)
493#define bfin_read_DMA3_CURR_X_COUNT() bfin_read16(DMA3_CURR_X_COUNT)
494#define bfin_write_DMA3_CURR_X_COUNT(val) bfin_write16(DMA3_CURR_X_COUNT, val)
495#define bfin_read_DMA3_CURR_Y_COUNT() bfin_read16(DMA3_CURR_Y_COUNT)
496#define bfin_write_DMA3_CURR_Y_COUNT(val) bfin_write16(DMA3_CURR_Y_COUNT, val)
497#define bfin_read_DMA3_IRQ_STATUS() bfin_read16(DMA3_IRQ_STATUS)
498#define bfin_write_DMA3_IRQ_STATUS(val) bfin_write16(DMA3_IRQ_STATUS, val)
499#define bfin_read_DMA3_PERIPHERAL_MAP() bfin_read16(DMA3_PERIPHERAL_MAP)
500#define bfin_write_DMA3_PERIPHERAL_MAP(val) bfin_write16(DMA3_PERIPHERAL_MAP, val)
501
502#define bfin_read_DMA4_CONFIG() bfin_read16(DMA4_CONFIG)
503#define bfin_write_DMA4_CONFIG(val) bfin_write16(DMA4_CONFIG, val)
504#define bfin_read_DMA4_NEXT_DESC_PTR() bfin_read32(DMA4_NEXT_DESC_PTR)
505#define bfin_write_DMA4_NEXT_DESC_PTR(val) bfin_write32(DMA4_NEXT_DESC_PTR, val)
506#define bfin_read_DMA4_START_ADDR() bfin_read32(DMA4_START_ADDR)
507#define bfin_write_DMA4_START_ADDR(val) bfin_write32(DMA4_START_ADDR, val)
508#define bfin_read_DMA4_X_COUNT() bfin_read16(DMA4_X_COUNT)
509#define bfin_write_DMA4_X_COUNT(val) bfin_write16(DMA4_X_COUNT, val)
510#define bfin_read_DMA4_Y_COUNT() bfin_read16(DMA4_Y_COUNT)
511#define bfin_write_DMA4_Y_COUNT(val) bfin_write16(DMA4_Y_COUNT, val)
512#define bfin_read_DMA4_X_MODIFY() bfin_read16(DMA4_X_MODIFY)
513#define bfin_write_DMA4_X_MODIFY(val) bfin_write16(DMA4_X_MODIFY, val)
514#define bfin_read_DMA4_Y_MODIFY() bfin_read16(DMA4_Y_MODIFY)
515#define bfin_write_DMA4_Y_MODIFY(val) bfin_write16(DMA4_Y_MODIFY, val)
516#define bfin_read_DMA4_CURR_DESC_PTR() bfin_read32(DMA4_CURR_DESC_PTR)
517#define bfin_write_DMA4_CURR_DESC_PTR(val) bfin_write32(DMA4_CURR_DESC_PTR, val)
518#define bfin_read_DMA4_CURR_ADDR() bfin_read32(DMA4_CURR_ADDR)
519#define bfin_write_DMA4_CURR_ADDR(val) bfin_write32(DMA4_CURR_ADDR, val)
520#define bfin_read_DMA4_CURR_X_COUNT() bfin_read16(DMA4_CURR_X_COUNT)
521#define bfin_write_DMA4_CURR_X_COUNT(val) bfin_write16(DMA4_CURR_X_COUNT, val)
522#define bfin_read_DMA4_CURR_Y_COUNT() bfin_read16(DMA4_CURR_Y_COUNT)
523#define bfin_write_DMA4_CURR_Y_COUNT(val) bfin_write16(DMA4_CURR_Y_COUNT, val)
524#define bfin_read_DMA4_IRQ_STATUS() bfin_read16(DMA4_IRQ_STATUS)
525#define bfin_write_DMA4_IRQ_STATUS(val) bfin_write16(DMA4_IRQ_STATUS, val)
526#define bfin_read_DMA4_PERIPHERAL_MAP() bfin_read16(DMA4_PERIPHERAL_MAP)
527#define bfin_write_DMA4_PERIPHERAL_MAP(val) bfin_write16(DMA4_PERIPHERAL_MAP, val)
528
529#define bfin_read_DMA5_CONFIG() bfin_read16(DMA5_CONFIG)
530#define bfin_write_DMA5_CONFIG(val) bfin_write16(DMA5_CONFIG, val)
531#define bfin_read_DMA5_NEXT_DESC_PTR() bfin_read32(DMA5_NEXT_DESC_PTR)
532#define bfin_write_DMA5_NEXT_DESC_PTR(val) bfin_write32(DMA5_NEXT_DESC_PTR, val)
533#define bfin_read_DMA5_START_ADDR() bfin_read32(DMA5_START_ADDR)
534#define bfin_write_DMA5_START_ADDR(val) bfin_write32(DMA5_START_ADDR, val)
535#define bfin_read_DMA5_X_COUNT() bfin_read16(DMA5_X_COUNT)
536#define bfin_write_DMA5_X_COUNT(val) bfin_write16(DMA5_X_COUNT, val)
537#define bfin_read_DMA5_Y_COUNT() bfin_read16(DMA5_Y_COUNT)
538#define bfin_write_DMA5_Y_COUNT(val) bfin_write16(DMA5_Y_COUNT, val)
539#define bfin_read_DMA5_X_MODIFY() bfin_read16(DMA5_X_MODIFY)
540#define bfin_write_DMA5_X_MODIFY(val) bfin_write16(DMA5_X_MODIFY, val)
541#define bfin_read_DMA5_Y_MODIFY() bfin_read16(DMA5_Y_MODIFY)
542#define bfin_write_DMA5_Y_MODIFY(val) bfin_write16(DMA5_Y_MODIFY, val)
543#define bfin_read_DMA5_CURR_DESC_PTR() bfin_read32(DMA5_CURR_DESC_PTR)
544#define bfin_write_DMA5_CURR_DESC_PTR(val) bfin_write32(DMA5_CURR_DESC_PTR, val)
545#define bfin_read_DMA5_CURR_ADDR() bfin_read32(DMA5_CURR_ADDR)
546#define bfin_write_DMA5_CURR_ADDR(val) bfin_write32(DMA5_CURR_ADDR, val)
547#define bfin_read_DMA5_CURR_X_COUNT() bfin_read16(DMA5_CURR_X_COUNT)
548#define bfin_write_DMA5_CURR_X_COUNT(val) bfin_write16(DMA5_CURR_X_COUNT, val)
549#define bfin_read_DMA5_CURR_Y_COUNT() bfin_read16(DMA5_CURR_Y_COUNT)
550#define bfin_write_DMA5_CURR_Y_COUNT(val) bfin_write16(DMA5_CURR_Y_COUNT, val)
551#define bfin_read_DMA5_IRQ_STATUS() bfin_read16(DMA5_IRQ_STATUS)
552#define bfin_write_DMA5_IRQ_STATUS(val) bfin_write16(DMA5_IRQ_STATUS, val)
553#define bfin_read_DMA5_PERIPHERAL_MAP() bfin_read16(DMA5_PERIPHERAL_MAP)
554#define bfin_write_DMA5_PERIPHERAL_MAP(val) bfin_write16(DMA5_PERIPHERAL_MAP, val)
555
556#define bfin_read_DMA6_CONFIG() bfin_read16(DMA6_CONFIG)
557#define bfin_write_DMA6_CONFIG(val) bfin_write16(DMA6_CONFIG, val)
558#define bfin_read_DMA6_NEXT_DESC_PTR() bfin_read32(DMA6_NEXT_DESC_PTR)
559#define bfin_write_DMA6_NEXT_DESC_PTR(val) bfin_write32(DMA6_NEXT_DESC_PTR, val)
560#define bfin_read_DMA6_START_ADDR() bfin_read32(DMA6_START_ADDR)
561#define bfin_write_DMA6_START_ADDR(val) bfin_write32(DMA6_START_ADDR, val)
562#define bfin_read_DMA6_X_COUNT() bfin_read16(DMA6_X_COUNT)
563#define bfin_write_DMA6_X_COUNT(val) bfin_write16(DMA6_X_COUNT, val)
564#define bfin_read_DMA6_Y_COUNT() bfin_read16(DMA6_Y_COUNT)
565#define bfin_write_DMA6_Y_COUNT(val) bfin_write16(DMA6_Y_COUNT, val)
566#define bfin_read_DMA6_X_MODIFY() bfin_read16(DMA6_X_MODIFY)
567#define bfin_write_DMA6_X_MODIFY(val) bfin_write16(DMA6_X_MODIFY, val)
568#define bfin_read_DMA6_Y_MODIFY() bfin_read16(DMA6_Y_MODIFY)
569#define bfin_write_DMA6_Y_MODIFY(val) bfin_write16(DMA6_Y_MODIFY, val)
570#define bfin_read_DMA6_CURR_DESC_PTR() bfin_read32(DMA6_CURR_DESC_PTR)
571#define bfin_write_DMA6_CURR_DESC_PTR(val) bfin_write32(DMA6_CURR_DESC_PTR, val)
572#define bfin_read_DMA6_CURR_ADDR() bfin_read32(DMA6_CURR_ADDR)
573#define bfin_write_DMA6_CURR_ADDR(val) bfin_write32(DMA6_CURR_ADDR, val)
574#define bfin_read_DMA6_CURR_X_COUNT() bfin_read16(DMA6_CURR_X_COUNT)
575#define bfin_write_DMA6_CURR_X_COUNT(val) bfin_write16(DMA6_CURR_X_COUNT, val)
576#define bfin_read_DMA6_CURR_Y_COUNT() bfin_read16(DMA6_CURR_Y_COUNT)
577#define bfin_write_DMA6_CURR_Y_COUNT(val) bfin_write16(DMA6_CURR_Y_COUNT, val)
578#define bfin_read_DMA6_IRQ_STATUS() bfin_read16(DMA6_IRQ_STATUS)
579#define bfin_write_DMA6_IRQ_STATUS(val) bfin_write16(DMA6_IRQ_STATUS, val)
580#define bfin_read_DMA6_PERIPHERAL_MAP() bfin_read16(DMA6_PERIPHERAL_MAP)
581#define bfin_write_DMA6_PERIPHERAL_MAP(val) bfin_write16(DMA6_PERIPHERAL_MAP, val)
582
583#define bfin_read_DMA7_CONFIG() bfin_read16(DMA7_CONFIG)
584#define bfin_write_DMA7_CONFIG(val) bfin_write16(DMA7_CONFIG, val)
585#define bfin_read_DMA7_NEXT_DESC_PTR() bfin_read32(DMA7_NEXT_DESC_PTR)
586#define bfin_write_DMA7_NEXT_DESC_PTR(val) bfin_write32(DMA7_NEXT_DESC_PTR, val)
587#define bfin_read_DMA7_START_ADDR() bfin_read32(DMA7_START_ADDR)
588#define bfin_write_DMA7_START_ADDR(val) bfin_write32(DMA7_START_ADDR, val)
589#define bfin_read_DMA7_X_COUNT() bfin_read16(DMA7_X_COUNT)
590#define bfin_write_DMA7_X_COUNT(val) bfin_write16(DMA7_X_COUNT, val)
591#define bfin_read_DMA7_Y_COUNT() bfin_read16(DMA7_Y_COUNT)
592#define bfin_write_DMA7_Y_COUNT(val) bfin_write16(DMA7_Y_COUNT, val)
593#define bfin_read_DMA7_X_MODIFY() bfin_read16(DMA7_X_MODIFY)
594#define bfin_write_DMA7_X_MODIFY(val) bfin_write16(DMA7_X_MODIFY, val)
595#define bfin_read_DMA7_Y_MODIFY() bfin_read16(DMA7_Y_MODIFY)
596#define bfin_write_DMA7_Y_MODIFY(val) bfin_write16(DMA7_Y_MODIFY, val)
597#define bfin_read_DMA7_CURR_DESC_PTR() bfin_read32(DMA7_CURR_DESC_PTR)
598#define bfin_write_DMA7_CURR_DESC_PTR(val) bfin_write32(DMA7_CURR_DESC_PTR, val)
599#define bfin_read_DMA7_CURR_ADDR() bfin_read32(DMA7_CURR_ADDR)
600#define bfin_write_DMA7_CURR_ADDR(val) bfin_write32(DMA7_CURR_ADDR, val)
601#define bfin_read_DMA7_CURR_X_COUNT() bfin_read16(DMA7_CURR_X_COUNT)
602#define bfin_write_DMA7_CURR_X_COUNT(val) bfin_write16(DMA7_CURR_X_COUNT, val)
603#define bfin_read_DMA7_CURR_Y_COUNT() bfin_read16(DMA7_CURR_Y_COUNT)
604#define bfin_write_DMA7_CURR_Y_COUNT(val) bfin_write16(DMA7_CURR_Y_COUNT, val)
605#define bfin_read_DMA7_IRQ_STATUS() bfin_read16(DMA7_IRQ_STATUS)
606#define bfin_write_DMA7_IRQ_STATUS(val) bfin_write16(DMA7_IRQ_STATUS, val)
607#define bfin_read_DMA7_PERIPHERAL_MAP() bfin_read16(DMA7_PERIPHERAL_MAP)
608#define bfin_write_DMA7_PERIPHERAL_MAP(val) bfin_write16(DMA7_PERIPHERAL_MAP, val)
609
610#define bfin_read_DMA8_CONFIG() bfin_read16(DMA8_CONFIG)
611#define bfin_write_DMA8_CONFIG(val) bfin_write16(DMA8_CONFIG, val)
612#define bfin_read_DMA8_NEXT_DESC_PTR() bfin_read32(DMA8_NEXT_DESC_PTR)
613#define bfin_write_DMA8_NEXT_DESC_PTR(val) bfin_write32(DMA8_NEXT_DESC_PTR, val)
614#define bfin_read_DMA8_START_ADDR() bfin_read32(DMA8_START_ADDR)
615#define bfin_write_DMA8_START_ADDR(val) bfin_write32(DMA8_START_ADDR, val)
616#define bfin_read_DMA8_X_COUNT() bfin_read16(DMA8_X_COUNT)
617#define bfin_write_DMA8_X_COUNT(val) bfin_write16(DMA8_X_COUNT, val)
618#define bfin_read_DMA8_Y_COUNT() bfin_read16(DMA8_Y_COUNT)
619#define bfin_write_DMA8_Y_COUNT(val) bfin_write16(DMA8_Y_COUNT, val)
620#define bfin_read_DMA8_X_MODIFY() bfin_read16(DMA8_X_MODIFY)
621#define bfin_write_DMA8_X_MODIFY(val) bfin_write16(DMA8_X_MODIFY, val)
622#define bfin_read_DMA8_Y_MODIFY() bfin_read16(DMA8_Y_MODIFY)
623#define bfin_write_DMA8_Y_MODIFY(val) bfin_write16(DMA8_Y_MODIFY, val)
624#define bfin_read_DMA8_CURR_DESC_PTR() bfin_read32(DMA8_CURR_DESC_PTR)
625#define bfin_write_DMA8_CURR_DESC_PTR(val) bfin_write32(DMA8_CURR_DESC_PTR, val)
626#define bfin_read_DMA8_CURR_ADDR() bfin_read32(DMA8_CURR_ADDR)
627#define bfin_write_DMA8_CURR_ADDR(val) bfin_write32(DMA8_CURR_ADDR, val)
628#define bfin_read_DMA8_CURR_X_COUNT() bfin_read16(DMA8_CURR_X_COUNT)
629#define bfin_write_DMA8_CURR_X_COUNT(val) bfin_write16(DMA8_CURR_X_COUNT, val)
630#define bfin_read_DMA8_CURR_Y_COUNT() bfin_read16(DMA8_CURR_Y_COUNT)
631#define bfin_write_DMA8_CURR_Y_COUNT(val) bfin_write16(DMA8_CURR_Y_COUNT, val)
632#define bfin_read_DMA8_IRQ_STATUS() bfin_read16(DMA8_IRQ_STATUS)
633#define bfin_write_DMA8_IRQ_STATUS(val) bfin_write16(DMA8_IRQ_STATUS, val)
634#define bfin_read_DMA8_PERIPHERAL_MAP() bfin_read16(DMA8_PERIPHERAL_MAP)
635#define bfin_write_DMA8_PERIPHERAL_MAP(val) bfin_write16(DMA8_PERIPHERAL_MAP, val)
636
637#define bfin_read_DMA9_CONFIG() bfin_read16(DMA9_CONFIG)
638#define bfin_write_DMA9_CONFIG(val) bfin_write16(DMA9_CONFIG, val)
639#define bfin_read_DMA9_NEXT_DESC_PTR() bfin_read32(DMA9_NEXT_DESC_PTR)
640#define bfin_write_DMA9_NEXT_DESC_PTR(val) bfin_write32(DMA9_NEXT_DESC_PTR, val)
641#define bfin_read_DMA9_START_ADDR() bfin_read32(DMA9_START_ADDR)
642#define bfin_write_DMA9_START_ADDR(val) bfin_write32(DMA9_START_ADDR, val)
643#define bfin_read_DMA9_X_COUNT() bfin_read16(DMA9_X_COUNT)
644#define bfin_write_DMA9_X_COUNT(val) bfin_write16(DMA9_X_COUNT, val)
645#define bfin_read_DMA9_Y_COUNT() bfin_read16(DMA9_Y_COUNT)
646#define bfin_write_DMA9_Y_COUNT(val) bfin_write16(DMA9_Y_COUNT, val)
647#define bfin_read_DMA9_X_MODIFY() bfin_read16(DMA9_X_MODIFY)
648#define bfin_write_DMA9_X_MODIFY(val) bfin_write16(DMA9_X_MODIFY, val)
649#define bfin_read_DMA9_Y_MODIFY() bfin_read16(DMA9_Y_MODIFY)
650#define bfin_write_DMA9_Y_MODIFY(val) bfin_write16(DMA9_Y_MODIFY, val)
651#define bfin_read_DMA9_CURR_DESC_PTR() bfin_read32(DMA9_CURR_DESC_PTR)
652#define bfin_write_DMA9_CURR_DESC_PTR(val) bfin_write32(DMA9_CURR_DESC_PTR, val)
653#define bfin_read_DMA9_CURR_ADDR() bfin_read32(DMA9_CURR_ADDR)
654#define bfin_write_DMA9_CURR_ADDR(val) bfin_write32(DMA9_CURR_ADDR, val)
655#define bfin_read_DMA9_CURR_X_COUNT() bfin_read16(DMA9_CURR_X_COUNT)
656#define bfin_write_DMA9_CURR_X_COUNT(val) bfin_write16(DMA9_CURR_X_COUNT, val)
657#define bfin_read_DMA9_CURR_Y_COUNT() bfin_read16(DMA9_CURR_Y_COUNT)
658#define bfin_write_DMA9_CURR_Y_COUNT(val) bfin_write16(DMA9_CURR_Y_COUNT, val)
659#define bfin_read_DMA9_IRQ_STATUS() bfin_read16(DMA9_IRQ_STATUS)
660#define bfin_write_DMA9_IRQ_STATUS(val) bfin_write16(DMA9_IRQ_STATUS, val)
661#define bfin_read_DMA9_PERIPHERAL_MAP() bfin_read16(DMA9_PERIPHERAL_MAP)
662#define bfin_write_DMA9_PERIPHERAL_MAP(val) bfin_write16(DMA9_PERIPHERAL_MAP, val)
663
664#define bfin_read_DMA10_CONFIG() bfin_read16(DMA10_CONFIG)
665#define bfin_write_DMA10_CONFIG(val) bfin_write16(DMA10_CONFIG, val)
666#define bfin_read_DMA10_NEXT_DESC_PTR() bfin_read32(DMA10_NEXT_DESC_PTR)
667#define bfin_write_DMA10_NEXT_DESC_PTR(val) bfin_write32(DMA10_NEXT_DESC_PTR, val)
668#define bfin_read_DMA10_START_ADDR() bfin_read32(DMA10_START_ADDR)
669#define bfin_write_DMA10_START_ADDR(val) bfin_write32(DMA10_START_ADDR, val)
670#define bfin_read_DMA10_X_COUNT() bfin_read16(DMA10_X_COUNT)
671#define bfin_write_DMA10_X_COUNT(val) bfin_write16(DMA10_X_COUNT, val)
672#define bfin_read_DMA10_Y_COUNT() bfin_read16(DMA10_Y_COUNT)
673#define bfin_write_DMA10_Y_COUNT(val) bfin_write16(DMA10_Y_COUNT, val)
674#define bfin_read_DMA10_X_MODIFY() bfin_read16(DMA10_X_MODIFY)
675#define bfin_write_DMA10_X_MODIFY(val) bfin_write16(DMA10_X_MODIFY, val)
676#define bfin_read_DMA10_Y_MODIFY() bfin_read16(DMA10_Y_MODIFY)
677#define bfin_write_DMA10_Y_MODIFY(val) bfin_write16(DMA10_Y_MODIFY, val)
678#define bfin_read_DMA10_CURR_DESC_PTR() bfin_read32(DMA10_CURR_DESC_PTR)
679#define bfin_write_DMA10_CURR_DESC_PTR(val) bfin_write32(DMA10_CURR_DESC_PTR, val)
680#define bfin_read_DMA10_CURR_ADDR() bfin_read32(DMA10_CURR_ADDR)
681#define bfin_write_DMA10_CURR_ADDR(val) bfin_write32(DMA10_CURR_ADDR, val)
682#define bfin_read_DMA10_CURR_X_COUNT() bfin_read16(DMA10_CURR_X_COUNT)
683#define bfin_write_DMA10_CURR_X_COUNT(val) bfin_write16(DMA10_CURR_X_COUNT, val)
684#define bfin_read_DMA10_CURR_Y_COUNT() bfin_read16(DMA10_CURR_Y_COUNT)
685#define bfin_write_DMA10_CURR_Y_COUNT(val) bfin_write16(DMA10_CURR_Y_COUNT, val)
686#define bfin_read_DMA10_IRQ_STATUS() bfin_read16(DMA10_IRQ_STATUS)
687#define bfin_write_DMA10_IRQ_STATUS(val) bfin_write16(DMA10_IRQ_STATUS, val)
688#define bfin_read_DMA10_PERIPHERAL_MAP() bfin_read16(DMA10_PERIPHERAL_MAP)
689#define bfin_write_DMA10_PERIPHERAL_MAP(val) bfin_write16(DMA10_PERIPHERAL_MAP, val)
690
691#define bfin_read_DMA11_CONFIG() bfin_read16(DMA11_CONFIG)
692#define bfin_write_DMA11_CONFIG(val) bfin_write16(DMA11_CONFIG, val)
693#define bfin_read_DMA11_NEXT_DESC_PTR() bfin_read32(DMA11_NEXT_DESC_PTR)
694#define bfin_write_DMA11_NEXT_DESC_PTR(val) bfin_write32(DMA11_NEXT_DESC_PTR, val)
695#define bfin_read_DMA11_START_ADDR() bfin_read32(DMA11_START_ADDR)
696#define bfin_write_DMA11_START_ADDR(val) bfin_write32(DMA11_START_ADDR, val)
697#define bfin_read_DMA11_X_COUNT() bfin_read16(DMA11_X_COUNT)
698#define bfin_write_DMA11_X_COUNT(val) bfin_write16(DMA11_X_COUNT, val)
699#define bfin_read_DMA11_Y_COUNT() bfin_read16(DMA11_Y_COUNT)
700#define bfin_write_DMA11_Y_COUNT(val) bfin_write16(DMA11_Y_COUNT, val)
701#define bfin_read_DMA11_X_MODIFY() bfin_read16(DMA11_X_MODIFY)
702#define bfin_write_DMA11_X_MODIFY(val) bfin_write16(DMA11_X_MODIFY, val)
703#define bfin_read_DMA11_Y_MODIFY() bfin_read16(DMA11_Y_MODIFY)
704#define bfin_write_DMA11_Y_MODIFY(val) bfin_write16(DMA11_Y_MODIFY, val)
705#define bfin_read_DMA11_CURR_DESC_PTR() bfin_read32(DMA11_CURR_DESC_PTR)
706#define bfin_write_DMA11_CURR_DESC_PTR(val) bfin_write32(DMA11_CURR_DESC_PTR, val)
707#define bfin_read_DMA11_CURR_ADDR() bfin_read32(DMA11_CURR_ADDR)
708#define bfin_write_DMA11_CURR_ADDR(val) bfin_write32(DMA11_CURR_ADDR, val)
709#define bfin_read_DMA11_CURR_X_COUNT() bfin_read16(DMA11_CURR_X_COUNT)
710#define bfin_write_DMA11_CURR_X_COUNT(val) bfin_write16(DMA11_CURR_X_COUNT, val)
711#define bfin_read_DMA11_CURR_Y_COUNT() bfin_read16(DMA11_CURR_Y_COUNT)
712#define bfin_write_DMA11_CURR_Y_COUNT(val) bfin_write16(DMA11_CURR_Y_COUNT, val)
713#define bfin_read_DMA11_IRQ_STATUS() bfin_read16(DMA11_IRQ_STATUS)
714#define bfin_write_DMA11_IRQ_STATUS(val) bfin_write16(DMA11_IRQ_STATUS, val)
715#define bfin_read_DMA11_PERIPHERAL_MAP() bfin_read16(DMA11_PERIPHERAL_MAP)
716#define bfin_write_DMA11_PERIPHERAL_MAP(val) bfin_write16(DMA11_PERIPHERAL_MAP, val)
717
718#define bfin_read_MDMA_D0_CONFIG() bfin_read16(MDMA_D0_CONFIG)
719#define bfin_write_MDMA_D0_CONFIG(val) bfin_write16(MDMA_D0_CONFIG, val)
720#define bfin_read_MDMA_D0_NEXT_DESC_PTR() bfin_read32(MDMA_D0_NEXT_DESC_PTR)
721#define bfin_write_MDMA_D0_NEXT_DESC_PTR(val) bfin_write32(MDMA_D0_NEXT_DESC_PTR, val)
722#define bfin_read_MDMA_D0_START_ADDR() bfin_read32(MDMA_D0_START_ADDR)
723#define bfin_write_MDMA_D0_START_ADDR(val) bfin_write32(MDMA_D0_START_ADDR, val)
724#define bfin_read_MDMA_D0_X_COUNT() bfin_read16(MDMA_D0_X_COUNT)
725#define bfin_write_MDMA_D0_X_COUNT(val) bfin_write16(MDMA_D0_X_COUNT, val)
726#define bfin_read_MDMA_D0_Y_COUNT() bfin_read16(MDMA_D0_Y_COUNT)
727#define bfin_write_MDMA_D0_Y_COUNT(val) bfin_write16(MDMA_D0_Y_COUNT, val)
728#define bfin_read_MDMA_D0_X_MODIFY() bfin_read16(MDMA_D0_X_MODIFY)
729#define bfin_write_MDMA_D0_X_MODIFY(val) bfin_write16(MDMA_D0_X_MODIFY, val)
730#define bfin_read_MDMA_D0_Y_MODIFY() bfin_read16(MDMA_D0_Y_MODIFY)
731#define bfin_write_MDMA_D0_Y_MODIFY(val) bfin_write16(MDMA_D0_Y_MODIFY, val)
732#define bfin_read_MDMA_D0_CURR_DESC_PTR() bfin_read32(MDMA_D0_CURR_DESC_PTR)
733#define bfin_write_MDMA_D0_CURR_DESC_PTR(val) bfin_write32(MDMA_D0_CURR_DESC_PTR, val)
734#define bfin_read_MDMA_D0_CURR_ADDR() bfin_read32(MDMA_D0_CURR_ADDR)
735#define bfin_write_MDMA_D0_CURR_ADDR(val) bfin_write32(MDMA_D0_CURR_ADDR, val)
736#define bfin_read_MDMA_D0_CURR_X_COUNT() bfin_read16(MDMA_D0_CURR_X_COUNT)
737#define bfin_write_MDMA_D0_CURR_X_COUNT(val) bfin_write16(MDMA_D0_CURR_X_COUNT, val)
738#define bfin_read_MDMA_D0_CURR_Y_COUNT() bfin_read16(MDMA_D0_CURR_Y_COUNT)
739#define bfin_write_MDMA_D0_CURR_Y_COUNT(val) bfin_write16(MDMA_D0_CURR_Y_COUNT, val)
740#define bfin_read_MDMA_D0_IRQ_STATUS() bfin_read16(MDMA_D0_IRQ_STATUS)
741#define bfin_write_MDMA_D0_IRQ_STATUS(val) bfin_write16(MDMA_D0_IRQ_STATUS, val)
742#define bfin_read_MDMA_D0_PERIPHERAL_MAP() bfin_read16(MDMA_D0_PERIPHERAL_MAP)
743#define bfin_write_MDMA_D0_PERIPHERAL_MAP(val) bfin_write16(MDMA_D0_PERIPHERAL_MAP, val)
744
745#define bfin_read_MDMA_S0_CONFIG() bfin_read16(MDMA_S0_CONFIG)
746#define bfin_write_MDMA_S0_CONFIG(val) bfin_write16(MDMA_S0_CONFIG, val)
747#define bfin_read_MDMA_S0_NEXT_DESC_PTR() bfin_read32(MDMA_S0_NEXT_DESC_PTR)
748#define bfin_write_MDMA_S0_NEXT_DESC_PTR(val) bfin_write32(MDMA_S0_NEXT_DESC_PTR, val)
749#define bfin_read_MDMA_S0_START_ADDR() bfin_read32(MDMA_S0_START_ADDR)
750#define bfin_write_MDMA_S0_START_ADDR(val) bfin_write32(MDMA_S0_START_ADDR, val)
751#define bfin_read_MDMA_S0_X_COUNT() bfin_read16(MDMA_S0_X_COUNT)
752#define bfin_write_MDMA_S0_X_COUNT(val) bfin_write16(MDMA_S0_X_COUNT, val)
753#define bfin_read_MDMA_S0_Y_COUNT() bfin_read16(MDMA_S0_Y_COUNT)
754#define bfin_write_MDMA_S0_Y_COUNT(val) bfin_write16(MDMA_S0_Y_COUNT, val)
755#define bfin_read_MDMA_S0_X_MODIFY() bfin_read16(MDMA_S0_X_MODIFY)
756#define bfin_write_MDMA_S0_X_MODIFY(val) bfin_write16(MDMA_S0_X_MODIFY, val)
757#define bfin_read_MDMA_S0_Y_MODIFY() bfin_read16(MDMA_S0_Y_MODIFY)
758#define bfin_write_MDMA_S0_Y_MODIFY(val) bfin_write16(MDMA_S0_Y_MODIFY, val)
759#define bfin_read_MDMA_S0_CURR_DESC_PTR() bfin_read32(MDMA_S0_CURR_DESC_PTR)
760#define bfin_write_MDMA_S0_CURR_DESC_PTR(val) bfin_write32(MDMA_S0_CURR_DESC_PTR, val)
761#define bfin_read_MDMA_S0_CURR_ADDR() bfin_read32(MDMA_S0_CURR_ADDR)
762#define bfin_write_MDMA_S0_CURR_ADDR(val) bfin_write32(MDMA_S0_CURR_ADDR, val)
763#define bfin_read_MDMA_S0_CURR_X_COUNT() bfin_read16(MDMA_S0_CURR_X_COUNT)
764#define bfin_write_MDMA_S0_CURR_X_COUNT(val) bfin_write16(MDMA_S0_CURR_X_COUNT, val)
765#define bfin_read_MDMA_S0_CURR_Y_COUNT() bfin_read16(MDMA_S0_CURR_Y_COUNT)
766#define bfin_write_MDMA_S0_CURR_Y_COUNT(val) bfin_write16(MDMA_S0_CURR_Y_COUNT, val)
767#define bfin_read_MDMA_S0_IRQ_STATUS() bfin_read16(MDMA_S0_IRQ_STATUS)
768#define bfin_write_MDMA_S0_IRQ_STATUS(val) bfin_write16(MDMA_S0_IRQ_STATUS, val)
769#define bfin_read_MDMA_S0_PERIPHERAL_MAP() bfin_read16(MDMA_S0_PERIPHERAL_MAP)
770#define bfin_write_MDMA_S0_PERIPHERAL_MAP(val) bfin_write16(MDMA_S0_PERIPHERAL_MAP, val)
771
772#define bfin_read_MDMA_D1_CONFIG() bfin_read16(MDMA_D1_CONFIG)
773#define bfin_write_MDMA_D1_CONFIG(val) bfin_write16(MDMA_D1_CONFIG, val)
774#define bfin_read_MDMA_D1_NEXT_DESC_PTR() bfin_read32(MDMA_D1_NEXT_DESC_PTR)
775#define bfin_write_MDMA_D1_NEXT_DESC_PTR(val) bfin_write32(MDMA_D1_NEXT_DESC_PTR, val)
776#define bfin_read_MDMA_D1_START_ADDR() bfin_read32(MDMA_D1_START_ADDR)
777#define bfin_write_MDMA_D1_START_ADDR(val) bfin_write32(MDMA_D1_START_ADDR, val)
778#define bfin_read_MDMA_D1_X_COUNT() bfin_read16(MDMA_D1_X_COUNT)
779#define bfin_write_MDMA_D1_X_COUNT(val) bfin_write16(MDMA_D1_X_COUNT, val)
780#define bfin_read_MDMA_D1_Y_COUNT() bfin_read16(MDMA_D1_Y_COUNT)
781#define bfin_write_MDMA_D1_Y_COUNT(val) bfin_write16(MDMA_D1_Y_COUNT, val)
782#define bfin_read_MDMA_D1_X_MODIFY() bfin_read16(MDMA_D1_X_MODIFY)
783#define bfin_write_MDMA_D1_X_MODIFY(val) bfin_write16(MDMA_D1_X_MODIFY, val)
784#define bfin_read_MDMA_D1_Y_MODIFY() bfin_read16(MDMA_D1_Y_MODIFY)
785#define bfin_write_MDMA_D1_Y_MODIFY(val) bfin_write16(MDMA_D1_Y_MODIFY, val)
786#define bfin_read_MDMA_D1_CURR_DESC_PTR() bfin_read32(MDMA_D1_CURR_DESC_PTR)
787#define bfin_write_MDMA_D1_CURR_DESC_PTR(val) bfin_write32(MDMA_D1_CURR_DESC_PTR, val)
788#define bfin_read_MDMA_D1_CURR_ADDR() bfin_read32(MDMA_D1_CURR_ADDR)
789#define bfin_write_MDMA_D1_CURR_ADDR(val) bfin_write32(MDMA_D1_CURR_ADDR, val)
790#define bfin_read_MDMA_D1_CURR_X_COUNT() bfin_read16(MDMA_D1_CURR_X_COUNT)
791#define bfin_write_MDMA_D1_CURR_X_COUNT(val) bfin_write16(MDMA_D1_CURR_X_COUNT, val)
792#define bfin_read_MDMA_D1_CURR_Y_COUNT() bfin_read16(MDMA_D1_CURR_Y_COUNT)
793#define bfin_write_MDMA_D1_CURR_Y_COUNT(val) bfin_write16(MDMA_D1_CURR_Y_COUNT, val)
794#define bfin_read_MDMA_D1_IRQ_STATUS() bfin_read16(MDMA_D1_IRQ_STATUS)
795#define bfin_write_MDMA_D1_IRQ_STATUS(val) bfin_write16(MDMA_D1_IRQ_STATUS, val)
796#define bfin_read_MDMA_D1_PERIPHERAL_MAP() bfin_read16(MDMA_D1_PERIPHERAL_MAP)
797#define bfin_write_MDMA_D1_PERIPHERAL_MAP(val) bfin_write16(MDMA_D1_PERIPHERAL_MAP, val)
798
799#define bfin_read_MDMA_S1_CONFIG() bfin_read16(MDMA_S1_CONFIG)
800#define bfin_write_MDMA_S1_CONFIG(val) bfin_write16(MDMA_S1_CONFIG, val)
801#define bfin_read_MDMA_S1_NEXT_DESC_PTR() bfin_read32(MDMA_S1_NEXT_DESC_PTR)
802#define bfin_write_MDMA_S1_NEXT_DESC_PTR(val) bfin_write32(MDMA_S1_NEXT_DESC_PTR, val)
803#define bfin_read_MDMA_S1_START_ADDR() bfin_read32(MDMA_S1_START_ADDR)
804#define bfin_write_MDMA_S1_START_ADDR(val) bfin_write32(MDMA_S1_START_ADDR, val)
805#define bfin_read_MDMA_S1_X_COUNT() bfin_read16(MDMA_S1_X_COUNT)
806#define bfin_write_MDMA_S1_X_COUNT(val) bfin_write16(MDMA_S1_X_COUNT, val)
807#define bfin_read_MDMA_S1_Y_COUNT() bfin_read16(MDMA_S1_Y_COUNT)
808#define bfin_write_MDMA_S1_Y_COUNT(val) bfin_write16(MDMA_S1_Y_COUNT, val)
809#define bfin_read_MDMA_S1_X_MODIFY() bfin_read16(MDMA_S1_X_MODIFY)
810#define bfin_write_MDMA_S1_X_MODIFY(val) bfin_write16(MDMA_S1_X_MODIFY, val)
811#define bfin_read_MDMA_S1_Y_MODIFY() bfin_read16(MDMA_S1_Y_MODIFY)
812#define bfin_write_MDMA_S1_Y_MODIFY(val) bfin_write16(MDMA_S1_Y_MODIFY, val)
813#define bfin_read_MDMA_S1_CURR_DESC_PTR() bfin_read32(MDMA_S1_CURR_DESC_PTR)
814#define bfin_write_MDMA_S1_CURR_DESC_PTR(val) bfin_write32(MDMA_S1_CURR_DESC_PTR, val)
815#define bfin_read_MDMA_S1_CURR_ADDR() bfin_read32(MDMA_S1_CURR_ADDR)
816#define bfin_write_MDMA_S1_CURR_ADDR(val) bfin_write32(MDMA_S1_CURR_ADDR, val)
817#define bfin_read_MDMA_S1_CURR_X_COUNT() bfin_read16(MDMA_S1_CURR_X_COUNT)
818#define bfin_write_MDMA_S1_CURR_X_COUNT(val) bfin_write16(MDMA_S1_CURR_X_COUNT, val)
819#define bfin_read_MDMA_S1_CURR_Y_COUNT() bfin_read16(MDMA_S1_CURR_Y_COUNT)
820#define bfin_write_MDMA_S1_CURR_Y_COUNT(val) bfin_write16(MDMA_S1_CURR_Y_COUNT, val)
821#define bfin_read_MDMA_S1_IRQ_STATUS() bfin_read16(MDMA_S1_IRQ_STATUS)
822#define bfin_write_MDMA_S1_IRQ_STATUS(val) bfin_write16(MDMA_S1_IRQ_STATUS, val)
823#define bfin_read_MDMA_S1_PERIPHERAL_MAP() bfin_read16(MDMA_S1_PERIPHERAL_MAP)
824#define bfin_write_MDMA_S1_PERIPHERAL_MAP(val) bfin_write16(MDMA_S1_PERIPHERAL_MAP, val)
825
826
827/* Parallel Peripheral Interface (0xFFC01000 - 0xFFC010FF) */
828#define bfin_read_PPI_CONTROL() bfin_read16(PPI_CONTROL)
829#define bfin_write_PPI_CONTROL(val) bfin_write16(PPI_CONTROL, val)
830#define bfin_read_PPI_STATUS() bfin_read16(PPI_STATUS)
831#define bfin_write_PPI_STATUS(val) bfin_write16(PPI_STATUS, val)
832#define bfin_clear_PPI_STATUS() bfin_write_PPI_STATUS(0xFFFF)
833#define bfin_read_PPI_DELAY() bfin_read16(PPI_DELAY)
834#define bfin_write_PPI_DELAY(val) bfin_write16(PPI_DELAY, val)
835#define bfin_read_PPI_COUNT() bfin_read16(PPI_COUNT)
836#define bfin_write_PPI_COUNT(val) bfin_write16(PPI_COUNT, val)
837#define bfin_read_PPI_FRAME() bfin_read16(PPI_FRAME)
838#define bfin_write_PPI_FRAME(val) bfin_write16(PPI_FRAME, val)
839
840
841/* Two-Wire Interface (0xFFC01400 - 0xFFC014FF) */
842
843/* General Purpose I/O Port G (0xFFC01500 - 0xFFC015FF) */
844#define bfin_read_PORTGIO() bfin_read16(PORTGIO)
845#define bfin_write_PORTGIO(val) bfin_write16(PORTGIO, val)
846#define bfin_read_PORTGIO_CLEAR() bfin_read16(PORTGIO_CLEAR)
847#define bfin_write_PORTGIO_CLEAR(val) bfin_write16(PORTGIO_CLEAR, val)
848#define bfin_read_PORTGIO_SET() bfin_read16(PORTGIO_SET)
849#define bfin_write_PORTGIO_SET(val) bfin_write16(PORTGIO_SET, val)
850#define bfin_read_PORTGIO_TOGGLE() bfin_read16(PORTGIO_TOGGLE)
851#define bfin_write_PORTGIO_TOGGLE(val) bfin_write16(PORTGIO_TOGGLE, val)
852#define bfin_read_PORTGIO_MASKA() bfin_read16(PORTGIO_MASKA)
853#define bfin_write_PORTGIO_MASKA(val) bfin_write16(PORTGIO_MASKA, val)
854#define bfin_read_PORTGIO_MASKA_CLEAR() bfin_read16(PORTGIO_MASKA_CLEAR)
855#define bfin_write_PORTGIO_MASKA_CLEAR(val) bfin_write16(PORTGIO_MASKA_CLEAR, val)
856#define bfin_read_PORTGIO_MASKA_SET() bfin_read16(PORTGIO_MASKA_SET)
857#define bfin_write_PORTGIO_MASKA_SET(val) bfin_write16(PORTGIO_MASKA_SET, val)
858#define bfin_read_PORTGIO_MASKA_TOGGLE() bfin_read16(PORTGIO_MASKA_TOGGLE)
859#define bfin_write_PORTGIO_MASKA_TOGGLE(val) bfin_write16(PORTGIO_MASKA_TOGGLE, val)
860#define bfin_read_PORTGIO_MASKB() bfin_read16(PORTGIO_MASKB)
861#define bfin_write_PORTGIO_MASKB(val) bfin_write16(PORTGIO_MASKB, val)
862#define bfin_read_PORTGIO_MASKB_CLEAR() bfin_read16(PORTGIO_MASKB_CLEAR)
863#define bfin_write_PORTGIO_MASKB_CLEAR(val) bfin_write16(PORTGIO_MASKB_CLEAR, val)
864#define bfin_read_PORTGIO_MASKB_SET() bfin_read16(PORTGIO_MASKB_SET)
865#define bfin_write_PORTGIO_MASKB_SET(val) bfin_write16(PORTGIO_MASKB_SET, val)
866#define bfin_read_PORTGIO_MASKB_TOGGLE() bfin_read16(PORTGIO_MASKB_TOGGLE)
867#define bfin_write_PORTGIO_MASKB_TOGGLE(val) bfin_write16(PORTGIO_MASKB_TOGGLE, val)
868#define bfin_read_PORTGIO_DIR() bfin_read16(PORTGIO_DIR)
869#define bfin_write_PORTGIO_DIR(val) bfin_write16(PORTGIO_DIR, val)
870#define bfin_read_PORTGIO_POLAR() bfin_read16(PORTGIO_POLAR)
871#define bfin_write_PORTGIO_POLAR(val) bfin_write16(PORTGIO_POLAR, val)
872#define bfin_read_PORTGIO_EDGE() bfin_read16(PORTGIO_EDGE)
873#define bfin_write_PORTGIO_EDGE(val) bfin_write16(PORTGIO_EDGE, val)
874#define bfin_read_PORTGIO_BOTH() bfin_read16(PORTGIO_BOTH)
875#define bfin_write_PORTGIO_BOTH(val) bfin_write16(PORTGIO_BOTH, val)
876#define bfin_read_PORTGIO_INEN() bfin_read16(PORTGIO_INEN)
877#define bfin_write_PORTGIO_INEN(val) bfin_write16(PORTGIO_INEN, val)
878
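The _SET/_CLEAR/_TOGGLE variants above act on individual bits of the data register, so a pin can be changed without a read-modify-write cycle on PORTGIO itself. A short sketch, with PG3 chosen purely as an example pin:

/* PG3 is arbitrary, for illustration only. */
bfin_write_PORTGIO_DIR(bfin_read_PORTGIO_DIR() | (1 << 3));	/* PG3 output */
bfin_write_PORTGIO_SET(1 << 3);		/* drive high: no RMW on the data reg */
bfin_write_PORTGIO_CLEAR(1 << 3);	/* drive low */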
879
880/* General Purpose I/O Port H (0xFFC01700 - 0xFFC017FF) */
881#define bfin_read_PORTHIO() bfin_read16(PORTHIO)
882#define bfin_write_PORTHIO(val) bfin_write16(PORTHIO, val)
883#define bfin_read_PORTHIO_CLEAR() bfin_read16(PORTHIO_CLEAR)
884#define bfin_write_PORTHIO_CLEAR(val) bfin_write16(PORTHIO_CLEAR, val)
885#define bfin_read_PORTHIO_SET() bfin_read16(PORTHIO_SET)
886#define bfin_write_PORTHIO_SET(val) bfin_write16(PORTHIO_SET, val)
887#define bfin_read_PORTHIO_TOGGLE() bfin_read16(PORTHIO_TOGGLE)
888#define bfin_write_PORTHIO_TOGGLE(val) bfin_write16(PORTHIO_TOGGLE, val)
889#define bfin_read_PORTHIO_MASKA() bfin_read16(PORTHIO_MASKA)
890#define bfin_write_PORTHIO_MASKA(val) bfin_write16(PORTHIO_MASKA, val)
891#define bfin_read_PORTHIO_MASKA_CLEAR() bfin_read16(PORTHIO_MASKA_CLEAR)
892#define bfin_write_PORTHIO_MASKA_CLEAR(val) bfin_write16(PORTHIO_MASKA_CLEAR, val)
893#define bfin_read_PORTHIO_MASKA_SET() bfin_read16(PORTHIO_MASKA_SET)
894#define bfin_write_PORTHIO_MASKA_SET(val) bfin_write16(PORTHIO_MASKA_SET, val)
895#define bfin_read_PORTHIO_MASKA_TOGGLE() bfin_read16(PORTHIO_MASKA_TOGGLE)
896#define bfin_write_PORTHIO_MASKA_TOGGLE(val) bfin_write16(PORTHIO_MASKA_TOGGLE, val)
897#define bfin_read_PORTHIO_MASKB() bfin_read16(PORTHIO_MASKB)
898#define bfin_write_PORTHIO_MASKB(val) bfin_write16(PORTHIO_MASKB, val)
899#define bfin_read_PORTHIO_MASKB_CLEAR() bfin_read16(PORTHIO_MASKB_CLEAR)
900#define bfin_write_PORTHIO_MASKB_CLEAR(val) bfin_write16(PORTHIO_MASKB_CLEAR, val)
901#define bfin_read_PORTHIO_MASKB_SET() bfin_read16(PORTHIO_MASKB_SET)
902#define bfin_write_PORTHIO_MASKB_SET(val) bfin_write16(PORTHIO_MASKB_SET, val)
903#define bfin_read_PORTHIO_MASKB_TOGGLE() bfin_read16(PORTHIO_MASKB_TOGGLE)
904#define bfin_write_PORTHIO_MASKB_TOGGLE(val) bfin_write16(PORTHIO_MASKB_TOGGLE, val)
905#define bfin_read_PORTHIO_DIR() bfin_read16(PORTHIO_DIR)
906#define bfin_write_PORTHIO_DIR(val) bfin_write16(PORTHIO_DIR, val)
907#define bfin_read_PORTHIO_POLAR() bfin_read16(PORTHIO_POLAR)
908#define bfin_write_PORTHIO_POLAR(val) bfin_write16(PORTHIO_POLAR, val)
909#define bfin_read_PORTHIO_EDGE() bfin_read16(PORTHIO_EDGE)
910#define bfin_write_PORTHIO_EDGE(val) bfin_write16(PORTHIO_EDGE, val)
911#define bfin_read_PORTHIO_BOTH() bfin_read16(PORTHIO_BOTH)
912#define bfin_write_PORTHIO_BOTH(val) bfin_write16(PORTHIO_BOTH, val)
913#define bfin_read_PORTHIO_INEN() bfin_read16(PORTHIO_INEN)
914#define bfin_write_PORTHIO_INEN(val) bfin_write16(PORTHIO_INEN, val)
915
916
917/* UART1 Controller (0xFFC02000 - 0xFFC020FF) */
918#define bfin_read_UART1_THR() bfin_read16(UART1_THR)
919#define bfin_write_UART1_THR(val) bfin_write16(UART1_THR, val)
920#define bfin_read_UART1_RBR() bfin_read16(UART1_RBR)
921#define bfin_write_UART1_RBR(val) bfin_write16(UART1_RBR, val)
922#define bfin_read_UART1_DLL() bfin_read16(UART1_DLL)
923#define bfin_write_UART1_DLL(val) bfin_write16(UART1_DLL, val)
924#define bfin_read_UART1_IER() bfin_read16(UART1_IER)
925#define bfin_write_UART1_IER(val) bfin_write16(UART1_IER, val)
926#define bfin_read_UART1_DLH() bfin_read16(UART1_DLH)
927#define bfin_write_UART1_DLH(val) bfin_write16(UART1_DLH, val)
928#define bfin_read_UART1_IIR() bfin_read16(UART1_IIR)
929#define bfin_write_UART1_IIR(val) bfin_write16(UART1_IIR, val)
930#define bfin_read_UART1_LCR() bfin_read16(UART1_LCR)
931#define bfin_write_UART1_LCR(val) bfin_write16(UART1_LCR, val)
932#define bfin_read_UART1_MCR() bfin_read16(UART1_MCR)
933#define bfin_write_UART1_MCR(val) bfin_write16(UART1_MCR, val)
934#define bfin_read_UART1_LSR() bfin_read16(UART1_LSR)
935#define bfin_write_UART1_LSR(val) bfin_write16(UART1_LSR, val)
936#define bfin_read_UART1_MSR() bfin_read16(UART1_MSR)
937#define bfin_write_UART1_MSR(val) bfin_write16(UART1_MSR, val)
938#define bfin_read_UART1_SCR() bfin_read16(UART1_SCR)
939#define bfin_write_UART1_SCR(val) bfin_write16(UART1_SCR, val)
940#define bfin_read_UART1_GCTL() bfin_read16(UART1_GCTL)
941#define bfin_write_UART1_GCTL(val) bfin_write16(UART1_GCTL, val)
942
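UART1 follows the 16550 split-divisor model: DLL/DLH are visible only while the divisor-latch access bit is set, and baud = SCLK / (16 * divisor). A hedged sketch, assuming the standard 16550 DLAB bit (0x80) in LCR:

/* Assumed: DLAB = 0x80 in UART1_LCR; divisor = SCLK / (16 * baud). */
static void uart1_set_baud(unsigned long sclk, unsigned long baud)
{
	unsigned short divisor = sclk / (16 * baud);
	unsigned short lcr = bfin_read_UART1_LCR();

	bfin_write_UART1_LCR(lcr | 0x80);	/* DLAB on: expose DLL/DLH */
	bfin_write_UART1_DLL(divisor & 0xff);
	bfin_write_UART1_DLH(divisor >> 8);
	bfin_write_UART1_LCR(lcr & ~0x80);	/* DLAB off again */
}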
943/* CAN register sets from cdefBF534.h are omitted (the ADSP-BF52x has no CAN controller) */
944
945/* Pin Control Registers (0xFFC03200 - 0xFFC032FF) */
946#define bfin_read_PORTF_FER() bfin_read16(PORTF_FER)
947#define bfin_write_PORTF_FER(val) bfin_write16(PORTF_FER, val)
948#define bfin_read_PORTG_FER() bfin_read16(PORTG_FER)
949#define bfin_write_PORTG_FER(val) bfin_write16(PORTG_FER, val)
950#define bfin_read_PORTH_FER() bfin_read16(PORTH_FER)
951#define bfin_write_PORTH_FER(val) bfin_write16(PORTH_FER, val)
952#define bfin_read_PORT_MUX() bfin_read16(PORT_MUX)
953#define bfin_write_PORT_MUX(val) bfin_write16(PORT_MUX, val)
954
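Each bit set in a *_FER register hands the matching pin to its peripheral function instead of GPIO, with PORT_MUX arbitrating when several peripherals share a pin group. A sketch with purely hypothetical pins:

/* Hypothetical: pretend some peripheral's pins are PF6 and PF7. */
bfin_write_PORTF_FER(bfin_read_PORTF_FER() | (1 << 6) | (1 << 7));
/* The matching PORT_MUX field for that peripheral is device-specific and
 * is defined in the defBF52x headers, not reproduced here. */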
955
956/* Handshake MDMA Registers (0xFFC03300 - 0xFFC033FF) */
957#define bfin_read_HMDMA0_CONTROL() bfin_read16(HMDMA0_CONTROL)
958#define bfin_write_HMDMA0_CONTROL(val) bfin_write16(HMDMA0_CONTROL, val)
959#define bfin_read_HMDMA0_ECINIT() bfin_read16(HMDMA0_ECINIT)
960#define bfin_write_HMDMA0_ECINIT(val) bfin_write16(HMDMA0_ECINIT, val)
961#define bfin_read_HMDMA0_BCINIT() bfin_read16(HMDMA0_BCINIT)
962#define bfin_write_HMDMA0_BCINIT(val) bfin_write16(HMDMA0_BCINIT, val)
963#define bfin_read_HMDMA0_ECURGENT() bfin_read16(HMDMA0_ECURGENT)
964#define bfin_write_HMDMA0_ECURGENT(val) bfin_write16(HMDMA0_ECURGENT, val)
965#define bfin_read_HMDMA0_ECOVERFLOW() bfin_read16(HMDMA0_ECOVERFLOW)
966#define bfin_write_HMDMA0_ECOVERFLOW(val) bfin_write16(HMDMA0_ECOVERFLOW, val)
967#define bfin_read_HMDMA0_ECOUNT() bfin_read16(HMDMA0_ECOUNT)
968#define bfin_write_HMDMA0_ECOUNT(val) bfin_write16(HMDMA0_ECOUNT, val)
969#define bfin_read_HMDMA0_BCOUNT() bfin_read16(HMDMA0_BCOUNT)
970#define bfin_write_HMDMA0_BCOUNT(val) bfin_write16(HMDMA0_BCOUNT, val)
971
972#define bfin_read_HMDMA1_CONTROL() bfin_read16(HMDMA1_CONTROL)
973#define bfin_write_HMDMA1_CONTROL(val) bfin_write16(HMDMA1_CONTROL, val)
974#define bfin_read_HMDMA1_ECINIT() bfin_read16(HMDMA1_ECINIT)
975#define bfin_write_HMDMA1_ECINIT(val) bfin_write16(HMDMA1_ECINIT, val)
976#define bfin_read_HMDMA1_BCINIT() bfin_read16(HMDMA1_BCINIT)
977#define bfin_write_HMDMA1_BCINIT(val) bfin_write16(HMDMA1_BCINIT, val)
978#define bfin_read_HMDMA1_ECURGENT() bfin_read16(HMDMA1_ECURGENT)
979#define bfin_write_HMDMA1_ECURGENT(val) bfin_write16(HMDMA1_ECURGENT, val)
980#define bfin_read_HMDMA1_ECOVERFLOW() bfin_read16(HMDMA1_ECOVERFLOW)
981#define bfin_write_HMDMA1_ECOVERFLOW(val) bfin_write16(HMDMA1_ECOVERFLOW, val)
982#define bfin_read_HMDMA1_ECOUNT() bfin_read16(HMDMA1_ECOUNT)
983#define bfin_write_HMDMA1_ECOUNT(val) bfin_write16(HMDMA1_ECOUNT, val)
984#define bfin_read_HMDMA1_BCOUNT() bfin_read16(HMDMA1_BCOUNT)
985#define bfin_write_HMDMA1_BCOUNT(val) bfin_write16(HMDMA1_BCOUNT, val)
986
987/* ==== end from cdefBF534.h ==== */
988
989/* GPIO PIN mux (0xFFC03210 - 0xFFC03288) */
990
991#define bfin_read_PORTF_MUX() bfin_read16(PORTF_MUX)
992#define bfin_write_PORTF_MUX(val) bfin_write16(PORTF_MUX, val)
993#define bfin_read_PORTG_MUX() bfin_read16(PORTG_MUX)
994#define bfin_write_PORTG_MUX(val) bfin_write16(PORTG_MUX, val)
995#define bfin_read_PORTH_MUX() bfin_read16(PORTH_MUX)
996#define bfin_write_PORTH_MUX(val) bfin_write16(PORTH_MUX, val)
997
998#define bfin_read_PORTF_DRIVE() bfin_read16(PORTF_DRIVE)
999#define bfin_write_PORTF_DRIVE(val) bfin_write16(PORTF_DRIVE, val)
1000#define bfin_read_PORTG_DRIVE() bfin_read16(PORTG_DRIVE)
1001#define bfin_write_PORTG_DRIVE(val) bfin_write16(PORTG_DRIVE, val)
1002#define bfin_read_PORTH_DRIVE() bfin_read16(PORTH_DRIVE)
1003#define bfin_write_PORTH_DRIVE(val) bfin_write16(PORTH_DRIVE, val)
1004#define bfin_read_PORTF_SLEW() bfin_read16(PORTF_SLEW)
1005#define bfin_write_PORTF_SLEW(val) bfin_write16(PORTF_SLEW, val)
1006#define bfin_read_PORTG_SLEW() bfin_read16(PORTG_SLEW)
1007#define bfin_write_PORTG_SLEW(val) bfin_write16(PORTG_SLEW, val)
1008#define bfin_read_PORTH_SLEW() bfin_read16(PORTH_SLEW)
1009#define bfin_write_PORTH_SLEW(val) bfin_write16(PORTH_SLEW, val)
1010#define bfin_read_PORTF_HYSTERISIS() bfin_read16(PORTF_HYSTERISIS)
1011#define bfin_write_PORTF_HYSTERISIS(val) bfin_write16(PORTF_HYSTERISIS, val)
1012#define bfin_read_PORTG_HYSTERISIS() bfin_read16(PORTG_HYSTERISIS)
1013#define bfin_write_PORTG_HYSTERISIS(val) bfin_write16(PORTG_HYSTERISIS, val)
1014#define bfin_read_PORTH_HYSTERISIS() bfin_read16(PORTH_HYSTERISIS)
1015#define bfin_write_PORTH_HYSTERISIS(val) bfin_write16(PORTH_HYSTERISIS, val)
1016#define bfin_read_MISCPORT_DRIVE() bfin_read16(MISCPORT_DRIVE)
1017#define bfin_write_MISCPORT_DRIVE(val) bfin_write16(MISCPORT_DRIVE, val)
1018#define bfin_read_MISCPORT_SLEW() bfin_read16(MISCPORT_SLEW)
1019#define bfin_write_MISCPORT_SLEW(val) bfin_write16(MISCPORT_SLEW, val)
1020#define bfin_read_MISCPORT_HYSTERISIS() bfin_read16(MISCPORT_HYSTERISIS)
1021#define bfin_write_MISCPORT_HYSTERISIS(val) bfin_write16(MISCPORT_HYSTERISIS, val)
1022
1023/* HOST Port Registers */
1024
1025#define bfin_read_HOST_CONTROL() bfin_read16(HOST_CONTROL)
1026#define bfin_write_HOST_CONTROL(val) bfin_write16(HOST_CONTROL, val)
1027#define bfin_read_HOST_STATUS() bfin_read16(HOST_STATUS)
1028#define bfin_write_HOST_STATUS(val) bfin_write16(HOST_STATUS, val)
1029#define bfin_read_HOST_TIMEOUT() bfin_read16(HOST_TIMEOUT)
1030#define bfin_write_HOST_TIMEOUT(val) bfin_write16(HOST_TIMEOUT, val)
1031
1032/* Counter Registers */
1033
1034#define bfin_read_CNT_CONFIG() bfin_read16(CNT_CONFIG)
1035#define bfin_write_CNT_CONFIG(val) bfin_write16(CNT_CONFIG, val)
1036#define bfin_read_CNT_IMASK() bfin_read16(CNT_IMASK)
1037#define bfin_write_CNT_IMASK(val) bfin_write16(CNT_IMASK, val)
1038#define bfin_read_CNT_STATUS() bfin_read16(CNT_STATUS)
1039#define bfin_write_CNT_STATUS(val) bfin_write16(CNT_STATUS, val)
1040#define bfin_read_CNT_COMMAND() bfin_read16(CNT_COMMAND)
1041#define bfin_write_CNT_COMMAND(val) bfin_write16(CNT_COMMAND, val)
1042#define bfin_read_CNT_DEBOUNCE() bfin_read16(CNT_DEBOUNCE)
1043#define bfin_write_CNT_DEBOUNCE(val) bfin_write16(CNT_DEBOUNCE, val)
1044#define bfin_read_CNT_COUNTER() bfin_read32(CNT_COUNTER)
1045#define bfin_write_CNT_COUNTER(val) bfin_write32(CNT_COUNTER, val)
1046#define bfin_read_CNT_MAX() bfin_read32(CNT_MAX)
1047#define bfin_write_CNT_MAX(val) bfin_write32(CNT_MAX, val)
1048#define bfin_read_CNT_MIN() bfin_read32(CNT_MIN)
1049#define bfin_write_CNT_MIN(val) bfin_write32(CNT_MIN, val)
1050
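The CNT block is the rotary/quadrature counter; note the width split above: control and status words are 16-bit, while the count and its CNT_MAX/CNT_MIN bounds are 32-bit. A polled read sketch, assuming (defBF52x-style) that bit 0 of CNT_CONFIG is the counter-enable bit:

/* Assumed: CNTE-style enable is bit 0 of CNT_CONFIG. */
bfin_write_CNT_CONFIG(bfin_read_CNT_CONFIG() | 0x0001);
long position = (long)bfin_read_CNT_COUNTER();	/* signed 32-bit position */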
1051/* Security Registers */
1052
1053#define bfin_read_SECURE_SYSSWT() bfin_read32(SECURE_SYSSWT)
1054#define bfin_write_SECURE_SYSSWT(val) bfin_write32(SECURE_SYSSWT, val)
1055#define bfin_read_SECURE_CONTROL() bfin_read16(SECURE_CONTROL)
1056#define bfin_write_SECURE_CONTROL(val) bfin_write16(SECURE_CONTROL, val)
1057#define bfin_read_SECURE_STATUS() bfin_read16(SECURE_STATUS)
1058#define bfin_write_SECURE_STATUS(val) bfin_write16(SECURE_STATUS, val)
1059
1060/* NFC Registers */
1061
1062#define bfin_read_NFC_CTL() bfin_read16(NFC_CTL)
1063#define bfin_write_NFC_CTL(val) bfin_write16(NFC_CTL, val)
1064#define bfin_read_NFC_STAT() bfin_read16(NFC_STAT)
1065#define bfin_write_NFC_STAT(val) bfin_write16(NFC_STAT, val)
1066#define bfin_read_NFC_IRQSTAT() bfin_read16(NFC_IRQSTAT)
1067#define bfin_write_NFC_IRQSTAT(val) bfin_write16(NFC_IRQSTAT, val)
1068#define bfin_read_NFC_IRQMASK() bfin_read16(NFC_IRQMASK)
1069#define bfin_write_NFC_IRQMASK(val) bfin_write16(NFC_IRQMASK, val)
1070#define bfin_read_NFC_ECC0() bfin_read16(NFC_ECC0)
1071#define bfin_write_NFC_ECC0(val) bfin_write16(NFC_ECC0, val)
1072#define bfin_read_NFC_ECC1() bfin_read16(NFC_ECC1)
1073#define bfin_write_NFC_ECC1(val) bfin_write16(NFC_ECC1, val)
1074#define bfin_read_NFC_ECC2() bfin_read16(NFC_ECC2)
1075#define bfin_write_NFC_ECC2(val) bfin_write16(NFC_ECC2, val)
1076#define bfin_read_NFC_ECC3() bfin_read16(NFC_ECC3)
1077#define bfin_write_NFC_ECC3(val) bfin_write16(NFC_ECC3, val)
1078#define bfin_read_NFC_COUNT() bfin_read16(NFC_COUNT)
1079#define bfin_write_NFC_COUNT(val) bfin_write16(NFC_COUNT, val)
1080#define bfin_read_NFC_RST() bfin_read16(NFC_RST)
1081#define bfin_write_NFC_RST(val) bfin_write16(NFC_RST, val)
1082#define bfin_read_NFC_PGCTL() bfin_read16(NFC_PGCTL)
1083#define bfin_write_NFC_PGCTL(val) bfin_write16(NFC_PGCTL, val)
1084#define bfin_read_NFC_READ() bfin_read16(NFC_READ)
1085#define bfin_write_NFC_READ(val) bfin_write16(NFC_READ, val)
1086#define bfin_read_NFC_ADDR() bfin_read16(NFC_ADDR)
1087#define bfin_write_NFC_ADDR(val) bfin_write16(NFC_ADDR, val)
1088#define bfin_read_NFC_CMD() bfin_read16(NFC_CMD)
1089#define bfin_write_NFC_CMD(val) bfin_write16(NFC_CMD, val)
1090#define bfin_read_NFC_DATA_WR() bfin_read16(NFC_DATA_WR)
1091#define bfin_write_NFC_DATA_WR(val) bfin_write16(NFC_DATA_WR, val)
1092#define bfin_read_NFC_DATA_RD() bfin_read16(NFC_DATA_RD)
1093#define bfin_write_NFC_DATA_RD(val) bfin_write16(NFC_DATA_RD, val)
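The NFC registers above drive raw NAND cycles: opcodes go to NFC_CMD, address bytes to NFC_ADDR, and data moves through NFC_DATA_WR/NFC_DATA_RD, with NFC_IRQSTAT reporting completion write-1-to-clear style. A hedged sketch of the standard NAND RESET (opcode 0xFF); the status bit polled is an assumption:

/* 0xFF is the standard NAND RESET opcode; the done flag in NFC_IRQSTAT is
 * assumed to be bit 0 for this sketch. */
bfin_write_NFC_CMD(0xFF);
while (!(bfin_read_NFC_IRQSTAT() & 0x0001))
	continue;
bfin_write_NFC_IRQSTAT(0x0001);		/* W1C, as with the other status regs */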
20 1094
21#endif /* _CDEF_BF522_H */ 1095#endif /* _CDEF_BF522_H */
diff --git a/arch/blackfin/mach-bf527/include/mach/cdefBF525.h b/arch/blackfin/mach-bf527/include/mach/cdefBF525.h
index d7e2751c6bcc..d90a85b6b6b9 100644
--- a/arch/blackfin/mach-bf527/include/mach/cdefBF525.h
+++ b/arch/blackfin/mach-bf527/include/mach/cdefBF525.h
@@ -1,15 +1,12 @@
1/* 1/*
2 * Copyright 2007-2008 Analog Devices Inc. 2 * Copyright 2007-2010 Analog Devices Inc.
3 * 3 *
4 * Licensed under the GPL-2 or later 4 * Licensed under the GPL-2 or later.
5 */ 5 */
6 6
7#ifndef _CDEF_BF525_H 7#ifndef _CDEF_BF525_H
8#define _CDEF_BF525_H 8#define _CDEF_BF525_H
9 9
10/* include all Core registers and bit definitions */
11#include "defBF525.h"
12
13/* BF525 is BF522 + USB */ 10/* BF525 is BF522 + USB */
14#include "cdefBF522.h" 11#include "cdefBF522.h"
15 12
diff --git a/arch/blackfin/mach-bf527/include/mach/cdefBF527.h b/arch/blackfin/mach-bf527/include/mach/cdefBF527.h
index c7ba544d50b6..eb22f5866105 100644
--- a/arch/blackfin/mach-bf527/include/mach/cdefBF527.h
+++ b/arch/blackfin/mach-bf527/include/mach/cdefBF527.h
@@ -1,15 +1,12 @@
1/* 1/*
2 * Copyright 2007-2008 Analog Devices Inc. 2 * Copyright 2007-2010 Analog Devices Inc.
3 * 3 *
4 * Licensed under the GPL-2 or later 4 * Licensed under the GPL-2 or later.
5 */ 5 */
6 6
7#ifndef _CDEF_BF527_H 7#ifndef _CDEF_BF527_H
8#define _CDEF_BF527_H 8#define _CDEF_BF527_H
9 9
10/* include all Core registers and bit definitions */
11#include "defBF527.h"
12
13/* BF527 is BF525 + EMAC */ 10/* BF527 is BF525 + EMAC */
14#include "cdefBF525.h" 11#include "cdefBF525.h"
15 12
diff --git a/arch/blackfin/mach-bf527/include/mach/cdefBF52x_base.h b/arch/blackfin/mach-bf527/include/mach/cdefBF52x_base.h
deleted file mode 100644
index 3048b52bf46a..000000000000
--- a/arch/blackfin/mach-bf527/include/mach/cdefBF52x_base.h
+++ /dev/null
@@ -1,1113 +0,0 @@
1/*
2 * Copyright 2007-2008 Analog Devices Inc.
3 *
4 * Licensed under the GPL-2 or later
5 */
6
7#ifndef _CDEF_BF52X_H
8#define _CDEF_BF52X_H
9
10#include <asm/blackfin.h>
11
12#include "defBF52x_base.h"
13
14/* Include core specific register pointer definitions */
15#include <asm/cdef_LPBlackfin.h>
16
17/* ==== begin from cdefBF534.h ==== */
18
19/* Clock and System Control (0xFFC00000 - 0xFFC000FF) */
20#define bfin_read_PLL_CTL() bfin_read16(PLL_CTL)
21#define bfin_read_PLL_DIV() bfin_read16(PLL_DIV)
22#define bfin_write_PLL_DIV(val) bfin_write16(PLL_DIV, val)
23#define bfin_read_VR_CTL() bfin_read16(VR_CTL)
24#define bfin_read_PLL_STAT() bfin_read16(PLL_STAT)
25#define bfin_write_PLL_STAT(val) bfin_write16(PLL_STAT, val)
26#define bfin_read_PLL_LOCKCNT() bfin_read16(PLL_LOCKCNT)
27#define bfin_write_PLL_LOCKCNT(val) bfin_write16(PLL_LOCKCNT, val)
28#define bfin_read_CHIPID() bfin_read32(CHIPID)
29#define bfin_write_CHIPID(val) bfin_write32(CHIPID, val)
30
31
32/* System Interrupt Controller (0xFFC00100 - 0xFFC001FF) */
33#define bfin_read_SWRST() bfin_read16(SWRST)
34#define bfin_write_SWRST(val) bfin_write16(SWRST, val)
35#define bfin_read_SYSCR() bfin_read16(SYSCR)
36#define bfin_write_SYSCR(val) bfin_write16(SYSCR, val)
37
38#define bfin_read_SIC_RVECT() bfin_read32(SIC_RVECT)
39#define bfin_write_SIC_RVECT(val) bfin_write32(SIC_RVECT, val)
40#define bfin_read_SIC_IMASK0() bfin_read32(SIC_IMASK0)
41#define bfin_write_SIC_IMASK0(val) bfin_write32(SIC_IMASK0, val)
42#define bfin_read_SIC_IMASK(x) bfin_read32(SIC_IMASK0 + (x << 6))
43#define bfin_write_SIC_IMASK(x, val) bfin_write32((SIC_IMASK0 + (x << 6)), val)
44
45#define bfin_read_SIC_IAR0() bfin_read32(SIC_IAR0)
46#define bfin_write_SIC_IAR0(val) bfin_write32(SIC_IAR0, val)
47#define bfin_read_SIC_IAR1() bfin_read32(SIC_IAR1)
48#define bfin_write_SIC_IAR1(val) bfin_write32(SIC_IAR1, val)
49#define bfin_read_SIC_IAR2() bfin_read32(SIC_IAR2)
50#define bfin_write_SIC_IAR2(val) bfin_write32(SIC_IAR2, val)
51#define bfin_read_SIC_IAR3() bfin_read32(SIC_IAR3)
52#define bfin_write_SIC_IAR3(val) bfin_write32(SIC_IAR3, val)
53
54#define bfin_read_SIC_ISR0() bfin_read32(SIC_ISR0)
55#define bfin_write_SIC_ISR0(val) bfin_write32(SIC_ISR0, val)
56#define bfin_read_SIC_ISR(x) bfin_read32(SIC_ISR0 + (x << 6))
57#define bfin_write_SIC_ISR(x, val) bfin_write32((SIC_ISR0 + (x << 6)), val)
58
59#define bfin_read_SIC_IWR0() bfin_read32(SIC_IWR0)
60#define bfin_write_SIC_IWR0(val) bfin_write32(SIC_IWR0, val)
61#define bfin_read_SIC_IWR(x) bfin_read32(SIC_IWR0 + (x << 6))
62#define bfin_write_SIC_IWR(x, val) bfin_write32((SIC_IWR0 + (x << 6)), val)
63
64/* SIC Additions to ADSP-BF52x (0xFFC0014C - 0xFFC00162) */
65
66#define bfin_read_SIC_IMASK1() bfin_read32(SIC_IMASK1)
67#define bfin_write_SIC_IMASK1(val) bfin_write32(SIC_IMASK1, val)
68#define bfin_read_SIC_IAR4() bfin_read32(SIC_IAR4)
69#define bfin_write_SIC_IAR4(val) bfin_write32(SIC_IAR4, val)
70#define bfin_read_SIC_IAR5() bfin_read32(SIC_IAR5)
71#define bfin_write_SIC_IAR5(val) bfin_write32(SIC_IAR5, val)
72#define bfin_read_SIC_IAR6() bfin_read32(SIC_IAR6)
73#define bfin_write_SIC_IAR6(val) bfin_write32(SIC_IAR6, val)
74#define bfin_read_SIC_IAR7() bfin_read32(SIC_IAR7)
75#define bfin_write_SIC_IAR7(val) bfin_write32(SIC_IAR7, val)
76#define bfin_read_SIC_ISR1() bfin_read32(SIC_ISR1)
77#define bfin_write_SIC_ISR1(val) bfin_write32(SIC_ISR1, val)
78#define bfin_read_SIC_IWR1() bfin_read32(SIC_IWR1)
79#define bfin_write_SIC_IWR1(val) bfin_write32(SIC_IWR1, val)
80
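The indexed SIC_IMASK/SIC_ISR/SIC_IWR macros above encode the bank layout directly: each bank-1 register sits 0x40 bytes above its bank-0 counterpart, so `x << 6` turns a bank index into the byte offset. Unmasking one of the 64 system interrupts then reads as:

/* Unmask system interrupt irq (0..63): bank = irq / 32, bit = irq % 32. */
static void sic_unmask(unsigned int irq)
{
	unsigned int bank = irq >> 5, bit = irq & 31;

	bfin_write_SIC_IMASK(bank, bfin_read_SIC_IMASK(bank) | (1 << bit));
}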
81/* Watchdog Timer (0xFFC00200 - 0xFFC002FF) */
82#define bfin_read_WDOG_CTL() bfin_read16(WDOG_CTL)
83#define bfin_write_WDOG_CTL(val) bfin_write16(WDOG_CTL, val)
84#define bfin_read_WDOG_CNT() bfin_read32(WDOG_CNT)
85#define bfin_write_WDOG_CNT(val) bfin_write32(WDOG_CNT, val)
86#define bfin_read_WDOG_STAT() bfin_read32(WDOG_STAT)
87#define bfin_write_WDOG_STAT(val) bfin_write32(WDOG_STAT, val)
88
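The watchdog splits into a 32-bit reload value (WDOG_CNT), a 32-bit running count (WDOG_STAT), and a 16-bit control word (WDOG_CTL) whose enable/disable keys are multi-bit patterns defined in the defBF52x headers. A hedged sketch; WDOG_CTL_ENABLE_KEY is a hypothetical name standing in for the real key value:

/* Hedged sketch: WDOG_CTL_ENABLE_KEY is hypothetical; the real enable and
 * disable key patterns live in the defBF52x headers. */
bfin_write_WDOG_CNT(100000000);			/* ~1 s at a 100 MHz SCLK */
bfin_write_WDOG_CTL(WDOG_CTL_ENABLE_KEY);	/* start counting down */
/* Service it before expiry; a WDOG_STAT write is assumed to reload the
 * count from WDOG_CNT, per BF5xx convention. */
bfin_write_WDOG_STAT(0);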
89
90/* Real Time Clock (0xFFC00300 - 0xFFC003FF) */
91#define bfin_read_RTC_STAT() bfin_read32(RTC_STAT)
92#define bfin_write_RTC_STAT(val) bfin_write32(RTC_STAT, val)
93#define bfin_read_RTC_ICTL() bfin_read16(RTC_ICTL)
94#define bfin_write_RTC_ICTL(val) bfin_write16(RTC_ICTL, val)
95#define bfin_read_RTC_ISTAT() bfin_read16(RTC_ISTAT)
96#define bfin_write_RTC_ISTAT(val) bfin_write16(RTC_ISTAT, val)
97#define bfin_read_RTC_SWCNT() bfin_read16(RTC_SWCNT)
98#define bfin_write_RTC_SWCNT(val) bfin_write16(RTC_SWCNT, val)
99#define bfin_read_RTC_ALARM() bfin_read32(RTC_ALARM)
100#define bfin_write_RTC_ALARM(val) bfin_write32(RTC_ALARM, val)
101#define bfin_read_RTC_FAST() bfin_read16(RTC_FAST)
102#define bfin_write_RTC_FAST(val) bfin_write16(RTC_FAST, val)
103#define bfin_read_RTC_PREN() bfin_read16(RTC_PREN)
104#define bfin_write_RTC_PREN(val) bfin_write16(RTC_PREN, val)
105
106
107/* UART0 Controller (0xFFC00400 - 0xFFC004FF) */
108#define bfin_read_UART0_THR() bfin_read16(UART0_THR)
109#define bfin_write_UART0_THR(val) bfin_write16(UART0_THR, val)
110#define bfin_read_UART0_RBR() bfin_read16(UART0_RBR)
111#define bfin_write_UART0_RBR(val) bfin_write16(UART0_RBR, val)
112#define bfin_read_UART0_DLL() bfin_read16(UART0_DLL)
113#define bfin_write_UART0_DLL(val) bfin_write16(UART0_DLL, val)
114#define bfin_read_UART0_IER() bfin_read16(UART0_IER)
115#define bfin_write_UART0_IER(val) bfin_write16(UART0_IER, val)
116#define bfin_read_UART0_DLH() bfin_read16(UART0_DLH)
117#define bfin_write_UART0_DLH(val) bfin_write16(UART0_DLH, val)
118#define bfin_read_UART0_IIR() bfin_read16(UART0_IIR)
119#define bfin_write_UART0_IIR(val) bfin_write16(UART0_IIR, val)
120#define bfin_read_UART0_LCR() bfin_read16(UART0_LCR)
121#define bfin_write_UART0_LCR(val) bfin_write16(UART0_LCR, val)
122#define bfin_read_UART0_MCR() bfin_read16(UART0_MCR)
123#define bfin_write_UART0_MCR(val) bfin_write16(UART0_MCR, val)
124#define bfin_read_UART0_LSR() bfin_read16(UART0_LSR)
125#define bfin_write_UART0_LSR(val) bfin_write16(UART0_LSR, val)
126#define bfin_read_UART0_MSR() bfin_read16(UART0_MSR)
127#define bfin_write_UART0_MSR(val) bfin_write16(UART0_MSR, val)
128#define bfin_read_UART0_SCR() bfin_read16(UART0_SCR)
129#define bfin_write_UART0_SCR(val) bfin_write16(UART0_SCR, val)
130#define bfin_read_UART0_GCTL() bfin_read16(UART0_GCTL)
131#define bfin_write_UART0_GCTL(val) bfin_write16(UART0_GCTL, val)
132
133
134/* SPI Controller (0xFFC00500 - 0xFFC005FF) */
135#define bfin_read_SPI_CTL() bfin_read16(SPI_CTL)
136#define bfin_write_SPI_CTL(val) bfin_write16(SPI_CTL, val)
137#define bfin_read_SPI_FLG() bfin_read16(SPI_FLG)
138#define bfin_write_SPI_FLG(val) bfin_write16(SPI_FLG, val)
139#define bfin_read_SPI_STAT() bfin_read16(SPI_STAT)
140#define bfin_write_SPI_STAT(val) bfin_write16(SPI_STAT, val)
141#define bfin_read_SPI_TDBR() bfin_read16(SPI_TDBR)
142#define bfin_write_SPI_TDBR(val) bfin_write16(SPI_TDBR, val)
143#define bfin_read_SPI_RDBR() bfin_read16(SPI_RDBR)
144#define bfin_write_SPI_RDBR(val) bfin_write16(SPI_RDBR, val)
145#define bfin_read_SPI_BAUD() bfin_read16(SPI_BAUD)
146#define bfin_write_SPI_BAUD(val) bfin_write16(SPI_BAUD, val)
147#define bfin_read_SPI_SHADOW() bfin_read16(SPI_SHADOW)
148#define bfin_write_SPI_SHADOW(val) bfin_write16(SPI_SHADOW, val)
149
150
151/* TIMER0-7 Registers (0xFFC00600 - 0xFFC006FF) */
152#define bfin_read_TIMER0_CONFIG() bfin_read16(TIMER0_CONFIG)
153#define bfin_write_TIMER0_CONFIG(val) bfin_write16(TIMER0_CONFIG, val)
154#define bfin_read_TIMER0_COUNTER() bfin_read32(TIMER0_COUNTER)
155#define bfin_write_TIMER0_COUNTER(val) bfin_write32(TIMER0_COUNTER, val)
156#define bfin_read_TIMER0_PERIOD() bfin_read32(TIMER0_PERIOD)
157#define bfin_write_TIMER0_PERIOD(val) bfin_write32(TIMER0_PERIOD, val)
158#define bfin_read_TIMER0_WIDTH() bfin_read32(TIMER0_WIDTH)
159#define bfin_write_TIMER0_WIDTH(val) bfin_write32(TIMER0_WIDTH, val)
160
161#define bfin_read_TIMER1_CONFIG() bfin_read16(TIMER1_CONFIG)
162#define bfin_write_TIMER1_CONFIG(val) bfin_write16(TIMER1_CONFIG, val)
163#define bfin_read_TIMER1_COUNTER() bfin_read32(TIMER1_COUNTER)
164#define bfin_write_TIMER1_COUNTER(val) bfin_write32(TIMER1_COUNTER, val)
165#define bfin_read_TIMER1_PERIOD() bfin_read32(TIMER1_PERIOD)
166#define bfin_write_TIMER1_PERIOD(val) bfin_write32(TIMER1_PERIOD, val)
167#define bfin_read_TIMER1_WIDTH() bfin_read32(TIMER1_WIDTH)
168#define bfin_write_TIMER1_WIDTH(val) bfin_write32(TIMER1_WIDTH, val)
169
170#define bfin_read_TIMER2_CONFIG() bfin_read16(TIMER2_CONFIG)
171#define bfin_write_TIMER2_CONFIG(val) bfin_write16(TIMER2_CONFIG, val)
172#define bfin_read_TIMER2_COUNTER() bfin_read32(TIMER2_COUNTER)
173#define bfin_write_TIMER2_COUNTER(val) bfin_write32(TIMER2_COUNTER, val)
174#define bfin_read_TIMER2_PERIOD() bfin_read32(TIMER2_PERIOD)
175#define bfin_write_TIMER2_PERIOD(val) bfin_write32(TIMER2_PERIOD, val)
176#define bfin_read_TIMER2_WIDTH() bfin_read32(TIMER2_WIDTH)
177#define bfin_write_TIMER2_WIDTH(val) bfin_write32(TIMER2_WIDTH, val)
178
179#define bfin_read_TIMER3_CONFIG() bfin_read16(TIMER3_CONFIG)
180#define bfin_write_TIMER3_CONFIG(val) bfin_write16(TIMER3_CONFIG, val)
181#define bfin_read_TIMER3_COUNTER() bfin_read32(TIMER3_COUNTER)
182#define bfin_write_TIMER3_COUNTER(val) bfin_write32(TIMER3_COUNTER, val)
183#define bfin_read_TIMER3_PERIOD() bfin_read32(TIMER3_PERIOD)
184#define bfin_write_TIMER3_PERIOD(val) bfin_write32(TIMER3_PERIOD, val)
185#define bfin_read_TIMER3_WIDTH() bfin_read32(TIMER3_WIDTH)
186#define bfin_write_TIMER3_WIDTH(val) bfin_write32(TIMER3_WIDTH, val)
187
188#define bfin_read_TIMER4_CONFIG() bfin_read16(TIMER4_CONFIG)
189#define bfin_write_TIMER4_CONFIG(val) bfin_write16(TIMER4_CONFIG, val)
190#define bfin_read_TIMER4_COUNTER() bfin_read32(TIMER4_COUNTER)
191#define bfin_write_TIMER4_COUNTER(val) bfin_write32(TIMER4_COUNTER, val)
192#define bfin_read_TIMER4_PERIOD() bfin_read32(TIMER4_PERIOD)
193#define bfin_write_TIMER4_PERIOD(val) bfin_write32(TIMER4_PERIOD, val)
194#define bfin_read_TIMER4_WIDTH() bfin_read32(TIMER4_WIDTH)
195#define bfin_write_TIMER4_WIDTH(val) bfin_write32(TIMER4_WIDTH, val)
196
197#define bfin_read_TIMER5_CONFIG() bfin_read16(TIMER5_CONFIG)
198#define bfin_write_TIMER5_CONFIG(val) bfin_write16(TIMER5_CONFIG, val)
199#define bfin_read_TIMER5_COUNTER() bfin_read32(TIMER5_COUNTER)
200#define bfin_write_TIMER5_COUNTER(val) bfin_write32(TIMER5_COUNTER, val)
201#define bfin_read_TIMER5_PERIOD() bfin_read32(TIMER5_PERIOD)
202#define bfin_write_TIMER5_PERIOD(val) bfin_write32(TIMER5_PERIOD, val)
203#define bfin_read_TIMER5_WIDTH() bfin_read32(TIMER5_WIDTH)
204#define bfin_write_TIMER5_WIDTH(val) bfin_write32(TIMER5_WIDTH, val)
205
206#define bfin_read_TIMER6_CONFIG() bfin_read16(TIMER6_CONFIG)
207#define bfin_write_TIMER6_CONFIG(val) bfin_write16(TIMER6_CONFIG, val)
208#define bfin_read_TIMER6_COUNTER() bfin_read32(TIMER6_COUNTER)
209#define bfin_write_TIMER6_COUNTER(val) bfin_write32(TIMER6_COUNTER, val)
210#define bfin_read_TIMER6_PERIOD() bfin_read32(TIMER6_PERIOD)
211#define bfin_write_TIMER6_PERIOD(val) bfin_write32(TIMER6_PERIOD, val)
212#define bfin_read_TIMER6_WIDTH() bfin_read32(TIMER6_WIDTH)
213#define bfin_write_TIMER6_WIDTH(val) bfin_write32(TIMER6_WIDTH, val)
214
215#define bfin_read_TIMER7_CONFIG() bfin_read16(TIMER7_CONFIG)
216#define bfin_write_TIMER7_CONFIG(val) bfin_write16(TIMER7_CONFIG, val)
217#define bfin_read_TIMER7_COUNTER() bfin_read32(TIMER7_COUNTER)
218#define bfin_write_TIMER7_COUNTER(val) bfin_write32(TIMER7_COUNTER, val)
219#define bfin_read_TIMER7_PERIOD() bfin_read32(TIMER7_PERIOD)
220#define bfin_write_TIMER7_PERIOD(val) bfin_write32(TIMER7_PERIOD, val)
221#define bfin_read_TIMER7_WIDTH() bfin_read32(TIMER7_WIDTH)
222#define bfin_write_TIMER7_WIDTH(val) bfin_write32(TIMER7_WIDTH, val)
223
224#define bfin_read_TIMER_ENABLE() bfin_read16(TIMER_ENABLE)
225#define bfin_write_TIMER_ENABLE(val) bfin_write16(TIMER_ENABLE, val)
226#define bfin_read_TIMER_DISABLE() bfin_read16(TIMER_DISABLE)
227#define bfin_write_TIMER_DISABLE(val) bfin_write16(TIMER_DISABLE, val)
228#define bfin_read_TIMER_STATUS() bfin_read32(TIMER_STATUS)
229#define bfin_write_TIMER_STATUS(val) bfin_write32(TIMER_STATUS, val)
230
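Each general-purpose timer above is one 16-bit CONFIG plus three 32-bit value registers, and all eight share TIMER_ENABLE/TIMER_DISABLE, one write-1-to-act bit per timer. A PWM sketch for timer 0; the CONFIG encoding and the enable bit are assumptions in the style of the usual Blackfin gptimer definitions:

/* Assumed encodings: PWM_OUT = 0x0001 and PERIOD_CNT = 0x0020 in
 * TIMER0_CONFIG; bit 0 of TIMER_ENABLE starts timer 0. */
bfin_write_TIMER0_PERIOD(1000);		/* period in SCLK ticks */
bfin_write_TIMER0_WIDTH(250);		/* 25% duty cycle */
bfin_write_TIMER0_CONFIG(0x0021);	/* assumed PWM_OUT | PERIOD_CNT */
bfin_write_TIMER_ENABLE(0x0001);	/* assumed TIMEN0-style enable */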
231
232/* General Purpose I/O Port F (0xFFC00700 - 0xFFC007FF) */
233#define bfin_read_PORTFIO() bfin_read16(PORTFIO)
234#define bfin_write_PORTFIO(val) bfin_write16(PORTFIO, val)
235#define bfin_read_PORTFIO_CLEAR() bfin_read16(PORTFIO_CLEAR)
236#define bfin_write_PORTFIO_CLEAR(val) bfin_write16(PORTFIO_CLEAR, val)
237#define bfin_read_PORTFIO_SET() bfin_read16(PORTFIO_SET)
238#define bfin_write_PORTFIO_SET(val) bfin_write16(PORTFIO_SET, val)
239#define bfin_read_PORTFIO_TOGGLE() bfin_read16(PORTFIO_TOGGLE)
240#define bfin_write_PORTFIO_TOGGLE(val) bfin_write16(PORTFIO_TOGGLE, val)
241#define bfin_read_PORTFIO_MASKA() bfin_read16(PORTFIO_MASKA)
242#define bfin_write_PORTFIO_MASKA(val) bfin_write16(PORTFIO_MASKA, val)
243#define bfin_read_PORTFIO_MASKA_CLEAR() bfin_read16(PORTFIO_MASKA_CLEAR)
244#define bfin_write_PORTFIO_MASKA_CLEAR(val) bfin_write16(PORTFIO_MASKA_CLEAR, val)
245#define bfin_read_PORTFIO_MASKA_SET() bfin_read16(PORTFIO_MASKA_SET)
246#define bfin_write_PORTFIO_MASKA_SET(val) bfin_write16(PORTFIO_MASKA_SET, val)
247#define bfin_read_PORTFIO_MASKA_TOGGLE() bfin_read16(PORTFIO_MASKA_TOGGLE)
248#define bfin_write_PORTFIO_MASKA_TOGGLE(val) bfin_write16(PORTFIO_MASKA_TOGGLE, val)
249#define bfin_read_PORTFIO_MASKB() bfin_read16(PORTFIO_MASKB)
250#define bfin_write_PORTFIO_MASKB(val) bfin_write16(PORTFIO_MASKB, val)
251#define bfin_read_PORTFIO_MASKB_CLEAR() bfin_read16(PORTFIO_MASKB_CLEAR)
252#define bfin_write_PORTFIO_MASKB_CLEAR(val) bfin_write16(PORTFIO_MASKB_CLEAR, val)
253#define bfin_read_PORTFIO_MASKB_SET() bfin_read16(PORTFIO_MASKB_SET)
254#define bfin_write_PORTFIO_MASKB_SET(val) bfin_write16(PORTFIO_MASKB_SET, val)
255#define bfin_read_PORTFIO_MASKB_TOGGLE() bfin_read16(PORTFIO_MASKB_TOGGLE)
256#define bfin_write_PORTFIO_MASKB_TOGGLE(val) bfin_write16(PORTFIO_MASKB_TOGGLE, val)
257#define bfin_read_PORTFIO_DIR() bfin_read16(PORTFIO_DIR)
258#define bfin_write_PORTFIO_DIR(val) bfin_write16(PORTFIO_DIR, val)
259#define bfin_read_PORTFIO_POLAR() bfin_read16(PORTFIO_POLAR)
260#define bfin_write_PORTFIO_POLAR(val) bfin_write16(PORTFIO_POLAR, val)
261#define bfin_read_PORTFIO_EDGE() bfin_read16(PORTFIO_EDGE)
262#define bfin_write_PORTFIO_EDGE(val) bfin_write16(PORTFIO_EDGE, val)
263#define bfin_read_PORTFIO_BOTH() bfin_read16(PORTFIO_BOTH)
264#define bfin_write_PORTFIO_BOTH(val) bfin_write16(PORTFIO_BOTH, val)
265#define bfin_read_PORTFIO_INEN() bfin_read16(PORTFIO_INEN)
266#define bfin_write_PORTFIO_INEN(val) bfin_write16(PORTFIO_INEN, val)
267
268
269/* SPORT0 Controller (0xFFC00800 - 0xFFC008FF) */
270#define bfin_read_SPORT0_TCR1() bfin_read16(SPORT0_TCR1)
271#define bfin_write_SPORT0_TCR1(val) bfin_write16(SPORT0_TCR1, val)
272#define bfin_read_SPORT0_TCR2() bfin_read16(SPORT0_TCR2)
273#define bfin_write_SPORT0_TCR2(val) bfin_write16(SPORT0_TCR2, val)
274#define bfin_read_SPORT0_TCLKDIV() bfin_read16(SPORT0_TCLKDIV)
275#define bfin_write_SPORT0_TCLKDIV(val) bfin_write16(SPORT0_TCLKDIV, val)
276#define bfin_read_SPORT0_TFSDIV() bfin_read16(SPORT0_TFSDIV)
277#define bfin_write_SPORT0_TFSDIV(val) bfin_write16(SPORT0_TFSDIV, val)
278#define bfin_read_SPORT0_TX() bfin_read32(SPORT0_TX)
279#define bfin_write_SPORT0_TX(val) bfin_write32(SPORT0_TX, val)
280#define bfin_read_SPORT0_RX() bfin_read32(SPORT0_RX)
281#define bfin_write_SPORT0_RX(val) bfin_write32(SPORT0_RX, val)
282#define bfin_read_SPORT0_TX32() bfin_read32(SPORT0_TX)
283#define bfin_write_SPORT0_TX32(val) bfin_write32(SPORT0_TX, val)
284#define bfin_read_SPORT0_RX32() bfin_read32(SPORT0_RX)
285#define bfin_write_SPORT0_RX32(val) bfin_write32(SPORT0_RX, val)
286#define bfin_read_SPORT0_TX16() bfin_read16(SPORT0_TX)
287#define bfin_write_SPORT0_TX16(val) bfin_write16(SPORT0_TX, val)
288#define bfin_read_SPORT0_RX16() bfin_read16(SPORT0_RX)
289#define bfin_write_SPORT0_RX16(val) bfin_write16(SPORT0_RX, val)
290#define bfin_read_SPORT0_RCR1() bfin_read16(SPORT0_RCR1)
291#define bfin_write_SPORT0_RCR1(val) bfin_write16(SPORT0_RCR1, val)
292#define bfin_read_SPORT0_RCR2() bfin_read16(SPORT0_RCR2)
293#define bfin_write_SPORT0_RCR2(val) bfin_write16(SPORT0_RCR2, val)
294#define bfin_read_SPORT0_RCLKDIV() bfin_read16(SPORT0_RCLKDIV)
295#define bfin_write_SPORT0_RCLKDIV(val) bfin_write16(SPORT0_RCLKDIV, val)
296#define bfin_read_SPORT0_RFSDIV() bfin_read16(SPORT0_RFSDIV)
297#define bfin_write_SPORT0_RFSDIV(val) bfin_write16(SPORT0_RFSDIV, val)
298#define bfin_read_SPORT0_STAT() bfin_read16(SPORT0_STAT)
299#define bfin_write_SPORT0_STAT(val) bfin_write16(SPORT0_STAT, val)
300#define bfin_read_SPORT0_CHNL() bfin_read16(SPORT0_CHNL)
301#define bfin_write_SPORT0_CHNL(val) bfin_write16(SPORT0_CHNL, val)
302#define bfin_read_SPORT0_MCMC1() bfin_read16(SPORT0_MCMC1)
303#define bfin_write_SPORT0_MCMC1(val) bfin_write16(SPORT0_MCMC1, val)
304#define bfin_read_SPORT0_MCMC2() bfin_read16(SPORT0_MCMC2)
305#define bfin_write_SPORT0_MCMC2(val) bfin_write16(SPORT0_MCMC2, val)
306#define bfin_read_SPORT0_MTCS0() bfin_read32(SPORT0_MTCS0)
307#define bfin_write_SPORT0_MTCS0(val) bfin_write32(SPORT0_MTCS0, val)
308#define bfin_read_SPORT0_MTCS1() bfin_read32(SPORT0_MTCS1)
309#define bfin_write_SPORT0_MTCS1(val) bfin_write32(SPORT0_MTCS1, val)
310#define bfin_read_SPORT0_MTCS2() bfin_read32(SPORT0_MTCS2)
311#define bfin_write_SPORT0_MTCS2(val) bfin_write32(SPORT0_MTCS2, val)
312#define bfin_read_SPORT0_MTCS3() bfin_read32(SPORT0_MTCS3)
313#define bfin_write_SPORT0_MTCS3(val) bfin_write32(SPORT0_MTCS3, val)
314#define bfin_read_SPORT0_MRCS0() bfin_read32(SPORT0_MRCS0)
315#define bfin_write_SPORT0_MRCS0(val) bfin_write32(SPORT0_MRCS0, val)
316#define bfin_read_SPORT0_MRCS1() bfin_read32(SPORT0_MRCS1)
317#define bfin_write_SPORT0_MRCS1(val) bfin_write32(SPORT0_MRCS1, val)
318#define bfin_read_SPORT0_MRCS2() bfin_read32(SPORT0_MRCS2)
319#define bfin_write_SPORT0_MRCS2(val) bfin_write32(SPORT0_MRCS2, val)
320#define bfin_read_SPORT0_MRCS3() bfin_read32(SPORT0_MRCS3)
321#define bfin_write_SPORT0_MRCS3(val) bfin_write32(SPORT0_MRCS3, val)
322
323
324/* SPORT1 Controller (0xFFC00900 - 0xFFC009FF) */
325#define bfin_read_SPORT1_TCR1() bfin_read16(SPORT1_TCR1)
326#define bfin_write_SPORT1_TCR1(val) bfin_write16(SPORT1_TCR1, val)
327#define bfin_read_SPORT1_TCR2() bfin_read16(SPORT1_TCR2)
328#define bfin_write_SPORT1_TCR2(val) bfin_write16(SPORT1_TCR2, val)
329#define bfin_read_SPORT1_TCLKDIV() bfin_read16(SPORT1_TCLKDIV)
330#define bfin_write_SPORT1_TCLKDIV(val) bfin_write16(SPORT1_TCLKDIV, val)
331#define bfin_read_SPORT1_TFSDIV() bfin_read16(SPORT1_TFSDIV)
332#define bfin_write_SPORT1_TFSDIV(val) bfin_write16(SPORT1_TFSDIV, val)
333#define bfin_read_SPORT1_TX() bfin_read32(SPORT1_TX)
334#define bfin_write_SPORT1_TX(val) bfin_write32(SPORT1_TX, val)
335#define bfin_read_SPORT1_RX() bfin_read32(SPORT1_RX)
336#define bfin_write_SPORT1_RX(val) bfin_write32(SPORT1_RX, val)
337#define bfin_read_SPORT1_TX32() bfin_read32(SPORT1_TX)
338#define bfin_write_SPORT1_TX32(val) bfin_write32(SPORT1_TX, val)
339#define bfin_read_SPORT1_RX32() bfin_read32(SPORT1_RX)
340#define bfin_write_SPORT1_RX32(val) bfin_write32(SPORT1_RX, val)
341#define bfin_read_SPORT1_TX16() bfin_read16(SPORT1_TX)
342#define bfin_write_SPORT1_TX16(val) bfin_write16(SPORT1_TX, val)
343#define bfin_read_SPORT1_RX16() bfin_read16(SPORT1_RX)
344#define bfin_write_SPORT1_RX16(val) bfin_write16(SPORT1_RX, val)
345#define bfin_read_SPORT1_RCR1() bfin_read16(SPORT1_RCR1)
346#define bfin_write_SPORT1_RCR1(val) bfin_write16(SPORT1_RCR1, val)
347#define bfin_read_SPORT1_RCR2() bfin_read16(SPORT1_RCR2)
348#define bfin_write_SPORT1_RCR2(val) bfin_write16(SPORT1_RCR2, val)
349#define bfin_read_SPORT1_RCLKDIV() bfin_read16(SPORT1_RCLKDIV)
350#define bfin_write_SPORT1_RCLKDIV(val) bfin_write16(SPORT1_RCLKDIV, val)
351#define bfin_read_SPORT1_RFSDIV() bfin_read16(SPORT1_RFSDIV)
352#define bfin_write_SPORT1_RFSDIV(val) bfin_write16(SPORT1_RFSDIV, val)
353#define bfin_read_SPORT1_STAT() bfin_read16(SPORT1_STAT)
354#define bfin_write_SPORT1_STAT(val) bfin_write16(SPORT1_STAT, val)
355#define bfin_read_SPORT1_CHNL() bfin_read16(SPORT1_CHNL)
356#define bfin_write_SPORT1_CHNL(val) bfin_write16(SPORT1_CHNL, val)
357#define bfin_read_SPORT1_MCMC1() bfin_read16(SPORT1_MCMC1)
358#define bfin_write_SPORT1_MCMC1(val) bfin_write16(SPORT1_MCMC1, val)
359#define bfin_read_SPORT1_MCMC2() bfin_read16(SPORT1_MCMC2)
360#define bfin_write_SPORT1_MCMC2(val) bfin_write16(SPORT1_MCMC2, val)
361#define bfin_read_SPORT1_MTCS0() bfin_read32(SPORT1_MTCS0)
362#define bfin_write_SPORT1_MTCS0(val) bfin_write32(SPORT1_MTCS0, val)
363#define bfin_read_SPORT1_MTCS1() bfin_read32(SPORT1_MTCS1)
364#define bfin_write_SPORT1_MTCS1(val) bfin_write32(SPORT1_MTCS1, val)
365#define bfin_read_SPORT1_MTCS2() bfin_read32(SPORT1_MTCS2)
366#define bfin_write_SPORT1_MTCS2(val) bfin_write32(SPORT1_MTCS2, val)
367#define bfin_read_SPORT1_MTCS3() bfin_read32(SPORT1_MTCS3)
368#define bfin_write_SPORT1_MTCS3(val) bfin_write32(SPORT1_MTCS3, val)
369#define bfin_read_SPORT1_MRCS0() bfin_read32(SPORT1_MRCS0)
370#define bfin_write_SPORT1_MRCS0(val) bfin_write32(SPORT1_MRCS0, val)
371#define bfin_read_SPORT1_MRCS1() bfin_read32(SPORT1_MRCS1)
372#define bfin_write_SPORT1_MRCS1(val) bfin_write32(SPORT1_MRCS1, val)
373#define bfin_read_SPORT1_MRCS2() bfin_read32(SPORT1_MRCS2)
374#define bfin_write_SPORT1_MRCS2(val) bfin_write32(SPORT1_MRCS2, val)
375#define bfin_read_SPORT1_MRCS3() bfin_read32(SPORT1_MRCS3)
376#define bfin_write_SPORT1_MRCS3(val) bfin_write32(SPORT1_MRCS3, val)
377
378
379/* External Bus Interface Unit (0xFFC00A00 - 0xFFC00AFF) */
380#define bfin_read_EBIU_AMGCTL() bfin_read16(EBIU_AMGCTL)
381#define bfin_write_EBIU_AMGCTL(val) bfin_write16(EBIU_AMGCTL, val)
382#define bfin_read_EBIU_AMBCTL0() bfin_read32(EBIU_AMBCTL0)
383#define bfin_write_EBIU_AMBCTL0(val) bfin_write32(EBIU_AMBCTL0, val)
384#define bfin_read_EBIU_AMBCTL1() bfin_read32(EBIU_AMBCTL1)
385#define bfin_write_EBIU_AMBCTL1(val) bfin_write32(EBIU_AMBCTL1, val)
386#define bfin_read_EBIU_SDGCTL() bfin_read32(EBIU_SDGCTL)
387#define bfin_write_EBIU_SDGCTL(val) bfin_write32(EBIU_SDGCTL, val)
388#define bfin_read_EBIU_SDBCTL() bfin_read16(EBIU_SDBCTL)
389#define bfin_write_EBIU_SDBCTL(val) bfin_write16(EBIU_SDBCTL, val)
390#define bfin_read_EBIU_SDRRC() bfin_read16(EBIU_SDRRC)
391#define bfin_write_EBIU_SDRRC(val) bfin_write16(EBIU_SDRRC, val)
392#define bfin_read_EBIU_SDSTAT() bfin_read16(EBIU_SDSTAT)
393#define bfin_write_EBIU_SDSTAT(val) bfin_write16(EBIU_SDSTAT, val)
394
395
396/* DMA Traffic Control Registers */
397#define bfin_read_DMA_TC_PER() bfin_read16(DMA_TC_PER)
398#define bfin_write_DMA_TC_PER(val) bfin_write16(DMA_TC_PER, val)
399#define bfin_read_DMA_TC_CNT() bfin_read16(DMA_TC_CNT)
400#define bfin_write_DMA_TC_CNT(val) bfin_write16(DMA_TC_CNT, val)
401
402/* Alternate, deprecated register names (below) are provided for backward code compatibility */
403#define bfin_read_DMA_TCPER() bfin_read16(DMA_TCPER)
404#define bfin_write_DMA_TCPER(val) bfin_write16(DMA_TCPER, val)
405#define bfin_read_DMA_TCCNT() bfin_read16(DMA_TCCNT)
406#define bfin_write_DMA_TCCNT(val) bfin_write16(DMA_TCCNT, val)
407
408/* DMA Controller */
409#define bfin_read_DMA0_CONFIG() bfin_read16(DMA0_CONFIG)
410#define bfin_write_DMA0_CONFIG(val) bfin_write16(DMA0_CONFIG, val)
411#define bfin_read_DMA0_NEXT_DESC_PTR() bfin_read32(DMA0_NEXT_DESC_PTR)
412#define bfin_write_DMA0_NEXT_DESC_PTR(val) bfin_write32(DMA0_NEXT_DESC_PTR, val)
413#define bfin_read_DMA0_START_ADDR() bfin_read32(DMA0_START_ADDR)
414#define bfin_write_DMA0_START_ADDR(val) bfin_write32(DMA0_START_ADDR, val)
415#define bfin_read_DMA0_X_COUNT() bfin_read16(DMA0_X_COUNT)
416#define bfin_write_DMA0_X_COUNT(val) bfin_write16(DMA0_X_COUNT, val)
417#define bfin_read_DMA0_Y_COUNT() bfin_read16(DMA0_Y_COUNT)
418#define bfin_write_DMA0_Y_COUNT(val) bfin_write16(DMA0_Y_COUNT, val)
419#define bfin_read_DMA0_X_MODIFY() bfin_read16(DMA0_X_MODIFY)
420#define bfin_write_DMA0_X_MODIFY(val) bfin_write16(DMA0_X_MODIFY, val)
421#define bfin_read_DMA0_Y_MODIFY() bfin_read16(DMA0_Y_MODIFY)
422#define bfin_write_DMA0_Y_MODIFY(val) bfin_write16(DMA0_Y_MODIFY, val)
423#define bfin_read_DMA0_CURR_DESC_PTR() bfin_read32(DMA0_CURR_DESC_PTR)
424#define bfin_write_DMA0_CURR_DESC_PTR(val) bfin_write32(DMA0_CURR_DESC_PTR, val)
425#define bfin_read_DMA0_CURR_ADDR() bfin_read32(DMA0_CURR_ADDR)
426#define bfin_write_DMA0_CURR_ADDR(val) bfin_write32(DMA0_CURR_ADDR, val)
427#define bfin_read_DMA0_CURR_X_COUNT() bfin_read16(DMA0_CURR_X_COUNT)
428#define bfin_write_DMA0_CURR_X_COUNT(val) bfin_write16(DMA0_CURR_X_COUNT, val)
429#define bfin_read_DMA0_CURR_Y_COUNT() bfin_read16(DMA0_CURR_Y_COUNT)
430#define bfin_write_DMA0_CURR_Y_COUNT(val) bfin_write16(DMA0_CURR_Y_COUNT, val)
431#define bfin_read_DMA0_IRQ_STATUS() bfin_read16(DMA0_IRQ_STATUS)
432#define bfin_write_DMA0_IRQ_STATUS(val) bfin_write16(DMA0_IRQ_STATUS, val)
433#define bfin_read_DMA0_PERIPHERAL_MAP() bfin_read16(DMA0_PERIPHERAL_MAP)
434#define bfin_write_DMA0_PERIPHERAL_MAP(val) bfin_write16(DMA0_PERIPHERAL_MAP, val)
435
436#define bfin_read_DMA1_CONFIG() bfin_read16(DMA1_CONFIG)
437#define bfin_write_DMA1_CONFIG(val) bfin_write16(DMA1_CONFIG, val)
438#define bfin_read_DMA1_NEXT_DESC_PTR() bfin_read32(DMA1_NEXT_DESC_PTR)
439#define bfin_write_DMA1_NEXT_DESC_PTR(val) bfin_write32(DMA1_NEXT_DESC_PTR, val)
440#define bfin_read_DMA1_START_ADDR() bfin_read32(DMA1_START_ADDR)
441#define bfin_write_DMA1_START_ADDR(val) bfin_write32(DMA1_START_ADDR, val)
442#define bfin_read_DMA1_X_COUNT() bfin_read16(DMA1_X_COUNT)
443#define bfin_write_DMA1_X_COUNT(val) bfin_write16(DMA1_X_COUNT, val)
444#define bfin_read_DMA1_Y_COUNT() bfin_read16(DMA1_Y_COUNT)
445#define bfin_write_DMA1_Y_COUNT(val) bfin_write16(DMA1_Y_COUNT, val)
446#define bfin_read_DMA1_X_MODIFY() bfin_read16(DMA1_X_MODIFY)
447#define bfin_write_DMA1_X_MODIFY(val) bfin_write16(DMA1_X_MODIFY, val)
448#define bfin_read_DMA1_Y_MODIFY() bfin_read16(DMA1_Y_MODIFY)
449#define bfin_write_DMA1_Y_MODIFY(val) bfin_write16(DMA1_Y_MODIFY, val)
450#define bfin_read_DMA1_CURR_DESC_PTR() bfin_read32(DMA1_CURR_DESC_PTR)
451#define bfin_write_DMA1_CURR_DESC_PTR(val) bfin_write32(DMA1_CURR_DESC_PTR, val)
452#define bfin_read_DMA1_CURR_ADDR() bfin_read32(DMA1_CURR_ADDR)
453#define bfin_write_DMA1_CURR_ADDR(val) bfin_write32(DMA1_CURR_ADDR, val)
454#define bfin_read_DMA1_CURR_X_COUNT() bfin_read16(DMA1_CURR_X_COUNT)
455#define bfin_write_DMA1_CURR_X_COUNT(val) bfin_write16(DMA1_CURR_X_COUNT, val)
456#define bfin_read_DMA1_CURR_Y_COUNT() bfin_read16(DMA1_CURR_Y_COUNT)
457#define bfin_write_DMA1_CURR_Y_COUNT(val) bfin_write16(DMA1_CURR_Y_COUNT, val)
458#define bfin_read_DMA1_IRQ_STATUS() bfin_read16(DMA1_IRQ_STATUS)
459#define bfin_write_DMA1_IRQ_STATUS(val) bfin_write16(DMA1_IRQ_STATUS, val)
460#define bfin_read_DMA1_PERIPHERAL_MAP() bfin_read16(DMA1_PERIPHERAL_MAP)
461#define bfin_write_DMA1_PERIPHERAL_MAP(val) bfin_write16(DMA1_PERIPHERAL_MAP, val)
462
463#define bfin_read_DMA2_CONFIG() bfin_read16(DMA2_CONFIG)
464#define bfin_write_DMA2_CONFIG(val) bfin_write16(DMA2_CONFIG, val)
465#define bfin_read_DMA2_NEXT_DESC_PTR() bfin_read32(DMA2_NEXT_DESC_PTR)
466#define bfin_write_DMA2_NEXT_DESC_PTR(val) bfin_write32(DMA2_NEXT_DESC_PTR, val)
467#define bfin_read_DMA2_START_ADDR() bfin_read32(DMA2_START_ADDR)
468#define bfin_write_DMA2_START_ADDR(val) bfin_write32(DMA2_START_ADDR, val)
469#define bfin_read_DMA2_X_COUNT() bfin_read16(DMA2_X_COUNT)
470#define bfin_write_DMA2_X_COUNT(val) bfin_write16(DMA2_X_COUNT, val)
471#define bfin_read_DMA2_Y_COUNT() bfin_read16(DMA2_Y_COUNT)
472#define bfin_write_DMA2_Y_COUNT(val) bfin_write16(DMA2_Y_COUNT, val)
473#define bfin_read_DMA2_X_MODIFY() bfin_read16(DMA2_X_MODIFY)
474#define bfin_write_DMA2_X_MODIFY(val) bfin_write16(DMA2_X_MODIFY, val)
475#define bfin_read_DMA2_Y_MODIFY() bfin_read16(DMA2_Y_MODIFY)
476#define bfin_write_DMA2_Y_MODIFY(val) bfin_write16(DMA2_Y_MODIFY, val)
477#define bfin_read_DMA2_CURR_DESC_PTR() bfin_read32(DMA2_CURR_DESC_PTR)
478#define bfin_write_DMA2_CURR_DESC_PTR(val) bfin_write32(DMA2_CURR_DESC_PTR, val)
479#define bfin_read_DMA2_CURR_ADDR() bfin_read32(DMA2_CURR_ADDR)
480#define bfin_write_DMA2_CURR_ADDR(val) bfin_write32(DMA2_CURR_ADDR, val)
481#define bfin_read_DMA2_CURR_X_COUNT() bfin_read16(DMA2_CURR_X_COUNT)
482#define bfin_write_DMA2_CURR_X_COUNT(val) bfin_write16(DMA2_CURR_X_COUNT, val)
483#define bfin_read_DMA2_CURR_Y_COUNT() bfin_read16(DMA2_CURR_Y_COUNT)
484#define bfin_write_DMA2_CURR_Y_COUNT(val) bfin_write16(DMA2_CURR_Y_COUNT, val)
485#define bfin_read_DMA2_IRQ_STATUS() bfin_read16(DMA2_IRQ_STATUS)
486#define bfin_write_DMA2_IRQ_STATUS(val) bfin_write16(DMA2_IRQ_STATUS, val)
487#define bfin_read_DMA2_PERIPHERAL_MAP() bfin_read16(DMA2_PERIPHERAL_MAP)
488#define bfin_write_DMA2_PERIPHERAL_MAP(val) bfin_write16(DMA2_PERIPHERAL_MAP, val)
489
490#define bfin_read_DMA3_CONFIG() bfin_read16(DMA3_CONFIG)
491#define bfin_write_DMA3_CONFIG(val) bfin_write16(DMA3_CONFIG, val)
492#define bfin_read_DMA3_NEXT_DESC_PTR() bfin_read32(DMA3_NEXT_DESC_PTR)
493#define bfin_write_DMA3_NEXT_DESC_PTR(val) bfin_write32(DMA3_NEXT_DESC_PTR, val)
494#define bfin_read_DMA3_START_ADDR() bfin_read32(DMA3_START_ADDR)
495#define bfin_write_DMA3_START_ADDR(val) bfin_write32(DMA3_START_ADDR, val)
496#define bfin_read_DMA3_X_COUNT() bfin_read16(DMA3_X_COUNT)
497#define bfin_write_DMA3_X_COUNT(val) bfin_write16(DMA3_X_COUNT, val)
498#define bfin_read_DMA3_Y_COUNT() bfin_read16(DMA3_Y_COUNT)
499#define bfin_write_DMA3_Y_COUNT(val) bfin_write16(DMA3_Y_COUNT, val)
500#define bfin_read_DMA3_X_MODIFY() bfin_read16(DMA3_X_MODIFY)
501#define bfin_write_DMA3_X_MODIFY(val) bfin_write16(DMA3_X_MODIFY, val)
502#define bfin_read_DMA3_Y_MODIFY() bfin_read16(DMA3_Y_MODIFY)
503#define bfin_write_DMA3_Y_MODIFY(val) bfin_write16(DMA3_Y_MODIFY, val)
504#define bfin_read_DMA3_CURR_DESC_PTR() bfin_read32(DMA3_CURR_DESC_PTR)
505#define bfin_write_DMA3_CURR_DESC_PTR(val) bfin_write32(DMA3_CURR_DESC_PTR, val)
506#define bfin_read_DMA3_CURR_ADDR() bfin_read32(DMA3_CURR_ADDR)
507#define bfin_write_DMA3_CURR_ADDR(val) bfin_write32(DMA3_CURR_ADDR, val)
508#define bfin_read_DMA3_CURR_X_COUNT() bfin_read16(DMA3_CURR_X_COUNT)
509#define bfin_write_DMA3_CURR_X_COUNT(val) bfin_write16(DMA3_CURR_X_COUNT, val)
510#define bfin_read_DMA3_CURR_Y_COUNT() bfin_read16(DMA3_CURR_Y_COUNT)
511#define bfin_write_DMA3_CURR_Y_COUNT(val) bfin_write16(DMA3_CURR_Y_COUNT, val)
512#define bfin_read_DMA3_IRQ_STATUS() bfin_read16(DMA3_IRQ_STATUS)
513#define bfin_write_DMA3_IRQ_STATUS(val) bfin_write16(DMA3_IRQ_STATUS, val)
514#define bfin_read_DMA3_PERIPHERAL_MAP() bfin_read16(DMA3_PERIPHERAL_MAP)
515#define bfin_write_DMA3_PERIPHERAL_MAP(val) bfin_write16(DMA3_PERIPHERAL_MAP, val)
516
517#define bfin_read_DMA4_CONFIG() bfin_read16(DMA4_CONFIG)
518#define bfin_write_DMA4_CONFIG(val) bfin_write16(DMA4_CONFIG, val)
519#define bfin_read_DMA4_NEXT_DESC_PTR() bfin_read32(DMA4_NEXT_DESC_PTR)
520#define bfin_write_DMA4_NEXT_DESC_PTR(val) bfin_write32(DMA4_NEXT_DESC_PTR, val)
521#define bfin_read_DMA4_START_ADDR() bfin_read32(DMA4_START_ADDR)
522#define bfin_write_DMA4_START_ADDR(val) bfin_write32(DMA4_START_ADDR, val)
523#define bfin_read_DMA4_X_COUNT() bfin_read16(DMA4_X_COUNT)
524#define bfin_write_DMA4_X_COUNT(val) bfin_write16(DMA4_X_COUNT, val)
525#define bfin_read_DMA4_Y_COUNT() bfin_read16(DMA4_Y_COUNT)
526#define bfin_write_DMA4_Y_COUNT(val) bfin_write16(DMA4_Y_COUNT, val)
527#define bfin_read_DMA4_X_MODIFY() bfin_read16(DMA4_X_MODIFY)
528#define bfin_write_DMA4_X_MODIFY(val) bfin_write16(DMA4_X_MODIFY, val)
529#define bfin_read_DMA4_Y_MODIFY() bfin_read16(DMA4_Y_MODIFY)
530#define bfin_write_DMA4_Y_MODIFY(val) bfin_write16(DMA4_Y_MODIFY, val)
531#define bfin_read_DMA4_CURR_DESC_PTR() bfin_read32(DMA4_CURR_DESC_PTR)
532#define bfin_write_DMA4_CURR_DESC_PTR(val) bfin_write32(DMA4_CURR_DESC_PTR, val)
533#define bfin_read_DMA4_CURR_ADDR() bfin_read32(DMA4_CURR_ADDR)
534#define bfin_write_DMA4_CURR_ADDR(val) bfin_write32(DMA4_CURR_ADDR, val)
535#define bfin_read_DMA4_CURR_X_COUNT() bfin_read16(DMA4_CURR_X_COUNT)
536#define bfin_write_DMA4_CURR_X_COUNT(val) bfin_write16(DMA4_CURR_X_COUNT, val)
537#define bfin_read_DMA4_CURR_Y_COUNT() bfin_read16(DMA4_CURR_Y_COUNT)
538#define bfin_write_DMA4_CURR_Y_COUNT(val) bfin_write16(DMA4_CURR_Y_COUNT, val)
539#define bfin_read_DMA4_IRQ_STATUS() bfin_read16(DMA4_IRQ_STATUS)
540#define bfin_write_DMA4_IRQ_STATUS(val) bfin_write16(DMA4_IRQ_STATUS, val)
541#define bfin_read_DMA4_PERIPHERAL_MAP() bfin_read16(DMA4_PERIPHERAL_MAP)
542#define bfin_write_DMA4_PERIPHERAL_MAP(val) bfin_write16(DMA4_PERIPHERAL_MAP, val)
543
544#define bfin_read_DMA5_CONFIG() bfin_read16(DMA5_CONFIG)
545#define bfin_write_DMA5_CONFIG(val) bfin_write16(DMA5_CONFIG, val)
546#define bfin_read_DMA5_NEXT_DESC_PTR() bfin_read32(DMA5_NEXT_DESC_PTR)
547#define bfin_write_DMA5_NEXT_DESC_PTR(val) bfin_write32(DMA5_NEXT_DESC_PTR, val)
548#define bfin_read_DMA5_START_ADDR() bfin_read32(DMA5_START_ADDR)
549#define bfin_write_DMA5_START_ADDR(val) bfin_write32(DMA5_START_ADDR, val)
550#define bfin_read_DMA5_X_COUNT() bfin_read16(DMA5_X_COUNT)
551#define bfin_write_DMA5_X_COUNT(val) bfin_write16(DMA5_X_COUNT, val)
552#define bfin_read_DMA5_Y_COUNT() bfin_read16(DMA5_Y_COUNT)
553#define bfin_write_DMA5_Y_COUNT(val) bfin_write16(DMA5_Y_COUNT, val)
554#define bfin_read_DMA5_X_MODIFY() bfin_read16(DMA5_X_MODIFY)
555#define bfin_write_DMA5_X_MODIFY(val) bfin_write16(DMA5_X_MODIFY, val)
556#define bfin_read_DMA5_Y_MODIFY() bfin_read16(DMA5_Y_MODIFY)
557#define bfin_write_DMA5_Y_MODIFY(val) bfin_write16(DMA5_Y_MODIFY, val)
558#define bfin_read_DMA5_CURR_DESC_PTR() bfin_read32(DMA5_CURR_DESC_PTR)
559#define bfin_write_DMA5_CURR_DESC_PTR(val) bfin_write32(DMA5_CURR_DESC_PTR, val)
560#define bfin_read_DMA5_CURR_ADDR() bfin_read32(DMA5_CURR_ADDR)
561#define bfin_write_DMA5_CURR_ADDR(val) bfin_write32(DMA5_CURR_ADDR, val)
562#define bfin_read_DMA5_CURR_X_COUNT() bfin_read16(DMA5_CURR_X_COUNT)
563#define bfin_write_DMA5_CURR_X_COUNT(val) bfin_write16(DMA5_CURR_X_COUNT, val)
564#define bfin_read_DMA5_CURR_Y_COUNT() bfin_read16(DMA5_CURR_Y_COUNT)
565#define bfin_write_DMA5_CURR_Y_COUNT(val) bfin_write16(DMA5_CURR_Y_COUNT, val)
566#define bfin_read_DMA5_IRQ_STATUS() bfin_read16(DMA5_IRQ_STATUS)
567#define bfin_write_DMA5_IRQ_STATUS(val) bfin_write16(DMA5_IRQ_STATUS, val)
568#define bfin_read_DMA5_PERIPHERAL_MAP() bfin_read16(DMA5_PERIPHERAL_MAP)
569#define bfin_write_DMA5_PERIPHERAL_MAP(val) bfin_write16(DMA5_PERIPHERAL_MAP, val)
570
571#define bfin_read_DMA6_CONFIG() bfin_read16(DMA6_CONFIG)
572#define bfin_write_DMA6_CONFIG(val) bfin_write16(DMA6_CONFIG, val)
573#define bfin_read_DMA6_NEXT_DESC_PTR() bfin_read32(DMA6_NEXT_DESC_PTR)
574#define bfin_write_DMA6_NEXT_DESC_PTR(val) bfin_write32(DMA6_NEXT_DESC_PTR, val)
575#define bfin_read_DMA6_START_ADDR() bfin_read32(DMA6_START_ADDR)
576#define bfin_write_DMA6_START_ADDR(val) bfin_write32(DMA6_START_ADDR, val)
577#define bfin_read_DMA6_X_COUNT() bfin_read16(DMA6_X_COUNT)
578#define bfin_write_DMA6_X_COUNT(val) bfin_write16(DMA6_X_COUNT, val)
579#define bfin_read_DMA6_Y_COUNT() bfin_read16(DMA6_Y_COUNT)
580#define bfin_write_DMA6_Y_COUNT(val) bfin_write16(DMA6_Y_COUNT, val)
581#define bfin_read_DMA6_X_MODIFY() bfin_read16(DMA6_X_MODIFY)
582#define bfin_write_DMA6_X_MODIFY(val) bfin_write16(DMA6_X_MODIFY, val)
583#define bfin_read_DMA6_Y_MODIFY() bfin_read16(DMA6_Y_MODIFY)
584#define bfin_write_DMA6_Y_MODIFY(val) bfin_write16(DMA6_Y_MODIFY, val)
585#define bfin_read_DMA6_CURR_DESC_PTR() bfin_read32(DMA6_CURR_DESC_PTR)
586#define bfin_write_DMA6_CURR_DESC_PTR(val) bfin_write32(DMA6_CURR_DESC_PTR, val)
587#define bfin_read_DMA6_CURR_ADDR() bfin_read32(DMA6_CURR_ADDR)
588#define bfin_write_DMA6_CURR_ADDR(val) bfin_write32(DMA6_CURR_ADDR, val)
589#define bfin_read_DMA6_CURR_X_COUNT() bfin_read16(DMA6_CURR_X_COUNT)
590#define bfin_write_DMA6_CURR_X_COUNT(val) bfin_write16(DMA6_CURR_X_COUNT, val)
591#define bfin_read_DMA6_CURR_Y_COUNT() bfin_read16(DMA6_CURR_Y_COUNT)
592#define bfin_write_DMA6_CURR_Y_COUNT(val) bfin_write16(DMA6_CURR_Y_COUNT, val)
593#define bfin_read_DMA6_IRQ_STATUS() bfin_read16(DMA6_IRQ_STATUS)
594#define bfin_write_DMA6_IRQ_STATUS(val) bfin_write16(DMA6_IRQ_STATUS, val)
595#define bfin_read_DMA6_PERIPHERAL_MAP() bfin_read16(DMA6_PERIPHERAL_MAP)
596#define bfin_write_DMA6_PERIPHERAL_MAP(val) bfin_write16(DMA6_PERIPHERAL_MAP, val)
597
598#define bfin_read_DMA7_CONFIG() bfin_read16(DMA7_CONFIG)
599#define bfin_write_DMA7_CONFIG(val) bfin_write16(DMA7_CONFIG, val)
600#define bfin_read_DMA7_NEXT_DESC_PTR() bfin_read32(DMA7_NEXT_DESC_PTR)
601#define bfin_write_DMA7_NEXT_DESC_PTR(val) bfin_write32(DMA7_NEXT_DESC_PTR, val)
602#define bfin_read_DMA7_START_ADDR() bfin_read32(DMA7_START_ADDR)
603#define bfin_write_DMA7_START_ADDR(val) bfin_write32(DMA7_START_ADDR, val)
604#define bfin_read_DMA7_X_COUNT() bfin_read16(DMA7_X_COUNT)
605#define bfin_write_DMA7_X_COUNT(val) bfin_write16(DMA7_X_COUNT, val)
606#define bfin_read_DMA7_Y_COUNT() bfin_read16(DMA7_Y_COUNT)
607#define bfin_write_DMA7_Y_COUNT(val) bfin_write16(DMA7_Y_COUNT, val)
608#define bfin_read_DMA7_X_MODIFY() bfin_read16(DMA7_X_MODIFY)
609#define bfin_write_DMA7_X_MODIFY(val) bfin_write16(DMA7_X_MODIFY, val)
610#define bfin_read_DMA7_Y_MODIFY() bfin_read16(DMA7_Y_MODIFY)
611#define bfin_write_DMA7_Y_MODIFY(val) bfin_write16(DMA7_Y_MODIFY, val)
612#define bfin_read_DMA7_CURR_DESC_PTR() bfin_read32(DMA7_CURR_DESC_PTR)
613#define bfin_write_DMA7_CURR_DESC_PTR(val) bfin_write32(DMA7_CURR_DESC_PTR, val)
614#define bfin_read_DMA7_CURR_ADDR() bfin_read32(DMA7_CURR_ADDR)
615#define bfin_write_DMA7_CURR_ADDR(val) bfin_write32(DMA7_CURR_ADDR, val)
616#define bfin_read_DMA7_CURR_X_COUNT() bfin_read16(DMA7_CURR_X_COUNT)
617#define bfin_write_DMA7_CURR_X_COUNT(val) bfin_write16(DMA7_CURR_X_COUNT, val)
618#define bfin_read_DMA7_CURR_Y_COUNT() bfin_read16(DMA7_CURR_Y_COUNT)
619#define bfin_write_DMA7_CURR_Y_COUNT(val) bfin_write16(DMA7_CURR_Y_COUNT, val)
620#define bfin_read_DMA7_IRQ_STATUS() bfin_read16(DMA7_IRQ_STATUS)
621#define bfin_write_DMA7_IRQ_STATUS(val) bfin_write16(DMA7_IRQ_STATUS, val)
622#define bfin_read_DMA7_PERIPHERAL_MAP() bfin_read16(DMA7_PERIPHERAL_MAP)
623#define bfin_write_DMA7_PERIPHERAL_MAP(val) bfin_write16(DMA7_PERIPHERAL_MAP, val)
624
625#define bfin_read_DMA8_CONFIG() bfin_read16(DMA8_CONFIG)
626#define bfin_write_DMA8_CONFIG(val) bfin_write16(DMA8_CONFIG, val)
627#define bfin_read_DMA8_NEXT_DESC_PTR() bfin_read32(DMA8_NEXT_DESC_PTR)
628#define bfin_write_DMA8_NEXT_DESC_PTR(val) bfin_write32(DMA8_NEXT_DESC_PTR, val)
629#define bfin_read_DMA8_START_ADDR() bfin_read32(DMA8_START_ADDR)
630#define bfin_write_DMA8_START_ADDR(val) bfin_write32(DMA8_START_ADDR, val)
631#define bfin_read_DMA8_X_COUNT() bfin_read16(DMA8_X_COUNT)
632#define bfin_write_DMA8_X_COUNT(val) bfin_write16(DMA8_X_COUNT, val)
633#define bfin_read_DMA8_Y_COUNT() bfin_read16(DMA8_Y_COUNT)
634#define bfin_write_DMA8_Y_COUNT(val) bfin_write16(DMA8_Y_COUNT, val)
635#define bfin_read_DMA8_X_MODIFY() bfin_read16(DMA8_X_MODIFY)
636#define bfin_write_DMA8_X_MODIFY(val) bfin_write16(DMA8_X_MODIFY, val)
637#define bfin_read_DMA8_Y_MODIFY() bfin_read16(DMA8_Y_MODIFY)
638#define bfin_write_DMA8_Y_MODIFY(val) bfin_write16(DMA8_Y_MODIFY, val)
639#define bfin_read_DMA8_CURR_DESC_PTR() bfin_read32(DMA8_CURR_DESC_PTR)
640#define bfin_write_DMA8_CURR_DESC_PTR(val) bfin_write32(DMA8_CURR_DESC_PTR, val)
641#define bfin_read_DMA8_CURR_ADDR() bfin_read32(DMA8_CURR_ADDR)
642#define bfin_write_DMA8_CURR_ADDR(val) bfin_write32(DMA8_CURR_ADDR, val)
643#define bfin_read_DMA8_CURR_X_COUNT() bfin_read16(DMA8_CURR_X_COUNT)
644#define bfin_write_DMA8_CURR_X_COUNT(val) bfin_write16(DMA8_CURR_X_COUNT, val)
645#define bfin_read_DMA8_CURR_Y_COUNT() bfin_read16(DMA8_CURR_Y_COUNT)
646#define bfin_write_DMA8_CURR_Y_COUNT(val) bfin_write16(DMA8_CURR_Y_COUNT, val)
647#define bfin_read_DMA8_IRQ_STATUS() bfin_read16(DMA8_IRQ_STATUS)
648#define bfin_write_DMA8_IRQ_STATUS(val) bfin_write16(DMA8_IRQ_STATUS, val)
649#define bfin_read_DMA8_PERIPHERAL_MAP() bfin_read16(DMA8_PERIPHERAL_MAP)
650#define bfin_write_DMA8_PERIPHERAL_MAP(val) bfin_write16(DMA8_PERIPHERAL_MAP, val)
651
652#define bfin_read_DMA9_CONFIG() bfin_read16(DMA9_CONFIG)
653#define bfin_write_DMA9_CONFIG(val) bfin_write16(DMA9_CONFIG, val)
654#define bfin_read_DMA9_NEXT_DESC_PTR() bfin_read32(DMA9_NEXT_DESC_PTR)
655#define bfin_write_DMA9_NEXT_DESC_PTR(val) bfin_write32(DMA9_NEXT_DESC_PTR, val)
656#define bfin_read_DMA9_START_ADDR() bfin_read32(DMA9_START_ADDR)
657#define bfin_write_DMA9_START_ADDR(val) bfin_write32(DMA9_START_ADDR, val)
658#define bfin_read_DMA9_X_COUNT() bfin_read16(DMA9_X_COUNT)
659#define bfin_write_DMA9_X_COUNT(val) bfin_write16(DMA9_X_COUNT, val)
660#define bfin_read_DMA9_Y_COUNT() bfin_read16(DMA9_Y_COUNT)
661#define bfin_write_DMA9_Y_COUNT(val) bfin_write16(DMA9_Y_COUNT, val)
662#define bfin_read_DMA9_X_MODIFY() bfin_read16(DMA9_X_MODIFY)
663#define bfin_write_DMA9_X_MODIFY(val) bfin_write16(DMA9_X_MODIFY, val)
664#define bfin_read_DMA9_Y_MODIFY() bfin_read16(DMA9_Y_MODIFY)
665#define bfin_write_DMA9_Y_MODIFY(val) bfin_write16(DMA9_Y_MODIFY, val)
666#define bfin_read_DMA9_CURR_DESC_PTR() bfin_read32(DMA9_CURR_DESC_PTR)
667#define bfin_write_DMA9_CURR_DESC_PTR(val) bfin_write32(DMA9_CURR_DESC_PTR, val)
668#define bfin_read_DMA9_CURR_ADDR() bfin_read32(DMA9_CURR_ADDR)
669#define bfin_write_DMA9_CURR_ADDR(val) bfin_write32(DMA9_CURR_ADDR, val)
670#define bfin_read_DMA9_CURR_X_COUNT() bfin_read16(DMA9_CURR_X_COUNT)
671#define bfin_write_DMA9_CURR_X_COUNT(val) bfin_write16(DMA9_CURR_X_COUNT, val)
672#define bfin_read_DMA9_CURR_Y_COUNT() bfin_read16(DMA9_CURR_Y_COUNT)
673#define bfin_write_DMA9_CURR_Y_COUNT(val) bfin_write16(DMA9_CURR_Y_COUNT, val)
674#define bfin_read_DMA9_IRQ_STATUS() bfin_read16(DMA9_IRQ_STATUS)
675#define bfin_write_DMA9_IRQ_STATUS(val) bfin_write16(DMA9_IRQ_STATUS, val)
676#define bfin_read_DMA9_PERIPHERAL_MAP() bfin_read16(DMA9_PERIPHERAL_MAP)
677#define bfin_write_DMA9_PERIPHERAL_MAP(val) bfin_write16(DMA9_PERIPHERAL_MAP, val)
678
679#define bfin_read_DMA10_CONFIG() bfin_read16(DMA10_CONFIG)
680#define bfin_write_DMA10_CONFIG(val) bfin_write16(DMA10_CONFIG, val)
681#define bfin_read_DMA10_NEXT_DESC_PTR() bfin_read32(DMA10_NEXT_DESC_PTR)
682#define bfin_write_DMA10_NEXT_DESC_PTR(val) bfin_write32(DMA10_NEXT_DESC_PTR, val)
683#define bfin_read_DMA10_START_ADDR() bfin_read32(DMA10_START_ADDR)
684#define bfin_write_DMA10_START_ADDR(val) bfin_write32(DMA10_START_ADDR, val)
685#define bfin_read_DMA10_X_COUNT() bfin_read16(DMA10_X_COUNT)
686#define bfin_write_DMA10_X_COUNT(val) bfin_write16(DMA10_X_COUNT, val)
687#define bfin_read_DMA10_Y_COUNT() bfin_read16(DMA10_Y_COUNT)
688#define bfin_write_DMA10_Y_COUNT(val) bfin_write16(DMA10_Y_COUNT, val)
689#define bfin_read_DMA10_X_MODIFY() bfin_read16(DMA10_X_MODIFY)
690#define bfin_write_DMA10_X_MODIFY(val) bfin_write16(DMA10_X_MODIFY, val)
691#define bfin_read_DMA10_Y_MODIFY() bfin_read16(DMA10_Y_MODIFY)
692#define bfin_write_DMA10_Y_MODIFY(val) bfin_write16(DMA10_Y_MODIFY, val)
693#define bfin_read_DMA10_CURR_DESC_PTR() bfin_read32(DMA10_CURR_DESC_PTR)
694#define bfin_write_DMA10_CURR_DESC_PTR(val) bfin_write32(DMA10_CURR_DESC_PTR, val)
695#define bfin_read_DMA10_CURR_ADDR() bfin_read32(DMA10_CURR_ADDR)
696#define bfin_write_DMA10_CURR_ADDR(val) bfin_write32(DMA10_CURR_ADDR, val)
697#define bfin_read_DMA10_CURR_X_COUNT() bfin_read16(DMA10_CURR_X_COUNT)
698#define bfin_write_DMA10_CURR_X_COUNT(val) bfin_write16(DMA10_CURR_X_COUNT, val)
699#define bfin_read_DMA10_CURR_Y_COUNT() bfin_read16(DMA10_CURR_Y_COUNT)
700#define bfin_write_DMA10_CURR_Y_COUNT(val) bfin_write16(DMA10_CURR_Y_COUNT, val)
701#define bfin_read_DMA10_IRQ_STATUS() bfin_read16(DMA10_IRQ_STATUS)
702#define bfin_write_DMA10_IRQ_STATUS(val) bfin_write16(DMA10_IRQ_STATUS, val)
703#define bfin_read_DMA10_PERIPHERAL_MAP() bfin_read16(DMA10_PERIPHERAL_MAP)
704#define bfin_write_DMA10_PERIPHERAL_MAP(val) bfin_write16(DMA10_PERIPHERAL_MAP, val)
705
706#define bfin_read_DMA11_CONFIG() bfin_read16(DMA11_CONFIG)
707#define bfin_write_DMA11_CONFIG(val) bfin_write16(DMA11_CONFIG, val)
708#define bfin_read_DMA11_NEXT_DESC_PTR() bfin_read32(DMA11_NEXT_DESC_PTR)
709#define bfin_write_DMA11_NEXT_DESC_PTR(val) bfin_write32(DMA11_NEXT_DESC_PTR, val)
710#define bfin_read_DMA11_START_ADDR() bfin_read32(DMA11_START_ADDR)
711#define bfin_write_DMA11_START_ADDR(val) bfin_write32(DMA11_START_ADDR, val)
712#define bfin_read_DMA11_X_COUNT() bfin_read16(DMA11_X_COUNT)
713#define bfin_write_DMA11_X_COUNT(val) bfin_write16(DMA11_X_COUNT, val)
714#define bfin_read_DMA11_Y_COUNT() bfin_read16(DMA11_Y_COUNT)
715#define bfin_write_DMA11_Y_COUNT(val) bfin_write16(DMA11_Y_COUNT, val)
716#define bfin_read_DMA11_X_MODIFY() bfin_read16(DMA11_X_MODIFY)
717#define bfin_write_DMA11_X_MODIFY(val) bfin_write16(DMA11_X_MODIFY, val)
718#define bfin_read_DMA11_Y_MODIFY() bfin_read16(DMA11_Y_MODIFY)
719#define bfin_write_DMA11_Y_MODIFY(val) bfin_write16(DMA11_Y_MODIFY, val)
720#define bfin_read_DMA11_CURR_DESC_PTR() bfin_read32(DMA11_CURR_DESC_PTR)
721#define bfin_write_DMA11_CURR_DESC_PTR(val) bfin_write32(DMA11_CURR_DESC_PTR, val)
722#define bfin_read_DMA11_CURR_ADDR() bfin_read32(DMA11_CURR_ADDR)
723#define bfin_write_DMA11_CURR_ADDR(val) bfin_write32(DMA11_CURR_ADDR, val)
724#define bfin_read_DMA11_CURR_X_COUNT() bfin_read16(DMA11_CURR_X_COUNT)
725#define bfin_write_DMA11_CURR_X_COUNT(val) bfin_write16(DMA11_CURR_X_COUNT, val)
726#define bfin_read_DMA11_CURR_Y_COUNT() bfin_read16(DMA11_CURR_Y_COUNT)
727#define bfin_write_DMA11_CURR_Y_COUNT(val) bfin_write16(DMA11_CURR_Y_COUNT, val)
728#define bfin_read_DMA11_IRQ_STATUS() bfin_read16(DMA11_IRQ_STATUS)
729#define bfin_write_DMA11_IRQ_STATUS(val) bfin_write16(DMA11_IRQ_STATUS, val)
730#define bfin_read_DMA11_PERIPHERAL_MAP() bfin_read16(DMA11_PERIPHERAL_MAP)
731#define bfin_write_DMA11_PERIPHERAL_MAP(val) bfin_write16(DMA11_PERIPHERAL_MAP, val)
732
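The per-channel IRQ_STATUS registers above are sticky and follow the Blackfin write-1-to-clear convention. A minimal sketch of acknowledging channel 7, assuming the DMA_DONE and DMA_ERR bit definitions from the companion defBF52x headers:

static void dma7_ack(void)
{
	unsigned short stat = bfin_read_DMA7_IRQ_STATUS();

	/* DMA_DONE/DMA_ERR latch until written back with a 1 (W1C) */
	bfin_write_DMA7_IRQ_STATUS(stat & (DMA_DONE | DMA_ERR));
}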
733#define bfin_read_MDMA_D0_CONFIG() bfin_read16(MDMA_D0_CONFIG)
734#define bfin_write_MDMA_D0_CONFIG(val) bfin_write16(MDMA_D0_CONFIG, val)
735#define bfin_read_MDMA_D0_NEXT_DESC_PTR() bfin_read32(MDMA_D0_NEXT_DESC_PTR)
736#define bfin_write_MDMA_D0_NEXT_DESC_PTR(val) bfin_write32(MDMA_D0_NEXT_DESC_PTR, val)
737#define bfin_read_MDMA_D0_START_ADDR() bfin_read32(MDMA_D0_START_ADDR)
738#define bfin_write_MDMA_D0_START_ADDR(val) bfin_write32(MDMA_D0_START_ADDR, val)
739#define bfin_read_MDMA_D0_X_COUNT() bfin_read16(MDMA_D0_X_COUNT)
740#define bfin_write_MDMA_D0_X_COUNT(val) bfin_write16(MDMA_D0_X_COUNT, val)
741#define bfin_read_MDMA_D0_Y_COUNT() bfin_read16(MDMA_D0_Y_COUNT)
742#define bfin_write_MDMA_D0_Y_COUNT(val) bfin_write16(MDMA_D0_Y_COUNT, val)
743#define bfin_read_MDMA_D0_X_MODIFY() bfin_read16(MDMA_D0_X_MODIFY)
744#define bfin_write_MDMA_D0_X_MODIFY(val) bfin_write16(MDMA_D0_X_MODIFY, val)
745#define bfin_read_MDMA_D0_Y_MODIFY() bfin_read16(MDMA_D0_Y_MODIFY)
746#define bfin_write_MDMA_D0_Y_MODIFY(val) bfin_write16(MDMA_D0_Y_MODIFY, val)
747#define bfin_read_MDMA_D0_CURR_DESC_PTR() bfin_read32(MDMA_D0_CURR_DESC_PTR)
748#define bfin_write_MDMA_D0_CURR_DESC_PTR(val) bfin_write32(MDMA_D0_CURR_DESC_PTR, val)
749#define bfin_read_MDMA_D0_CURR_ADDR() bfin_read32(MDMA_D0_CURR_ADDR)
750#define bfin_write_MDMA_D0_CURR_ADDR(val) bfin_write32(MDMA_D0_CURR_ADDR, val)
751#define bfin_read_MDMA_D0_CURR_X_COUNT() bfin_read16(MDMA_D0_CURR_X_COUNT)
752#define bfin_write_MDMA_D0_CURR_X_COUNT(val) bfin_write16(MDMA_D0_CURR_X_COUNT, val)
753#define bfin_read_MDMA_D0_CURR_Y_COUNT() bfin_read16(MDMA_D0_CURR_Y_COUNT)
754#define bfin_write_MDMA_D0_CURR_Y_COUNT(val) bfin_write16(MDMA_D0_CURR_Y_COUNT, val)
755#define bfin_read_MDMA_D0_IRQ_STATUS() bfin_read16(MDMA_D0_IRQ_STATUS)
756#define bfin_write_MDMA_D0_IRQ_STATUS(val) bfin_write16(MDMA_D0_IRQ_STATUS, val)
757#define bfin_read_MDMA_D0_PERIPHERAL_MAP() bfin_read16(MDMA_D0_PERIPHERAL_MAP)
758#define bfin_write_MDMA_D0_PERIPHERAL_MAP(val) bfin_write16(MDMA_D0_PERIPHERAL_MAP, val)
759
760#define bfin_read_MDMA_S0_CONFIG() bfin_read16(MDMA_S0_CONFIG)
761#define bfin_write_MDMA_S0_CONFIG(val) bfin_write16(MDMA_S0_CONFIG, val)
762#define bfin_read_MDMA_S0_NEXT_DESC_PTR() bfin_read32(MDMA_S0_NEXT_DESC_PTR)
763#define bfin_write_MDMA_S0_NEXT_DESC_PTR(val) bfin_write32(MDMA_S0_NEXT_DESC_PTR, val)
764#define bfin_read_MDMA_S0_START_ADDR() bfin_read32(MDMA_S0_START_ADDR)
765#define bfin_write_MDMA_S0_START_ADDR(val) bfin_write32(MDMA_S0_START_ADDR, val)
766#define bfin_read_MDMA_S0_X_COUNT() bfin_read16(MDMA_S0_X_COUNT)
767#define bfin_write_MDMA_S0_X_COUNT(val) bfin_write16(MDMA_S0_X_COUNT, val)
768#define bfin_read_MDMA_S0_Y_COUNT() bfin_read16(MDMA_S0_Y_COUNT)
769#define bfin_write_MDMA_S0_Y_COUNT(val) bfin_write16(MDMA_S0_Y_COUNT, val)
770#define bfin_read_MDMA_S0_X_MODIFY() bfin_read16(MDMA_S0_X_MODIFY)
771#define bfin_write_MDMA_S0_X_MODIFY(val) bfin_write16(MDMA_S0_X_MODIFY, val)
772#define bfin_read_MDMA_S0_Y_MODIFY() bfin_read16(MDMA_S0_Y_MODIFY)
773#define bfin_write_MDMA_S0_Y_MODIFY(val) bfin_write16(MDMA_S0_Y_MODIFY, val)
774#define bfin_read_MDMA_S0_CURR_DESC_PTR() bfin_read32(MDMA_S0_CURR_DESC_PTR)
775#define bfin_write_MDMA_S0_CURR_DESC_PTR(val) bfin_write32(MDMA_S0_CURR_DESC_PTR, val)
776#define bfin_read_MDMA_S0_CURR_ADDR() bfin_read32(MDMA_S0_CURR_ADDR)
777#define bfin_write_MDMA_S0_CURR_ADDR(val) bfin_write32(MDMA_S0_CURR_ADDR, val)
778#define bfin_read_MDMA_S0_CURR_X_COUNT() bfin_read16(MDMA_S0_CURR_X_COUNT)
779#define bfin_write_MDMA_S0_CURR_X_COUNT(val) bfin_write16(MDMA_S0_CURR_X_COUNT, val)
780#define bfin_read_MDMA_S0_CURR_Y_COUNT() bfin_read16(MDMA_S0_CURR_Y_COUNT)
781#define bfin_write_MDMA_S0_CURR_Y_COUNT(val) bfin_write16(MDMA_S0_CURR_Y_COUNT, val)
782#define bfin_read_MDMA_S0_IRQ_STATUS() bfin_read16(MDMA_S0_IRQ_STATUS)
783#define bfin_write_MDMA_S0_IRQ_STATUS(val) bfin_write16(MDMA_S0_IRQ_STATUS, val)
784#define bfin_read_MDMA_S0_PERIPHERAL_MAP() bfin_read16(MDMA_S0_PERIPHERAL_MAP)
785#define bfin_write_MDMA_S0_PERIPHERAL_MAP(val) bfin_write16(MDMA_S0_PERIPHERAL_MAP, val)
786
787#define bfin_read_MDMA_D1_CONFIG() bfin_read16(MDMA_D1_CONFIG)
788#define bfin_write_MDMA_D1_CONFIG(val) bfin_write16(MDMA_D1_CONFIG, val)
789#define bfin_read_MDMA_D1_NEXT_DESC_PTR() bfin_read32(MDMA_D1_NEXT_DESC_PTR)
790#define bfin_write_MDMA_D1_NEXT_DESC_PTR(val) bfin_write32(MDMA_D1_NEXT_DESC_PTR, val)
791#define bfin_read_MDMA_D1_START_ADDR() bfin_read32(MDMA_D1_START_ADDR)
792#define bfin_write_MDMA_D1_START_ADDR(val) bfin_write32(MDMA_D1_START_ADDR, val)
793#define bfin_read_MDMA_D1_X_COUNT() bfin_read16(MDMA_D1_X_COUNT)
794#define bfin_write_MDMA_D1_X_COUNT(val) bfin_write16(MDMA_D1_X_COUNT, val)
795#define bfin_read_MDMA_D1_Y_COUNT() bfin_read16(MDMA_D1_Y_COUNT)
796#define bfin_write_MDMA_D1_Y_COUNT(val) bfin_write16(MDMA_D1_Y_COUNT, val)
797#define bfin_read_MDMA_D1_X_MODIFY() bfin_read16(MDMA_D1_X_MODIFY)
798#define bfin_write_MDMA_D1_X_MODIFY(val) bfin_write16(MDMA_D1_X_MODIFY, val)
799#define bfin_read_MDMA_D1_Y_MODIFY() bfin_read16(MDMA_D1_Y_MODIFY)
800#define bfin_write_MDMA_D1_Y_MODIFY(val) bfin_write16(MDMA_D1_Y_MODIFY, val)
801#define bfin_read_MDMA_D1_CURR_DESC_PTR() bfin_read32(MDMA_D1_CURR_DESC_PTR)
802#define bfin_write_MDMA_D1_CURR_DESC_PTR(val) bfin_write32(MDMA_D1_CURR_DESC_PTR, val)
803#define bfin_read_MDMA_D1_CURR_ADDR() bfin_read32(MDMA_D1_CURR_ADDR)
804#define bfin_write_MDMA_D1_CURR_ADDR(val) bfin_write32(MDMA_D1_CURR_ADDR, val)
805#define bfin_read_MDMA_D1_CURR_X_COUNT() bfin_read16(MDMA_D1_CURR_X_COUNT)
806#define bfin_write_MDMA_D1_CURR_X_COUNT(val) bfin_write16(MDMA_D1_CURR_X_COUNT, val)
807#define bfin_read_MDMA_D1_CURR_Y_COUNT() bfin_read16(MDMA_D1_CURR_Y_COUNT)
808#define bfin_write_MDMA_D1_CURR_Y_COUNT(val) bfin_write16(MDMA_D1_CURR_Y_COUNT, val)
809#define bfin_read_MDMA_D1_IRQ_STATUS() bfin_read16(MDMA_D1_IRQ_STATUS)
810#define bfin_write_MDMA_D1_IRQ_STATUS(val) bfin_write16(MDMA_D1_IRQ_STATUS, val)
811#define bfin_read_MDMA_D1_PERIPHERAL_MAP() bfin_read16(MDMA_D1_PERIPHERAL_MAP)
812#define bfin_write_MDMA_D1_PERIPHERAL_MAP(val) bfin_write16(MDMA_D1_PERIPHERAL_MAP, val)
813
814#define bfin_read_MDMA_S1_CONFIG() bfin_read16(MDMA_S1_CONFIG)
815#define bfin_write_MDMA_S1_CONFIG(val) bfin_write16(MDMA_S1_CONFIG, val)
816#define bfin_read_MDMA_S1_NEXT_DESC_PTR() bfin_read32(MDMA_S1_NEXT_DESC_PTR)
817#define bfin_write_MDMA_S1_NEXT_DESC_PTR(val) bfin_write32(MDMA_S1_NEXT_DESC_PTR, val)
818#define bfin_read_MDMA_S1_START_ADDR() bfin_read32(MDMA_S1_START_ADDR)
819#define bfin_write_MDMA_S1_START_ADDR(val) bfin_write32(MDMA_S1_START_ADDR, val)
820#define bfin_read_MDMA_S1_X_COUNT() bfin_read16(MDMA_S1_X_COUNT)
821#define bfin_write_MDMA_S1_X_COUNT(val) bfin_write16(MDMA_S1_X_COUNT, val)
822#define bfin_read_MDMA_S1_Y_COUNT() bfin_read16(MDMA_S1_Y_COUNT)
823#define bfin_write_MDMA_S1_Y_COUNT(val) bfin_write16(MDMA_S1_Y_COUNT, val)
824#define bfin_read_MDMA_S1_X_MODIFY() bfin_read16(MDMA_S1_X_MODIFY)
825#define bfin_write_MDMA_S1_X_MODIFY(val) bfin_write16(MDMA_S1_X_MODIFY, val)
826#define bfin_read_MDMA_S1_Y_MODIFY() bfin_read16(MDMA_S1_Y_MODIFY)
827#define bfin_write_MDMA_S1_Y_MODIFY(val) bfin_write16(MDMA_S1_Y_MODIFY, val)
828#define bfin_read_MDMA_S1_CURR_DESC_PTR() bfin_read32(MDMA_S1_CURR_DESC_PTR)
829#define bfin_write_MDMA_S1_CURR_DESC_PTR(val) bfin_write32(MDMA_S1_CURR_DESC_PTR, val)
830#define bfin_read_MDMA_S1_CURR_ADDR() bfin_read32(MDMA_S1_CURR_ADDR)
831#define bfin_write_MDMA_S1_CURR_ADDR(val) bfin_write32(MDMA_S1_CURR_ADDR, val)
832#define bfin_read_MDMA_S1_CURR_X_COUNT() bfin_read16(MDMA_S1_CURR_X_COUNT)
833#define bfin_write_MDMA_S1_CURR_X_COUNT(val) bfin_write16(MDMA_S1_CURR_X_COUNT, val)
834#define bfin_read_MDMA_S1_CURR_Y_COUNT() bfin_read16(MDMA_S1_CURR_Y_COUNT)
835#define bfin_write_MDMA_S1_CURR_Y_COUNT(val) bfin_write16(MDMA_S1_CURR_Y_COUNT, val)
836#define bfin_read_MDMA_S1_IRQ_STATUS() bfin_read16(MDMA_S1_IRQ_STATUS)
837#define bfin_write_MDMA_S1_IRQ_STATUS(val) bfin_write16(MDMA_S1_IRQ_STATUS, val)
838#define bfin_read_MDMA_S1_PERIPHERAL_MAP() bfin_read16(MDMA_S1_PERIPHERAL_MAP)
839#define bfin_write_MDMA_S1_PERIPHERAL_MAP(val) bfin_write16(MDMA_S1_PERIPHERAL_MAP, val)
840
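As a usage illustration, here is a minimal blocking memory-to-memory copy built only from the MemDMA stream 0 accessors above; DMAEN, WNR, WDSIZE_16 and DMA_DONE are assumed from the companion DMA bit definitions, and error handling is omitted:

static void mdma0_memcpy16(void *dst, const void *src, unsigned short nwords)
{
	/* source channel: read from memory, 16-bit words, stride +2 bytes */
	bfin_write_MDMA_S0_START_ADDR((unsigned long)src);
	bfin_write_MDMA_S0_X_COUNT(nwords);
	bfin_write_MDMA_S0_X_MODIFY(2);
	bfin_write_MDMA_S0_CONFIG(DMAEN | WDSIZE_16);

	/* destination channel: WNR marks it as a memory write */
	bfin_write_MDMA_D0_START_ADDR((unsigned long)dst);
	bfin_write_MDMA_D0_X_COUNT(nwords);
	bfin_write_MDMA_D0_X_MODIFY(2);
	bfin_write_MDMA_D0_CONFIG(DMAEN | WNR | WDSIZE_16);

	/* the destination side signals completion; acknowledge it (W1C) */
	while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE))
		continue;
	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE);
}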
841
842/* Parallel Peripheral Interface (0xFFC01000 - 0xFFC010FF) */
843#define bfin_read_PPI_CONTROL() bfin_read16(PPI_CONTROL)
844#define bfin_write_PPI_CONTROL(val) bfin_write16(PPI_CONTROL, val)
845#define bfin_read_PPI_STATUS() bfin_read16(PPI_STATUS)
846#define bfin_write_PPI_STATUS(val) bfin_write16(PPI_STATUS, val)
847#define bfin_clear_PPI_STATUS() bfin_write_PPI_STATUS(0xFFFF)
848#define bfin_read_PPI_DELAY() bfin_read16(PPI_DELAY)
849#define bfin_write_PPI_DELAY(val) bfin_write16(PPI_DELAY, val)
850#define bfin_read_PPI_COUNT() bfin_read16(PPI_COUNT)
851#define bfin_write_PPI_COUNT(val) bfin_write16(PPI_COUNT, val)
852#define bfin_read_PPI_FRAME() bfin_read16(PPI_FRAME)
853#define bfin_write_PPI_FRAME(val) bfin_write16(PPI_FRAME, val)
854
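A hedged configuration sketch: the delay, count and frame registers are programmed while the port is disabled, stale status is cleared with the helper above, and the port is enabled last. PORT_EN is assumed from the companion PPI_CONTROL bit definitions; ppi_width and ppi_lines are illustrative values:

	bfin_write_PPI_DELAY(0);
	bfin_write_PPI_COUNT(ppi_width - 1);
	bfin_write_PPI_FRAME(ppi_lines);
	bfin_clear_PPI_STATUS();
	bfin_write_PPI_CONTROL(bfin_read_PPI_CONTROL() | PORT_EN);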
855
856/* Two-Wire Interface (0xFFC01400 - 0xFFC014FF) */
857
858/* General Purpose I/O Port G (0xFFC01500 - 0xFFC015FF) */
859#define bfin_read_PORTGIO() bfin_read16(PORTGIO)
860#define bfin_write_PORTGIO(val) bfin_write16(PORTGIO, val)
861#define bfin_read_PORTGIO_CLEAR() bfin_read16(PORTGIO_CLEAR)
862#define bfin_write_PORTGIO_CLEAR(val) bfin_write16(PORTGIO_CLEAR, val)
863#define bfin_read_PORTGIO_SET() bfin_read16(PORTGIO_SET)
864#define bfin_write_PORTGIO_SET(val) bfin_write16(PORTGIO_SET, val)
865#define bfin_read_PORTGIO_TOGGLE() bfin_read16(PORTGIO_TOGGLE)
866#define bfin_write_PORTGIO_TOGGLE(val) bfin_write16(PORTGIO_TOGGLE, val)
867#define bfin_read_PORTGIO_MASKA() bfin_read16(PORTGIO_MASKA)
868#define bfin_write_PORTGIO_MASKA(val) bfin_write16(PORTGIO_MASKA, val)
869#define bfin_read_PORTGIO_MASKA_CLEAR() bfin_read16(PORTGIO_MASKA_CLEAR)
870#define bfin_write_PORTGIO_MASKA_CLEAR(val) bfin_write16(PORTGIO_MASKA_CLEAR, val)
871#define bfin_read_PORTGIO_MASKA_SET() bfin_read16(PORTGIO_MASKA_SET)
872#define bfin_write_PORTGIO_MASKA_SET(val) bfin_write16(PORTGIO_MASKA_SET, val)
873#define bfin_read_PORTGIO_MASKA_TOGGLE() bfin_read16(PORTGIO_MASKA_TOGGLE)
874#define bfin_write_PORTGIO_MASKA_TOGGLE(val) bfin_write16(PORTGIO_MASKA_TOGGLE, val)
875#define bfin_read_PORTGIO_MASKB() bfin_read16(PORTGIO_MASKB)
876#define bfin_write_PORTGIO_MASKB(val) bfin_write16(PORTGIO_MASKB, val)
877#define bfin_read_PORTGIO_MASKB_CLEAR() bfin_read16(PORTGIO_MASKB_CLEAR)
878#define bfin_write_PORTGIO_MASKB_CLEAR(val) bfin_write16(PORTGIO_MASKB_CLEAR, val)
879#define bfin_read_PORTGIO_MASKB_SET() bfin_read16(PORTGIO_MASKB_SET)
880#define bfin_write_PORTGIO_MASKB_SET(val) bfin_write16(PORTGIO_MASKB_SET, val)
881#define bfin_read_PORTGIO_MASKB_TOGGLE() bfin_read16(PORTGIO_MASKB_TOGGLE)
882#define bfin_write_PORTGIO_MASKB_TOGGLE(val) bfin_write16(PORTGIO_MASKB_TOGGLE, val)
883#define bfin_read_PORTGIO_DIR() bfin_read16(PORTGIO_DIR)
884#define bfin_write_PORTGIO_DIR(val) bfin_write16(PORTGIO_DIR, val)
885#define bfin_read_PORTGIO_POLAR() bfin_read16(PORTGIO_POLAR)
886#define bfin_write_PORTGIO_POLAR(val) bfin_write16(PORTGIO_POLAR, val)
887#define bfin_read_PORTGIO_EDGE() bfin_read16(PORTGIO_EDGE)
888#define bfin_write_PORTGIO_EDGE(val) bfin_write16(PORTGIO_EDGE, val)
889#define bfin_read_PORTGIO_BOTH() bfin_read16(PORTGIO_BOTH)
890#define bfin_write_PORTGIO_BOTH(val) bfin_write16(PORTGIO_BOTH, val)
891#define bfin_read_PORTGIO_INEN() bfin_read16(PORTGIO_INEN)
892#define bfin_write_PORTGIO_INEN(val) bfin_write16(PORTGIO_INEN, val)
893
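Since the SET/CLEAR/TOGGLE registers act only on the bits written with a 1, a single Port G pin can be driven without a read-modify-write of the data register. An illustrative sketch for pin PG3:

	bfin_write_PORTGIO_INEN(bfin_read_PORTGIO_INEN() & ~(1 << 3)); /* input buffer off */
	bfin_write_PORTGIO_DIR(bfin_read_PORTGIO_DIR() | (1 << 3));    /* drive as output */
	bfin_write_PORTGIO_SET(1 << 3);   /* PG3 high, other pins untouched */
	bfin_write_PORTGIO_CLEAR(1 << 3); /* PG3 low */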
894
895/* General Purpose I/O Port H (0xFFC01700 - 0xFFC017FF) */
896#define bfin_read_PORTHIO() bfin_read16(PORTHIO)
897#define bfin_write_PORTHIO(val) bfin_write16(PORTHIO, val)
898#define bfin_read_PORTHIO_CLEAR() bfin_read16(PORTHIO_CLEAR)
899#define bfin_write_PORTHIO_CLEAR(val) bfin_write16(PORTHIO_CLEAR, val)
900#define bfin_read_PORTHIO_SET() bfin_read16(PORTHIO_SET)
901#define bfin_write_PORTHIO_SET(val) bfin_write16(PORTHIO_SET, val)
902#define bfin_read_PORTHIO_TOGGLE() bfin_read16(PORTHIO_TOGGLE)
903#define bfin_write_PORTHIO_TOGGLE(val) bfin_write16(PORTHIO_TOGGLE, val)
904#define bfin_read_PORTHIO_MASKA() bfin_read16(PORTHIO_MASKA)
905#define bfin_write_PORTHIO_MASKA(val) bfin_write16(PORTHIO_MASKA, val)
906#define bfin_read_PORTHIO_MASKA_CLEAR() bfin_read16(PORTHIO_MASKA_CLEAR)
907#define bfin_write_PORTHIO_MASKA_CLEAR(val) bfin_write16(PORTHIO_MASKA_CLEAR, val)
908#define bfin_read_PORTHIO_MASKA_SET() bfin_read16(PORTHIO_MASKA_SET)
909#define bfin_write_PORTHIO_MASKA_SET(val) bfin_write16(PORTHIO_MASKA_SET, val)
910#define bfin_read_PORTHIO_MASKA_TOGGLE() bfin_read16(PORTHIO_MASKA_TOGGLE)
911#define bfin_write_PORTHIO_MASKA_TOGGLE(val) bfin_write16(PORTHIO_MASKA_TOGGLE, val)
912#define bfin_read_PORTHIO_MASKB() bfin_read16(PORTHIO_MASKB)
913#define bfin_write_PORTHIO_MASKB(val) bfin_write16(PORTHIO_MASKB, val)
914#define bfin_read_PORTHIO_MASKB_CLEAR() bfin_read16(PORTHIO_MASKB_CLEAR)
915#define bfin_write_PORTHIO_MASKB_CLEAR(val) bfin_write16(PORTHIO_MASKB_CLEAR, val)
916#define bfin_read_PORTHIO_MASKB_SET() bfin_read16(PORTHIO_MASKB_SET)
917#define bfin_write_PORTHIO_MASKB_SET(val) bfin_write16(PORTHIO_MASKB_SET, val)
918#define bfin_read_PORTHIO_MASKB_TOGGLE() bfin_read16(PORTHIO_MASKB_TOGGLE)
919#define bfin_write_PORTHIO_MASKB_TOGGLE(val) bfin_write16(PORTHIO_MASKB_TOGGLE, val)
920#define bfin_read_PORTHIO_DIR() bfin_read16(PORTHIO_DIR)
921#define bfin_write_PORTHIO_DIR(val) bfin_write16(PORTHIO_DIR, val)
922#define bfin_read_PORTHIO_POLAR() bfin_read16(PORTHIO_POLAR)
923#define bfin_write_PORTHIO_POLAR(val) bfin_write16(PORTHIO_POLAR, val)
924#define bfin_read_PORTHIO_EDGE() bfin_read16(PORTHIO_EDGE)
925#define bfin_write_PORTHIO_EDGE(val) bfin_write16(PORTHIO_EDGE, val)
926#define bfin_read_PORTHIO_BOTH() bfin_read16(PORTHIO_BOTH)
927#define bfin_write_PORTHIO_BOTH(val) bfin_write16(PORTHIO_BOTH, val)
928#define bfin_read_PORTHIO_INEN() bfin_read16(PORTHIO_INEN)
929#define bfin_write_PORTHIO_INEN(val) bfin_write16(PORTHIO_INEN, val)
930
931
932/* UART1 Controller (0xFFC02000 - 0xFFC020FF) */
933#define bfin_read_UART1_THR() bfin_read16(UART1_THR)
934#define bfin_write_UART1_THR(val) bfin_write16(UART1_THR, val)
935#define bfin_read_UART1_RBR() bfin_read16(UART1_RBR)
936#define bfin_write_UART1_RBR(val) bfin_write16(UART1_RBR, val)
937#define bfin_read_UART1_DLL() bfin_read16(UART1_DLL)
938#define bfin_write_UART1_DLL(val) bfin_write16(UART1_DLL, val)
939#define bfin_read_UART1_IER() bfin_read16(UART1_IER)
940#define bfin_write_UART1_IER(val) bfin_write16(UART1_IER, val)
941#define bfin_read_UART1_DLH() bfin_read16(UART1_DLH)
942#define bfin_write_UART1_DLH(val) bfin_write16(UART1_DLH, val)
943#define bfin_read_UART1_IIR() bfin_read16(UART1_IIR)
944#define bfin_write_UART1_IIR(val) bfin_write16(UART1_IIR, val)
945#define bfin_read_UART1_LCR() bfin_read16(UART1_LCR)
946#define bfin_write_UART1_LCR(val) bfin_write16(UART1_LCR, val)
947#define bfin_read_UART1_MCR() bfin_read16(UART1_MCR)
948#define bfin_write_UART1_MCR(val) bfin_write16(UART1_MCR, val)
949#define bfin_read_UART1_LSR() bfin_read16(UART1_LSR)
950#define bfin_write_UART1_LSR(val) bfin_write16(UART1_LSR, val)
951#define bfin_read_UART1_MSR() bfin_read16(UART1_MSR)
952#define bfin_write_UART1_MSR(val) bfin_write16(UART1_MSR, val)
953#define bfin_read_UART1_SCR() bfin_read16(UART1_SCR)
954#define bfin_write_UART1_SCR(val) bfin_write16(UART1_SCR, val)
955#define bfin_read_UART1_GCTL() bfin_read16(UART1_GCTL)
956#define bfin_write_UART1_GCTL(val) bfin_write16(UART1_GCTL, val)
957
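The THR/RBR/DLL accessors above all decode to the same address (likewise IER/DLH); which register responds is selected by the divisor-latch access bit in UART1_LCR, per the 16550 convention. A hedged baud-rate sketch, assuming DLAB from the companion bit definitions and the kernel's get_sclk() helper:

static void uart1_set_baud(unsigned int baud)
{
	unsigned short divisor = get_sclk() / (16 * baud);

	bfin_write_UART1_LCR(bfin_read_UART1_LCR() | DLAB);  /* expose DLL/DLH */
	bfin_write_UART1_DLL(divisor & 0xff);
	bfin_write_UART1_DLH(divisor >> 8);
	bfin_write_UART1_LCR(bfin_read_UART1_LCR() & ~DLAB); /* back to THR/RBR */
}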
958/* CAN register sets from cdefBF534.h are omitted (the CAN block is not present on ADSP-BF52x parts) */

959
960/* Pin Control Registers (0xFFC03200 - 0xFFC032FF) */
961#define bfin_read_PORTF_FER() bfin_read16(PORTF_FER)
962#define bfin_write_PORTF_FER(val) bfin_write16(PORTF_FER, val)
963#define bfin_read_PORTG_FER() bfin_read16(PORTG_FER)
964#define bfin_write_PORTG_FER(val) bfin_write16(PORTG_FER, val)
965#define bfin_read_PORTH_FER() bfin_read16(PORTH_FER)
966#define bfin_write_PORTH_FER(val) bfin_write16(PORTH_FER, val)
967#define bfin_read_PORT_MUX() bfin_read16(PORT_MUX)
968#define bfin_write_PORT_MUX(val) bfin_write16(PORT_MUX, val)
969
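PORTx_FER decides, per pin, whether the pad is GPIO (bit clear) or handed to a peripheral (bit set), and PORT_MUX arbitrates between peripherals sharing a pin. An illustrative sketch claiming PF0/PF1 for peripheral use; the mux bit chosen here is hypothetical:

	bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x0003); /* PF0, PF1 -> peripheral */
	bfin_write_PORT_MUX(bfin_read_PORT_MUX() & ~0x0001);  /* hypothetical mux choice */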
970
971/* Handshake MDMA Registers (0xFFC03300 - 0xFFC033FF) */
972#define bfin_read_HMDMA0_CONTROL() bfin_read16(HMDMA0_CONTROL)
973#define bfin_write_HMDMA0_CONTROL(val) bfin_write16(HMDMA0_CONTROL, val)
974#define bfin_read_HMDMA0_ECINIT() bfin_read16(HMDMA0_ECINIT)
975#define bfin_write_HMDMA0_ECINIT(val) bfin_write16(HMDMA0_ECINIT, val)
976#define bfin_read_HMDMA0_BCINIT() bfin_read16(HMDMA0_BCINIT)
977#define bfin_write_HMDMA0_BCINIT(val) bfin_write16(HMDMA0_BCINIT, val)
978#define bfin_read_HMDMA0_ECURGENT() bfin_read16(HMDMA0_ECURGENT)
979#define bfin_write_HMDMA0_ECURGENT(val) bfin_write16(HMDMA0_ECURGENT, val)
980#define bfin_read_HMDMA0_ECOVERFLOW() bfin_read16(HMDMA0_ECOVERFLOW)
981#define bfin_write_HMDMA0_ECOVERFLOW(val) bfin_write16(HMDMA0_ECOVERFLOW, val)
982#define bfin_read_HMDMA0_ECOUNT() bfin_read16(HMDMA0_ECOUNT)
983#define bfin_write_HMDMA0_ECOUNT(val) bfin_write16(HMDMA0_ECOUNT, val)
984#define bfin_read_HMDMA0_BCOUNT() bfin_read16(HMDMA0_BCOUNT)
985#define bfin_write_HMDMA0_BCOUNT(val) bfin_write16(HMDMA0_BCOUNT, val)
986
987#define bfin_read_HMDMA1_CONTROL() bfin_read16(HMDMA1_CONTROL)
988#define bfin_write_HMDMA1_CONTROL(val) bfin_write16(HMDMA1_CONTROL, val)
989#define bfin_read_HMDMA1_ECINIT() bfin_read16(HMDMA1_ECINIT)
990#define bfin_write_HMDMA1_ECINIT(val) bfin_write16(HMDMA1_ECINIT, val)
991#define bfin_read_HMDMA1_BCINIT() bfin_read16(HMDMA1_BCINIT)
992#define bfin_write_HMDMA1_BCINIT(val) bfin_write16(HMDMA1_BCINIT, val)
993#define bfin_read_HMDMA1_ECURGENT() bfin_read16(HMDMA1_ECURGENT)
994#define bfin_write_HMDMA1_ECURGENT(val) bfin_write16(HMDMA1_ECURGENT, val)
995#define bfin_read_HMDMA1_ECOVERFLOW() bfin_read16(HMDMA1_ECOVERFLOW)
996#define bfin_write_HMDMA1_ECOVERFLOW(val) bfin_write16(HMDMA1_ECOVERFLOW, val)
997#define bfin_read_HMDMA1_ECOUNT() bfin_read16(HMDMA1_ECOUNT)
998#define bfin_write_HMDMA1_ECOUNT(val) bfin_write16(HMDMA1_ECOUNT, val)
999#define bfin_read_HMDMA1_BCOUNT() bfin_read16(HMDMA1_BCOUNT)
1000#define bfin_write_HMDMA1_BCOUNT(val) bfin_write16(HMDMA1_BCOUNT, val)
1001
1002/* ==== end from cdefBF534.h ==== */
1003
1004/* GPIO Pin Mux (0xFFC03210 - 0xFFC03288) */
1005
1006#define bfin_read_PORTF_MUX() bfin_read16(PORTF_MUX)
1007#define bfin_write_PORTF_MUX(val) bfin_write16(PORTF_MUX, val)
1008#define bfin_read_PORTG_MUX() bfin_read16(PORTG_MUX)
1009#define bfin_write_PORTG_MUX(val) bfin_write16(PORTG_MUX, val)
1010#define bfin_read_PORTH_MUX() bfin_read16(PORTH_MUX)
1011#define bfin_write_PORTH_MUX(val) bfin_write16(PORTH_MUX, val)
1012
1013#define bfin_read_PORTF_DRIVE() bfin_read16(PORTF_DRIVE)
1014#define bfin_write_PORTF_DRIVE(val) bfin_write16(PORTF_DRIVE, val)
1015#define bfin_read_PORTG_DRIVE() bfin_read16(PORTG_DRIVE)
1016#define bfin_write_PORTG_DRIVE(val) bfin_write16(PORTG_DRIVE, val)
1017#define bfin_read_PORTH_DRIVE() bfin_read16(PORTH_DRIVE)
1018#define bfin_write_PORTH_DRIVE(val) bfin_write16(PORTH_DRIVE, val)
1019#define bfin_read_PORTF_SLEW() bfin_read16(PORTF_SLEW)
1020#define bfin_write_PORTF_SLEW(val) bfin_write16(PORTF_SLEW, val)
1021#define bfin_read_PORTG_SLEW() bfin_read16(PORTG_SLEW)
1022#define bfin_write_PORTG_SLEW(val) bfin_write16(PORTG_SLEW, val)
1023#define bfin_read_PORTH_SLEW() bfin_read16(PORTH_SLEW)
1024#define bfin_write_PORTH_SLEW(val) bfin_write16(PORTH_SLEW, val)
1025#define bfin_read_PORTF_HYSTERISIS() bfin_read16(PORTF_HYSTERISIS)
1026#define bfin_write_PORTF_HYSTERISIS(val) bfin_write16(PORTF_HYSTERISIS, val)
1027#define bfin_read_PORTG_HYSTERISIS() bfin_read16(PORTG_HYSTERISIS)
1028#define bfin_write_PORTG_HYSTERISIS(val) bfin_write16(PORTG_HYSTERISIS, val)
1029#define bfin_read_PORTH_HYSTERISIS() bfin_read16(PORTH_HYSTERISIS)
1030#define bfin_write_PORTH_HYSTERISIS(val) bfin_write16(PORTH_HYSTERISIS, val)
1031#define bfin_read_MISCPORT_DRIVE() bfin_read16(MISCPORT_DRIVE)
1032#define bfin_write_MISCPORT_DRIVE(val) bfin_write16(MISCPORT_DRIVE, val)
1033#define bfin_read_MISCPORT_SLEW() bfin_read16(MISCPORT_SLEW)
1034#define bfin_write_MISCPORT_SLEW(val) bfin_write16(MISCPORT_SLEW, val)
1035#define bfin_read_MISCPORT_HYSTERISIS() bfin_read16(MISCPORT_HYSTERISIS)
1036#define bfin_write_MISCPORT_HYSTERISIS(val) bfin_write16(MISCPORT_HYSTERISIS, val)
1037
1038/* HOST Port Registers */
1039
1040#define bfin_read_HOST_CONTROL() bfin_read16(HOST_CONTROL)
1041#define bfin_write_HOST_CONTROL(val) bfin_write16(HOST_CONTROL, val)
1042#define bfin_read_HOST_STATUS() bfin_read16(HOST_STATUS)
1043#define bfin_write_HOST_STATUS(val) bfin_write16(HOST_STATUS, val)
1044#define bfin_read_HOST_TIMEOUT() bfin_read16(HOST_TIMEOUT)
1045#define bfin_write_HOST_TIMEOUT(val) bfin_write16(HOST_TIMEOUT, val)
1046
1047/* Counter Registers */
1048
1049#define bfin_read_CNT_CONFIG() bfin_read16(CNT_CONFIG)
1050#define bfin_write_CNT_CONFIG(val) bfin_write16(CNT_CONFIG, val)
1051#define bfin_read_CNT_IMASK() bfin_read16(CNT_IMASK)
1052#define bfin_write_CNT_IMASK(val) bfin_write16(CNT_IMASK, val)
1053#define bfin_read_CNT_STATUS() bfin_read16(CNT_STATUS)
1054#define bfin_write_CNT_STATUS(val) bfin_write16(CNT_STATUS, val)
1055#define bfin_read_CNT_COMMAND() bfin_read16(CNT_COMMAND)
1056#define bfin_write_CNT_COMMAND(val) bfin_write16(CNT_COMMAND, val)
1057#define bfin_read_CNT_DEBOUNCE() bfin_read16(CNT_DEBOUNCE)
1058#define bfin_write_CNT_DEBOUNCE(val) bfin_write16(CNT_DEBOUNCE, val)
1059#define bfin_read_CNT_COUNTER() bfin_read32(CNT_COUNTER)
1060#define bfin_write_CNT_COUNTER(val) bfin_write32(CNT_COUNTER, val)
1061#define bfin_read_CNT_MAX() bfin_read32(CNT_MAX)
1062#define bfin_write_CNT_MAX(val) bfin_write32(CNT_MAX, val)
1063#define bfin_read_CNT_MIN() bfin_read32(CNT_MIN)
1064#define bfin_write_CNT_MIN(val) bfin_write32(CNT_MIN, val)
1065
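Note that CNT_COUNTER, CNT_MAX and CNT_MIN are the only 32-bit registers in this block. A hedged sketch for reading the rotary counter, assuming the CNTE enable bit from the companion CNT_CONFIG definitions; pos is an illustrative variable:

	bfin_write_CNT_DEBOUNCE(10);                          /* debounce prescale */
	bfin_write_CNT_CONFIG(bfin_read_CNT_CONFIG() | CNTE); /* start counting */
	pos = (int)bfin_read_CNT_COUNTER();                   /* signed position */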
1066/* Security Registers */
1067
1068#define bfin_read_SECURE_SYSSWT() bfin_read32(SECURE_SYSSWT)
1069#define bfin_write_SECURE_SYSSWT(val) bfin_write32(SECURE_SYSSWT, val)
1070#define bfin_read_SECURE_CONTROL() bfin_read16(SECURE_CONTROL)
1071#define bfin_write_SECURE_CONTROL(val) bfin_write16(SECURE_CONTROL, val)
1072#define bfin_read_SECURE_STATUS() bfin_read16(SECURE_STATUS)
1073#define bfin_write_SECURE_STATUS(val) bfin_write16(SECURE_STATUS, val)
1074
1075/* NFC Registers */
1076
1077#define bfin_read_NFC_CTL() bfin_read16(NFC_CTL)
1078#define bfin_write_NFC_CTL(val) bfin_write16(NFC_CTL, val)
1079#define bfin_read_NFC_STAT() bfin_read16(NFC_STAT)
1080#define bfin_write_NFC_STAT(val) bfin_write16(NFC_STAT, val)
1081#define bfin_read_NFC_IRQSTAT() bfin_read16(NFC_IRQSTAT)
1082#define bfin_write_NFC_IRQSTAT(val) bfin_write16(NFC_IRQSTAT, val)
1083#define bfin_read_NFC_IRQMASK() bfin_read16(NFC_IRQMASK)
1084#define bfin_write_NFC_IRQMASK(val) bfin_write16(NFC_IRQMASK, val)
1085#define bfin_read_NFC_ECC0() bfin_read16(NFC_ECC0)
1086#define bfin_write_NFC_ECC0(val) bfin_write16(NFC_ECC0, val)
1087#define bfin_read_NFC_ECC1() bfin_read16(NFC_ECC1)
1088#define bfin_write_NFC_ECC1(val) bfin_write16(NFC_ECC1, val)
1089#define bfin_read_NFC_ECC2() bfin_read16(NFC_ECC2)
1090#define bfin_write_NFC_ECC2(val) bfin_write16(NFC_ECC2, val)
1091#define bfin_read_NFC_ECC3() bfin_read16(NFC_ECC3)
1092#define bfin_write_NFC_ECC3(val) bfin_write16(NFC_ECC3, val)
1093#define bfin_read_NFC_COUNT() bfin_read16(NFC_COUNT)
1094#define bfin_write_NFC_COUNT(val) bfin_write16(NFC_COUNT, val)
1095#define bfin_read_NFC_RST() bfin_read16(NFC_RST)
1096#define bfin_write_NFC_RST(val) bfin_write16(NFC_RST, val)
1097#define bfin_read_NFC_PGCTL() bfin_read16(NFC_PGCTL)
1098#define bfin_write_NFC_PGCTL(val) bfin_write16(NFC_PGCTL, val)
1099#define bfin_read_NFC_READ() bfin_read16(NFC_READ)
1100#define bfin_write_NFC_READ(val) bfin_write16(NFC_READ, val)
1101#define bfin_read_NFC_ADDR() bfin_read16(NFC_ADDR)
1102#define bfin_write_NFC_ADDR(val) bfin_write16(NFC_ADDR, val)
1103#define bfin_read_NFC_CMD() bfin_read16(NFC_CMD)
1104#define bfin_write_NFC_CMD(val) bfin_write16(NFC_CMD, val)
1105#define bfin_read_NFC_DATA_WR() bfin_read16(NFC_DATA_WR)
1106#define bfin_write_NFC_DATA_WR(val) bfin_write16(NFC_DATA_WR, val)
1107#define bfin_read_NFC_DATA_RD() bfin_read16(NFC_DATA_RD)
1108#define bfin_write_NFC_DATA_RD(val) bfin_write16(NFC_DATA_RD, val)
1109
1110/* These need to be last due to the cdef/linux inter-dependencies */
1111#include <asm/irq.h>
1112
1113#endif /* _CDEF_BF52X_H */
diff --git a/arch/blackfin/mach-bf527/include/mach/defBF522.h b/arch/blackfin/mach-bf527/include/mach/defBF522.h
index cb139a254810..89f5420ee6cd 100644
--- a/arch/blackfin/mach-bf527/include/mach/defBF522.h
+++ b/arch/blackfin/mach-bf527/include/mach/defBF522.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
  *
  * Licensed under the ADI BSD license or the GPL-2 (or later)
  */
@@ -7,12 +7,1393 @@
 #ifndef _DEF_BF522_H
 #define _DEF_BF522_H
 
-/* Include all Core registers and bit definitions */
-#include <asm/def_LPBlackfin.h>
+/* ************************************************************** */
+/* SYSTEM & MMR ADDRESS DEFINITIONS COMMON TO ALL ADSP-BF52x */
+/* ************************************************************** */
 
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF522 */
+/* ==== begin from defBF534.h ==== */
 
-/* Include defBF52x_base.h for the set of #defines that are common to all ADSP-BF52x processors */
-#include "defBF52x_base.h"
+/* Clock and System Control (0xFFC00000 - 0xFFC000FF) */
+#define PLL_CTL 0xFFC00000 /* PLL Control Register */
18#define PLL_DIV 0xFFC00004 /* PLL Divide Register */
19#define VR_CTL 0xFFC00008 /* Voltage Regulator Control Register */
20#define PLL_STAT 0xFFC0000C /* PLL Status Register */
21#define PLL_LOCKCNT 0xFFC00010 /* PLL Lock Count Register */
22#define CHIPID 0xFFC00014 /* Device ID Register */
23
24
25/* System Interrupt Controller (0xFFC00100 - 0xFFC001FF) */
26#define SWRST 0xFFC00100 /* Software Reset Register */
27#define SYSCR 0xFFC00104 /* System Configuration Register */
28#define SIC_RVECT 0xFFC00108 /* Interrupt Reset Vector Address Register */
29
30#define SIC_IMASK0 0xFFC0010C /* Interrupt Mask Register */
31#define SIC_IAR0 0xFFC00110 /* Interrupt Assignment Register 0 */
32#define SIC_IAR1 0xFFC00114 /* Interrupt Assignment Register 1 */
33#define SIC_IAR2 0xFFC00118 /* Interrupt Assignment Register 2 */
34#define SIC_IAR3 0xFFC0011C /* Interrupt Assignment Register 3 */
35#define SIC_ISR0 0xFFC00120 /* Interrupt Status Register */
36#define SIC_IWR0 0xFFC00124 /* Interrupt Wakeup Register */
37
38/* SIC Additions to ADSP-BF52x (0xFFC0014C - 0xFFC00167) */
39#define SIC_IMASK1 0xFFC0014C /* Interrupt Mask Register 1 */
40#define SIC_IAR4 0xFFC00150 /* Interrupt Assignment Register 4 */
41#define SIC_IAR5 0xFFC00154 /* Interrupt Assignment Register 5 */
42#define SIC_IAR6 0xFFC00158 /* Interrupt Assignment Register 6 */
43#define SIC_IAR7 0xFFC0015C /* Interrupt Assignment Register 7 */
44#define SIC_ISR1 0xFFC00160 /* Interrupt Status Register 1 */
45#define SIC_IWR1 0xFFC00164 /* Interrupt Wakeup Register 1 */
46
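Each SIC_IARx packs eight 4-bit fields, one per system interrupt, and the hardware convention is that a field stores the target IVG priority minus 7. A sketch under that assumption, routing system interrupt sys_irq (0-63) to core priority ivg; note IAR0-IAR3 and IAR4-IAR7 sit in two separate address blocks:

static void sic_route_irq(unsigned int sys_irq, unsigned int ivg)
{
	unsigned long iar = (sys_irq < 32 ? SIC_IAR0 : SIC_IAR4) +
			    ((sys_irq % 32) / 8) * 4;
	unsigned int shift = (sys_irq % 8) * 4;
	unsigned long val = bfin_read32(iar);

	val &= ~(0xfUL << shift);
	val |= (unsigned long)(ivg - 7) << shift;
	bfin_write32(iar, val);
}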
47
48/* Watchdog Timer (0xFFC00200 - 0xFFC002FF) */
49#define WDOG_CTL 0xFFC00200 /* Watchdog Control Register */
50#define WDOG_CNT 0xFFC00204 /* Watchdog Count Register */
51#define WDOG_STAT 0xFFC00208 /* Watchdog Status Register */
52
53
54/* Real Time Clock (0xFFC00300 - 0xFFC003FF) */
55#define RTC_STAT 0xFFC00300 /* RTC Status Register */
56#define RTC_ICTL 0xFFC00304 /* RTC Interrupt Control Register */
57#define RTC_ISTAT 0xFFC00308 /* RTC Interrupt Status Register */
58#define RTC_SWCNT 0xFFC0030C /* RTC Stopwatch Count Register */
59#define RTC_ALARM 0xFFC00310 /* RTC Alarm Time Register */
60#define RTC_FAST 0xFFC00314 /* RTC Prescaler Enable Register */
61#define RTC_PREN 0xFFC00314 /* RTC Prescaler Enable Alternate Macro */
62
63
64/* UART0 Controller (0xFFC00400 - 0xFFC004FF) */
65#define UART0_THR 0xFFC00400 /* Transmit Holding register */
66#define UART0_RBR 0xFFC00400 /* Receive Buffer register */
67#define UART0_DLL 0xFFC00400 /* Divisor Latch (Low-Byte) */
68#define UART0_IER 0xFFC00404 /* Interrupt Enable Register */
69#define UART0_DLH 0xFFC00404 /* Divisor Latch (High-Byte) */
70#define UART0_IIR 0xFFC00408 /* Interrupt Identification Register */
71#define UART0_LCR 0xFFC0040C /* Line Control Register */
72#define UART0_MCR 0xFFC00410 /* Modem Control Register */
73#define UART0_LSR 0xFFC00414 /* Line Status Register */
74#define UART0_MSR 0xFFC00418 /* Modem Status Register */
75#define UART0_SCR 0xFFC0041C /* SCR Scratch Register */
76#define UART0_GCTL 0xFFC00424 /* Global Control Register */
77
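Note that UART0_THR, UART0_RBR and UART0_DLL deliberately share 0xFFC00400 (as do UART0_IER and UART0_DLH at 0xFFC00404): the DLAB bit in UART0_LCR selects whether an access reaches the data registers or the divisor latch, following the standard 16550 scheme.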
78
79/* SPI Controller (0xFFC00500 - 0xFFC005FF) */
80#define SPI0_REGBASE 0xFFC00500
81#define SPI_CTL 0xFFC00500 /* SPI Control Register */
82#define SPI_FLG 0xFFC00504 /* SPI Flag register */
83#define SPI_STAT 0xFFC00508 /* SPI Status register */
84#define SPI_TDBR 0xFFC0050C /* SPI Transmit Data Buffer Register */
85#define SPI_RDBR 0xFFC00510 /* SPI Receive Data Buffer Register */
86#define SPI_BAUD 0xFFC00514 /* SPI Baud rate Register */
87#define SPI_SHADOW 0xFFC00518 /* SPI_RDBR Shadow Register */
88
89
90/* TIMER0-7 Registers (0xFFC00600 - 0xFFC006FF) */
91#define TIMER0_CONFIG 0xFFC00600 /* Timer 0 Configuration Register */
92#define TIMER0_COUNTER 0xFFC00604 /* Timer 0 Counter Register */
93#define TIMER0_PERIOD 0xFFC00608 /* Timer 0 Period Register */
94#define TIMER0_WIDTH 0xFFC0060C /* Timer 0 Width Register */
95
96#define TIMER1_CONFIG 0xFFC00610 /* Timer 1 Configuration Register */
97#define TIMER1_COUNTER 0xFFC00614 /* Timer 1 Counter Register */
98#define TIMER1_PERIOD 0xFFC00618 /* Timer 1 Period Register */
99#define TIMER1_WIDTH 0xFFC0061C /* Timer 1 Width Register */
100
101#define TIMER2_CONFIG 0xFFC00620 /* Timer 2 Configuration Register */
102#define TIMER2_COUNTER 0xFFC00624 /* Timer 2 Counter Register */
103#define TIMER2_PERIOD 0xFFC00628 /* Timer 2 Period Register */
104#define TIMER2_WIDTH 0xFFC0062C /* Timer 2 Width Register */
105
106#define TIMER3_CONFIG 0xFFC00630 /* Timer 3 Configuration Register */
107#define TIMER3_COUNTER 0xFFC00634 /* Timer 3 Counter Register */
108#define TIMER3_PERIOD 0xFFC00638 /* Timer 3 Period Register */
109#define TIMER3_WIDTH 0xFFC0063C /* Timer 3 Width Register */
110
111#define TIMER4_CONFIG 0xFFC00640 /* Timer 4 Configuration Register */
112#define TIMER4_COUNTER 0xFFC00644 /* Timer 4 Counter Register */
113#define TIMER4_PERIOD 0xFFC00648 /* Timer 4 Period Register */
114#define TIMER4_WIDTH 0xFFC0064C /* Timer 4 Width Register */
115
116#define TIMER5_CONFIG 0xFFC00650 /* Timer 5 Configuration Register */
117#define TIMER5_COUNTER 0xFFC00654 /* Timer 5 Counter Register */
118#define TIMER5_PERIOD 0xFFC00658 /* Timer 5 Period Register */
119#define TIMER5_WIDTH 0xFFC0065C /* Timer 5 Width Register */
120
121#define TIMER6_CONFIG 0xFFC00660 /* Timer 6 Configuration Register */
122#define TIMER6_COUNTER 0xFFC00664 /* Timer 6 Counter Register */
123#define TIMER6_PERIOD 0xFFC00668 /* Timer 6 Period Register */
124#define TIMER6_WIDTH 0xFFC0066C /* Timer 6 Width Register */
125
126#define TIMER7_CONFIG 0xFFC00670 /* Timer 7 Configuration Register */
127#define TIMER7_COUNTER 0xFFC00674 /* Timer 7 Counter Register */
128#define TIMER7_PERIOD 0xFFC00678 /* Timer 7 Period Register */
129#define TIMER7_WIDTH 0xFFC0067C /* Timer 7 Width Register */
130
131#define TIMER_ENABLE 0xFFC00680 /* Timer Enable Register */
132#define TIMER_DISABLE 0xFFC00684 /* Timer Disable Register */
133#define TIMER_STATUS 0xFFC00688 /* Timer Status Register */
134
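TIMER_ENABLE and TIMER_DISABLE are write-1-to-act, so one timer can be started without disturbing the others. A hedged sketch putting TIMER0 into PWM output mode; PWM_OUT, PERIOD_CNT and TIMEN0 are assumed from the companion timer bit definitions:

	bfin_write16(TIMER0_CONFIG, PWM_OUT | PERIOD_CNT); /* PWM, count to period */
	bfin_write32(TIMER0_PERIOD, 1000);
	bfin_write32(TIMER0_WIDTH, 500);                   /* 50% duty cycle */
	bfin_write16(TIMER_ENABLE, TIMEN0);                /* starts only timer 0 */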
135
136/* General Purpose I/O Port F (0xFFC00700 - 0xFFC007FF) */
137#define PORTFIO 0xFFC00700 /* Port F I/O Pin State Specify Register */
138#define PORTFIO_CLEAR 0xFFC00704 /* Port F I/O Peripheral Interrupt Clear Register */
139#define PORTFIO_SET 0xFFC00708 /* Port F I/O Peripheral Interrupt Set Register */
140#define PORTFIO_TOGGLE 0xFFC0070C /* Port F I/O Pin State Toggle Register */
141#define PORTFIO_MASKA 0xFFC00710 /* Port F I/O Mask State Specify Interrupt A Register */
142#define PORTFIO_MASKA_CLEAR 0xFFC00714 /* Port F I/O Mask Disable Interrupt A Register */
143#define PORTFIO_MASKA_SET 0xFFC00718 /* Port F I/O Mask Enable Interrupt A Register */
144#define PORTFIO_MASKA_TOGGLE 0xFFC0071C /* Port F I/O Mask Toggle Enable Interrupt A Register */
145#define PORTFIO_MASKB 0xFFC00720 /* Port F I/O Mask State Specify Interrupt B Register */
146#define PORTFIO_MASKB_CLEAR 0xFFC00724 /* Port F I/O Mask Disable Interrupt B Register */
147#define PORTFIO_MASKB_SET 0xFFC00728 /* Port F I/O Mask Enable Interrupt B Register */
148#define PORTFIO_MASKB_TOGGLE 0xFFC0072C /* Port F I/O Mask Toggle Enable Interrupt B Register */
149#define PORTFIO_DIR 0xFFC00730 /* Port F I/O Direction Register */
150#define PORTFIO_POLAR 0xFFC00734 /* Port F I/O Source Polarity Register */
151#define PORTFIO_EDGE 0xFFC00738 /* Port F I/O Source Sensitivity Register */
152#define PORTFIO_BOTH 0xFFC0073C /* Port F I/O Set on BOTH Edges Register */
153#define PORTFIO_INEN 0xFFC00740 /* Port F I/O Input Enable Register */
154
155
156/* SPORT0 Controller (0xFFC00800 - 0xFFC008FF) */
157#define SPORT0_TCR1 0xFFC00800 /* SPORT0 Transmit Configuration 1 Register */
158#define SPORT0_TCR2 0xFFC00804 /* SPORT0 Transmit Configuration 2 Register */
159#define SPORT0_TCLKDIV 0xFFC00808 /* SPORT0 Transmit Clock Divider */
160#define SPORT0_TFSDIV 0xFFC0080C /* SPORT0 Transmit Frame Sync Divider */
161#define SPORT0_TX 0xFFC00810 /* SPORT0 TX Data Register */
162#define SPORT0_RX 0xFFC00818 /* SPORT0 RX Data Register */
163#define SPORT0_RCR1 0xFFC00820 /* SPORT0 Receive Configuration 1 Register */
164#define SPORT0_RCR2 0xFFC00824 /* SPORT0 Receive Configuration 2 Register */
165#define SPORT0_RCLKDIV 0xFFC00828 /* SPORT0 Receive Clock Divider */
166#define SPORT0_RFSDIV 0xFFC0082C /* SPORT0 Receive Frame Sync Divider */
167#define SPORT0_STAT 0xFFC00830 /* SPORT0 Status Register */
168#define SPORT0_CHNL 0xFFC00834 /* SPORT0 Current Channel Register */
169#define SPORT0_MCMC1 0xFFC00838 /* SPORT0 Multi-Channel Configuration Register 1 */
170#define SPORT0_MCMC2 0xFFC0083C /* SPORT0 Multi-Channel Configuration Register 2 */
171#define SPORT0_MTCS0 0xFFC00840 /* SPORT0 Multi-Channel Transmit Select Register 0 */
172#define SPORT0_MTCS1 0xFFC00844 /* SPORT0 Multi-Channel Transmit Select Register 1 */
173#define SPORT0_MTCS2 0xFFC00848 /* SPORT0 Multi-Channel Transmit Select Register 2 */
174#define SPORT0_MTCS3 0xFFC0084C /* SPORT0 Multi-Channel Transmit Select Register 3 */
175#define SPORT0_MRCS0 0xFFC00850 /* SPORT0 Multi-Channel Receive Select Register 0 */
176#define SPORT0_MRCS1 0xFFC00854 /* SPORT0 Multi-Channel Receive Select Register 1 */
177#define SPORT0_MRCS2 0xFFC00858 /* SPORT0 Multi-Channel Receive Select Register 2 */
178#define SPORT0_MRCS3 0xFFC0085C /* SPORT0 Multi-Channel Receive Select Register 3 */
179
180
181/* SPORT1 Controller (0xFFC00900 - 0xFFC009FF) */
182#define SPORT1_TCR1 0xFFC00900 /* SPORT1 Transmit Configuration 1 Register */
183#define SPORT1_TCR2 0xFFC00904 /* SPORT1 Transmit Configuration 2 Register */
184#define SPORT1_TCLKDIV 0xFFC00908 /* SPORT1 Transmit Clock Divider */
185#define SPORT1_TFSDIV 0xFFC0090C /* SPORT1 Transmit Frame Sync Divider */
186#define SPORT1_TX 0xFFC00910 /* SPORT1 TX Data Register */
187#define SPORT1_RX 0xFFC00918 /* SPORT1 RX Data Register */
188#define SPORT1_RCR1 0xFFC00920 /* SPORT1 Receive Configuration 1 Register */
189#define SPORT1_RCR2 0xFFC00924 /* SPORT1 Receive Configuration 2 Register */
190#define SPORT1_RCLKDIV 0xFFC00928 /* SPORT1 Receive Clock Divider */
191#define SPORT1_RFSDIV 0xFFC0092C /* SPORT1 Receive Frame Sync Divider */
192#define SPORT1_STAT 0xFFC00930 /* SPORT1 Status Register */
193#define SPORT1_CHNL 0xFFC00934 /* SPORT1 Current Channel Register */
194#define SPORT1_MCMC1 0xFFC00938 /* SPORT1 Multi-Channel Configuration Register 1 */
195#define SPORT1_MCMC2 0xFFC0093C /* SPORT1 Multi-Channel Configuration Register 2 */
196#define SPORT1_MTCS0 0xFFC00940 /* SPORT1 Multi-Channel Transmit Select Register 0 */
197#define SPORT1_MTCS1 0xFFC00944 /* SPORT1 Multi-Channel Transmit Select Register 1 */
198#define SPORT1_MTCS2 0xFFC00948 /* SPORT1 Multi-Channel Transmit Select Register 2 */
199#define SPORT1_MTCS3 0xFFC0094C /* SPORT1 Multi-Channel Transmit Select Register 3 */
200#define SPORT1_MRCS0 0xFFC00950 /* SPORT1 Multi-Channel Receive Select Register 0 */
201#define SPORT1_MRCS1 0xFFC00954 /* SPORT1 Multi-Channel Receive Select Register 1 */
202#define SPORT1_MRCS2 0xFFC00958 /* SPORT1 Multi-Channel Receive Select Register 2 */
203#define SPORT1_MRCS3 0xFFC0095C /* SPORT1 Multi-Channel Receive Select Register 3 */
204
205
206/* External Bus Interface Unit (0xFFC00A00 - 0xFFC00AFF) */
207#define EBIU_AMGCTL 0xFFC00A00 /* Asynchronous Memory Global Control Register */
208#define EBIU_AMBCTL0 0xFFC00A04 /* Asynchronous Memory Bank Control Register 0 */
209#define EBIU_AMBCTL1 0xFFC00A08 /* Asynchronous Memory Bank Control Register 1 */
210#define EBIU_SDGCTL 0xFFC00A10 /* SDRAM Global Control Register */
211#define EBIU_SDBCTL 0xFFC00A14 /* SDRAM Bank Control Register */
212#define EBIU_SDRRC 0xFFC00A18 /* SDRAM Refresh Rate Control Register */
213#define EBIU_SDSTAT 0xFFC00A1C /* SDRAM Status Register */
214
215
216/* DMA Traffic Control Registers */
217#define DMAC_TC_PER 0xFFC00B0C /* Traffic Control Periods Register */
218#define DMAC_TC_CNT 0xFFC00B10 /* Traffic Control Current Counts Register */
219
220/* DMA Controller (0xFFC00C00 - 0xFFC00FFF) */
221#define DMA0_NEXT_DESC_PTR 0xFFC00C00 /* DMA Channel 0 Next Descriptor Pointer Register */
222#define DMA0_START_ADDR 0xFFC00C04 /* DMA Channel 0 Start Address Register */
223#define DMA0_CONFIG 0xFFC00C08 /* DMA Channel 0 Configuration Register */
224#define DMA0_X_COUNT 0xFFC00C10 /* DMA Channel 0 X Count Register */
225#define DMA0_X_MODIFY 0xFFC00C14 /* DMA Channel 0 X Modify Register */
226#define DMA0_Y_COUNT 0xFFC00C18 /* DMA Channel 0 Y Count Register */
227#define DMA0_Y_MODIFY 0xFFC00C1C /* DMA Channel 0 Y Modify Register */
228#define DMA0_CURR_DESC_PTR 0xFFC00C20 /* DMA Channel 0 Current Descriptor Pointer Register */
229#define DMA0_CURR_ADDR 0xFFC00C24 /* DMA Channel 0 Current Address Register */
230#define DMA0_IRQ_STATUS 0xFFC00C28 /* DMA Channel 0 Interrupt/Status Register */
231#define DMA0_PERIPHERAL_MAP 0xFFC00C2C /* DMA Channel 0 Peripheral Map Register */
232#define DMA0_CURR_X_COUNT 0xFFC00C30 /* DMA Channel 0 Current X Count Register */
233#define DMA0_CURR_Y_COUNT 0xFFC00C38 /* DMA Channel 0 Current Y Count Register */
234
235#define DMA1_NEXT_DESC_PTR 0xFFC00C40 /* DMA Channel 1 Next Descriptor Pointer Register */
236#define DMA1_START_ADDR 0xFFC00C44 /* DMA Channel 1 Start Address Register */
237#define DMA1_CONFIG 0xFFC00C48 /* DMA Channel 1 Configuration Register */
238#define DMA1_X_COUNT 0xFFC00C50 /* DMA Channel 1 X Count Register */
239#define DMA1_X_MODIFY 0xFFC00C54 /* DMA Channel 1 X Modify Register */
240#define DMA1_Y_COUNT 0xFFC00C58 /* DMA Channel 1 Y Count Register */
241#define DMA1_Y_MODIFY 0xFFC00C5C /* DMA Channel 1 Y Modify Register */
242#define DMA1_CURR_DESC_PTR 0xFFC00C60 /* DMA Channel 1 Current Descriptor Pointer Register */
243#define DMA1_CURR_ADDR 0xFFC00C64 /* DMA Channel 1 Current Address Register */
244#define DMA1_IRQ_STATUS 0xFFC00C68 /* DMA Channel 1 Interrupt/Status Register */
245#define DMA1_PERIPHERAL_MAP 0xFFC00C6C /* DMA Channel 1 Peripheral Map Register */
246#define DMA1_CURR_X_COUNT 0xFFC00C70 /* DMA Channel 1 Current X Count Register */
247#define DMA1_CURR_Y_COUNT 0xFFC00C78 /* DMA Channel 1 Current Y Count Register */
248
249#define DMA2_NEXT_DESC_PTR 0xFFC00C80 /* DMA Channel 2 Next Descriptor Pointer Register */
250#define DMA2_START_ADDR 0xFFC00C84 /* DMA Channel 2 Start Address Register */
251#define DMA2_CONFIG 0xFFC00C88 /* DMA Channel 2 Configuration Register */
252#define DMA2_X_COUNT 0xFFC00C90 /* DMA Channel 2 X Count Register */
253#define DMA2_X_MODIFY 0xFFC00C94 /* DMA Channel 2 X Modify Register */
254#define DMA2_Y_COUNT 0xFFC00C98 /* DMA Channel 2 Y Count Register */
255#define DMA2_Y_MODIFY 0xFFC00C9C /* DMA Channel 2 Y Modify Register */
256#define DMA2_CURR_DESC_PTR 0xFFC00CA0 /* DMA Channel 2 Current Descriptor Pointer Register */
257#define DMA2_CURR_ADDR 0xFFC00CA4 /* DMA Channel 2 Current Address Register */
258#define DMA2_IRQ_STATUS 0xFFC00CA8 /* DMA Channel 2 Interrupt/Status Register */
259#define DMA2_PERIPHERAL_MAP 0xFFC00CAC /* DMA Channel 2 Peripheral Map Register */
260#define DMA2_CURR_X_COUNT 0xFFC00CB0 /* DMA Channel 2 Current X Count Register */
261#define DMA2_CURR_Y_COUNT 0xFFC00CB8 /* DMA Channel 2 Current Y Count Register */
262
263#define DMA3_NEXT_DESC_PTR 0xFFC00CC0 /* DMA Channel 3 Next Descriptor Pointer Register */
264#define DMA3_START_ADDR 0xFFC00CC4 /* DMA Channel 3 Start Address Register */
265#define DMA3_CONFIG 0xFFC00CC8 /* DMA Channel 3 Configuration Register */
266#define DMA3_X_COUNT 0xFFC00CD0 /* DMA Channel 3 X Count Register */
267#define DMA3_X_MODIFY 0xFFC00CD4 /* DMA Channel 3 X Modify Register */
268#define DMA3_Y_COUNT 0xFFC00CD8 /* DMA Channel 3 Y Count Register */
269#define DMA3_Y_MODIFY 0xFFC00CDC /* DMA Channel 3 Y Modify Register */
270#define DMA3_CURR_DESC_PTR 0xFFC00CE0 /* DMA Channel 3 Current Descriptor Pointer Register */
271#define DMA3_CURR_ADDR 0xFFC00CE4 /* DMA Channel 3 Current Address Register */
272#define DMA3_IRQ_STATUS 0xFFC00CE8 /* DMA Channel 3 Interrupt/Status Register */
273#define DMA3_PERIPHERAL_MAP 0xFFC00CEC /* DMA Channel 3 Peripheral Map Register */
274#define DMA3_CURR_X_COUNT 0xFFC00CF0 /* DMA Channel 3 Current X Count Register */
275#define DMA3_CURR_Y_COUNT 0xFFC00CF8 /* DMA Channel 3 Current Y Count Register */
276
277#define DMA4_NEXT_DESC_PTR 0xFFC00D00 /* DMA Channel 4 Next Descriptor Pointer Register */
278#define DMA4_START_ADDR 0xFFC00D04 /* DMA Channel 4 Start Address Register */
279#define DMA4_CONFIG 0xFFC00D08 /* DMA Channel 4 Configuration Register */
280#define DMA4_X_COUNT 0xFFC00D10 /* DMA Channel 4 X Count Register */
281#define DMA4_X_MODIFY 0xFFC00D14 /* DMA Channel 4 X Modify Register */
282#define DMA4_Y_COUNT 0xFFC00D18 /* DMA Channel 4 Y Count Register */
283#define DMA4_Y_MODIFY 0xFFC00D1C /* DMA Channel 4 Y Modify Register */
284#define DMA4_CURR_DESC_PTR 0xFFC00D20 /* DMA Channel 4 Current Descriptor Pointer Register */
285#define DMA4_CURR_ADDR 0xFFC00D24 /* DMA Channel 4 Current Address Register */
286#define DMA4_IRQ_STATUS 0xFFC00D28 /* DMA Channel 4 Interrupt/Status Register */
287#define DMA4_PERIPHERAL_MAP 0xFFC00D2C /* DMA Channel 4 Peripheral Map Register */
288#define DMA4_CURR_X_COUNT 0xFFC00D30 /* DMA Channel 4 Current X Count Register */
289#define DMA4_CURR_Y_COUNT 0xFFC00D38 /* DMA Channel 4 Current Y Count Register */
290
291#define DMA5_NEXT_DESC_PTR 0xFFC00D40 /* DMA Channel 5 Next Descriptor Pointer Register */
292#define DMA5_START_ADDR 0xFFC00D44 /* DMA Channel 5 Start Address Register */
293#define DMA5_CONFIG 0xFFC00D48 /* DMA Channel 5 Configuration Register */
294#define DMA5_X_COUNT 0xFFC00D50 /* DMA Channel 5 X Count Register */
295#define DMA5_X_MODIFY 0xFFC00D54 /* DMA Channel 5 X Modify Register */
296#define DMA5_Y_COUNT 0xFFC00D58 /* DMA Channel 5 Y Count Register */
297#define DMA5_Y_MODIFY 0xFFC00D5C /* DMA Channel 5 Y Modify Register */
298#define DMA5_CURR_DESC_PTR 0xFFC00D60 /* DMA Channel 5 Current Descriptor Pointer Register */
299#define DMA5_CURR_ADDR 0xFFC00D64 /* DMA Channel 5 Current Address Register */
300#define DMA5_IRQ_STATUS 0xFFC00D68 /* DMA Channel 5 Interrupt/Status Register */
301#define DMA5_PERIPHERAL_MAP 0xFFC00D6C /* DMA Channel 5 Peripheral Map Register */
302#define DMA5_CURR_X_COUNT 0xFFC00D70 /* DMA Channel 5 Current X Count Register */
303#define DMA5_CURR_Y_COUNT 0xFFC00D78 /* DMA Channel 5 Current Y Count Register */
304
305#define DMA6_NEXT_DESC_PTR 0xFFC00D80 /* DMA Channel 6 Next Descriptor Pointer Register */
306#define DMA6_START_ADDR 0xFFC00D84 /* DMA Channel 6 Start Address Register */
307#define DMA6_CONFIG 0xFFC00D88 /* DMA Channel 6 Configuration Register */
308#define DMA6_X_COUNT 0xFFC00D90 /* DMA Channel 6 X Count Register */
309#define DMA6_X_MODIFY 0xFFC00D94 /* DMA Channel 6 X Modify Register */
310#define DMA6_Y_COUNT 0xFFC00D98 /* DMA Channel 6 Y Count Register */
311#define DMA6_Y_MODIFY 0xFFC00D9C /* DMA Channel 6 Y Modify Register */
312#define DMA6_CURR_DESC_PTR 0xFFC00DA0 /* DMA Channel 6 Current Descriptor Pointer Register */
313#define DMA6_CURR_ADDR 0xFFC00DA4 /* DMA Channel 6 Current Address Register */
314#define DMA6_IRQ_STATUS 0xFFC00DA8 /* DMA Channel 6 Interrupt/Status Register */
315#define DMA6_PERIPHERAL_MAP 0xFFC00DAC /* DMA Channel 6 Peripheral Map Register */
316#define DMA6_CURR_X_COUNT 0xFFC00DB0 /* DMA Channel 6 Current X Count Register */
317#define DMA6_CURR_Y_COUNT 0xFFC00DB8 /* DMA Channel 6 Current Y Count Register */
318
319#define DMA7_NEXT_DESC_PTR 0xFFC00DC0 /* DMA Channel 7 Next Descriptor Pointer Register */
320#define DMA7_START_ADDR 0xFFC00DC4 /* DMA Channel 7 Start Address Register */
321#define DMA7_CONFIG 0xFFC00DC8 /* DMA Channel 7 Configuration Register */
322#define DMA7_X_COUNT 0xFFC00DD0 /* DMA Channel 7 X Count Register */
323#define DMA7_X_MODIFY 0xFFC00DD4 /* DMA Channel 7 X Modify Register */
324#define DMA7_Y_COUNT 0xFFC00DD8 /* DMA Channel 7 Y Count Register */
325#define DMA7_Y_MODIFY 0xFFC00DDC /* DMA Channel 7 Y Modify Register */
326#define DMA7_CURR_DESC_PTR 0xFFC00DE0 /* DMA Channel 7 Current Descriptor Pointer Register */
327#define DMA7_CURR_ADDR 0xFFC00DE4 /* DMA Channel 7 Current Address Register */
328#define DMA7_IRQ_STATUS 0xFFC00DE8 /* DMA Channel 7 Interrupt/Status Register */
329#define DMA7_PERIPHERAL_MAP 0xFFC00DEC /* DMA Channel 7 Peripheral Map Register */
330#define DMA7_CURR_X_COUNT 0xFFC00DF0 /* DMA Channel 7 Current X Count Register */
331#define DMA7_CURR_Y_COUNT 0xFFC00DF8 /* DMA Channel 7 Current Y Count Register */
332
333#define DMA8_NEXT_DESC_PTR 0xFFC00E00 /* DMA Channel 8 Next Descriptor Pointer Register */
334#define DMA8_START_ADDR 0xFFC00E04 /* DMA Channel 8 Start Address Register */
335#define DMA8_CONFIG 0xFFC00E08 /* DMA Channel 8 Configuration Register */
336#define DMA8_X_COUNT 0xFFC00E10 /* DMA Channel 8 X Count Register */
337#define DMA8_X_MODIFY 0xFFC00E14 /* DMA Channel 8 X Modify Register */
338#define DMA8_Y_COUNT 0xFFC00E18 /* DMA Channel 8 Y Count Register */
339#define DMA8_Y_MODIFY 0xFFC00E1C /* DMA Channel 8 Y Modify Register */
340#define DMA8_CURR_DESC_PTR 0xFFC00E20 /* DMA Channel 8 Current Descriptor Pointer Register */
341#define DMA8_CURR_ADDR 0xFFC00E24 /* DMA Channel 8 Current Address Register */
342#define DMA8_IRQ_STATUS 0xFFC00E28 /* DMA Channel 8 Interrupt/Status Register */
343#define DMA8_PERIPHERAL_MAP 0xFFC00E2C /* DMA Channel 8 Peripheral Map Register */
344#define DMA8_CURR_X_COUNT 0xFFC00E30 /* DMA Channel 8 Current X Count Register */
345#define DMA8_CURR_Y_COUNT 0xFFC00E38 /* DMA Channel 8 Current Y Count Register */
346
347#define DMA9_NEXT_DESC_PTR 0xFFC00E40 /* DMA Channel 9 Next Descriptor Pointer Register */
348#define DMA9_START_ADDR 0xFFC00E44 /* DMA Channel 9 Start Address Register */
349#define DMA9_CONFIG 0xFFC00E48 /* DMA Channel 9 Configuration Register */
350#define DMA9_X_COUNT 0xFFC00E50 /* DMA Channel 9 X Count Register */
351#define DMA9_X_MODIFY 0xFFC00E54 /* DMA Channel 9 X Modify Register */
352#define DMA9_Y_COUNT 0xFFC00E58 /* DMA Channel 9 Y Count Register */
353#define DMA9_Y_MODIFY 0xFFC00E5C /* DMA Channel 9 Y Modify Register */
354#define DMA9_CURR_DESC_PTR 0xFFC00E60 /* DMA Channel 9 Current Descriptor Pointer Register */
355#define DMA9_CURR_ADDR 0xFFC00E64 /* DMA Channel 9 Current Address Register */
356#define DMA9_IRQ_STATUS 0xFFC00E68 /* DMA Channel 9 Interrupt/Status Register */
357#define DMA9_PERIPHERAL_MAP 0xFFC00E6C /* DMA Channel 9 Peripheral Map Register */
358#define DMA9_CURR_X_COUNT 0xFFC00E70 /* DMA Channel 9 Current X Count Register */
359#define DMA9_CURR_Y_COUNT 0xFFC00E78 /* DMA Channel 9 Current Y Count Register */
360
361#define DMA10_NEXT_DESC_PTR 0xFFC00E80 /* DMA Channel 10 Next Descriptor Pointer Register */
362#define DMA10_START_ADDR 0xFFC00E84 /* DMA Channel 10 Start Address Register */
363#define DMA10_CONFIG 0xFFC00E88 /* DMA Channel 10 Configuration Register */
364#define DMA10_X_COUNT 0xFFC00E90 /* DMA Channel 10 X Count Register */
365#define DMA10_X_MODIFY 0xFFC00E94 /* DMA Channel 10 X Modify Register */
366#define DMA10_Y_COUNT 0xFFC00E98 /* DMA Channel 10 Y Count Register */
367#define DMA10_Y_MODIFY 0xFFC00E9C /* DMA Channel 10 Y Modify Register */
368#define DMA10_CURR_DESC_PTR 0xFFC00EA0 /* DMA Channel 10 Current Descriptor Pointer Register */
369#define DMA10_CURR_ADDR 0xFFC00EA4 /* DMA Channel 10 Current Address Register */
370#define DMA10_IRQ_STATUS 0xFFC00EA8 /* DMA Channel 10 Interrupt/Status Register */
371#define DMA10_PERIPHERAL_MAP 0xFFC00EAC /* DMA Channel 10 Peripheral Map Register */
372#define DMA10_CURR_X_COUNT 0xFFC00EB0 /* DMA Channel 10 Current X Count Register */
373#define DMA10_CURR_Y_COUNT 0xFFC00EB8 /* DMA Channel 10 Current Y Count Register */
374
375#define DMA11_NEXT_DESC_PTR 0xFFC00EC0 /* DMA Channel 11 Next Descriptor Pointer Register */
376#define DMA11_START_ADDR 0xFFC00EC4 /* DMA Channel 11 Start Address Register */
377#define DMA11_CONFIG 0xFFC00EC8 /* DMA Channel 11 Configuration Register */
378#define DMA11_X_COUNT 0xFFC00ED0 /* DMA Channel 11 X Count Register */
379#define DMA11_X_MODIFY 0xFFC00ED4 /* DMA Channel 11 X Modify Register */
380#define DMA11_Y_COUNT 0xFFC00ED8 /* DMA Channel 11 Y Count Register */
381#define DMA11_Y_MODIFY 0xFFC00EDC /* DMA Channel 11 Y Modify Register */
382#define DMA11_CURR_DESC_PTR 0xFFC00EE0 /* DMA Channel 11 Current Descriptor Pointer Register */
383#define DMA11_CURR_ADDR 0xFFC00EE4 /* DMA Channel 11 Current Address Register */
384#define DMA11_IRQ_STATUS 0xFFC00EE8 /* DMA Channel 11 Interrupt/Status Register */
385#define DMA11_PERIPHERAL_MAP 0xFFC00EEC /* DMA Channel 11 Peripheral Map Register */
386#define DMA11_CURR_X_COUNT 0xFFC00EF0 /* DMA Channel 11 Current X Count Register */
387#define DMA11_CURR_Y_COUNT 0xFFC00EF8 /* DMA Channel 11 Current Y Count Register */
388
389#define MDMA_D0_NEXT_DESC_PTR 0xFFC00F00 /* MemDMA Stream 0 Destination Next Descriptor Pointer Register */
390#define MDMA_D0_START_ADDR 0xFFC00F04 /* MemDMA Stream 0 Destination Start Address Register */
391#define MDMA_D0_CONFIG 0xFFC00F08 /* MemDMA Stream 0 Destination Configuration Register */
392#define MDMA_D0_X_COUNT 0xFFC00F10 /* MemDMA Stream 0 Destination X Count Register */
393#define MDMA_D0_X_MODIFY 0xFFC00F14 /* MemDMA Stream 0 Destination X Modify Register */
394#define MDMA_D0_Y_COUNT 0xFFC00F18 /* MemDMA Stream 0 Destination Y Count Register */
395#define MDMA_D0_Y_MODIFY 0xFFC00F1C /* MemDMA Stream 0 Destination Y Modify Register */
396#define MDMA_D0_CURR_DESC_PTR 0xFFC00F20 /* MemDMA Stream 0 Destination Current Descriptor Pointer Register */
397#define MDMA_D0_CURR_ADDR 0xFFC00F24 /* MemDMA Stream 0 Destination Current Address Register */
398#define MDMA_D0_IRQ_STATUS 0xFFC00F28 /* MemDMA Stream 0 Destination Interrupt/Status Register */
399#define MDMA_D0_PERIPHERAL_MAP 0xFFC00F2C /* MemDMA Stream 0 Destination Peripheral Map Register */
400#define MDMA_D0_CURR_X_COUNT 0xFFC00F30 /* MemDMA Stream 0 Destination Current X Count Register */
401#define MDMA_D0_CURR_Y_COUNT 0xFFC00F38 /* MemDMA Stream 0 Destination Current Y Count Register */
402
403#define MDMA_S0_NEXT_DESC_PTR 0xFFC00F40 /* MemDMA Stream 0 Source Next Descriptor Pointer Register */
404#define MDMA_S0_START_ADDR 0xFFC00F44 /* MemDMA Stream 0 Source Start Address Register */
405#define MDMA_S0_CONFIG 0xFFC00F48 /* MemDMA Stream 0 Source Configuration Register */
406#define MDMA_S0_X_COUNT 0xFFC00F50 /* MemDMA Stream 0 Source X Count Register */
407#define MDMA_S0_X_MODIFY 0xFFC00F54 /* MemDMA Stream 0 Source X Modify Register */
408#define MDMA_S0_Y_COUNT 0xFFC00F58 /* MemDMA Stream 0 Source Y Count Register */
409#define MDMA_S0_Y_MODIFY 0xFFC00F5C /* MemDMA Stream 0 Source Y Modify Register */
410#define MDMA_S0_CURR_DESC_PTR 0xFFC00F60 /* MemDMA Stream 0 Source Current Descriptor Pointer Register */
411#define MDMA_S0_CURR_ADDR 0xFFC00F64 /* MemDMA Stream 0 Source Current Address Register */
412#define MDMA_S0_IRQ_STATUS 0xFFC00F68 /* MemDMA Stream 0 Source Interrupt/Status Register */
413#define MDMA_S0_PERIPHERAL_MAP 0xFFC00F6C /* MemDMA Stream 0 Source Peripheral Map Register */
414#define MDMA_S0_CURR_X_COUNT 0xFFC00F70 /* MemDMA Stream 0 Source Current X Count Register */
415#define MDMA_S0_CURR_Y_COUNT 0xFFC00F78 /* MemDMA Stream 0 Source Current Y Count Register */
416
417#define MDMA_D1_NEXT_DESC_PTR 0xFFC00F80 /* MemDMA Stream 1 Destination Next Descriptor Pointer Register */
418#define MDMA_D1_START_ADDR 0xFFC00F84 /* MemDMA Stream 1 Destination Start Address Register */
419#define MDMA_D1_CONFIG 0xFFC00F88 /* MemDMA Stream 1 Destination Configuration Register */
420#define MDMA_D1_X_COUNT 0xFFC00F90 /* MemDMA Stream 1 Destination X Count Register */
421#define MDMA_D1_X_MODIFY 0xFFC00F94 /* MemDMA Stream 1 Destination X Modify Register */
422#define MDMA_D1_Y_COUNT 0xFFC00F98 /* MemDMA Stream 1 Destination Y Count Register */
423#define MDMA_D1_Y_MODIFY 0xFFC00F9C /* MemDMA Stream 1 Destination Y Modify Register */
424#define MDMA_D1_CURR_DESC_PTR 0xFFC00FA0 /* MemDMA Stream 1 Destination Current Descriptor Pointer Register */
425#define MDMA_D1_CURR_ADDR 0xFFC00FA4 /* MemDMA Stream 1 Destination Current Address Register */
426#define MDMA_D1_IRQ_STATUS 0xFFC00FA8 /* MemDMA Stream 1 Destination Interrupt/Status Register */
427#define MDMA_D1_PERIPHERAL_MAP 0xFFC00FAC /* MemDMA Stream 1 Destination Peripheral Map Register */
428#define MDMA_D1_CURR_X_COUNT 0xFFC00FB0 /* MemDMA Stream 1 Destination Current X Count Register */
429#define MDMA_D1_CURR_Y_COUNT 0xFFC00FB8 /* MemDMA Stream 1 Destination Current Y Count Register */
430
431#define MDMA_S1_NEXT_DESC_PTR 0xFFC00FC0 /* MemDMA Stream 1 Source Next Descriptor Pointer Register */
432#define MDMA_S1_START_ADDR 0xFFC00FC4 /* MemDMA Stream 1 Source Start Address Register */
433#define MDMA_S1_CONFIG 0xFFC00FC8 /* MemDMA Stream 1 Source Configuration Register */
434#define MDMA_S1_X_COUNT 0xFFC00FD0 /* MemDMA Stream 1 Source X Count Register */
435#define MDMA_S1_X_MODIFY 0xFFC00FD4 /* MemDMA Stream 1 Source X Modify Register */
436#define MDMA_S1_Y_COUNT 0xFFC00FD8 /* MemDMA Stream 1 Source Y Count Register */
437#define MDMA_S1_Y_MODIFY 0xFFC00FDC /* MemDMA Stream 1 Source Y Modify Register */
438#define MDMA_S1_CURR_DESC_PTR 0xFFC00FE0 /* MemDMA Stream 1 Source Current Descriptor Pointer Register */
439#define MDMA_S1_CURR_ADDR 0xFFC00FE4 /* MemDMA Stream 1 Source Current Address Register */
440#define MDMA_S1_IRQ_STATUS 0xFFC00FE8 /* MemDMA Stream 1 Source Interrupt/Status Register */
441#define MDMA_S1_PERIPHERAL_MAP 0xFFC00FEC /* MemDMA Stream 1 Source Peripheral Map Register */
442#define MDMA_S1_CURR_X_COUNT 0xFFC00FF0 /* MemDMA Stream 1 Source Current X Count Register */
443#define MDMA_S1_CURR_Y_COUNT 0xFFC00FF8 /* MemDMA Stream 1 Source Current Y Count Register */
444
445
446/* Parallel Peripheral Interface (0xFFC01000 - 0xFFC010FF) */
447#define PPI_CONTROL 0xFFC01000 /* PPI Control Register */
448#define PPI_STATUS 0xFFC01004 /* PPI Status Register */
449#define PPI_COUNT 0xFFC01008 /* PPI Transfer Count Register */
450#define PPI_DELAY 0xFFC0100C /* PPI Delay Count Register */
451#define PPI_FRAME 0xFFC01010 /* PPI Frame Length Register */
452
453
454/* Two-Wire Interface (0xFFC01400 - 0xFFC014FF) */
455#define TWI0_REGBASE 0xFFC01400
456#define TWI0_CLKDIV 0xFFC01400 /* Serial Clock Divider Register */
457#define TWI0_CONTROL 0xFFC01404 /* TWI Control Register */
458#define TWI0_SLAVE_CTL 0xFFC01408 /* Slave Mode Control Register */
459#define TWI0_SLAVE_STAT 0xFFC0140C /* Slave Mode Status Register */
460#define TWI0_SLAVE_ADDR 0xFFC01410 /* Slave Mode Address Register */
461#define TWI0_MASTER_CTL 0xFFC01414 /* Master Mode Control Register */
462#define TWI0_MASTER_STAT 0xFFC01418 /* Master Mode Status Register */
463#define TWI0_MASTER_ADDR 0xFFC0141C /* Master Mode Address Register */
464#define TWI0_INT_STAT 0xFFC01420 /* TWI Interrupt Status Register */
465#define TWI0_INT_MASK 0xFFC01424 /* TWI Master Interrupt Mask Register */
466#define TWI0_FIFO_CTL 0xFFC01428 /* FIFO Control Register */
467#define TWI0_FIFO_STAT 0xFFC0142C /* FIFO Status Register */
468#define TWI0_XMT_DATA8 0xFFC01480 /* FIFO Transmit Data Single Byte Register */
469#define TWI0_XMT_DATA16 0xFFC01484 /* FIFO Transmit Data Double Byte Register */
470#define TWI0_RCV_DATA8 0xFFC01488 /* FIFO Receive Data Single Byte Register */
471#define TWI0_RCV_DATA16 0xFFC0148C /* FIFO Receive Data Double Byte Register */
472
473
474/* General Purpose I/O Port G (0xFFC01500 - 0xFFC015FF) */
475#define PORTGIO 0xFFC01500 /* Port G I/O Pin State Specify Register */
476#define PORTGIO_CLEAR 0xFFC01504 /* Port G I/O Peripheral Interrupt Clear Register */
477#define PORTGIO_SET 0xFFC01508 /* Port G I/O Peripheral Interrupt Set Register */
478#define PORTGIO_TOGGLE 0xFFC0150C /* Port G I/O Pin State Toggle Register */
479#define PORTGIO_MASKA 0xFFC01510 /* Port G I/O Mask State Specify Interrupt A Register */
480#define PORTGIO_MASKA_CLEAR 0xFFC01514 /* Port G I/O Mask Disable Interrupt A Register */
481#define PORTGIO_MASKA_SET 0xFFC01518 /* Port G I/O Mask Enable Interrupt A Register */
482#define PORTGIO_MASKA_TOGGLE 0xFFC0151C /* Port G I/O Mask Toggle Enable Interrupt A Register */
483#define PORTGIO_MASKB 0xFFC01520 /* Port G I/O Mask State Specify Interrupt B Register */
484#define PORTGIO_MASKB_CLEAR 0xFFC01524 /* Port G I/O Mask Disable Interrupt B Register */
485#define PORTGIO_MASKB_SET 0xFFC01528 /* Port G I/O Mask Enable Interrupt B Register */
486#define PORTGIO_MASKB_TOGGLE 0xFFC0152C /* Port G I/O Mask Toggle Enable Interrupt B Register */
487#define PORTGIO_DIR 0xFFC01530 /* Port G I/O Direction Register */
488#define PORTGIO_POLAR 0xFFC01534 /* Port G I/O Source Polarity Register */
489#define PORTGIO_EDGE 0xFFC01538 /* Port G I/O Source Sensitivity Register */
490#define PORTGIO_BOTH 0xFFC0153C /* Port G I/O Set on BOTH Edges Register */
491#define PORTGIO_INEN 0xFFC01540 /* Port G I/O Input Enable Register */
492
493
494/* General Purpose I/O Port H (0xFFC01700 - 0xFFC017FF) */
495#define PORTHIO 0xFFC01700 /* Port H I/O Pin State Specify Register */
496#define PORTHIO_CLEAR 0xFFC01704 /* Port H I/O Peripheral Interrupt Clear Register */
497#define PORTHIO_SET 0xFFC01708 /* Port H I/O Peripheral Interrupt Set Register */
498#define PORTHIO_TOGGLE 0xFFC0170C /* Port H I/O Pin State Toggle Register */
499#define PORTHIO_MASKA 0xFFC01710 /* Port H I/O Mask State Specify Interrupt A Register */
500#define PORTHIO_MASKA_CLEAR 0xFFC01714 /* Port H I/O Mask Disable Interrupt A Register */
501#define PORTHIO_MASKA_SET 0xFFC01718 /* Port H I/O Mask Enable Interrupt A Register */
502#define PORTHIO_MASKA_TOGGLE 0xFFC0171C /* Port H I/O Mask Toggle Enable Interrupt A Register */
503#define PORTHIO_MASKB 0xFFC01720 /* Port H I/O Mask State Specify Interrupt B Register */
504#define PORTHIO_MASKB_CLEAR 0xFFC01724 /* Port H I/O Mask Disable Interrupt B Register */
505#define PORTHIO_MASKB_SET 0xFFC01728 /* Port H I/O Mask Enable Interrupt B Register */
506#define PORTHIO_MASKB_TOGGLE 0xFFC0172C /* Port H I/O Mask Toggle Enable Interrupt B Register */
507#define PORTHIO_DIR 0xFFC01730 /* Port H I/O Direction Register */
508#define PORTHIO_POLAR 0xFFC01734 /* Port H I/O Source Polarity Register */
509#define PORTHIO_EDGE 0xFFC01738 /* Port H I/O Source Sensitivity Register */
510#define PORTHIO_BOTH 0xFFC0173C /* Port H I/O Set on BOTH Edges Register */
511#define PORTHIO_INEN 0xFFC01740 /* Port H I/O Input Enable Register */
512
513
514/* UART1 Controller (0xFFC02000 - 0xFFC020FF) */
515#define UART1_THR 0xFFC02000 /* Transmit Holding register */
516#define UART1_RBR 0xFFC02000 /* Receive Buffer register */
517#define UART1_DLL 0xFFC02000 /* Divisor Latch (Low-Byte) */
518#define UART1_IER 0xFFC02004 /* Interrupt Enable Register */
519#define UART1_DLH 0xFFC02004 /* Divisor Latch (High-Byte) */
520#define UART1_IIR 0xFFC02008 /* Interrupt Identification Register */
521#define UART1_LCR 0xFFC0200C /* Line Control Register */
522#define UART1_MCR 0xFFC02010 /* Modem Control Register */
523#define UART1_LSR 0xFFC02014 /* Line Status Register */
524#define UART1_MSR 0xFFC02018 /* Modem Status Register */
525#define UART1_SCR 0xFFC0201C /* SCR Scratch Register */
526#define UART1_GCTL 0xFFC02024 /* Global Control Register */
527
528
529/* Omit CAN register sets from the defBF534.h (CAN is not in the ADSP-BF52x processor) */
530
531/* Pin Control Registers (0xFFC03200 - 0xFFC032FF) */
532#define PORTF_FER 0xFFC03200 /* Port F Function Enable Register (Alternate/Flag*) */
533#define PORTG_FER 0xFFC03204 /* Port G Function Enable Register (Alternate/Flag*) */
534#define PORTH_FER 0xFFC03208 /* Port H Function Enable Register (Alternate/Flag*) */
535#define BFIN_PORT_MUX 0xFFC0320C /* Port Multiplexer Control Register */
536
537
538/* Handshake MDMA Registers (0xFFC03300 - 0xFFC033FF) */
539#define HMDMA0_CONTROL 0xFFC03300 /* Handshake MDMA0 Control Register */
540#define HMDMA0_ECINIT 0xFFC03304 /* HMDMA0 Initial Edge Count Register */
541#define HMDMA0_BCINIT 0xFFC03308 /* HMDMA0 Initial Block Count Register */
542#define HMDMA0_ECURGENT 0xFFC0330C /* HMDMA0 Urgent Edge Count Threshold Register */
543#define HMDMA0_ECOVERFLOW 0xFFC03310 /* HMDMA0 Edge Count Overflow Interrupt Register */
544#define HMDMA0_ECOUNT 0xFFC03314 /* HMDMA0 Current Edge Count Register */
545#define HMDMA0_BCOUNT 0xFFC03318 /* HMDMA0 Current Block Count Register */
546
547#define HMDMA1_CONTROL 0xFFC03340 /* Handshake MDMA1 Control Register */
548#define HMDMA1_ECINIT 0xFFC03344 /* HMDMA1 Initial Edge Count Register */
549#define HMDMA1_BCINIT 0xFFC03348 /* HMDMA1 Initial Block Count Register */
550#define HMDMA1_ECURGENT 0xFFC0334C /* HMDMA1 Urgent Edge Count Threshold Register */
551#define HMDMA1_ECOVERFLOW 0xFFC03350 /* HMDMA1 Edge Count Overflow Interrupt Register */
552#define HMDMA1_ECOUNT 0xFFC03354 /* HMDMA1 Current Edge Count Register */
553#define HMDMA1_BCOUNT 0xFFC03358 /* HMDMA1 Current Block Count Register */
554
555/* GPIO PIN mux (0xFFC03210 - 0xFFC03288) */
556#define PORTF_MUX 0xFFC03210 /* Port F mux control */
557#define PORTG_MUX 0xFFC03214 /* Port G mux control */
558#define PORTH_MUX 0xFFC03218 /* Port H mux control */
559#define PORTF_DRIVE 0xFFC03220 /* Port F drive strength control */
560#define PORTG_DRIVE 0xFFC03224 /* Port G drive strength control */
561#define PORTH_DRIVE 0xFFC03228 /* Port H drive strength control */
562#define PORTF_SLEW 0xFFC03230 /* Port F slew control */
563#define PORTG_SLEW 0xFFC03234 /* Port G slew control */
564#define PORTH_SLEW 0xFFC03238 /* Port H slew control */
565#define PORTF_HYSTERISIS 0xFFC03240 /* Port F Schmitt trigger control */
566#define PORTG_HYSTERISIS 0xFFC03244 /* Port G Schmitt trigger control */
567#define PORTH_HYSTERISIS 0xFFC03248 /* Port H Schmitt trigger control */
568#define MISCPORT_DRIVE 0xFFC03280 /* Misc Port drive strength control */
569#define MISCPORT_SLEW 0xFFC03284 /* Misc Port slew control */
570#define MISCPORT_HYSTERISIS 0xFFC03288 /* Misc Port Schmitt trigger control */
571
572
573/***********************************************************************************
574** System MMR Register Bits And Macros
575**
576** Disclaimer: All macros are intended to make C and Assembly code more readable.
577** Use these macros carefully, as any that do left shifts for field
578** depositing will result in the lower order bits being destroyed. Any
579** macro that shifts left to properly position the bit-field should be
580** used as part of an OR to initialize a register and NOT as a dynamic
581** modifier UNLESS the lower order bits are saved and ORed back in when
582** the macro is used.
583*************************************************************************************/
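
A minimal sketch of the pattern this disclaimer describes, assuming a hypothetical MMIO16() volatile accessor (real kernel code would use the bfin_read()/bfin_write() helpers generated in the matching cdef header). The PPI data-length field serves as the example of a deposited bit-field:

    /* Hypothetical 16-bit MMR accessor, for illustration only. */
    #define MMIO16(addr) (*(volatile unsigned short *)(addr))

    /* Safe: field-depositing macros ORed together into one full write. */
    MMIO16(PPI_CONTROL) = PORT_EN | PACK_EN | DLEN_16;

    /* Safe dynamic modify: save the other bits and OR them back in. */
    unsigned short ctl = MMIO16(PPI_CONTROL);
    ctl = (ctl & ~DLENGTH) | DLEN_10;   /* redeposit only the DLEN field */
    MMIO16(PPI_CONTROL) = ctl;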
584
585/* CHIPID Masks */
586#define CHIPID_VERSION 0xF0000000
587#define CHIPID_FAMILY 0x0FFFF000
588#define CHIPID_MANUFACTURE 0x00000FFE
589
590/* SWRST Masks */
591#define SYSTEM_RESET 0x0007 /* Initiates A System Software Reset */
592#define DOUBLE_FAULT 0x0008 /* Core Double Fault Causes Reset */
593#define RESET_DOUBLE 0x2000 /* SW Reset Generated By Core Double-Fault */
594#define RESET_WDOG 0x4000 /* SW Reset Generated By Watchdog Timer */
595#define RESET_SOFTWARE 0x8000 /* SW Reset Occurred Since Last Read Of SWRST */
596
597/* SYSCR Masks */
598#define BMODE 0x0007 /* Boot Mode - Latched During HW Reset From Mode Pins */
599#define NOBOOT 0x0010 /* Execute From L1 or ASYNC Bank 0 When BMODE = 0 */
600
601
602/* ************* SYSTEM INTERRUPT CONTROLLER MASKS *************************************/
603/* Peripheral Masks For SIC_ISR, SIC_IWR, SIC_IMASK */
604
605#if 0
606#define IRQ_PLL_WAKEUP 0x00000001 /* PLL Wakeup Interrupt */
607
608#define IRQ_ERROR1 0x00000002 /* Error Interrupt (DMA, DMARx Block, DMARx Overflow) */
609#define IRQ_ERROR2 0x00000004 /* Error Interrupt (CAN, Ethernet, SPORTx, PPI, SPI, UARTx) */
610#define IRQ_RTC 0x00000008 /* Real Time Clock Interrupt */
611#define IRQ_DMA0 0x00000010 /* DMA Channel 0 (PPI) Interrupt */
612#define IRQ_DMA3 0x00000020 /* DMA Channel 3 (SPORT0 RX) Interrupt */
613#define IRQ_DMA4 0x00000040 /* DMA Channel 4 (SPORT0 TX) Interrupt */
614#define IRQ_DMA5 0x00000080 /* DMA Channel 5 (SPORT1 RX) Interrupt */
615
616#define IRQ_DMA6 0x00000100 /* DMA Channel 6 (SPORT1 TX) Interrupt */
617#define IRQ_TWI 0x00000200 /* TWI Interrupt */
618#define IRQ_DMA7 0x00000400 /* DMA Channel 7 (SPI) Interrupt */
619#define IRQ_DMA8 0x00000800 /* DMA Channel 8 (UART0 RX) Interrupt */
620#define IRQ_DMA9 0x00001000 /* DMA Channel 9 (UART0 TX) Interrupt */
621#define IRQ_DMA10 0x00002000 /* DMA Channel 10 (UART1 RX) Interrupt */
622#define IRQ_DMA11 0x00004000 /* DMA Channel 11 (UART1 TX) Interrupt */
623#define IRQ_CAN_RX 0x00008000 /* CAN Receive Interrupt */
624
625#define IRQ_CAN_TX 0x00010000 /* CAN Transmit Interrupt */
626#define IRQ_DMA1 0x00020000 /* DMA Channel 1 (Ethernet RX) Interrupt */
627#define IRQ_PFA_PORTH 0x00020000 /* PF Port H (PF47:32) Interrupt A */
628#define IRQ_DMA2 0x00040000 /* DMA Channel 2 (Ethernet TX) Interrupt */
629#define IRQ_PFB_PORTH 0x00040000 /* PF Port H (PF47:32) Interrupt B */
630#define IRQ_TIMER0 0x00080000 /* Timer 0 Interrupt */
631#define IRQ_TIMER1 0x00100000 /* Timer 1 Interrupt */
632#define IRQ_TIMER2 0x00200000 /* Timer 2 Interrupt */
633#define IRQ_TIMER3 0x00400000 /* Timer 3 Interrupt */
634#define IRQ_TIMER4 0x00800000 /* Timer 4 Interrupt */
635
636#define IRQ_TIMER5 0x01000000 /* Timer 5 Interrupt */
637#define IRQ_TIMER6 0x02000000 /* Timer 6 Interrupt */
638#define IRQ_TIMER7 0x04000000 /* Timer 7 Interrupt */
639#define IRQ_PFA_PORTFG 0x08000000 /* PF Ports F&G (PF31:0) Interrupt A */
640#define IRQ_PFB_PORTF 0x80000000 /* PF Port F (PF15:0) Interrupt B */
641#define IRQ_DMA12 0x20000000 /* DMA Channels 12 (MDMA1 Source) RX Interrupt */
642#define IRQ_DMA13 0x20000000 /* DMA Channels 13 (MDMA1 Destination) TX Interrupt */
643#define IRQ_DMA14 0x40000000 /* DMA Channels 14 (MDMA0 Source) RX Interrupt */
644#define IRQ_DMA15 0x40000000 /* DMA Channels 15 (MDMA0 Destination) TX Interrupt */
645#define IRQ_WDOG 0x80000000 /* Software Watchdog Timer Interrupt */
646#define IRQ_PFB_PORTG 0x10000000 /* PF Port G (PF31:16) Interrupt B */
647#endif
648
649/* SIC_IAR0 Macros */
650#define P0_IVG(x) (((x)&0xF)-7) /* Peripheral #0 assigned IVG #x */
651#define P1_IVG(x) ((((x)&0xF)-7) << 0x4) /* Peripheral #1 assigned IVG #x */
652#define P2_IVG(x) ((((x)&0xF)-7) << 0x8) /* Peripheral #2 assigned IVG #x */
653#define P3_IVG(x) ((((x)&0xF)-7) << 0xC) /* Peripheral #3 assigned IVG #x */
654#define P4_IVG(x) ((((x)&0xF)-7) << 0x10) /* Peripheral #4 assigned IVG #x */
655#define P5_IVG(x) ((((x)&0xF)-7) << 0x14) /* Peripheral #5 assigned IVG #x */
656#define P6_IVG(x) ((((x)&0xF)-7) << 0x18) /* Peripheral #6 assigned IVG #x */
657#define P7_IVG(x) ((((x)&0xF)-7) << 0x1C) /* Peripheral #7 assigned IVG #x */
658
659/* SIC_IAR1 Macros */
660#define P8_IVG(x) (((x)&0xF)-7) /* Peripheral #8 assigned IVG #x */
661#define P9_IVG(x) ((((x)&0xF)-7) << 0x4) /* Peripheral #9 assigned IVG #x */
662#define P10_IVG(x) ((((x)&0xF)-7) << 0x8) /* Peripheral #10 assigned IVG #x */
663#define P11_IVG(x) ((((x)&0xF)-7) << 0xC) /* Peripheral #11 assigned IVG #x */
664#define P12_IVG(x) ((((x)&0xF)-7) << 0x10) /* Peripheral #12 assigned IVG #x */
665#define P13_IVG(x) ((((x)&0xF)-7) << 0x14) /* Peripheral #13 assigned IVG #x */
666#define P14_IVG(x) ((((x)&0xF)-7) << 0x18) /* Peripheral #14 assigned IVG #x */
667#define P15_IVG(x) ((((x)&0xF)-7) << 0x1C) /* Peripheral #15 assigned IVG #x */
668
669/* SIC_IAR2 Macros */
670#define P16_IVG(x) (((x)&0xF)-7) /* Peripheral #16 assigned IVG #x */
671#define P17_IVG(x) ((((x)&0xF)-7) << 0x4) /* Peripheral #17 assigned IVG #x */
672#define P18_IVG(x) ((((x)&0xF)-7) << 0x8) /* Peripheral #18 assigned IVG #x */
673#define P19_IVG(x) ((((x)&0xF)-7) << 0xC) /* Peripheral #19 assigned IVG #x */
674#define P20_IVG(x) ((((x)&0xF)-7) << 0x10) /* Peripheral #20 assigned IVG #x */
675#define P21_IVG(x) ((((x)&0xF)-7) << 0x14) /* Peripheral #21 assigned IVG #x */
676#define P22_IVG(x) ((((x)&0xF)-7) << 0x18) /* Peripheral #22 assigned IVG #x */
677#define P23_IVG(x) ((((x)&0xF)-7) << 0x1C) /* Peripheral #23 assigned IVG #x */
678
679/* SIC_IAR3 Macros */
680#define P24_IVG(x) (((x)&0xF)-7) /* Peripheral #24 assigned IVG #x */
681#define P25_IVG(x) ((((x)&0xF)-7) << 0x4) /* Peripheral #25 assigned IVG #x */
682#define P26_IVG(x) ((((x)&0xF)-7) << 0x8) /* Peripheral #26 assigned IVG #x */
683#define P27_IVG(x) ((((x)&0xF)-7) << 0xC) /* Peripheral #27 assigned IVG #x */
684#define P28_IVG(x) ((((x)&0xF)-7) << 0x10) /* Peripheral #28 assigned IVG #x */
685#define P29_IVG(x) ((((x)&0xF)-7) << 0x14) /* Peripheral #29 assigned IVG #x */
686#define P30_IVG(x) ((((x)&0xF)-7) << 0x18) /* Peripheral #30 assigned IVG #x */
687#define P31_IVG(x) ((((x)&0xF)-7) << 0x1C) /* Peripheral #31 assigned IVG #x */
688
689
690/* SIC_IMASK Masks */
691#define SIC_UNMASK_ALL 0x00000000 /* Unmask all peripheral interrupts */
692#define SIC_MASK_ALL 0xFFFFFFFF /* Mask all peripheral interrupts */
693#define SIC_MASK(x) (1 << ((x)&0x1F)) /* Mask Peripheral #x interrupt */
694#define SIC_UNMASK(x) (0xFFFFFFFF ^ (1 << ((x)&0x1F))) /* Unmask Peripheral #x interrupt */
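
As a sketch (reusing the hypothetical accessor idea from above, now 32-bit; SIC_IMASK0 is the BF52x interrupt mask register defined later in this header family):

    /* Hypothetical 32-bit MMR accessor, for illustration only. */
    #define MMIO32(addr) (*(volatile unsigned long *)(addr))

    MMIO32(SIC_IMASK0) |= SIC_MASK(10);    /* mask peripheral #10 only   */
    MMIO32(SIC_IMASK0) &= SIC_UNMASK(10);  /* unmask peripheral #10 only */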
695
696/* SIC_IWR Masks */
697#define IWR_DISABLE_ALL 0x00000000 /* Wakeup Disable all peripherals */
698#define IWR_ENABLE_ALL 0xFFFFFFFF /* Wakeup Enable all peripherals */
699#define IWR_ENABLE(x) (1 << ((x)&0x1F)) /* Wakeup Enable Peripheral #x */
700#define IWR_DISABLE(x) (0xFFFFFFFF ^ (1 << ((x)&0x1F))) /* Wakeup Disable Peripheral #x */
701
702/* **************** GENERAL PURPOSE TIMER MASKS **********************/
703/* TIMER_ENABLE Masks */
704#define TIMEN0 0x0001 /* Enable Timer 0 */
705#define TIMEN1 0x0002 /* Enable Timer 1 */
706#define TIMEN2 0x0004 /* Enable Timer 2 */
707#define TIMEN3 0x0008 /* Enable Timer 3 */
708#define TIMEN4 0x0010 /* Enable Timer 4 */
709#define TIMEN5 0x0020 /* Enable Timer 5 */
710#define TIMEN6 0x0040 /* Enable Timer 6 */
711#define TIMEN7 0x0080 /* Enable Timer 7 */
712
713/* TIMER_DISABLE Masks */
714#define TIMDIS0 TIMEN0 /* Disable Timer 0 */
715#define TIMDIS1 TIMEN1 /* Disable Timer 1 */
716#define TIMDIS2 TIMEN2 /* Disable Timer 2 */
717#define TIMDIS3 TIMEN3 /* Disable Timer 3 */
718#define TIMDIS4 TIMEN4 /* Disable Timer 4 */
719#define TIMDIS5 TIMEN5 /* Disable Timer 5 */
720#define TIMDIS6 TIMEN6 /* Disable Timer 6 */
721#define TIMDIS7 TIMEN7 /* Disable Timer 7 */
722
723/* TIMER_STATUS Masks */
724#define TIMIL0 0x00000001 /* Timer 0 Interrupt */
725#define TIMIL1 0x00000002 /* Timer 1 Interrupt */
726#define TIMIL2 0x00000004 /* Timer 2 Interrupt */
727#define TIMIL3 0x00000008 /* Timer 3 Interrupt */
728#define TOVF_ERR0 0x00000010 /* Timer 0 Counter Overflow */
729#define TOVF_ERR1 0x00000020 /* Timer 1 Counter Overflow */
730#define TOVF_ERR2 0x00000040 /* Timer 2 Counter Overflow */
731#define TOVF_ERR3 0x00000080 /* Timer 3 Counter Overflow */
732#define TRUN0 0x00001000 /* Timer 0 Slave Enable Status */
733#define TRUN1 0x00002000 /* Timer 1 Slave Enable Status */
734#define TRUN2 0x00004000 /* Timer 2 Slave Enable Status */
735#define TRUN3 0x00008000 /* Timer 3 Slave Enable Status */
736#define TIMIL4 0x00010000 /* Timer 4 Interrupt */
737#define TIMIL5 0x00020000 /* Timer 5 Interrupt */
738#define TIMIL6 0x00040000 /* Timer 6 Interrupt */
739#define TIMIL7 0x00080000 /* Timer 7 Interrupt */
740#define TOVF_ERR4 0x00100000 /* Timer 4 Counter Overflow */
741#define TOVF_ERR5 0x00200000 /* Timer 5 Counter Overflow */
742#define TOVF_ERR6 0x00400000 /* Timer 6 Counter Overflow */
743#define TOVF_ERR7 0x00800000 /* Timer 7 Counter Overflow */
744#define TRUN4 0x10000000 /* Timer 4 Slave Enable Status */
745#define TRUN5 0x20000000 /* Timer 5 Slave Enable Status */
746#define TRUN6 0x40000000 /* Timer 6 Slave Enable Status */
747#define TRUN7 0x80000000 /* Timer 7 Slave Enable Status */
748
749/* Alternate Deprecated Macros Provided For Backwards Code Compatibility */
750#define TOVL_ERR0 TOVF_ERR0
751#define TOVL_ERR1 TOVF_ERR1
752#define TOVL_ERR2 TOVF_ERR2
753#define TOVL_ERR3 TOVF_ERR3
754#define TOVL_ERR4 TOVF_ERR4
755#define TOVL_ERR5 TOVF_ERR5
756#define TOVL_ERR6 TOVF_ERR6
757#define TOVL_ERR7 TOVF_ERR7
758
759/* TIMERx_CONFIG Masks */
760#define PWM_OUT 0x0001 /* Pulse-Width Modulation Output Mode */
761#define WDTH_CAP 0x0002 /* Width Capture Input Mode */
762#define EXT_CLK 0x0003 /* External Clock Mode */
763#define PULSE_HI 0x0004 /* Action Pulse (Positive/Negative*) */
764#define PERIOD_CNT 0x0008 /* Period Count */
765#define IRQ_ENA 0x0010 /* Interrupt Request Enable */
766#define TIN_SEL 0x0020 /* Timer Input Select */
767#define OUT_DIS 0x0040 /* Output Pad Disable */
768#define CLK_SEL 0x0080 /* Timer Clock Select */
769#define TOGGLE_HI 0x0100 /* PWM_OUT PULSE_HI Toggle Mode */
770#define EMU_RUN 0x0200 /* Emulation Behavior Select */
771#define ERR_TYP 0xC000 /* Error Type */
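
A sketch of a typical configuration, assuming the usual TIMER0_CONFIG, TIMER_ENABLE, and TIMER_DISABLE register names from this header family (their addresses lie outside this excerpt) and the hypothetical MMIO16() accessor from above:

    MMIO16(TIMER0_CONFIG) = PWM_OUT | PERIOD_CNT | IRQ_ENA;
    MMIO16(TIMER_ENABLE)  = TIMEN0;   /* write-one-to-enable timer 0  */
    /* ... */
    MMIO16(TIMER_DISABLE) = TIMDIS0;  /* write-one-to-disable timer 0 */

Enable and disable live in separate write-one registers with the same bit layout, which is why each TIMDISx above simply aliases TIMENx.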
772
773/* ********************* ASYNCHRONOUS MEMORY CONTROLLER MASKS *************************/
774/* EBIU_AMGCTL Masks */
775#define AMCKEN 0x0001 /* Enable CLKOUT */
776#define AMBEN_NONE 0x0000 /* All Banks Disabled */
777#define AMBEN_B0 0x0002 /* Enable Async Memory Bank 0 only */
778#define AMBEN_B0_B1 0x0004 /* Enable Async Memory Banks 0 & 1 only */
779#define AMBEN_B0_B1_B2 0x0006 /* Enable Async Memory Banks 0, 1, and 2 */
780#define AMBEN_ALL 0x0008 /* Enable Async Memory Banks (all) 0, 1, 2, and 3 */
781
782/* EBIU_AMBCTL0 Masks */
783#define B0RDYEN 0x00000001 /* Bank 0 (B0) RDY Enable */
784#define B0RDYPOL 0x00000002 /* B0 RDY Active High */
785#define B0TT_1 0x00000004 /* B0 Transition Time (Read to Write) = 1 cycle */
786#define B0TT_2 0x00000008 /* B0 Transition Time (Read to Write) = 2 cycles */
787#define B0TT_3 0x0000000C /* B0 Transition Time (Read to Write) = 3 cycles */
788#define B0TT_4 0x00000000 /* B0 Transition Time (Read to Write) = 4 cycles */
789#define B0ST_1 0x00000010 /* B0 Setup Time (AOE to Read/Write) = 1 cycle */
790#define B0ST_2 0x00000020 /* B0 Setup Time (AOE to Read/Write) = 2 cycles */
791#define B0ST_3 0x00000030 /* B0 Setup Time (AOE to Read/Write) = 3 cycles */
792#define B0ST_4 0x00000000 /* B0 Setup Time (AOE to Read/Write) = 4 cycles */
793#define B0HT_1 0x00000040 /* B0 Hold Time (~Read/Write to ~AOE) = 1 cycle */
794#define B0HT_2 0x00000080 /* B0 Hold Time (~Read/Write to ~AOE) = 2 cycles */
795#define B0HT_3 0x000000C0 /* B0 Hold Time (~Read/Write to ~AOE) = 3 cycles */
796#define B0HT_0 0x00000000 /* B0 Hold Time (~Read/Write to ~AOE) = 0 cycles */
797#define B0RAT_1 0x00000100 /* B0 Read Access Time = 1 cycle */
798#define B0RAT_2 0x00000200 /* B0 Read Access Time = 2 cycles */
799#define B0RAT_3 0x00000300 /* B0 Read Access Time = 3 cycles */
800#define B0RAT_4 0x00000400 /* B0 Read Access Time = 4 cycles */
801#define B0RAT_5 0x00000500 /* B0 Read Access Time = 5 cycles */
802#define B0RAT_6 0x00000600 /* B0 Read Access Time = 6 cycles */
803#define B0RAT_7 0x00000700 /* B0 Read Access Time = 7 cycles */
804#define B0RAT_8 0x00000800 /* B0 Read Access Time = 8 cycles */
805#define B0RAT_9 0x00000900 /* B0 Read Access Time = 9 cycles */
806#define B0RAT_10 0x00000A00 /* B0 Read Access Time = 10 cycles */
807#define B0RAT_11 0x00000B00 /* B0 Read Access Time = 11 cycles */
808#define B0RAT_12 0x00000C00 /* B0 Read Access Time = 12 cycles */
809#define B0RAT_13 0x00000D00 /* B0 Read Access Time = 13 cycles */
810#define B0RAT_14 0x00000E00 /* B0 Read Access Time = 14 cycles */
811#define B0RAT_15 0x00000F00 /* B0 Read Access Time = 15 cycles */
812#define B0WAT_1 0x00001000 /* B0 Write Access Time = 1 cycle */
813#define B0WAT_2 0x00002000 /* B0 Write Access Time = 2 cycles */
814#define B0WAT_3 0x00003000 /* B0 Write Access Time = 3 cycles */
815#define B0WAT_4 0x00004000 /* B0 Write Access Time = 4 cycles */
816#define B0WAT_5 0x00005000 /* B0 Write Access Time = 5 cycles */
817#define B0WAT_6 0x00006000 /* B0 Write Access Time = 6 cycles */
818#define B0WAT_7 0x00007000 /* B0 Write Access Time = 7 cycles */
819#define B0WAT_8 0x00008000 /* B0 Write Access Time = 8 cycles */
820#define B0WAT_9 0x00009000 /* B0 Write Access Time = 9 cycles */
821#define B0WAT_10 0x0000A000 /* B0 Write Access Time = 10 cycles */
822#define B0WAT_11 0x0000B000 /* B0 Write Access Time = 11 cycles */
823#define B0WAT_12 0x0000C000 /* B0 Write Access Time = 12 cycles */
824#define B0WAT_13 0x0000D000 /* B0 Write Access Time = 13 cycles */
825#define B0WAT_14 0x0000E000 /* B0 Write Access Time = 14 cycles */
826#define B0WAT_15 0x0000F000 /* B0 Write Access Time = 15 cycles */
827
828#define B1RDYEN 0x00010000 /* Bank 1 (B1) RDY Enable */
829#define B1RDYPOL 0x00020000 /* B1 RDY Active High */
830#define B1TT_1 0x00040000 /* B1 Transition Time (Read to Write) = 1 cycle */
831#define B1TT_2 0x00080000 /* B1 Transition Time (Read to Write) = 2 cycles */
832#define B1TT_3 0x000C0000 /* B1 Transition Time (Read to Write) = 3 cycles */
833#define B1TT_4 0x00000000 /* B1 Transition Time (Read to Write) = 4 cycles */
834#define B1ST_1 0x00100000 /* B1 Setup Time (AOE to Read/Write) = 1 cycle */
835#define B1ST_2 0x00200000 /* B1 Setup Time (AOE to Read/Write) = 2 cycles */
836#define B1ST_3 0x00300000 /* B1 Setup Time (AOE to Read/Write) = 3 cycles */
837#define B1ST_4 0x00000000 /* B1 Setup Time (AOE to Read/Write) = 4 cycles */
838#define B1HT_1 0x00400000 /* B1 Hold Time (~Read/Write to ~AOE) = 1 cycle */
839#define B1HT_2 0x00800000 /* B1 Hold Time (~Read/Write to ~AOE) = 2 cycles */
840#define B1HT_3 0x00C00000 /* B1 Hold Time (~Read/Write to ~AOE) = 3 cycles */
841#define B1HT_0 0x00000000 /* B1 Hold Time (~Read/Write to ~AOE) = 0 cycles */
842#define B1RAT_1 0x01000000 /* B1 Read Access Time = 1 cycle */
843#define B1RAT_2 0x02000000 /* B1 Read Access Time = 2 cycles */
844#define B1RAT_3 0x03000000 /* B1 Read Access Time = 3 cycles */
845#define B1RAT_4 0x04000000 /* B1 Read Access Time = 4 cycles */
846#define B1RAT_5 0x05000000 /* B1 Read Access Time = 5 cycles */
847#define B1RAT_6 0x06000000 /* B1 Read Access Time = 6 cycles */
848#define B1RAT_7 0x07000000 /* B1 Read Access Time = 7 cycles */
849#define B1RAT_8 0x08000000 /* B1 Read Access Time = 8 cycles */
850#define B1RAT_9 0x09000000 /* B1 Read Access Time = 9 cycles */
851#define B1RAT_10 0x0A000000 /* B1 Read Access Time = 10 cycles */
852#define B1RAT_11 0x0B000000 /* B1 Read Access Time = 11 cycles */
853#define B1RAT_12 0x0C000000 /* B1 Read Access Time = 12 cycles */
854#define B1RAT_13 0x0D000000 /* B1 Read Access Time = 13 cycles */
855#define B1RAT_14 0x0E000000 /* B1 Read Access Time = 14 cycles */
856#define B1RAT_15 0x0F000000 /* B1 Read Access Time = 15 cycles */
857#define B1WAT_1 0x10000000 /* B1 Write Access Time = 1 cycle */
858#define B1WAT_2 0x20000000 /* B1 Write Access Time = 2 cycles */
859#define B1WAT_3 0x30000000 /* B1 Write Access Time = 3 cycles */
860#define B1WAT_4 0x40000000 /* B1 Write Access Time = 4 cycles */
861#define B1WAT_5 0x50000000 /* B1 Write Access Time = 5 cycles */
862#define B1WAT_6 0x60000000 /* B1 Write Access Time = 6 cycles */
863#define B1WAT_7 0x70000000 /* B1 Write Access Time = 7 cycles */
864#define B1WAT_8 0x80000000 /* B1 Write Access Time = 8 cycles */
865#define B1WAT_9 0x90000000 /* B1 Write Access Time = 9 cycles */
866#define B1WAT_10 0xA0000000 /* B1 Write Access Time = 10 cycles */
867#define B1WAT_11 0xB0000000 /* B1 Write Access Time = 11 cycles */
868#define B1WAT_12 0xC0000000 /* B1 Write Access Time = 12 cycles */
869#define B1WAT_13 0xD0000000 /* B1 Write Access Time = 13 cycles */
870#define B1WAT_14 0xE0000000 /* B1 Write Access Time = 14 cycles */
871#define B1WAT_15 0xF0000000 /* B1 Write Access Time = 15 cycles */
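
Because the B0* fields occupy bits 15:0 and the B1* fields bits 31:16, a complete EBIU_AMBCTL0 value is composed with one OR chain, per the disclaimer above (a sketch; EBIU_AMBCTL0 itself is defined outside this excerpt, and MMIO32() is the hypothetical accessor from earlier):

    MMIO32(EBIU_AMBCTL0) = B0RDYEN | B0TT_1 | B0ST_2 | B0HT_1 | B0RAT_8 | B0WAT_8
                         | B1TT_1 | B1ST_2 | B1HT_1 | B1RAT_8 | B1WAT_8;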
872
873/* EBIU_AMBCTL1 Masks */
874#define B2RDYEN 0x00000001 /* Bank 2 (B2) RDY Enable */
875#define B2RDYPOL 0x00000002 /* B2 RDY Active High */
876#define B2TT_1 0x00000004 /* B2 Transition Time (Read to Write) = 1 cycle */
877#define B2TT_2 0x00000008 /* B2 Transition Time (Read to Write) = 2 cycles */
878#define B2TT_3 0x0000000C /* B2 Transition Time (Read to Write) = 3 cycles */
879#define B2TT_4 0x00000000 /* B2 Transition Time (Read to Write) = 4 cycles */
880#define B2ST_1 0x00000010 /* B2 Setup Time (AOE to Read/Write) = 1 cycle */
881#define B2ST_2 0x00000020 /* B2 Setup Time (AOE to Read/Write) = 2 cycles */
882#define B2ST_3 0x00000030 /* B2 Setup Time (AOE to Read/Write) = 3 cycles */
883#define B2ST_4 0x00000000 /* B2 Setup Time (AOE to Read/Write) = 4 cycles */
884#define B2HT_1 0x00000040 /* B2 Hold Time (~Read/Write to ~AOE) = 1 cycle */
885#define B2HT_2 0x00000080 /* B2 Hold Time (~Read/Write to ~AOE) = 2 cycles */
886#define B2HT_3 0x000000C0 /* B2 Hold Time (~Read/Write to ~AOE) = 3 cycles */
887#define B2HT_0 0x00000000 /* B2 Hold Time (~Read/Write to ~AOE) = 0 cycles */
888#define B2RAT_1 0x00000100 /* B2 Read Access Time = 1 cycle */
889#define B2RAT_2 0x00000200 /* B2 Read Access Time = 2 cycles */
890#define B2RAT_3 0x00000300 /* B2 Read Access Time = 3 cycles */
891#define B2RAT_4 0x00000400 /* B2 Read Access Time = 4 cycles */
892#define B2RAT_5 0x00000500 /* B2 Read Access Time = 5 cycles */
893#define B2RAT_6 0x00000600 /* B2 Read Access Time = 6 cycles */
894#define B2RAT_7 0x00000700 /* B2 Read Access Time = 7 cycles */
895#define B2RAT_8 0x00000800 /* B2 Read Access Time = 8 cycles */
896#define B2RAT_9 0x00000900 /* B2 Read Access Time = 9 cycles */
897#define B2RAT_10 0x00000A00 /* B2 Read Access Time = 10 cycles */
898#define B2RAT_11 0x00000B00 /* B2 Read Access Time = 11 cycles */
899#define B2RAT_12 0x00000C00 /* B2 Read Access Time = 12 cycles */
900#define B2RAT_13 0x00000D00 /* B2 Read Access Time = 13 cycles */
901#define B2RAT_14 0x00000E00 /* B2 Read Access Time = 14 cycles */
902#define B2RAT_15 0x00000F00 /* B2 Read Access Time = 15 cycles */
903#define B2WAT_1 0x00001000 /* B2 Write Access Time = 1 cycle */
904#define B2WAT_2 0x00002000 /* B2 Write Access Time = 2 cycles */
905#define B2WAT_3 0x00003000 /* B2 Write Access Time = 3 cycles */
906#define B2WAT_4 0x00004000 /* B2 Write Access Time = 4 cycles */
907#define B2WAT_5 0x00005000 /* B2 Write Access Time = 5 cycles */
908#define B2WAT_6 0x00006000 /* B2 Write Access Time = 6 cycles */
909#define B2WAT_7 0x00007000 /* B2 Write Access Time = 7 cycles */
910#define B2WAT_8 0x00008000 /* B2 Write Access Time = 8 cycles */
911#define B2WAT_9 0x00009000 /* B2 Write Access Time = 9 cycles */
912#define B2WAT_10 0x0000A000 /* B2 Write Access Time = 10 cycles */
913#define B2WAT_11 0x0000B000 /* B2 Write Access Time = 11 cycles */
914#define B2WAT_12 0x0000C000 /* B2 Write Access Time = 12 cycles */
915#define B2WAT_13 0x0000D000 /* B2 Write Access Time = 13 cycles */
916#define B2WAT_14 0x0000E000 /* B2 Write Access Time = 14 cycles */
917#define B2WAT_15 0x0000F000 /* B2 Write Access Time = 15 cycles */
918
919#define B3RDYEN 0x00010000 /* Bank 3 (B3) RDY Enable */
920#define B3RDYPOL 0x00020000 /* B3 RDY Active High */
921#define B3TT_1 0x00040000 /* B3 Transition Time (Read to Write) = 1 cycle */
922#define B3TT_2 0x00080000 /* B3 Transition Time (Read to Write) = 2 cycles */
923#define B3TT_3 0x000C0000 /* B3 Transition Time (Read to Write) = 3 cycles */
924#define B3TT_4 0x00000000 /* B3 Transition Time (Read to Write) = 4 cycles */
925#define B3ST_1 0x00100000 /* B3 Setup Time (AOE to Read/Write) = 1 cycle */
926#define B3ST_2 0x00200000 /* B3 Setup Time (AOE to Read/Write) = 2 cycles */
927#define B3ST_3 0x00300000 /* B3 Setup Time (AOE to Read/Write) = 3 cycles */
928#define B3ST_4 0x00000000 /* B3 Setup Time (AOE to Read/Write) = 4 cycles */
929#define B3HT_1 0x00400000 /* B3 Hold Time (~Read/Write to ~AOE) = 1 cycle */
930#define B3HT_2 0x00800000 /* B3 Hold Time (~Read/Write to ~AOE) = 2 cycles */
931#define B3HT_3 0x00C00000 /* B3 Hold Time (~Read/Write to ~AOE) = 3 cycles */
932#define B3HT_0 0x00000000 /* B3 Hold Time (~Read/Write to ~AOE) = 0 cycles */
933#define B3RAT_1 0x01000000 /* B3 Read Access Time = 1 cycle */
934#define B3RAT_2 0x02000000 /* B3 Read Access Time = 2 cycles */
935#define B3RAT_3 0x03000000 /* B3 Read Access Time = 3 cycles */
936#define B3RAT_4 0x04000000 /* B3 Read Access Time = 4 cycles */
937#define B3RAT_5 0x05000000 /* B3 Read Access Time = 5 cycles */
938#define B3RAT_6 0x06000000 /* B3 Read Access Time = 6 cycles */
939#define B3RAT_7 0x07000000 /* B3 Read Access Time = 7 cycles */
940#define B3RAT_8 0x08000000 /* B3 Read Access Time = 8 cycles */
941#define B3RAT_9 0x09000000 /* B3 Read Access Time = 9 cycles */
942#define B3RAT_10 0x0A000000 /* B3 Read Access Time = 10 cycles */
943#define B3RAT_11 0x0B000000 /* B3 Read Access Time = 11 cycles */
944#define B3RAT_12 0x0C000000 /* B3 Read Access Time = 12 cycles */
945#define B3RAT_13 0x0D000000 /* B3 Read Access Time = 13 cycles */
946#define B3RAT_14 0x0E000000 /* B3 Read Access Time = 14 cycles */
947#define B3RAT_15 0x0F000000 /* B3 Read Access Time = 15 cycles */
948#define B3WAT_1 0x10000000 /* B3 Write Access Time = 1 cycle */
949#define B3WAT_2 0x20000000 /* B3 Write Access Time = 2 cycles */
950#define B3WAT_3 0x30000000 /* B3 Write Access Time = 3 cycles */
951#define B3WAT_4 0x40000000 /* B3 Write Access Time = 4 cycles */
952#define B3WAT_5 0x50000000 /* B3 Write Access Time = 5 cycles */
953#define B3WAT_6 0x60000000 /* B3 Write Access Time = 6 cycles */
954#define B3WAT_7 0x70000000 /* B3 Write Access Time = 7 cycles */
955#define B3WAT_8 0x80000000 /* B3 Write Access Time = 8 cycles */
956#define B3WAT_9 0x90000000 /* B3 Write Access Time = 9 cycles */
957#define B3WAT_10 0xA0000000 /* B3 Write Access Time = 10 cycles */
958#define B3WAT_11 0xB0000000 /* B3 Write Access Time = 11 cycles */
959#define B3WAT_12 0xC0000000 /* B3 Write Access Time = 12 cycles */
960#define B3WAT_13 0xD0000000 /* B3 Write Access Time = 13 cycles */
961#define B3WAT_14 0xE0000000 /* B3 Write Access Time = 14 cycles */
962#define B3WAT_15 0xF0000000 /* B3 Write Access Time = 15 cycles */
963
964
965/* ********************** SDRAM CONTROLLER MASKS **********************************************/
966/* EBIU_SDGCTL Masks */
967#define SCTLE 0x00000001 /* Enable SDRAM Signals */
968#define CL_2 0x00000008 /* SDRAM CAS Latency = 2 cycles */
969#define CL_3 0x0000000C /* SDRAM CAS Latency = 3 cycles */
970#define PASR_ALL 0x00000000 /* All 4 SDRAM Banks Refreshed In Self-Refresh */
971#define PASR_B0_B1 0x00000010 /* SDRAM Banks 0 and 1 Are Refreshed In Self-Refresh */
972#define PASR_B0 0x00000020 /* Only SDRAM Bank 0 Is Refreshed In Self-Refresh */
973#define TRAS_1 0x00000040 /* SDRAM tRAS = 1 cycle */
974#define TRAS_2 0x00000080 /* SDRAM tRAS = 2 cycles */
975#define TRAS_3 0x000000C0 /* SDRAM tRAS = 3 cycles */
976#define TRAS_4 0x00000100 /* SDRAM tRAS = 4 cycles */
977#define TRAS_5 0x00000140 /* SDRAM tRAS = 5 cycles */
978#define TRAS_6 0x00000180 /* SDRAM tRAS = 6 cycles */
979#define TRAS_7 0x000001C0 /* SDRAM tRAS = 7 cycles */
980#define TRAS_8 0x00000200 /* SDRAM tRAS = 8 cycles */
981#define TRAS_9 0x00000240 /* SDRAM tRAS = 9 cycles */
982#define TRAS_10 0x00000280 /* SDRAM tRAS = 10 cycles */
983#define TRAS_11 0x000002C0 /* SDRAM tRAS = 11 cycles */
984#define TRAS_12 0x00000300 /* SDRAM tRAS = 12 cycles */
985#define TRAS_13 0x00000340 /* SDRAM tRAS = 13 cycles */
986#define TRAS_14 0x00000380 /* SDRAM tRAS = 14 cycles */
987#define TRAS_15 0x000003C0 /* SDRAM tRAS = 15 cycles */
988#define TRP_1 0x00000800 /* SDRAM tRP = 1 cycle */
989#define TRP_2 0x00001000 /* SDRAM tRP = 2 cycles */
990#define TRP_3 0x00001800 /* SDRAM tRP = 3 cycles */
991#define TRP_4 0x00002000 /* SDRAM tRP = 4 cycles */
992#define TRP_5 0x00002800 /* SDRAM tRP = 5 cycles */
993#define TRP_6 0x00003000 /* SDRAM tRP = 6 cycles */
994#define TRP_7 0x00003800 /* SDRAM tRP = 7 cycles */
995#define TRCD_1 0x00008000 /* SDRAM tRCD = 1 cycle */
996#define TRCD_2 0x00010000 /* SDRAM tRCD = 2 cycles */
997#define TRCD_3 0x00018000 /* SDRAM tRCD = 3 cycles */
998#define TRCD_4 0x00020000 /* SDRAM tRCD = 4 cycles */
999#define TRCD_5 0x00028000 /* SDRAM tRCD = 5 cycles */
1000#define TRCD_6 0x00030000 /* SDRAM tRCD = 6 cycles */
1001#define TRCD_7 0x00038000 /* SDRAM tRCD = 7 cycles */
1002#define TWR_1 0x00080000 /* SDRAM tWR = 1 cycle */
1003#define TWR_2 0x00100000 /* SDRAM tWR = 2 cycles */
1004#define TWR_3 0x00180000 /* SDRAM tWR = 3 cycles */
1005#define PUPSD 0x00200000 /* Power-Up Start Delay (15 SCLK Cycles Delay) */
1006#define PSM 0x00400000 /* Power-Up Sequence (Mode Register Before/After* Refresh) */
1007#define PSS 0x00800000 /* Enable Power-Up Sequence on Next SDRAM Access */
1008#define SRFS 0x01000000 /* Enable SDRAM Self-Refresh Mode */
1009#define EBUFE 0x02000000 /* Enable External Buffering Timing */
1010#define FBBRW 0x04000000 /* Enable Fast Back-To-Back Read To Write */
1011#define EMREN 0x10000000 /* Extended Mode Register Enable */
1012#define TCSR 0x20000000 /* Temp-Compensated Self-Refresh Value (85/45* Deg C) */
1013#define CDDBG 0x40000000 /* Tristate SDRAM Controls During Bus Grant */
1014
1015/* EBIU_SDBCTL Masks */
1016#define EBE 0x0001 /* Enable SDRAM External Bank */
1017#define EBSZ_16 0x0000 /* SDRAM External Bank Size = 16MB */
1018#define EBSZ_32 0x0002 /* SDRAM External Bank Size = 32MB */
1019#define EBSZ_64 0x0004 /* SDRAM External Bank Size = 64MB */
1020#define EBSZ_128 0x0006 /* SDRAM External Bank Size = 128MB */
1021#define EBSZ_256 0x0008 /* SDRAM External Bank Size = 256MB */
1022#define EBSZ_512 0x000A /* SDRAM External Bank Size = 512MB */
1023#define EBCAW_8 0x0000 /* SDRAM External Bank Column Address Width = 8 Bits */
1024#define EBCAW_9 0x0010 /* SDRAM External Bank Column Address Width = 9 Bits */
1025#define EBCAW_10 0x0020 /* SDRAM External Bank Column Address Width = 10 Bits */
1026#define EBCAW_11 0x0030 /* SDRAM External Bank Column Address Width = 11 Bits */
1027
1028/* EBIU_SDSTAT Masks */
1029#define SDCI 0x0001 /* SDRAM Controller Idle */
1030#define SDSRA 0x0002 /* SDRAM Self-Refresh Active */
1031#define SDPUA 0x0004 /* SDRAM Power-Up Active */
1032#define SDRS 0x0008 /* SDRAM Will Power-Up On Next Access */
1033#define SDEASE 0x0010 /* SDRAM EAB Sticky Error Status */
1034#define BGSTAT 0x0020 /* Bus Grant Status */
1035
1036
1037/* ************************** DMA CONTROLLER MASKS ********************************/
1038
1039/* DMAx_PERIPHERAL_MAP, MDMA_yy_PERIPHERAL_MAP Masks */
1040#define CTYPE 0x0040 /* DMA Channel Type Indicator (Memory/Peripheral*) */
1041#define PMAP 0xF000 /* Peripheral Mapped To This Channel */
1042#define PMAP_PPI 0x0000 /* PPI Port DMA */
1043#define PMAP_EMACRX 0x1000 /* Ethernet Receive DMA */
1044#define PMAP_EMACTX 0x2000 /* Ethernet Transmit DMA */
1045#define PMAP_SPORT0RX 0x3000 /* SPORT0 Receive DMA */
1046#define PMAP_SPORT0TX 0x4000 /* SPORT0 Transmit DMA */
1047#define PMAP_SPORT1RX 0x5000 /* SPORT1 Receive DMA */
1048#define PMAP_SPORT1TX 0x6000 /* SPORT1 Transmit DMA */
1049#define PMAP_SPI 0x7000 /* SPI Port DMA */
1050#define PMAP_UART0RX 0x8000 /* UART0 Port Receive DMA */
1051#define PMAP_UART0TX 0x9000 /* UART0 Port Transmit DMA */
1052#define PMAP_UART1RX 0xA000 /* UART1 Port Receive DMA */
1053#define PMAP_UART1TX 0xB000 /* UART1 Port Transmit DMA */
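
A sketch of decoding a channel's current binding: read the channel's PERIPHERAL_MAP register, mask with PMAP, and compare against the PMAP_* codes (hypothetical MMIO16() as above):

    unsigned short pmap = MMIO16(DMA8_PERIPHERAL_MAP) & PMAP;
    if (pmap == PMAP_UART0RX) {
            /* DMA channel 8 is currently routed to UART0 receive */
    }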
1054
1055/* ************ PARALLEL PERIPHERAL INTERFACE (PPI) MASKS *************/
1056/* PPI_CONTROL Masks */
1057#define PORT_EN 0x0001 /* PPI Port Enable */
1058#define PORT_DIR 0x0002 /* PPI Port Direction */
1059#define XFR_TYPE 0x000C /* PPI Transfer Type */
1060#define PORT_CFG 0x0030 /* PPI Port Configuration */
1061#define FLD_SEL 0x0040 /* PPI Active Field Select */
1062#define PACK_EN 0x0080 /* PPI Packing Mode */
1063#define DMA32 0x0100 /* PPI 32-bit DMA Enable */
1064#define SKIP_EN 0x0200 /* PPI Skip Element Enable */
1065#define SKIP_EO 0x0400 /* PPI Skip Even/Odd Elements */
1066#define DLEN_8 0x0000 /* Data Length = 8 Bits */
1067#define DLEN_10 0x0800 /* Data Length = 10 Bits */
1068#define DLEN_11 0x1000 /* Data Length = 11 Bits */
1069#define DLEN_12 0x1800 /* Data Length = 12 Bits */
1070#define DLEN_13 0x2000 /* Data Length = 13 Bits */
1071#define DLEN_14 0x2800 /* Data Length = 14 Bits */
1072#define DLEN_15 0x3000 /* Data Length = 15 Bits */
1073#define DLEN_16 0x3800 /* Data Length = 16 Bits */
1074#define DLENGTH 0x3800 /* PPI Data Length */
1075#define POLC 0x4000 /* PPI Clock Polarity */
1076#define POLS 0x8000 /* PPI Frame Sync Polarity */
1077
1078/* PPI_STATUS Masks */
1079#define FLD 0x0400 /* Field Indicator */
1080#define FT_ERR 0x0800 /* Frame Track Error */
1081#define OVR 0x1000 /* FIFO Overflow Error */
1082#define UNDR 0x2000 /* FIFO Underrun Error */
1083#define ERR_DET 0x4000 /* Error Detected Indicator */
1084#define ERR_NCOR 0x8000 /* Error Not Corrected Indicator */
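
For example (a sketch, hypothetical MMIO16() as above), the error flags can be polled together before deciding whether a transfer must be restarted:

    if (MMIO16(PPI_STATUS) & (FT_ERR | OVR | UNDR)) {
            /* frame-tracking or FIFO error: abort and restart transfer */
    }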
1085
1086
1087/* ******************** TWO-WIRE INTERFACE (TWI) MASKS ***********************/
1088/* TWI_CLKDIV Macros (Use: *pTWI_CLKDIV = CLKLOW(x)|CLKHI(y); ) */
1089#define CLKLOW(x) ((x) & 0xFF) /* Periods Clock Is Held Low */
1090#define CLKHI(y) (((y)&0xFF)<<0x8) /* Periods Before New Clock Low */
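
Following the usage note above, a sketch for roughly 100 kHz SCL: CLKLOW()/CLKHI() count periods of the internal ~10 MHz time reference (see PRESCALE below), so 10 MHz / 100 kHz = 100 periods, split evenly between the low and high phases. On the BF52x the register is named TWI0_CLKDIV (defined earlier in this file); MMIO16() is the hypothetical accessor from earlier:

    MMIO16(TWI0_CLKDIV) = CLKLOW(50) | CLKHI(50);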
1091
1092/* TWI_PRESCALE Masks */
1093#define PRESCALE 0x007F /* SCLKs Per Internal Time Reference (10MHz) */
1094#define TWI_ENA 0x0080 /* TWI Enable */
1095#define SCCB 0x0200 /* SCCB Compatibility Enable */
1096
1097/* TWI_SLAVE_CTL Masks */
1098#define SEN 0x0001 /* Slave Enable */
1099#define SADD_LEN 0x0002 /* Slave Address Length */
1100#define STDVAL 0x0004 /* Slave Transmit Data Valid */
1101#define NAK 0x0008 /* NAK/ACK* Generated At Conclusion Of Transfer */
1102#define GEN 0x0010 /* General Call Address Matching Enabled */
1103
1104/* TWI_SLAVE_STAT Masks */
1105#define SDIR 0x0001 /* Slave Transfer Direction (Transmit/Receive*) */
1106#define GCALL 0x0002 /* General Call Indicator */
1107
1108/* TWI_MASTER_CTL Masks */
1109#define MEN 0x0001 /* Master Mode Enable */
1110#define MADD_LEN 0x0002 /* Master Address Length */
1111#define MDIR 0x0004 /* Master Transmit Direction (RX/TX*) */
1112#define FAST 0x0008 /* Use Fast Mode Timing Specs */
1113#define STOP 0x0010 /* Issue Stop Condition */
1114#define RSTART 0x0020 /* Repeat Start or Stop* At End Of Transfer */
1115#define DCNT 0x3FC0 /* Data Bytes To Transfer */
1116#define SDAOVR 0x4000 /* Serial Data Override */
1117#define SCLOVR 0x8000 /* Serial Clock Override */
1118
1119/* TWI_MASTER_STAT Masks */
1120#define MPROG 0x0001 /* Master Transfer In Progress */
1121#define LOSTARB 0x0002 /* Lost Arbitration Indicator (Xfer Aborted) */
1122#define ANAK 0x0004 /* Address Not Acknowledged */
1123#define DNAK 0x0008 /* Data Not Acknowledged */
1124#define BUFRDERR 0x0010 /* Buffer Read Error */
1125#define BUFWRERR 0x0020 /* Buffer Write Error */
1126#define SDASEN 0x0040 /* Serial Data Sense */
1127#define SCLSEN 0x0080 /* Serial Clock Sense */
1128#define BUSBUSY 0x0100 /* Bus Busy Indicator */
1129
1130/* TWI_INT_SRC and TWI_INT_ENABLE Masks */
1131#define SINIT 0x0001 /* Slave Transfer Initiated */
1132#define SCOMP 0x0002 /* Slave Transfer Complete */
1133#define SERR 0x0004 /* Slave Transfer Error */
1134#define SOVF 0x0008 /* Slave Overflow */
1135#define MCOMP 0x0010 /* Master Transfer Complete */
1136#define MERR 0x0020 /* Master Transfer Error */
1137#define XMTSERV 0x0040 /* Transmit FIFO Service */
1138#define RCVSERV 0x0080 /* Receive FIFO Service */
1139
1140/* TWI_FIFO_CTRL Masks */
1141#define XMTFLUSH 0x0001 /* Transmit Buffer Flush */
1142#define RCVFLUSH 0x0002 /* Receive Buffer Flush */
1143#define XMTINTLEN 0x0004 /* Transmit Buffer Interrupt Length */
1144#define RCVINTLEN 0x0008 /* Receive Buffer Interrupt Length */
1145
1146/* TWI_FIFO_STAT Masks */
1147#define XMTSTAT 0x0003 /* Transmit FIFO Status */
1148#define XMT_EMPTY 0x0000 /* Transmit FIFO Empty */
1149#define XMT_HALF 0x0001 /* Transmit FIFO Has 1 Byte To Write */
1150#define XMT_FULL 0x0003 /* Transmit FIFO Full (2 Bytes To Write) */
1151
1152#define RCVSTAT 0x000C /* Receive FIFO Status */
1153#define RCV_EMPTY 0x0000 /* Receive FIFO Empty */
1154#define RCV_HALF 0x0004 /* Receive FIFO Has 1 Byte To Read */
1155#define RCV_FULL 0x000C /* Receive FIFO Full (2 Bytes To Read) */
1156
1157
1158/* Omit CAN masks from defBF534.h */
1159
1160/* ******************* PIN CONTROL REGISTER MASKS ************************/
1161/* PORT_MUX Masks */
1162#define PJSE 0x0001 /* Port J SPI/SPORT Enable */
1163#define PJSE_SPORT 0x0000 /* Enable TFS0/DT0PRI */
1164#define PJSE_SPI 0x0001 /* Enable SPI_SSEL3:2 */
1165
1166#define PJCE(x) (((x)&0x3)<<1) /* Port J CAN/SPI/SPORT Enable */
1167#define PJCE_SPORT 0x0000 /* Enable DR0SEC/DT0SEC */
1168#define PJCE_CAN 0x0002 /* Enable CAN RX/TX */
1169#define PJCE_SPI 0x0004 /* Enable SPI_SSEL7 */
1170
1171#define PFDE 0x0008 /* Port F DMA Request Enable */
1172#define PFDE_UART 0x0000 /* Enable UART0 RX/TX */
1173#define PFDE_DMA 0x0008 /* Enable DMAR1:0 */
1174
1175#define PFTE 0x0010 /* Port F Timer Enable */
1176#define PFTE_UART 0x0000 /* Enable UART1 RX/TX */
1177#define PFTE_TIMER 0x0010 /* Enable TMR7:6 */
1178
1179#define PFS6E 0x0020 /* Port F SPI SSEL 6 Enable */
1180#define PFS6E_TIMER 0x0000 /* Enable TMR5 */
1181#define PFS6E_SPI 0x0020 /* Enable SPI_SSEL6 */
1182
1183#define PFS5E 0x0040 /* Port F SPI SSEL 5 Enable */
1184#define PFS5E_TIMER 0x0000 /* Enable TMR4 */
1185#define PFS5E_SPI 0x0040 /* Enable SPI_SSEL5 */
1186
1187#define PFS4E 0x0080 /* Port F SPI SSEL 4 Enable */
1188#define PFS4E_TIMER 0x0000 /* Enable TMR3 */
1189#define PFS4E_SPI 0x0080 /* Enable SPI_SSEL4 */
1190
1191#define PFFE 0x0100 /* Port F PPI Frame Sync Enable */
1192#define PFFE_TIMER 0x0000 /* Enable TMR2 */
1193#define PFFE_PPI 0x0100 /* Enable PPI FS3 */
1194
1195#define PGSE 0x0200 /* Port G SPORT1 Secondary Enable */
1196#define PGSE_PPI 0x0000 /* Enable PPI D9:8 */
1197#define PGSE_SPORT 0x0200 /* Enable DR1SEC/DT1SEC */
1198
1199#define PGRE 0x0400 /* Port G SPORT1 Receive Enable */
1200#define PGRE_PPI 0x0000 /* Enable PPI D12:10 */
1201#define PGRE_SPORT 0x0400 /* Enable DR1PRI/RFS1/RSCLK1 */
1202
1203#define PGTE 0x0800 /* Port G SPORT1 Transmit Enable */
1204#define PGTE_PPI 0x0000 /* Enable PPI D15:13 */
1205#define PGTE_SPORT 0x0800 /* Enable DT1PRI/TFS1/TSCLK1 */
1206
1207
1208/* ****************** HANDSHAKE DMA (HDMA) MASKS *********************/
1209/* HDMAx_CTL Masks */
1210#define HMDMAEN 0x0001 /* Enable Handshake DMA 0/1 */
1211#define REP 0x0002 /* HDMA Request Polarity */
1212#define UTE 0x0004 /* Urgency Threshold Enable */
1213#define OIE 0x0010 /* Overflow Interrupt Enable */
1214#define BDIE 0x0020 /* Block Done Interrupt Enable */
1215#define MBDI 0x0040 /* Mask Block Done IRQ If Pending ECNT */
1216#define DRQ 0x0300 /* HDMA Request Type */
1217#define DRQ_NONE 0x0000 /* No Request */
1218#define DRQ_SINGLE 0x0100 /* Channels Request Single */
1219#define DRQ_MULTI 0x0200 /* Channels Request Multi (Default) */
1220#define DRQ_URGENT 0x0300 /* Channels Request Multi Urgent */
1221#define RBC 0x1000 /* Reload BCNT With IBCNT */
1222#define PS 0x2000 /* HDMA Pin Status */
1223#define OI 0x4000 /* Overflow Interrupt Generated */
1224#define BDI 0x8000 /* Block Done Interrupt Generated */
1225
1226/* entry addresses of the user-callable Boot ROM functions */
1227
1228#define _BOOTROM_RESET 0xEF000000
1229#define _BOOTROM_FINAL_INIT 0xEF000002
1230#define _BOOTROM_DO_MEMORY_DMA 0xEF000006
1231#define _BOOTROM_BOOT_DXE_FLASH 0xEF000008
1232#define _BOOTROM_BOOT_DXE_SPI 0xEF00000A
1233#define _BOOTROM_BOOT_DXE_TWI 0xEF00000C
1234#define _BOOTROM_GET_DXE_ADDRESS_FLASH 0xEF000010
1235#define _BOOTROM_GET_DXE_ADDRESS_SPI 0xEF000012
1236#define _BOOTROM_GET_DXE_ADDRESS_TWI 0xEF000014
1237
1238/* Alternate Deprecated Macros Provided For Backwards Code Compatibility */
1239#define PGDE_UART PFDE_UART
1240#define PGDE_DMA PFDE_DMA
1241#define CKELOW SCKELOW
1242
1243/* ==== end from defBF534.h ==== */
1244
1245/* HOST Port Registers */
1246
1247#define HOST_CONTROL 0xffc03400 /* HOST Control Register */
1248#define HOST_STATUS 0xffc03404 /* HOST Status Register */
1249#define HOST_TIMEOUT 0xffc03408 /* HOST Acknowledge Mode Timeout Register */
1250
1251/* Counter Registers */
1252
1253#define CNT_CONFIG 0xffc03500 /* Configuration Register */
1254#define CNT_IMASK 0xffc03504 /* Interrupt Mask Register */
1255#define CNT_STATUS 0xffc03508 /* Status Register */
1256#define CNT_COMMAND 0xffc0350c /* Command Register */
1257#define CNT_DEBOUNCE 0xffc03510 /* Debounce Register */
1258#define CNT_COUNTER 0xffc03514 /* Counter Register */
1259#define CNT_MAX 0xffc03518 /* Maximal Count Register */
1260#define CNT_MIN 0xffc0351c /* Minimal Count Register */
1261
1262/* OTP/FUSE Registers */
1263
1264#define OTP_CONTROL 0xffc03600 /* OTP/Fuse Control Register */
1265#define OTP_BEN 0xffc03604 /* OTP/Fuse Byte Enable */
1266#define OTP_STATUS 0xffc03608 /* OTP/Fuse Status */
1267#define OTP_TIMING 0xffc0360c /* OTP/Fuse Access Timing */
1268
1269/* Security Registers */
1270
1271#define SECURE_SYSSWT 0xffc03620 /* Secure System Switches */
1272#define SECURE_CONTROL 0xffc03624 /* Secure Control */
1273#define SECURE_STATUS 0xffc03628 /* Secure Status */
1274
1275/* OTP Read/Write Data Buffer Registers */
1276
1277#define OTP_DATA0 0xffc03680 /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
1278#define OTP_DATA1 0xffc03684 /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
1279#define OTP_DATA2 0xffc03688 /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
1280#define OTP_DATA3 0xffc0368c /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
1281
1282/* NFC Registers */
1283
1284#define NFC_CTL 0xffc03700 /* NAND Control Register */
1285#define NFC_STAT 0xffc03704 /* NAND Status Register */
1286#define NFC_IRQSTAT 0xffc03708 /* NAND Interrupt Status Register */
1287#define NFC_IRQMASK 0xffc0370c /* NAND Interrupt Mask Register */
1288#define NFC_ECC0 0xffc03710 /* NAND ECC Register 0 */
1289#define NFC_ECC1 0xffc03714 /* NAND ECC Register 1 */
1290#define NFC_ECC2 0xffc03718 /* NAND ECC Register 2 */
1291#define NFC_ECC3 0xffc0371c /* NAND ECC Register 3 */
1292#define NFC_COUNT 0xffc03720 /* NAND ECC Count Register */
1293#define NFC_RST 0xffc03724 /* NAND ECC Reset Register */
1294#define NFC_PGCTL 0xffc03728 /* NAND Page Control Register */
1295#define NFC_READ 0xffc0372c /* NAND Read Data Register */
1296#define NFC_ADDR 0xffc03740 /* NAND Address Register */
1297#define NFC_CMD 0xffc03744 /* NAND Command Register */
1298#define NFC_DATA_WR 0xffc03748 /* NAND Data Write Register */
1299#define NFC_DATA_RD 0xffc0374c /* NAND Data Read Register */
1300
1301/* ********************************************************** */
1302/* SINGLE BIT MACRO PAIRS (bit mask and negated one) */
1303/* and MULTI BIT READ MACROS */
1304/* ********************************************************** */
1305
1306/* Bit masks for HOST_CONTROL */
1307
1308#define HOST_CNTR_HOST_EN 0x1 /* Host Enable */
1309#define HOST_CNTR_nHOST_EN 0x0
1310#define HOST_CNTR_HOST_END 0x2 /* Host Endianness */
1311#define HOST_CNTR_nHOST_END 0x0
1312#define HOST_CNTR_DATA_SIZE 0x4 /* Data Size */
1313#define HOST_CNTR_nDATA_SIZE 0x0
1314#define HOST_CNTR_HOST_RST 0x8 /* Host Reset */
1315#define HOST_CNTR_nHOST_RST 0x0
1316#define HOST_CNTR_HRDY_OVR 0x20 /* Host Ready Override */
1317#define HOST_CNTR_nHRDY_OVR 0x0
1318#define HOST_CNTR_INT_MODE 0x40 /* Interrupt Mode */
1319#define HOST_CNTR_nINT_MODE 0x0
1320#define HOST_CNTR_BT_EN 0x80 /* Bus Timeout Enable */
1321#define HOST_CNTR_nBT_EN 0x0
1322#define HOST_CNTR_EHW 0x100 /* Enable Host Write */
1323#define HOST_CNTR_nEHW 0x0
1324#define HOST_CNTR_EHR 0x200 /* Enable Host Read */
1325#define HOST_CNTR_nEHR 0x0
1326#define HOST_CNTR_BDR 0x400 /* Burst DMA Requests */
1327#define HOST_CNTR_nBDR 0x0
1328
1329/* Bit masks for HOST_STATUS */
1330
1331#define HOST_STAT_READY 0x1 /* DMA Ready */
1332#define HOST_STAT_nREADY 0x0
1333#define HOST_STAT_FIFOFULL 0x2 /* FIFO Full */
1334#define HOST_STAT_nFIFOFULL 0x0
1335#define HOST_STAT_FIFOEMPTY 0x4 /* FIFO Empty */
1336#define HOST_STAT_nFIFOEMPTY 0x0
1337#define HOST_STAT_COMPLETE 0x8 /* DMA Complete */
1338#define HOST_STAT_nCOMPLETE 0x0
1339#define HOST_STAT_HSHK 0x10 /* Host Handshake */
1340#define HOST_STAT_nHSHK 0x0
1341#define HOST_STAT_TIMEOUT 0x20 /* Host Timeout */
1342#define HOST_STAT_nTIMEOUT 0x0
1343#define HOST_STAT_HIRQ 0x40 /* Host Interrupt Request */
1344#define HOST_STAT_nHIRQ 0x0
1345#define HOST_STAT_ALLOW_CNFG 0x80 /* Allow New Configuration */
1346#define HOST_STAT_nALLOW_CNFG 0x0
1347#define HOST_STAT_DMA_DIR 0x100 /* DMA Direction */
1348#define HOST_STAT_nDMA_DIR 0x0
1349#define HOST_STAT_BTE 0x200 /* Bus Timeout Enabled */
1350#define HOST_STAT_nBTE 0x0
1351#define HOST_STAT_HOSTRD_DONE 0x8000 /* Host Read Completion Interrupt */
1352#define HOST_STAT_nHOSTRD_DONE 0x0
1353
1354/* Bit masks for HOST_TIMEOUT */
1355
1356#define HOST_COUNT_TIMEOUT 0x7ff /* Host Timeout count */
1357
1358/* Bit masks for SECURE_SYSSWT */
1359
1360#define EMUDABL 0x1 /* Emulation Disable. */
1361#define nEMUDABL 0x0
1362#define RSTDABL 0x2 /* Reset Disable */
1363#define nRSTDABL 0x0
1364#define L1IDABL 0x1c /* L1 Instruction Memory Disable. */
1365#define L1DADABL 0xe0 /* L1 Data Bank A Memory Disable. */
1366#define L1DBDABL 0x700 /* L1 Data Bank B Memory Disable. */
1367#define DMA0OVR 0x800 /* DMA0 Memory Access Override */
1368#define nDMA0OVR 0x0
1369#define DMA1OVR 0x1000 /* DMA1 Memory Access Override */
1370#define nDMA1OVR 0x0
1371#define EMUOVR 0x4000 /* Emulation Override */
1372#define nEMUOVR 0x0
1373#define OTPSEN 0x8000 /* OTP Secrets Enable. */
1374#define nOTPSEN 0x0
1375#define L2DABL 0x70000 /* L2 Memory Disable. */
1376
1377/* Bit masks for SECURE_CONTROL */
1378
1379#define SECURE0 0x1 /* SECURE 0 */
1380#define nSECURE0 0x0
1381#define SECURE1 0x2 /* SECURE 1 */
1382#define nSECURE1 0x0
1383#define SECURE2 0x4 /* SECURE 2 */
1384#define nSECURE2 0x0
1385#define SECURE3 0x8 /* SECURE 3 */
1386#define nSECURE3 0x0
1387
1388/* Bit masks for SECURE_STATUS */
1389
1390#define SECMODE 0x3 /* Secured Mode Control State */
1391#define NMI 0x4 /* Non Maskable Interrupt */
1392#define nNMI 0x0
1393#define AFVALID 0x8 /* Authentication Firmware Valid */
1394#define nAFVALID 0x0
1395#define AFEXIT 0x10 /* Authentication Firmware Exit */
1396#define nAFEXIT 0x0
1397#define SECSTAT 0xe0 /* Secure Status */
1398
1399#endif /* _DEF_BF522_H */
diff --git a/arch/blackfin/mach-bf527/include/mach/defBF525.h b/arch/blackfin/mach-bf527/include/mach/defBF525.h
index c136f7032962..cc383adfdffa 100644
--- a/arch/blackfin/mach-bf527/include/mach/defBF525.h
+++ b/arch/blackfin/mach-bf527/include/mach/defBF525.h
@@ -1,5 +1,5 @@
1/*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
3 *
4 * Licensed under the ADI BSD license or the GPL-2 (or later)
5 */
diff --git a/arch/blackfin/mach-bf527/include/mach/defBF527.h b/arch/blackfin/mach-bf527/include/mach/defBF527.h
index 4dd58fb33156..05369a92fbc8 100644
--- a/arch/blackfin/mach-bf527/include/mach/defBF527.h
+++ b/arch/blackfin/mach-bf527/include/mach/defBF527.h
@@ -1,5 +1,5 @@
1/*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
3 *
4 * Licensed under the ADI BSD license or the GPL-2 (or later)
5 */
diff --git a/arch/blackfin/mach-bf527/include/mach/defBF52x_base.h b/arch/blackfin/mach-bf527/include/mach/defBF52x_base.h
deleted file mode 100644
index 09475034c6a1..000000000000
--- a/arch/blackfin/mach-bf527/include/mach/defBF52x_base.h
+++ /dev/null
@@ -1,1506 +0,0 @@
1/*
2 * Copyright 2007-2008 Analog Devices Inc.
3 *
4 * Licensed under the ADI BSD license or the GPL-2 (or later)
5 */
6
7#ifndef _DEF_BF52X_H
8#define _DEF_BF52X_H
9
10
11/* ************************************************************** */
12/* SYSTEM & MMR ADDRESS DEFINITIONS COMMON TO ALL ADSP-BF52x */
13/* ************************************************************** */
14
15/* ==== begin from defBF534.h ==== */
16
17/* Clock and System Control (0xFFC00000 - 0xFFC000FF) */
18#define PLL_CTL 0xFFC00000 /* PLL Control Register */
19#define PLL_DIV 0xFFC00004 /* PLL Divide Register */
20#define VR_CTL 0xFFC00008 /* Voltage Regulator Control Register */
21#define PLL_STAT 0xFFC0000C /* PLL Status Register */
22#define PLL_LOCKCNT 0xFFC00010 /* PLL Lock Count Register */
23#define CHIPID 0xFFC00014 /* Device ID Register */
24
25
26/* System Interrupt Controller (0xFFC00100 - 0xFFC001FF) */
27#define SWRST 0xFFC00100 /* Software Reset Register */
28#define SYSCR 0xFFC00104 /* System Configuration Register */
29#define SIC_RVECT 0xFFC00108 /* Interrupt Reset Vector Address Register */
30
31#define SIC_IMASK0 0xFFC0010C /* Interrupt Mask Register */
32#define SIC_IAR0 0xFFC00110 /* Interrupt Assignment Register 0 */
33#define SIC_IAR1 0xFFC00114 /* Interrupt Assignment Register 1 */
34#define SIC_IAR2 0xFFC00118 /* Interrupt Assignment Register 2 */
35#define SIC_IAR3 0xFFC0011C /* Interrupt Assignment Register 3 */
36#define SIC_ISR0 0xFFC00120 /* Interrupt Status Register */
37#define SIC_IWR0 0xFFC00124 /* Interrupt Wakeup Register */
38
39/* SIC Additions to ADSP-BF52x (0xFFC0014C - 0xFFC00162) */
40#define SIC_IMASK1 0xFFC0014C /* Interrupt Mask Register 1 (SIC2) */
41#define SIC_IAR4 0xFFC00150 /* Interrupt Assignment Register 4 */
42#define SIC_IAR5 0xFFC00154 /* Interrupt Assignment Register 5 */
43#define SIC_IAR6 0xFFC00158 /* Interrupt Assignment Register 6 */
44#define SIC_IAR7 0xFFC0015C /* Interrupt Assignment Register 7 */
45#define SIC_ISR1 0xFFC00160 /* Interrupt Status Register 1 */
46#define SIC_IWR1 0xFFC00164 /* Interrupt Wakeup Register 1 */
47
48
49/* Watchdog Timer (0xFFC00200 - 0xFFC002FF) */
50#define WDOG_CTL 0xFFC00200 /* Watchdog Control Register */
51#define WDOG_CNT 0xFFC00204 /* Watchdog Count Register */
52#define WDOG_STAT 0xFFC00208 /* Watchdog Status Register */
53
54
55/* Real Time Clock (0xFFC00300 - 0xFFC003FF) */
56#define RTC_STAT 0xFFC00300 /* RTC Status Register */
57#define RTC_ICTL 0xFFC00304 /* RTC Interrupt Control Register */
58#define RTC_ISTAT 0xFFC00308 /* RTC Interrupt Status Register */
59#define RTC_SWCNT 0xFFC0030C /* RTC Stopwatch Count Register */
60#define RTC_ALARM 0xFFC00310 /* RTC Alarm Time Register */
61#define RTC_FAST 0xFFC00314 /* RTC Prescaler Enable Register */
62#define RTC_PREN 0xFFC00314 /* RTC Prescaler Enable Alternate Macro */
63
64
65/* UART0 Controller (0xFFC00400 - 0xFFC004FF) */
66#define UART0_THR 0xFFC00400 /* Transmit Holding register */
67#define UART0_RBR 0xFFC00400 /* Receive Buffer register */
68#define UART0_DLL 0xFFC00400 /* Divisor Latch (Low-Byte) */
69#define UART0_IER 0xFFC00404 /* Interrupt Enable Register */
70#define UART0_DLH 0xFFC00404 /* Divisor Latch (High-Byte) */
71#define UART0_IIR 0xFFC00408 /* Interrupt Identification Register */
72#define UART0_LCR 0xFFC0040C /* Line Control Register */
73#define UART0_MCR 0xFFC00410 /* Modem Control Register */
74#define UART0_LSR 0xFFC00414 /* Line Status Register */
75#define UART0_MSR 0xFFC00418 /* Modem Status Register */
76#define UART0_SCR 0xFFC0041C /* SCR Scratch Register */
77#define UART0_GCTL 0xFFC00424 /* Global Control Register */
78
79
80/* SPI Controller (0xFFC00500 - 0xFFC005FF) */
81#define SPI0_REGBASE 0xFFC00500
82#define SPI_CTL 0xFFC00500 /* SPI Control Register */
83#define SPI_FLG 0xFFC00504 /* SPI Flag register */
84#define SPI_STAT 0xFFC00508 /* SPI Status register */
85#define SPI_TDBR 0xFFC0050C /* SPI Transmit Data Buffer Register */
86#define SPI_RDBR 0xFFC00510 /* SPI Receive Data Buffer Register */
87#define SPI_BAUD 0xFFC00514 /* SPI Baud rate Register */
88#define SPI_SHADOW 0xFFC00518 /* SPI_RDBR Shadow Register */
89
90
91/* TIMER0-7 Registers (0xFFC00600 - 0xFFC006FF) */
92#define TIMER0_CONFIG 0xFFC00600 /* Timer 0 Configuration Register */
93#define TIMER0_COUNTER 0xFFC00604 /* Timer 0 Counter Register */
94#define TIMER0_PERIOD 0xFFC00608 /* Timer 0 Period Register */
95#define TIMER0_WIDTH 0xFFC0060C /* Timer 0 Width Register */
96
97#define TIMER1_CONFIG 0xFFC00610 /* Timer 1 Configuration Register */
98#define TIMER1_COUNTER 0xFFC00614 /* Timer 1 Counter Register */
99#define TIMER1_PERIOD 0xFFC00618 /* Timer 1 Period Register */
100#define TIMER1_WIDTH 0xFFC0061C /* Timer 1 Width Register */
101
102#define TIMER2_CONFIG 0xFFC00620 /* Timer 2 Configuration Register */
103#define TIMER2_COUNTER 0xFFC00624 /* Timer 2 Counter Register */
104#define TIMER2_PERIOD 0xFFC00628 /* Timer 2 Period Register */
105#define TIMER2_WIDTH 0xFFC0062C /* Timer 2 Width Register */
106
107#define TIMER3_CONFIG 0xFFC00630 /* Timer 3 Configuration Register */
108#define TIMER3_COUNTER 0xFFC00634 /* Timer 3 Counter Register */
109#define TIMER3_PERIOD 0xFFC00638 /* Timer 3 Period Register */
110#define TIMER3_WIDTH 0xFFC0063C /* Timer 3 Width Register */
111
112#define TIMER4_CONFIG 0xFFC00640 /* Timer 4 Configuration Register */
113#define TIMER4_COUNTER 0xFFC00644 /* Timer 4 Counter Register */
114#define TIMER4_PERIOD 0xFFC00648 /* Timer 4 Period Register */
115#define TIMER4_WIDTH 0xFFC0064C /* Timer 4 Width Register */
116
117#define TIMER5_CONFIG 0xFFC00650 /* Timer 5 Configuration Register */
118#define TIMER5_COUNTER 0xFFC00654 /* Timer 5 Counter Register */
119#define TIMER5_PERIOD 0xFFC00658 /* Timer 5 Period Register */
120#define TIMER5_WIDTH 0xFFC0065C /* Timer 5 Width Register */
121
122#define TIMER6_CONFIG 0xFFC00660 /* Timer 6 Configuration Register */
123#define TIMER6_COUNTER 0xFFC00664 /* Timer 6 Counter Register */
124#define TIMER6_PERIOD 0xFFC00668 /* Timer 6 Period Register */
125#define TIMER6_WIDTH 0xFFC0066C /* Timer 6 Width Register */
126
127#define TIMER7_CONFIG 0xFFC00670 /* Timer 7 Configuration Register */
128#define TIMER7_COUNTER 0xFFC00674 /* Timer 7 Counter Register */
129#define TIMER7_PERIOD 0xFFC00678 /* Timer 7 Period Register */
130#define TIMER7_WIDTH 0xFFC0067C /* Timer 7 Width Register */
131
132#define TIMER_ENABLE 0xFFC00680 /* Timer Enable Register */
133#define TIMER_DISABLE 0xFFC00684 /* Timer Disable Register */
134#define TIMER_STATUS 0xFFC00688 /* Timer Status Register */
135
136
137/* General Purpose I/O Port F (0xFFC00700 - 0xFFC007FF) */
138#define PORTFIO 0xFFC00700 /* Port F I/O Pin State Specify Register */
139#define PORTFIO_CLEAR 0xFFC00704 /* Port F I/O Peripheral Interrupt Clear Register */
140#define PORTFIO_SET 0xFFC00708 /* Port F I/O Peripheral Interrupt Set Register */
141#define PORTFIO_TOGGLE 0xFFC0070C /* Port F I/O Pin State Toggle Register */
142#define PORTFIO_MASKA 0xFFC00710 /* Port F I/O Mask State Specify Interrupt A Register */
143#define PORTFIO_MASKA_CLEAR 0xFFC00714 /* Port F I/O Mask Disable Interrupt A Register */
144#define PORTFIO_MASKA_SET 0xFFC00718 /* Port F I/O Mask Enable Interrupt A Register */
145#define PORTFIO_MASKA_TOGGLE 0xFFC0071C /* Port F I/O Mask Toggle Enable Interrupt A Register */
146#define PORTFIO_MASKB 0xFFC00720 /* Port F I/O Mask State Specify Interrupt B Register */
147#define PORTFIO_MASKB_CLEAR 0xFFC00724 /* Port F I/O Mask Disable Interrupt B Register */
148#define PORTFIO_MASKB_SET 0xFFC00728 /* Port F I/O Mask Enable Interrupt B Register */
149#define PORTFIO_MASKB_TOGGLE 0xFFC0072C /* Port F I/O Mask Toggle Enable Interrupt B Register */
150#define PORTFIO_DIR 0xFFC00730 /* Port F I/O Direction Register */
151#define PORTFIO_POLAR 0xFFC00734 /* Port F I/O Source Polarity Register */
152#define PORTFIO_EDGE 0xFFC00738 /* Port F I/O Source Sensitivity Register */
153#define PORTFIO_BOTH 0xFFC0073C /* Port F I/O Set on BOTH Edges Register */
154#define PORTFIO_INEN 0xFFC00740 /* Port F I/O Input Enable Register */
155
156
157/* SPORT0 Controller (0xFFC00800 - 0xFFC008FF) */
158#define SPORT0_TCR1 0xFFC00800 /* SPORT0 Transmit Configuration 1 Register */
159#define SPORT0_TCR2 0xFFC00804 /* SPORT0 Transmit Configuration 2 Register */
160#define SPORT0_TCLKDIV 0xFFC00808 /* SPORT0 Transmit Clock Divider */
161#define SPORT0_TFSDIV 0xFFC0080C /* SPORT0 Transmit Frame Sync Divider */
162#define SPORT0_TX 0xFFC00810 /* SPORT0 TX Data Register */
163#define SPORT0_RX 0xFFC00818 /* SPORT0 RX Data Register */
164#define SPORT0_RCR1 0xFFC00820 /* SPORT0 Receive Configuration 1 Register */
165#define SPORT0_RCR2 0xFFC00824 /* SPORT0 Receive Configuration 2 Register */
166#define SPORT0_RCLKDIV 0xFFC00828 /* SPORT0 Receive Clock Divider */
167#define SPORT0_RFSDIV 0xFFC0082C /* SPORT0 Receive Frame Sync Divider */
168#define SPORT0_STAT 0xFFC00830 /* SPORT0 Status Register */
169#define SPORT0_CHNL 0xFFC00834 /* SPORT0 Current Channel Register */
170#define SPORT0_MCMC1 0xFFC00838 /* SPORT0 Multi-Channel Configuration Register 1 */
171#define SPORT0_MCMC2 0xFFC0083C /* SPORT0 Multi-Channel Configuration Register 2 */
172#define SPORT0_MTCS0 0xFFC00840 /* SPORT0 Multi-Channel Transmit Select Register 0 */
173#define SPORT0_MTCS1 0xFFC00844 /* SPORT0 Multi-Channel Transmit Select Register 1 */
174#define SPORT0_MTCS2 0xFFC00848 /* SPORT0 Multi-Channel Transmit Select Register 2 */
175#define SPORT0_MTCS3 0xFFC0084C /* SPORT0 Multi-Channel Transmit Select Register 3 */
176#define SPORT0_MRCS0 0xFFC00850 /* SPORT0 Multi-Channel Receive Select Register 0 */
177#define SPORT0_MRCS1 0xFFC00854 /* SPORT0 Multi-Channel Receive Select Register 1 */
178#define SPORT0_MRCS2 0xFFC00858 /* SPORT0 Multi-Channel Receive Select Register 2 */
179#define SPORT0_MRCS3 0xFFC0085C /* SPORT0 Multi-Channel Receive Select Register 3 */
180
181
182/* SPORT1 Controller (0xFFC00900 - 0xFFC009FF) */
183#define SPORT1_TCR1 0xFFC00900 /* SPORT1 Transmit Configuration 1 Register */
184#define SPORT1_TCR2 0xFFC00904 /* SPORT1 Transmit Configuration 2 Register */
185#define SPORT1_TCLKDIV 0xFFC00908 /* SPORT1 Transmit Clock Divider */
186#define SPORT1_TFSDIV 0xFFC0090C /* SPORT1 Transmit Frame Sync Divider */
187#define SPORT1_TX 0xFFC00910 /* SPORT1 TX Data Register */
188#define SPORT1_RX 0xFFC00918 /* SPORT1 RX Data Register */
189#define SPORT1_RCR1 0xFFC00920 /* SPORT1 Receive Configuration 1 Register */
190#define SPORT1_RCR2 0xFFC00924 /* SPORT1 Receive Configuration 2 Register */
191#define SPORT1_RCLKDIV 0xFFC00928 /* SPORT1 Receive Clock Divider */
192#define SPORT1_RFSDIV 0xFFC0092C /* SPORT1 Receive Frame Sync Divider */
193#define SPORT1_STAT 0xFFC00930 /* SPORT1 Status Register */
194#define SPORT1_CHNL 0xFFC00934 /* SPORT1 Current Channel Register */
195#define SPORT1_MCMC1 0xFFC00938 /* SPORT1 Multi-Channel Configuration Register 1 */
196#define SPORT1_MCMC2 0xFFC0093C /* SPORT1 Multi-Channel Configuration Register 2 */
197#define SPORT1_MTCS0 0xFFC00940 /* SPORT1 Multi-Channel Transmit Select Register 0 */
198#define SPORT1_MTCS1 0xFFC00944 /* SPORT1 Multi-Channel Transmit Select Register 1 */
199#define SPORT1_MTCS2 0xFFC00948 /* SPORT1 Multi-Channel Transmit Select Register 2 */
200#define SPORT1_MTCS3 0xFFC0094C /* SPORT1 Multi-Channel Transmit Select Register 3 */
201#define SPORT1_MRCS0 0xFFC00950 /* SPORT1 Multi-Channel Receive Select Register 0 */
202#define SPORT1_MRCS1 0xFFC00954 /* SPORT1 Multi-Channel Receive Select Register 1 */
203#define SPORT1_MRCS2 0xFFC00958 /* SPORT1 Multi-Channel Receive Select Register 2 */
204#define SPORT1_MRCS3 0xFFC0095C /* SPORT1 Multi-Channel Receive Select Register 3 */
205
206
207/* External Bus Interface Unit (0xFFC00A00 - 0xFFC00AFF) */
208#define EBIU_AMGCTL 0xFFC00A00 /* Asynchronous Memory Global Control Register */
209#define EBIU_AMBCTL0 0xFFC00A04 /* Asynchronous Memory Bank Control Register 0 */
210#define EBIU_AMBCTL1 0xFFC00A08 /* Asynchronous Memory Bank Control Register 1 */
211#define EBIU_SDGCTL 0xFFC00A10 /* SDRAM Global Control Register */
212#define EBIU_SDBCTL 0xFFC00A14 /* SDRAM Bank Control Register */
213#define EBIU_SDRRC 0xFFC00A18 /* SDRAM Refresh Rate Control Register */
214#define EBIU_SDSTAT 0xFFC00A1C /* SDRAM Status Register */
215
216
217/* DMA Traffic Control Registers */
218#define DMA_TC_PER 0xFFC00B0C /* Traffic Control Periods Register */
219#define DMA_TC_CNT 0xFFC00B10 /* Traffic Control Current Counts Register */
220
221/* Alternate deprecated register names (below) provided for backwards code compatibility */
222#define DMA_TCPER 0xFFC00B0C /* Traffic Control Periods Register */
223#define DMA_TCCNT 0xFFC00B10 /* Traffic Control Current Counts Register */
224
225/* DMA Controller (0xFFC00C00 - 0xFFC00FFF) */
226#define DMA0_NEXT_DESC_PTR 0xFFC00C00 /* DMA Channel 0 Next Descriptor Pointer Register */
227#define DMA0_START_ADDR 0xFFC00C04 /* DMA Channel 0 Start Address Register */
228#define DMA0_CONFIG 0xFFC00C08 /* DMA Channel 0 Configuration Register */
229#define DMA0_X_COUNT 0xFFC00C10 /* DMA Channel 0 X Count Register */
230#define DMA0_X_MODIFY 0xFFC00C14 /* DMA Channel 0 X Modify Register */
231#define DMA0_Y_COUNT 0xFFC00C18 /* DMA Channel 0 Y Count Register */
232#define DMA0_Y_MODIFY 0xFFC00C1C /* DMA Channel 0 Y Modify Register */
233#define DMA0_CURR_DESC_PTR 0xFFC00C20 /* DMA Channel 0 Current Descriptor Pointer Register */
234#define DMA0_CURR_ADDR 0xFFC00C24 /* DMA Channel 0 Current Address Register */
235#define DMA0_IRQ_STATUS 0xFFC00C28 /* DMA Channel 0 Interrupt/Status Register */
236#define DMA0_PERIPHERAL_MAP 0xFFC00C2C /* DMA Channel 0 Peripheral Map Register */
237#define DMA0_CURR_X_COUNT 0xFFC00C30 /* DMA Channel 0 Current X Count Register */
238#define DMA0_CURR_Y_COUNT 0xFFC00C38 /* DMA Channel 0 Current Y Count Register */
239
240#define DMA1_NEXT_DESC_PTR 0xFFC00C40 /* DMA Channel 1 Next Descriptor Pointer Register */
241#define DMA1_START_ADDR 0xFFC00C44 /* DMA Channel 1 Start Address Register */
242#define DMA1_CONFIG 0xFFC00C48 /* DMA Channel 1 Configuration Register */
243#define DMA1_X_COUNT 0xFFC00C50 /* DMA Channel 1 X Count Register */
244#define DMA1_X_MODIFY 0xFFC00C54 /* DMA Channel 1 X Modify Register */
245#define DMA1_Y_COUNT 0xFFC00C58 /* DMA Channel 1 Y Count Register */
246#define DMA1_Y_MODIFY 0xFFC00C5C /* DMA Channel 1 Y Modify Register */
247#define DMA1_CURR_DESC_PTR 0xFFC00C60 /* DMA Channel 1 Current Descriptor Pointer Register */
248#define DMA1_CURR_ADDR 0xFFC00C64 /* DMA Channel 1 Current Address Register */
249#define DMA1_IRQ_STATUS 0xFFC00C68 /* DMA Channel 1 Interrupt/Status Register */
250#define DMA1_PERIPHERAL_MAP 0xFFC00C6C /* DMA Channel 1 Peripheral Map Register */
251#define DMA1_CURR_X_COUNT 0xFFC00C70 /* DMA Channel 1 Current X Count Register */
252#define DMA1_CURR_Y_COUNT 0xFFC00C78 /* DMA Channel 1 Current Y Count Register */
253
254#define DMA2_NEXT_DESC_PTR 0xFFC00C80 /* DMA Channel 2 Next Descriptor Pointer Register */
255#define DMA2_START_ADDR 0xFFC00C84 /* DMA Channel 2 Start Address Register */
256#define DMA2_CONFIG 0xFFC00C88 /* DMA Channel 2 Configuration Register */
257#define DMA2_X_COUNT 0xFFC00C90 /* DMA Channel 2 X Count Register */
258#define DMA2_X_MODIFY 0xFFC00C94 /* DMA Channel 2 X Modify Register */
259#define DMA2_Y_COUNT 0xFFC00C98 /* DMA Channel 2 Y Count Register */
260#define DMA2_Y_MODIFY 0xFFC00C9C /* DMA Channel 2 Y Modify Register */
261#define DMA2_CURR_DESC_PTR 0xFFC00CA0 /* DMA Channel 2 Current Descriptor Pointer Register */
262#define DMA2_CURR_ADDR 0xFFC00CA4 /* DMA Channel 2 Current Address Register */
263#define DMA2_IRQ_STATUS 0xFFC00CA8 /* DMA Channel 2 Interrupt/Status Register */
264#define DMA2_PERIPHERAL_MAP 0xFFC00CAC /* DMA Channel 2 Peripheral Map Register */
265#define DMA2_CURR_X_COUNT 0xFFC00CB0 /* DMA Channel 2 Current X Count Register */
266#define DMA2_CURR_Y_COUNT 0xFFC00CB8 /* DMA Channel 2 Current Y Count Register */
267
268#define DMA3_NEXT_DESC_PTR 0xFFC00CC0 /* DMA Channel 3 Next Descriptor Pointer Register */
269#define DMA3_START_ADDR 0xFFC00CC4 /* DMA Channel 3 Start Address Register */
270#define DMA3_CONFIG 0xFFC00CC8 /* DMA Channel 3 Configuration Register */
271#define DMA3_X_COUNT 0xFFC00CD0 /* DMA Channel 3 X Count Register */
272#define DMA3_X_MODIFY 0xFFC00CD4 /* DMA Channel 3 X Modify Register */
273#define DMA3_Y_COUNT 0xFFC00CD8 /* DMA Channel 3 Y Count Register */
274#define DMA3_Y_MODIFY 0xFFC00CDC /* DMA Channel 3 Y Modify Register */
275#define DMA3_CURR_DESC_PTR 0xFFC00CE0 /* DMA Channel 3 Current Descriptor Pointer Register */
276#define DMA3_CURR_ADDR 0xFFC00CE4 /* DMA Channel 3 Current Address Register */
277#define DMA3_IRQ_STATUS 0xFFC00CE8 /* DMA Channel 3 Interrupt/Status Register */
278#define DMA3_PERIPHERAL_MAP 0xFFC00CEC /* DMA Channel 3 Peripheral Map Register */
279#define DMA3_CURR_X_COUNT 0xFFC00CF0 /* DMA Channel 3 Current X Count Register */
280#define DMA3_CURR_Y_COUNT 0xFFC00CF8 /* DMA Channel 3 Current Y Count Register */
281
282#define DMA4_NEXT_DESC_PTR 0xFFC00D00 /* DMA Channel 4 Next Descriptor Pointer Register */
283#define DMA4_START_ADDR 0xFFC00D04 /* DMA Channel 4 Start Address Register */
284#define DMA4_CONFIG 0xFFC00D08 /* DMA Channel 4 Configuration Register */
285#define DMA4_X_COUNT 0xFFC00D10 /* DMA Channel 4 X Count Register */
286#define DMA4_X_MODIFY 0xFFC00D14 /* DMA Channel 4 X Modify Register */
287#define DMA4_Y_COUNT 0xFFC00D18 /* DMA Channel 4 Y Count Register */
288#define DMA4_Y_MODIFY 0xFFC00D1C /* DMA Channel 4 Y Modify Register */
289#define DMA4_CURR_DESC_PTR 0xFFC00D20 /* DMA Channel 4 Current Descriptor Pointer Register */
290#define DMA4_CURR_ADDR 0xFFC00D24 /* DMA Channel 4 Current Address Register */
291#define DMA4_IRQ_STATUS 0xFFC00D28 /* DMA Channel 4 Interrupt/Status Register */
292#define DMA4_PERIPHERAL_MAP 0xFFC00D2C /* DMA Channel 4 Peripheral Map Register */
293#define DMA4_CURR_X_COUNT 0xFFC00D30 /* DMA Channel 4 Current X Count Register */
294#define DMA4_CURR_Y_COUNT 0xFFC00D38 /* DMA Channel 4 Current Y Count Register */
295
296#define DMA5_NEXT_DESC_PTR 0xFFC00D40 /* DMA Channel 5 Next Descriptor Pointer Register */
297#define DMA5_START_ADDR 0xFFC00D44 /* DMA Channel 5 Start Address Register */
298#define DMA5_CONFIG 0xFFC00D48 /* DMA Channel 5 Configuration Register */
299#define DMA5_X_COUNT 0xFFC00D50 /* DMA Channel 5 X Count Register */
300#define DMA5_X_MODIFY 0xFFC00D54 /* DMA Channel 5 X Modify Register */
301#define DMA5_Y_COUNT 0xFFC00D58 /* DMA Channel 5 Y Count Register */
302#define DMA5_Y_MODIFY 0xFFC00D5C /* DMA Channel 5 Y Modify Register */
303#define DMA5_CURR_DESC_PTR 0xFFC00D60 /* DMA Channel 5 Current Descriptor Pointer Register */
304#define DMA5_CURR_ADDR 0xFFC00D64 /* DMA Channel 5 Current Address Register */
305#define DMA5_IRQ_STATUS 0xFFC00D68 /* DMA Channel 5 Interrupt/Status Register */
306#define DMA5_PERIPHERAL_MAP 0xFFC00D6C /* DMA Channel 5 Peripheral Map Register */
307#define DMA5_CURR_X_COUNT 0xFFC00D70 /* DMA Channel 5 Current X Count Register */
308#define DMA5_CURR_Y_COUNT 0xFFC00D78 /* DMA Channel 5 Current Y Count Register */
309
310#define DMA6_NEXT_DESC_PTR 0xFFC00D80 /* DMA Channel 6 Next Descriptor Pointer Register */
311#define DMA6_START_ADDR 0xFFC00D84 /* DMA Channel 6 Start Address Register */
312#define DMA6_CONFIG 0xFFC00D88 /* DMA Channel 6 Configuration Register */
313#define DMA6_X_COUNT 0xFFC00D90 /* DMA Channel 6 X Count Register */
314#define DMA6_X_MODIFY 0xFFC00D94 /* DMA Channel 6 X Modify Register */
315#define DMA6_Y_COUNT 0xFFC00D98 /* DMA Channel 6 Y Count Register */
316#define DMA6_Y_MODIFY 0xFFC00D9C /* DMA Channel 6 Y Modify Register */
317#define DMA6_CURR_DESC_PTR 0xFFC00DA0 /* DMA Channel 6 Current Descriptor Pointer Register */
318#define DMA6_CURR_ADDR 0xFFC00DA4 /* DMA Channel 6 Current Address Register */
319#define DMA6_IRQ_STATUS 0xFFC00DA8 /* DMA Channel 6 Interrupt/Status Register */
320#define DMA6_PERIPHERAL_MAP 0xFFC00DAC /* DMA Channel 6 Peripheral Map Register */
321#define DMA6_CURR_X_COUNT 0xFFC00DB0 /* DMA Channel 6 Current X Count Register */
322#define DMA6_CURR_Y_COUNT 0xFFC00DB8 /* DMA Channel 6 Current Y Count Register */
323
324#define DMA7_NEXT_DESC_PTR 0xFFC00DC0 /* DMA Channel 7 Next Descriptor Pointer Register */
325#define DMA7_START_ADDR 0xFFC00DC4 /* DMA Channel 7 Start Address Register */
326#define DMA7_CONFIG 0xFFC00DC8 /* DMA Channel 7 Configuration Register */
327#define DMA7_X_COUNT 0xFFC00DD0 /* DMA Channel 7 X Count Register */
328#define DMA7_X_MODIFY 0xFFC00DD4 /* DMA Channel 7 X Modify Register */
329#define DMA7_Y_COUNT 0xFFC00DD8 /* DMA Channel 7 Y Count Register */
330#define DMA7_Y_MODIFY 0xFFC00DDC /* DMA Channel 7 Y Modify Register */
331#define DMA7_CURR_DESC_PTR 0xFFC00DE0 /* DMA Channel 7 Current Descriptor Pointer Register */
332#define DMA7_CURR_ADDR 0xFFC00DE4 /* DMA Channel 7 Current Address Register */
333#define DMA7_IRQ_STATUS 0xFFC00DE8 /* DMA Channel 7 Interrupt/Status Register */
334#define DMA7_PERIPHERAL_MAP 0xFFC00DEC /* DMA Channel 7 Peripheral Map Register */
335#define DMA7_CURR_X_COUNT 0xFFC00DF0 /* DMA Channel 7 Current X Count Register */
336#define DMA7_CURR_Y_COUNT 0xFFC00DF8 /* DMA Channel 7 Current Y Count Register */
337
338#define DMA8_NEXT_DESC_PTR 0xFFC00E00 /* DMA Channel 8 Next Descriptor Pointer Register */
339#define DMA8_START_ADDR 0xFFC00E04 /* DMA Channel 8 Start Address Register */
340#define DMA8_CONFIG 0xFFC00E08 /* DMA Channel 8 Configuration Register */
341#define DMA8_X_COUNT 0xFFC00E10 /* DMA Channel 8 X Count Register */
342#define DMA8_X_MODIFY 0xFFC00E14 /* DMA Channel 8 X Modify Register */
343#define DMA8_Y_COUNT 0xFFC00E18 /* DMA Channel 8 Y Count Register */
344#define DMA8_Y_MODIFY 0xFFC00E1C /* DMA Channel 8 Y Modify Register */
345#define DMA8_CURR_DESC_PTR 0xFFC00E20 /* DMA Channel 8 Current Descriptor Pointer Register */
346#define DMA8_CURR_ADDR 0xFFC00E24 /* DMA Channel 8 Current Address Register */
347#define DMA8_IRQ_STATUS 0xFFC00E28 /* DMA Channel 8 Interrupt/Status Register */
348#define DMA8_PERIPHERAL_MAP 0xFFC00E2C /* DMA Channel 8 Peripheral Map Register */
349#define DMA8_CURR_X_COUNT 0xFFC00E30 /* DMA Channel 8 Current X Count Register */
350#define DMA8_CURR_Y_COUNT 0xFFC00E38 /* DMA Channel 8 Current Y Count Register */
351
352#define DMA9_NEXT_DESC_PTR 0xFFC00E40 /* DMA Channel 9 Next Descriptor Pointer Register */
353#define DMA9_START_ADDR 0xFFC00E44 /* DMA Channel 9 Start Address Register */
354#define DMA9_CONFIG 0xFFC00E48 /* DMA Channel 9 Configuration Register */
355#define DMA9_X_COUNT 0xFFC00E50 /* DMA Channel 9 X Count Register */
356#define DMA9_X_MODIFY 0xFFC00E54 /* DMA Channel 9 X Modify Register */
357#define DMA9_Y_COUNT 0xFFC00E58 /* DMA Channel 9 Y Count Register */
358#define DMA9_Y_MODIFY 0xFFC00E5C /* DMA Channel 9 Y Modify Register */
359#define DMA9_CURR_DESC_PTR 0xFFC00E60 /* DMA Channel 9 Current Descriptor Pointer Register */
360#define DMA9_CURR_ADDR 0xFFC00E64 /* DMA Channel 9 Current Address Register */
361#define DMA9_IRQ_STATUS 0xFFC00E68 /* DMA Channel 9 Interrupt/Status Register */
362#define DMA9_PERIPHERAL_MAP 0xFFC00E6C /* DMA Channel 9 Peripheral Map Register */
363#define DMA9_CURR_X_COUNT 0xFFC00E70 /* DMA Channel 9 Current X Count Register */
364#define DMA9_CURR_Y_COUNT 0xFFC00E78 /* DMA Channel 9 Current Y Count Register */
365
366#define DMA10_NEXT_DESC_PTR 0xFFC00E80 /* DMA Channel 10 Next Descriptor Pointer Register */
367#define DMA10_START_ADDR 0xFFC00E84 /* DMA Channel 10 Start Address Register */
368#define DMA10_CONFIG 0xFFC00E88 /* DMA Channel 10 Configuration Register */
369#define DMA10_X_COUNT 0xFFC00E90 /* DMA Channel 10 X Count Register */
370#define DMA10_X_MODIFY 0xFFC00E94 /* DMA Channel 10 X Modify Register */
371#define DMA10_Y_COUNT 0xFFC00E98 /* DMA Channel 10 Y Count Register */
372#define DMA10_Y_MODIFY 0xFFC00E9C /* DMA Channel 10 Y Modify Register */
373#define DMA10_CURR_DESC_PTR 0xFFC00EA0 /* DMA Channel 10 Current Descriptor Pointer Register */
374#define DMA10_CURR_ADDR 0xFFC00EA4 /* DMA Channel 10 Current Address Register */
375#define DMA10_IRQ_STATUS 0xFFC00EA8 /* DMA Channel 10 Interrupt/Status Register */
376#define DMA10_PERIPHERAL_MAP 0xFFC00EAC /* DMA Channel 10 Peripheral Map Register */
377#define DMA10_CURR_X_COUNT 0xFFC00EB0 /* DMA Channel 10 Current X Count Register */
378#define DMA10_CURR_Y_COUNT 0xFFC00EB8 /* DMA Channel 10 Current Y Count Register */
379
380#define DMA11_NEXT_DESC_PTR 0xFFC00EC0 /* DMA Channel 11 Next Descriptor Pointer Register */
381#define DMA11_START_ADDR 0xFFC00EC4 /* DMA Channel 11 Start Address Register */
382#define DMA11_CONFIG 0xFFC00EC8 /* DMA Channel 11 Configuration Register */
383#define DMA11_X_COUNT 0xFFC00ED0 /* DMA Channel 11 X Count Register */
384#define DMA11_X_MODIFY 0xFFC00ED4 /* DMA Channel 11 X Modify Register */
385#define DMA11_Y_COUNT 0xFFC00ED8 /* DMA Channel 11 Y Count Register */
386#define DMA11_Y_MODIFY 0xFFC00EDC /* DMA Channel 11 Y Modify Register */
387#define DMA11_CURR_DESC_PTR 0xFFC00EE0 /* DMA Channel 11 Current Descriptor Pointer Register */
388#define DMA11_CURR_ADDR 0xFFC00EE4 /* DMA Channel 11 Current Address Register */
389#define DMA11_IRQ_STATUS 0xFFC00EE8 /* DMA Channel 11 Interrupt/Status Register */
390#define DMA11_PERIPHERAL_MAP 0xFFC00EEC /* DMA Channel 11 Peripheral Map Register */
391#define DMA11_CURR_X_COUNT 0xFFC00EF0 /* DMA Channel 11 Current X Count Register */
392#define DMA11_CURR_Y_COUNT 0xFFC00EF8 /* DMA Channel 11 Current Y Count Register */
393
394#define MDMA_D0_NEXT_DESC_PTR 0xFFC00F00 /* MemDMA Stream 0 Destination Next Descriptor Pointer Register */
395#define MDMA_D0_START_ADDR 0xFFC00F04 /* MemDMA Stream 0 Destination Start Address Register */
396#define MDMA_D0_CONFIG 0xFFC00F08 /* MemDMA Stream 0 Destination Configuration Register */
397#define MDMA_D0_X_COUNT 0xFFC00F10 /* MemDMA Stream 0 Destination X Count Register */
398#define MDMA_D0_X_MODIFY 0xFFC00F14 /* MemDMA Stream 0 Destination X Modify Register */
399#define MDMA_D0_Y_COUNT 0xFFC00F18 /* MemDMA Stream 0 Destination Y Count Register */
400#define MDMA_D0_Y_MODIFY 0xFFC00F1C /* MemDMA Stream 0 Destination Y Modify Register */
401#define MDMA_D0_CURR_DESC_PTR 0xFFC00F20 /* MemDMA Stream 0 Destination Current Descriptor Pointer Register */
402#define MDMA_D0_CURR_ADDR 0xFFC00F24 /* MemDMA Stream 0 Destination Current Address Register */
403#define MDMA_D0_IRQ_STATUS 0xFFC00F28 /* MemDMA Stream 0 Destination Interrupt/Status Register */
404#define MDMA_D0_PERIPHERAL_MAP 0xFFC00F2C /* MemDMA Stream 0 Destination Peripheral Map Register */
405#define MDMA_D0_CURR_X_COUNT 0xFFC00F30 /* MemDMA Stream 0 Destination Current X Count Register */
406#define MDMA_D0_CURR_Y_COUNT 0xFFC00F38 /* MemDMA Stream 0 Destination Current Y Count Register */
407
408#define MDMA_S0_NEXT_DESC_PTR 0xFFC00F40 /* MemDMA Stream 0 Source Next Descriptor Pointer Register */
409#define MDMA_S0_START_ADDR 0xFFC00F44 /* MemDMA Stream 0 Source Start Address Register */
410#define MDMA_S0_CONFIG 0xFFC00F48 /* MemDMA Stream 0 Source Configuration Register */
411#define MDMA_S0_X_COUNT 0xFFC00F50 /* MemDMA Stream 0 Source X Count Register */
412#define MDMA_S0_X_MODIFY 0xFFC00F54 /* MemDMA Stream 0 Source X Modify Register */
413#define MDMA_S0_Y_COUNT 0xFFC00F58 /* MemDMA Stream 0 Source Y Count Register */
414#define MDMA_S0_Y_MODIFY 0xFFC00F5C /* MemDMA Stream 0 Source Y Modify Register */
415#define MDMA_S0_CURR_DESC_PTR 0xFFC00F60 /* MemDMA Stream 0 Source Current Descriptor Pointer Register */
416#define MDMA_S0_CURR_ADDR 0xFFC00F64 /* MemDMA Stream 0 Source Current Address Register */
417#define MDMA_S0_IRQ_STATUS 0xFFC00F68 /* MemDMA Stream 0 Source Interrupt/Status Register */
418#define MDMA_S0_PERIPHERAL_MAP 0xFFC00F6C /* MemDMA Stream 0 Source Peripheral Map Register */
419#define MDMA_S0_CURR_X_COUNT 0xFFC00F70 /* MemDMA Stream 0 Source Current X Count Register */
420#define MDMA_S0_CURR_Y_COUNT 0xFFC00F78 /* MemDMA Stream 0 Source Current Y Count Register */
421
422#define MDMA_D1_NEXT_DESC_PTR 0xFFC00F80 /* MemDMA Stream 1 Destination Next Descriptor Pointer Register */
423#define MDMA_D1_START_ADDR 0xFFC00F84 /* MemDMA Stream 1 Destination Start Address Register */
424#define MDMA_D1_CONFIG 0xFFC00F88 /* MemDMA Stream 1 Destination Configuration Register */
425#define MDMA_D1_X_COUNT 0xFFC00F90 /* MemDMA Stream 1 Destination X Count Register */
426#define MDMA_D1_X_MODIFY 0xFFC00F94 /* MemDMA Stream 1 Destination X Modify Register */
427#define MDMA_D1_Y_COUNT 0xFFC00F98 /* MemDMA Stream 1 Destination Y Count Register */
428#define MDMA_D1_Y_MODIFY 0xFFC00F9C /* MemDMA Stream 1 Destination Y Modify Register */
429#define MDMA_D1_CURR_DESC_PTR 0xFFC00FA0 /* MemDMA Stream 1 Destination Current Descriptor Pointer Register */
430#define MDMA_D1_CURR_ADDR 0xFFC00FA4 /* MemDMA Stream 1 Destination Current Address Register */
431#define MDMA_D1_IRQ_STATUS 0xFFC00FA8 /* MemDMA Stream 1 Destination Interrupt/Status Register */
432#define MDMA_D1_PERIPHERAL_MAP 0xFFC00FAC /* MemDMA Stream 1 Destination Peripheral Map Register */
433#define MDMA_D1_CURR_X_COUNT 0xFFC00FB0 /* MemDMA Stream 1 Destination Current X Count Register */
434#define MDMA_D1_CURR_Y_COUNT 0xFFC00FB8 /* MemDMA Stream 1 Destination Current Y Count Register */
435
436#define MDMA_S1_NEXT_DESC_PTR 0xFFC00FC0 /* MemDMA Stream 1 Source Next Descriptor Pointer Register */
437#define MDMA_S1_START_ADDR 0xFFC00FC4 /* MemDMA Stream 1 Source Start Address Register */
438#define MDMA_S1_CONFIG 0xFFC00FC8 /* MemDMA Stream 1 Source Configuration Register */
439#define MDMA_S1_X_COUNT 0xFFC00FD0 /* MemDMA Stream 1 Source X Count Register */
440#define MDMA_S1_X_MODIFY 0xFFC00FD4 /* MemDMA Stream 1 Source X Modify Register */
441#define MDMA_S1_Y_COUNT 0xFFC00FD8 /* MemDMA Stream 1 Source Y Count Register */
442#define MDMA_S1_Y_MODIFY 0xFFC00FDC /* MemDMA Stream 1 Source Y Modify Register */
443#define MDMA_S1_CURR_DESC_PTR 0xFFC00FE0 /* MemDMA Stream 1 Source Current Descriptor Pointer Register */
444#define MDMA_S1_CURR_ADDR 0xFFC00FE4 /* MemDMA Stream 1 Source Current Address Register */
445#define MDMA_S1_IRQ_STATUS 0xFFC00FE8 /* MemDMA Stream 1 Source Interrupt/Status Register */
446#define MDMA_S1_PERIPHERAL_MAP 0xFFC00FEC /* MemDMA Stream 1 Source Peripheral Map Register */
447#define MDMA_S1_CURR_X_COUNT 0xFFC00FF0 /* MemDMA Stream 1 Source Current X Count Register */
448#define MDMA_S1_CURR_Y_COUNT 0xFFC00FF8 /* MemDMA Stream 1 Source Current Y Count Register */
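
The MemDMA source/destination register pairs above are always programmed together. A hedged sketch of a one-shot 16-bit memory-to-memory copy; the DMAx_CONFIG bit macros (DMAEN, WNR, WDSIZE_16, DI_EN) live elsewhere in this header family, and src/dst are hypothetical buffers:

	/* Illustrative only: copy 256 16-bit words from src to dst via MemDMA stream 0. */
	bfin_write32(MDMA_S0_START_ADDR, (unsigned long)src);
	bfin_write16(MDMA_S0_X_COUNT, 256);
	bfin_write16(MDMA_S0_X_MODIFY, 2);			/* advance 2 bytes per element */
	bfin_write16(MDMA_S0_CONFIG, DMAEN | WDSIZE_16);
	bfin_write32(MDMA_D0_START_ADDR, (unsigned long)dst);
	bfin_write16(MDMA_D0_X_COUNT, 256);
	bfin_write16(MDMA_D0_X_MODIFY, 2);
	bfin_write16(MDMA_D0_CONFIG, DMAEN | WNR | WDSIZE_16 | DI_EN);	/* WNR = memory write, interrupt when done */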
449
450
451/* Parallel Peripheral Interface (0xFFC01000 - 0xFFC010FF) */
452#define PPI_CONTROL 0xFFC01000 /* PPI Control Register */
453#define PPI_STATUS 0xFFC01004 /* PPI Status Register */
454#define PPI_COUNT 0xFFC01008 /* PPI Transfer Count Register */
455#define PPI_DELAY 0xFFC0100C /* PPI Delay Count Register */
456#define PPI_FRAME 0xFFC01010 /* PPI Frame Length Register */
457
458
459/* Two-Wire Interface (0xFFC01400 - 0xFFC014FF) */
460#define TWI0_REGBASE 0xFFC01400
461#define TWI0_CLKDIV 0xFFC01400 /* Serial Clock Divider Register */
462#define TWI0_CONTROL 0xFFC01404 /* TWI Control Register */
463#define TWI0_SLAVE_CTL 0xFFC01408 /* Slave Mode Control Register */
464#define TWI0_SLAVE_STAT 0xFFC0140C /* Slave Mode Status Register */
465#define TWI0_SLAVE_ADDR 0xFFC01410 /* Slave Mode Address Register */
466#define TWI0_MASTER_CTL 0xFFC01414 /* Master Mode Control Register */
467#define TWI0_MASTER_STAT 0xFFC01418 /* Master Mode Status Register */
468#define TWI0_MASTER_ADDR 0xFFC0141C /* Master Mode Address Register */
469#define TWI0_INT_STAT 0xFFC01420 /* TWI Interrupt Status Register */
470#define TWI0_INT_MASK 0xFFC01424 /* TWI Master Interrupt Mask Register */
471#define TWI0_FIFO_CTL 0xFFC01428 /* FIFO Control Register */
472#define TWI0_FIFO_STAT 0xFFC0142C /* FIFO Status Register */
473#define TWI0_XMT_DATA8 0xFFC01480 /* FIFO Transmit Data Single Byte Register */
474#define TWI0_XMT_DATA16 0xFFC01484 /* FIFO Transmit Data Double Byte Register */
475#define TWI0_RCV_DATA8 0xFFC01488 /* FIFO Receive Data Single Byte Register */
476#define TWI0_RCV_DATA16 0xFFC0148C /* FIFO Receive Data Double Byte Register */
477
478
479/* General Purpose I/O Port G (0xFFC01500 - 0xFFC015FF) */
480#define PORTGIO 0xFFC01500 /* Port G I/O Pin State Specify Register */
481#define PORTGIO_CLEAR 0xFFC01504 /* Port G I/O Peripheral Interrupt Clear Register */
482#define PORTGIO_SET 0xFFC01508 /* Port G I/O Peripheral Interrupt Set Register */
483#define PORTGIO_TOGGLE 0xFFC0150C /* Port G I/O Pin State Toggle Register */
484#define PORTGIO_MASKA 0xFFC01510 /* Port G I/O Mask State Specify Interrupt A Register */
485#define PORTGIO_MASKA_CLEAR 0xFFC01514 /* Port G I/O Mask Disable Interrupt A Register */
486#define PORTGIO_MASKA_SET 0xFFC01518 /* Port G I/O Mask Enable Interrupt A Register */
487#define PORTGIO_MASKA_TOGGLE 0xFFC0151C /* Port G I/O Mask Toggle Enable Interrupt A Register */
488#define PORTGIO_MASKB 0xFFC01520 /* Port G I/O Mask State Specify Interrupt B Register */
489#define PORTGIO_MASKB_CLEAR 0xFFC01524 /* Port G I/O Mask Disable Interrupt B Register */
490#define PORTGIO_MASKB_SET 0xFFC01528 /* Port G I/O Mask Enable Interrupt B Register */
491#define PORTGIO_MASKB_TOGGLE 0xFFC0152C /* Port G I/O Mask Toggle Enable Interrupt B Register */
492#define PORTGIO_DIR 0xFFC01530 /* Port G I/O Direction Register */
493#define PORTGIO_POLAR 0xFFC01534 /* Port G I/O Source Polarity Register */
494#define PORTGIO_EDGE 0xFFC01538 /* Port G I/O Source Sensitivity Register */
495#define PORTGIO_BOTH 0xFFC0153C /* Port G I/O Set on BOTH Edges Register */
496#define PORTGIO_INEN 0xFFC01540 /* Port G I/O Input Enable Register */
497
498
499/* General Purpose I/O Port H (0xFFC01700 - 0xFFC017FF) */
500#define PORTHIO 0xFFC01700 /* Port H I/O Pin State Specify Register */
501#define PORTHIO_CLEAR 0xFFC01704 /* Port H I/O Peripheral Interrupt Clear Register */
502#define PORTHIO_SET 0xFFC01708 /* Port H I/O Peripheral Interrupt Set Register */
503#define PORTHIO_TOGGLE 0xFFC0170C /* Port H I/O Pin State Toggle Register */
504#define PORTHIO_MASKA 0xFFC01710 /* Port H I/O Mask State Specify Interrupt A Register */
505#define PORTHIO_MASKA_CLEAR 0xFFC01714 /* Port H I/O Mask Disable Interrupt A Register */
506#define PORTHIO_MASKA_SET 0xFFC01718 /* Port H I/O Mask Enable Interrupt A Register */
507#define PORTHIO_MASKA_TOGGLE 0xFFC0171C /* Port H I/O Mask Toggle Enable Interrupt A Register */
508#define PORTHIO_MASKB 0xFFC01720 /* Port H I/O Mask State Specify Interrupt B Register */
509#define PORTHIO_MASKB_CLEAR 0xFFC01724 /* Port H I/O Mask Disable Interrupt B Register */
510#define PORTHIO_MASKB_SET 0xFFC01728 /* Port H I/O Mask Enable Interrupt B Register */
511#define PORTHIO_MASKB_TOGGLE 0xFFC0172C /* Port H I/O Mask Toggle Enable Interrupt B Register */
512#define PORTHIO_DIR 0xFFC01730 /* Port H I/O Direction Register */
513#define PORTHIO_POLAR 0xFFC01734 /* Port H I/O Source Polarity Register */
514#define PORTHIO_EDGE 0xFFC01738 /* Port H I/O Source Sensitivity Register */
515#define PORTHIO_BOTH 0xFFC0173C /* Port H I/O Set on BOTH Edges Register */
516#define PORTHIO_INEN 0xFFC01740 /* Port H I/O Input Enable Register */
517
518
519/* UART1 Controller (0xFFC02000 - 0xFFC020FF) */
520#define UART1_THR 0xFFC02000 /* Transmit Holding register */
521#define UART1_RBR 0xFFC02000 /* Receive Buffer register */
522#define UART1_DLL 0xFFC02000 /* Divisor Latch (Low-Byte) */
523#define UART1_IER 0xFFC02004 /* Interrupt Enable Register */
524#define UART1_DLH 0xFFC02004 /* Divisor Latch (High-Byte) */
525#define UART1_IIR 0xFFC02008 /* Interrupt Identification Register */
526#define UART1_LCR 0xFFC0200C /* Line Control Register */
527#define UART1_MCR 0xFFC02010 /* Modem Control Register */
528#define UART1_LSR 0xFFC02014 /* Line Status Register */
529#define UART1_MSR 0xFFC02018 /* Modem Status Register */
530#define UART1_SCR 0xFFC0201C /* SCR Scratch Register */
531#define UART1_GCTL 0xFFC02024 /* Global Control Register */
532
533
534/* CAN register sets from defBF534.h omitted (there is no CAN on the ADSP-BF52x) */
535
536/* Pin Control Registers (0xFFC03200 - 0xFFC032FF) */
537#define PORTF_FER 0xFFC03200 /* Port F Function Enable Register (Alternate/Flag*) */
538#define PORTG_FER 0xFFC03204 /* Port G Function Enable Register (Alternate/Flag*) */
539#define PORTH_FER 0xFFC03208 /* Port H Function Enable Register (Alternate/Flag*) */
540#define BFIN_PORT_MUX 0xFFC0320C /* Port Multiplexer Control Register */
541
542
543/* Handshake MDMA Registers (0xFFC03300 - 0xFFC033FF) */
544#define HMDMA0_CONTROL 0xFFC03300 /* Handshake MDMA0 Control Register */
545#define HMDMA0_ECINIT 0xFFC03304 /* HMDMA0 Initial Edge Count Register */
546#define HMDMA0_BCINIT 0xFFC03308 /* HMDMA0 Initial Block Count Register */
547#define HMDMA0_ECURGENT 0xFFC0330C /* HMDMA0 Urgent Edge Count Threshold Register */
548#define HMDMA0_ECOVERFLOW 0xFFC03310 /* HMDMA0 Edge Count Overflow Interrupt Register */
549#define HMDMA0_ECOUNT 0xFFC03314 /* HMDMA0 Current Edge Count Register */
550#define HMDMA0_BCOUNT 0xFFC03318 /* HMDMA0 Current Block Count Register */
551
552#define HMDMA1_CONTROL 0xFFC03340 /* Handshake MDMA1 Control Register */
553#define HMDMA1_ECINIT 0xFFC03344 /* HMDMA1 Initial Edge Count Register */
554#define HMDMA1_BCINIT 0xFFC03348 /* HMDMA1 Initial Block Count Register */
555#define HMDMA1_ECURGENT 0xFFC0334C /* HMDMA1 Urgent Edge Count Threshold Register */
556#define HMDMA1_ECOVERFLOW 0xFFC03350 /* HMDMA1 Edge Count Overflow Interrupt Register */
557#define HMDMA1_ECOUNT 0xFFC03354 /* HMDMA1 Current Edge Count Register */
558#define HMDMA1_BCOUNT 0xFFC03358 /* HMDMA1 Current Block Count Register */
559
560/* GPIO PIN mux (0xFFC03210 - 0xFFC03288) */
561#define PORTF_MUX 0xFFC03210 /* Port F mux control */
562#define PORTG_MUX 0xFFC03214 /* Port G mux control */
563#define PORTH_MUX 0xFFC03218 /* Port H mux control */
564#define PORTF_DRIVE 0xFFC03220 /* Port F drive strength control */
565#define PORTG_DRIVE 0xFFC03224 /* Port G drive strength control */
566#define PORTH_DRIVE 0xFFC03228 /* Port H drive strength control */
567#define PORTF_SLEW 0xFFC03230 /* Port F slew control */
568#define PORTG_SLEW 0xFFC03234 /* Port G slew control */
569#define PORTH_SLEW 0xFFC03238 /* Port H slew control */
570#define PORTF_HYSTERISIS 0xFFC03240 /* Port F Schmitt trigger control */
571#define PORTG_HYSTERISIS 0xFFC03244 /* Port G Schmitt trigger control */
572#define PORTH_HYSTERISIS 0xFFC03248 /* Port H Schmitt trigger control */
573#define MISCPORT_DRIVE 0xFFC03280 /* Misc Port drive strength control */
574#define MISCPORT_SLEW 0xFFC03284 /* Misc Port slew control */
575#define MISCPORT_HYSTERISIS 0xFFC03288 /* Misc Port Schmitt trigger control */
576
577
578/***********************************************************************************
579** System MMR Register Bits And Macros
580**
581** Disclaimer: All macros are intended to make C and Assembly code more readable.
582** Use these macros carefully, as any that do left shifts for field
583** depositing will result in the lower order bits being destroyed. Any
584** macro that shifts left to properly position the bit-field should be
585** used as part of an OR to initialize a register and NOT as a dynamic
586** modifier UNLESS the lower order bits are saved and ORed back in when
587** the macro is used.
588*************************************************************************************/
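
Concretely: the field-depositing macros below (the Px_IVG() family, for example) rely on << binding tighter than |, so they compose safely in a single OR chain, but using one to patch a live register clobbers the other fields unless they are preserved first. A minimal sketch against the SIC_IAR0 layout defined below, assuming the generic bfin_read32()/bfin_write32() helpers:

	/* Good: build the whole register in one OR expression. */
	bfin_write32(SIC_IAR0, P0_IVG(7)  | P1_IVG(8)  | P2_IVG(9)  | P3_IVG(10) |
			       P4_IVG(11) | P5_IVG(12) | P6_IVG(13) | P7_IVG(14));

	/* Dynamic modify: save and mask the other bits, then OR the field back in. */
	u32 iar = bfin_read32(SIC_IAR0);
	iar = (iar & ~0x000000F0) | P1_IVG(9);	/* clear peripheral #1's nibble first */
	bfin_write32(SIC_IAR0, iar);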
589
590/* CHIPID Masks */
591#define CHIPID_VERSION 0xF0000000
592#define CHIPID_FAMILY 0x0FFFF000
593#define CHIPID_MANUFACTURE 0x00000FFE
594
595/* SWRST Masks */
596#define SYSTEM_RESET 0x0007 /* Initiates A System Software Reset */
597#define DOUBLE_FAULT 0x0008 /* Core Double Fault Causes Reset */
598#define RESET_DOUBLE 0x2000 /* SW Reset Generated By Core Double-Fault */
599#define RESET_WDOG 0x4000 /* SW Reset Generated By Watchdog Timer */
600#define RESET_SOFTWARE 0x8000 /* SW Reset Occurred Since Last Read Of SWRST */
601
602/* SYSCR Masks */
603#define BMODE 0x0007 /* Boot Mode - Latched During HW Reset From Mode Pins */
604#define NOBOOT 0x0010 /* Execute From L1 or ASYNC Bank 0 When BMODE = 0 */
605
606
607/* ************* SYSTEM INTERRUPT CONTROLLER MASKS *************************************/
608/* Peripheral Masks For SIC_ISR, SIC_IWR, SIC_IMASK */
609
610#if 0
611#define IRQ_PLL_WAKEUP 0x00000001 /* PLL Wakeup Interrupt */
612
613#define IRQ_ERROR1 0x00000002 /* Error Interrupt (DMA, DMARx Block, DMARx Overflow) */
614#define IRQ_ERROR2 0x00000004 /* Error Interrupt (CAN, Ethernet, SPORTx, PPI, SPI, UARTx) */
615#define IRQ_RTC 0x00000008 /* Real Time Clock Interrupt */
616#define IRQ_DMA0 0x00000010 /* DMA Channel 0 (PPI) Interrupt */
617#define IRQ_DMA3 0x00000020 /* DMA Channel 3 (SPORT0 RX) Interrupt */
618#define IRQ_DMA4 0x00000040 /* DMA Channel 4 (SPORT0 TX) Interrupt */
619#define IRQ_DMA5 0x00000080 /* DMA Channel 5 (SPORT1 RX) Interrupt */
620
621#define IRQ_DMA6 0x00000100 /* DMA Channel 6 (SPORT1 TX) Interrupt */
622#define IRQ_TWI 0x00000200 /* TWI Interrupt */
623#define IRQ_DMA7 0x00000400 /* DMA Channel 7 (SPI) Interrupt */
624#define IRQ_DMA8 0x00000800 /* DMA Channel 8 (UART0 RX) Interrupt */
625#define IRQ_DMA9 0x00001000 /* DMA Channel 9 (UART0 TX) Interrupt */
626#define IRQ_DMA10 0x00002000 /* DMA Channel 10 (UART1 RX) Interrupt */
627#define IRQ_DMA11 0x00004000 /* DMA Channel 11 (UART1 TX) Interrupt */
628#define IRQ_CAN_RX 0x00008000 /* CAN Receive Interrupt */
629
630#define IRQ_CAN_TX 0x00010000 /* CAN Transmit Interrupt */
631#define IRQ_DMA1 0x00020000 /* DMA Channel 1 (Ethernet RX) Interrupt */
632#define IRQ_PFA_PORTH 0x00020000 /* PF Port H (PF47:32) Interrupt A */
633#define IRQ_DMA2 0x00040000 /* DMA Channel 2 (Ethernet TX) Interrupt */
634#define IRQ_PFB_PORTH 0x00040000 /* PF Port H (PF47:32) Interrupt B */
635#define IRQ_TIMER0 0x00080000 /* Timer 0 Interrupt */
636#define IRQ_TIMER1 0x00100000 /* Timer 1 Interrupt */
637#define IRQ_TIMER2 0x00200000 /* Timer 2 Interrupt */
638#define IRQ_TIMER3 0x00400000 /* Timer 3 Interrupt */
639#define IRQ_TIMER4 0x00800000 /* Timer 4 Interrupt */
640
641#define IRQ_TIMER5 0x01000000 /* Timer 5 Interrupt */
642#define IRQ_TIMER6 0x02000000 /* Timer 6 Interrupt */
643#define IRQ_TIMER7 0x04000000 /* Timer 7 Interrupt */
644#define IRQ_PFA_PORTFG 0x08000000 /* PF Ports F&G (PF31:0) Interrupt A */
645#define IRQ_PFB_PORTF 0x80000000 /* PF Port F (PF15:0) Interrupt B */
646#define IRQ_DMA12 0x20000000 /* DMA Channels 12 (MDMA1 Source) RX Interrupt */
647#define IRQ_DMA13 0x20000000 /* DMA Channels 13 (MDMA1 Destination) TX Interrupt */
648#define IRQ_DMA14 0x40000000 /* DMA Channels 14 (MDMA0 Source) RX Interrupt */
649#define IRQ_DMA15 0x40000000 /* DMA Channels 15 (MDMA0 Destination) TX Interrupt */
650#define IRQ_WDOG 0x80000000 /* Software Watchdog Timer Interrupt */
651#define IRQ_PFB_PORTG 0x10000000 /* PF Port G (PF31:16) Interrupt B */
652#endif
653
654/* SIC_IAR0 Macros */
655#define P0_IVG(x) (((x)&0xF)-7) /* Peripheral #0 assigned IVG #x */
656#define P1_IVG(x) (((x)&0xF)-7) << 0x4 /* Peripheral #1 assigned IVG #x */
657#define P2_IVG(x) (((x)&0xF)-7) << 0x8 /* Peripheral #2 assigned IVG #x */
658#define P3_IVG(x) (((x)&0xF)-7) << 0xC /* Peripheral #3 assigned IVG #x */
659#define P4_IVG(x) (((x)&0xF)-7) << 0x10 /* Peripheral #4 assigned IVG #x */
660#define P5_IVG(x) (((x)&0xF)-7) << 0x14 /* Peripheral #5 assigned IVG #x */
661#define P6_IVG(x) (((x)&0xF)-7) << 0x18 /* Peripheral #6 assigned IVG #x */
662#define P7_IVG(x) (((x)&0xF)-7) << 0x1C /* Peripheral #7 assigned IVG #x */
663
664/* SIC_IAR1 Macros */
665#define P8_IVG(x) (((x)&0xF)-7) /* Peripheral #8 assigned IVG #x */
666#define P9_IVG(x) (((x)&0xF)-7) << 0x4 /* Peripheral #9 assigned IVG #x */
667#define P10_IVG(x) (((x)&0xF)-7) << 0x8 /* Peripheral #10 assigned IVG #x */
668#define P11_IVG(x) (((x)&0xF)-7) << 0xC /* Peripheral #11 assigned IVG #x */
669#define P12_IVG(x) (((x)&0xF)-7) << 0x10 /* Peripheral #12 assigned IVG #x */
670#define P13_IVG(x) (((x)&0xF)-7) << 0x14 /* Peripheral #13 assigned IVG #x */
671#define P14_IVG(x) (((x)&0xF)-7) << 0x18 /* Peripheral #14 assigned IVG #x */
672#define P15_IVG(x) (((x)&0xF)-7) << 0x1C /* Peripheral #15 assigned IVG #x */
673
674/* SIC_IAR2 Macros */
675#define P16_IVG(x) (((x)&0xF)-7) /* Peripheral #16 assigned IVG #x */
676#define P17_IVG(x) (((x)&0xF)-7) << 0x4 /* Peripheral #17 assigned IVG #x */
677#define P18_IVG(x) (((x)&0xF)-7) << 0x8 /* Peripheral #18 assigned IVG #x */
678#define P19_IVG(x) (((x)&0xF)-7) << 0xC /* Peripheral #19 assigned IVG #x */
679#define P20_IVG(x) (((x)&0xF)-7) << 0x10 /* Peripheral #20 assigned IVG #x */
680#define P21_IVG(x) (((x)&0xF)-7) << 0x14 /* Peripheral #21 assigned IVG #x */
681#define P22_IVG(x) (((x)&0xF)-7) << 0x18 /* Peripheral #22 assigned IVG #x */
682#define P23_IVG(x) (((x)&0xF)-7) << 0x1C /* Peripheral #23 assigned IVG #x */
683
684/* SIC_IAR3 Macros */
685#define P24_IVG(x) (((x)&0xF)-7) /* Peripheral #24 assigned IVG #x */
686#define P25_IVG(x) (((x)&0xF)-7) << 0x4 /* Peripheral #25 assigned IVG #x */
687#define P26_IVG(x) (((x)&0xF)-7) << 0x8 /* Peripheral #26 assigned IVG #x */
688#define P27_IVG(x) (((x)&0xF)-7) << 0xC /* Peripheral #27 assigned IVG #x */
689#define P28_IVG(x) (((x)&0xF)-7) << 0x10 /* Peripheral #28 assigned IVG #x */
690#define P29_IVG(x) (((x)&0xF)-7) << 0x14 /* Peripheral #29 assigned IVG #x */
691#define P30_IVG(x) (((x)&0xF)-7) << 0x18 /* Peripheral #30 assigned IVG #x */
692#define P31_IVG(x) (((x)&0xF)-7) << 0x1C /* Peripheral #31 assigned IVG #x */
693
694
695/* SIC_IMASK Masks */
696#define SIC_UNMASK_ALL 0x00000000 /* Unmask all peripheral interrupts */
697#define SIC_MASK_ALL 0xFFFFFFFF /* Mask all peripheral interrupts */
698#define SIC_MASK(x) (1 << ((x)&0x1F)) /* Mask Peripheral #x interrupt */
699#define SIC_UNMASK(x) (0xFFFFFFFF ^ (1 << ((x)&0x1F))) /* Unmask Peripheral #x interrupt */
700
701/* SIC_IWR Masks */
702#define IWR_DISABLE_ALL 0x00000000 /* Wakeup Disable all peripherals */
703#define IWR_ENABLE_ALL 0xFFFFFFFF /* Wakeup Enable all peripherals */
704#define IWR_ENABLE(x) (1 << ((x)&0x1F)) /* Wakeup Enable Peripheral #x */
705#define IWR_DISABLE(x) (0xFFFFFFFF ^ (1 << ((x)&0x1F))) /* Wakeup Disable Peripheral #x */
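
For example, masking peripheral #18's interrupt while enabling it as a wakeup source, in the sense the comments above give these macros (again a sketch using the generic MMIO helpers):

	bfin_write32(SIC_IMASK0, bfin_read32(SIC_IMASK0) | SIC_MASK(18));
	bfin_write32(SIC_IWR0, bfin_read32(SIC_IWR0) | IWR_ENABLE(18));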
706
707
708/* ************** UART CONTROLLER MASKS *************************/
709/* UARTx_LCR Masks */
710#define WLS(x) (((x)-5) & 0x03) /* Word Length Select */
711#define STB 0x04 /* Stop Bits */
712#define PEN 0x08 /* Parity Enable */
713#define EPS 0x10 /* Even Parity Select */
714#define STP 0x20 /* Stick Parity */
715#define SB 0x40 /* Set Break */
716#define DLAB 0x80 /* Divisor Latch Access */
717
718/* UARTx_MCR Mask */
719#define LOOP_ENA 0x10 /* Loopback Mode Enable */
720#define LOOP_ENA_P 0x04
721
722/* UARTx_LSR Masks */
723#define DR 0x01 /* Data Ready */
724#define OE 0x02 /* Overrun Error */
725#define PE 0x04 /* Parity Error */
726#define FE 0x08 /* Framing Error */
727#define BI 0x10 /* Break Interrupt */
728#define THRE 0x20 /* THR Empty */
729#define TEMT 0x40 /* TSR and UART_THR Empty */
730
731/* UARTx_IER Masks */
732#define ERBFI 0x01 /* Enable Receive Buffer Full Interrupt */
733#define ETBEI 0x02 /* Enable Transmit Buffer Empty Interrupt */
734#define ELSI 0x04 /* Enable RX Status Interrupt */
735
736/* UARTx_IIR Masks */
737#define NINT 0x01 /* Pending Interrupt */
738#define IIR_TX_READY 0x02 /* UART_THR empty */
739#define IIR_RX_READY 0x04 /* Receive data ready */
740#define IIR_LINE_CHANGE 0x06 /* Receive line status */
741#define IIR_STATUS 0x06 /* Highest Priority Pending Interrupt */
742
743/* UARTx_GCTL Masks */
744#define UCEN 0x01 /* Enable UARTx Clocks */
745#define IREN 0x02 /* Enable IrDA Mode */
746#define TPOLC 0x04 /* IrDA TX Polarity Change */
747#define RPOLC 0x08 /* IrDA RX Polarity Change */
748#define FPE 0x10 /* Force Parity Error On Transmit */
749#define FFE 0x20 /* Force Framing Error On Transmit */
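
Tying the UART masks together: the divisor latch is only visible while DLAB is set in the LCR, and the line format is composed from WLS() plus the stop/parity bits. A minimal 8N1 sketch for UART1 (the divisor value is hypothetical and depends on SCLK and the target baud rate):

	u16 divisor = 27;				/* hypothetical value */
	bfin_write16(UART1_LCR, DLAB);			/* expose UART1_DLL/UART1_DLH */
	bfin_write16(UART1_DLL, divisor & 0xFF);
	bfin_write16(UART1_DLH, divisor >> 8);
	bfin_write16(UART1_LCR, WLS(8));		/* close the latch; 8 data bits, no parity, 1 stop */
	bfin_write16(UART1_GCTL, UCEN);			/* enable the UART clocks */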
750
751
752/* **************** GENERAL PURPOSE TIMER MASKS **********************/
753/* TIMER_ENABLE Masks */
754#define TIMEN0 0x0001 /* Enable Timer 0 */
755#define TIMEN1 0x0002 /* Enable Timer 1 */
756#define TIMEN2 0x0004 /* Enable Timer 2 */
757#define TIMEN3 0x0008 /* Enable Timer 3 */
758#define TIMEN4 0x0010 /* Enable Timer 4 */
759#define TIMEN5 0x0020 /* Enable Timer 5 */
760#define TIMEN6 0x0040 /* Enable Timer 6 */
761#define TIMEN7 0x0080 /* Enable Timer 7 */
762
763/* TIMER_DISABLE Masks */
764#define TIMDIS0 TIMEN0 /* Disable Timer 0 */
765#define TIMDIS1 TIMEN1 /* Disable Timer 1 */
766#define TIMDIS2 TIMEN2 /* Disable Timer 2 */
767#define TIMDIS3 TIMEN3 /* Disable Timer 3 */
768#define TIMDIS4 TIMEN4 /* Disable Timer 4 */
769#define TIMDIS5 TIMEN5 /* Disable Timer 5 */
770#define TIMDIS6 TIMEN6 /* Disable Timer 6 */
771#define TIMDIS7 TIMEN7 /* Disable Timer 7 */
772
773/* TIMER_STATUS Masks */
774#define TIMIL0 0x00000001 /* Timer 0 Interrupt */
775#define TIMIL1 0x00000002 /* Timer 1 Interrupt */
776#define TIMIL2 0x00000004 /* Timer 2 Interrupt */
777#define TIMIL3 0x00000008 /* Timer 3 Interrupt */
778#define TOVF_ERR0 0x00000010 /* Timer 0 Counter Overflow */
779#define TOVF_ERR1 0x00000020 /* Timer 1 Counter Overflow */
780#define TOVF_ERR2 0x00000040 /* Timer 2 Counter Overflow */
781#define TOVF_ERR3 0x00000080 /* Timer 3 Counter Overflow */
782#define TRUN0 0x00001000 /* Timer 0 Slave Enable Status */
783#define TRUN1 0x00002000 /* Timer 1 Slave Enable Status */
784#define TRUN2 0x00004000 /* Timer 2 Slave Enable Status */
785#define TRUN3 0x00008000 /* Timer 3 Slave Enable Status */
786#define TIMIL4 0x00010000 /* Timer 4 Interrupt */
787#define TIMIL5 0x00020000 /* Timer 5 Interrupt */
788#define TIMIL6 0x00040000 /* Timer 6 Interrupt */
789#define TIMIL7 0x00080000 /* Timer 7 Interrupt */
790#define TOVF_ERR4 0x00100000 /* Timer 4 Counter Overflow */
791#define TOVF_ERR5 0x00200000 /* Timer 5 Counter Overflow */
792#define TOVF_ERR6 0x00400000 /* Timer 6 Counter Overflow */
793#define TOVF_ERR7 0x00800000 /* Timer 7 Counter Overflow */
794#define TRUN4 0x10000000 /* Timer 4 Slave Enable Status */
795#define TRUN5 0x20000000 /* Timer 5 Slave Enable Status */
796#define TRUN6 0x40000000 /* Timer 6 Slave Enable Status */
797#define TRUN7 0x80000000 /* Timer 7 Slave Enable Status */
798
799/* Alternate Deprecated Macros Provided For Backwards Code Compatibility */
800#define TOVL_ERR0 TOVF_ERR0
801#define TOVL_ERR1 TOVF_ERR1
802#define TOVL_ERR2 TOVF_ERR2
803#define TOVL_ERR3 TOVF_ERR3
804#define TOVL_ERR4 TOVF_ERR4
805#define TOVL_ERR5 TOVF_ERR5
806#define TOVL_ERR6 TOVF_ERR6
807#define TOVL_ERR7 TOVF_ERR7
808
809/* TIMERx_CONFIG Masks */
810#define PWM_OUT 0x0001 /* Pulse-Width Modulation Output Mode */
811#define WDTH_CAP 0x0002 /* Width Capture Input Mode */
812#define EXT_CLK 0x0003 /* External Clock Mode */
813#define PULSE_HI 0x0004 /* Action Pulse (Positive/Negative*) */
814#define PERIOD_CNT 0x0008 /* Period Count */
815#define IRQ_ENA 0x0010 /* Interrupt Request Enable */
816#define TIN_SEL 0x0020 /* Timer Input Select */
817#define OUT_DIS 0x0040 /* Output Pad Disable */
818#define CLK_SEL 0x0080 /* Timer Clock Select */
819#define TOGGLE_HI 0x0100 /* PWM_OUT PULSE_HI Toggle Mode */
820#define EMU_RUN 0x0200 /* Emulation Behavior Select */
821#define ERR_TYP 0xC000 /* Error Type */
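
As a usage illustration, a continuous PWM output on timer 0 composes TIMER0_CONFIG from the masks above and is then started through TIMER_ENABLE (a sketch; the period and width values are arbitrary):

	bfin_write16(TIMER0_CONFIG, PWM_OUT | PERIOD_CNT);	/* PWM mode, count the period */
	bfin_write32(TIMER0_PERIOD, 1000);
	bfin_write32(TIMER0_WIDTH, 250);			/* 25% duty cycle */
	bfin_write16(TIMER_ENABLE, TIMEN0);			/* start timer 0 */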
822
823
824/* ****************** GPIO PORTS F, G, H MASKS ***********************/
825/* General Purpose IO (0xFFC00700 - 0xFFC007FF) Masks */
826/* Port F Masks */
827#define PF0 0x0001
828#define PF1 0x0002
829#define PF2 0x0004
830#define PF3 0x0008
831#define PF4 0x0010
832#define PF5 0x0020
833#define PF6 0x0040
834#define PF7 0x0080
835#define PF8 0x0100
836#define PF9 0x0200
837#define PF10 0x0400
838#define PF11 0x0800
839#define PF12 0x1000
840#define PF13 0x2000
841#define PF14 0x4000
842#define PF15 0x8000
843
844/* Port G Masks */
845#define PG0 0x0001
846#define PG1 0x0002
847#define PG2 0x0004
848#define PG3 0x0008
849#define PG4 0x0010
850#define PG5 0x0020
851#define PG6 0x0040
852#define PG7 0x0080
853#define PG8 0x0100
854#define PG9 0x0200
855#define PG10 0x0400
856#define PG11 0x0800
857#define PG12 0x1000
858#define PG13 0x2000
859#define PG14 0x4000
860#define PG15 0x8000
861
862/* Port H Masks */
863#define PH0 0x0001
864#define PH1 0x0002
865#define PH2 0x0004
866#define PH3 0x0008
867#define PH4 0x0010
868#define PH5 0x0020
869#define PH6 0x0040
870#define PH7 0x0080
871#define PH8 0x0100
872#define PH9 0x0200
873#define PH10 0x0400
874#define PH11 0x0800
875#define PH12 0x1000
876#define PH13 0x2000
877#define PH14 0x4000
878#define PH15 0x8000
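
These per-pin masks pair with the PORTxIO register blocks defined earlier in this header. Driving PF2 as an output and toggling it, for instance (a sketch):

	bfin_write16(PORTFIO_INEN, bfin_read16(PORTFIO_INEN) & ~PF2);	/* not an input */
	bfin_write16(PORTFIO_DIR, bfin_read16(PORTFIO_DIR) | PF2);	/* make PF2 an output */
	bfin_write16(PORTFIO_SET, PF2);					/* drive it high */
	bfin_write16(PORTFIO_TOGGLE, PF2);				/* and back low */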
879
880/* ********************* ASYNCHRONOUS MEMORY CONTROLLER MASKS *************************/
881/* EBIU_AMGCTL Masks */
882#define AMCKEN 0x0001 /* Enable CLKOUT */
883#define AMBEN_NONE 0x0000 /* All Banks Disabled */
884#define AMBEN_B0 0x0002 /* Enable Async Memory Bank 0 only */
885#define AMBEN_B0_B1 0x0004 /* Enable Async Memory Banks 0 & 1 only */
886#define AMBEN_B0_B1_B2 0x0006 /* Enable Async Memory Banks 0, 1, and 2 */
887#define AMBEN_ALL 0x0008 /* Enable Async Memory Banks (all) 0, 1, 2, and 3 */
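
Composing EBIU_AMGCTL from these masks, e.g. CLKOUT enabled with async banks 0 and 1 usable, is a single write (a sketch):

	bfin_write16(EBIU_AMGCTL, AMCKEN | AMBEN_B0_B1);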
888
889/* EBIU_AMBCTL0 Masks */
890#define B0RDYEN 0x00000001 /* Bank 0 (B0) RDY Enable */
891#define B0RDYPOL 0x00000002 /* B0 RDY Active High */
892#define B0TT_1 0x00000004 /* B0 Transition Time (Read to Write) = 1 cycle */
893#define B0TT_2 0x00000008 /* B0 Transition Time (Read to Write) = 2 cycles */
894#define B0TT_3 0x0000000C /* B0 Transition Time (Read to Write) = 3 cycles */
895#define B0TT_4 0x00000000 /* B0 Transition Time (Read to Write) = 4 cycles */
896#define B0ST_1 0x00000010 /* B0 Setup Time (AOE to Read/Write) = 1 cycle */
897#define B0ST_2 0x00000020 /* B0 Setup Time (AOE to Read/Write) = 2 cycles */
898#define B0ST_3 0x00000030 /* B0 Setup Time (AOE to Read/Write) = 3 cycles */
899#define B0ST_4 0x00000000 /* B0 Setup Time (AOE to Read/Write) = 4 cycles */
900#define B0HT_1 0x00000040 /* B0 Hold Time (~Read/Write to ~AOE) = 1 cycle */
901#define B0HT_2 0x00000080 /* B0 Hold Time (~Read/Write to ~AOE) = 2 cycles */
902#define B0HT_3 0x000000C0 /* B0 Hold Time (~Read/Write to ~AOE) = 3 cycles */
903#define B0HT_0 0x00000000 /* B0 Hold Time (~Read/Write to ~AOE) = 0 cycles */
904#define B0RAT_1 0x00000100 /* B0 Read Access Time = 1 cycle */
905#define B0RAT_2 0x00000200 /* B0 Read Access Time = 2 cycles */
906#define B0RAT_3 0x00000300 /* B0 Read Access Time = 3 cycles */
907#define B0RAT_4 0x00000400 /* B0 Read Access Time = 4 cycles */
908#define B0RAT_5 0x00000500 /* B0 Read Access Time = 5 cycles */
909#define B0RAT_6 0x00000600 /* B0 Read Access Time = 6 cycles */
910#define B0RAT_7 0x00000700 /* B0 Read Access Time = 7 cycles */
911#define B0RAT_8 0x00000800 /* B0 Read Access Time = 8 cycles */
912#define B0RAT_9 0x00000900 /* B0 Read Access Time = 9 cycles */
913#define B0RAT_10 0x00000A00 /* B0 Read Access Time = 10 cycles */
914#define B0RAT_11 0x00000B00 /* B0 Read Access Time = 11 cycles */
915#define B0RAT_12 0x00000C00 /* B0 Read Access Time = 12 cycles */
916#define B0RAT_13 0x00000D00 /* B0 Read Access Time = 13 cycles */
917#define B0RAT_14 0x00000E00 /* B0 Read Access Time = 14 cycles */
918#define B0RAT_15 0x00000F00 /* B0 Read Access Time = 15 cycles */
919#define B0WAT_1 0x00001000 /* B0 Write Access Time = 1 cycle */
920#define B0WAT_2 0x00002000 /* B0 Write Access Time = 2 cycles */
921#define B0WAT_3 0x00003000 /* B0 Write Access Time = 3 cycles */
922#define B0WAT_4 0x00004000 /* B0 Write Access Time = 4 cycles */
923#define B0WAT_5 0x00005000 /* B0 Write Access Time = 5 cycles */
924#define B0WAT_6 0x00006000 /* B0 Write Access Time = 6 cycles */
925#define B0WAT_7 0x00007000 /* B0 Write Access Time = 7 cycles */
926#define B0WAT_8 0x00008000 /* B0 Write Access Time = 8 cycles */
927#define B0WAT_9 0x00009000 /* B0 Write Access Time = 9 cycles */
928#define B0WAT_10 0x0000A000 /* B0 Write Access Time = 10 cycles */
929#define B0WAT_11 0x0000B000 /* B0 Write Access Time = 11 cycles */
930#define B0WAT_12 0x0000C000 /* B0 Write Access Time = 12 cycles */
931#define B0WAT_13 0x0000D000 /* B0 Write Access Time = 13 cycles */
932#define B0WAT_14 0x0000E000 /* B0 Write Access Time = 14 cycles */
933#define B0WAT_15 0x0000F000 /* B0 Write Access Time = 15 cycles */
934
935#define B1RDYEN 0x00010000 /* Bank 1 (B1) RDY Enable */
936#define B1RDYPOL 0x00020000 /* B1 RDY Active High */
937#define B1TT_1 0x00040000 /* B1 Transition Time (Read to Write) = 1 cycle */
938#define B1TT_2 0x00080000 /* B1 Transition Time (Read to Write) = 2 cycles */
939#define B1TT_3 0x000C0000 /* B1 Transition Time (Read to Write) = 3 cycles */
940#define B1TT_4 0x00000000 /* B1 Transition Time (Read to Write) = 4 cycles */
941#define B1ST_1 0x00100000 /* B1 Setup Time (AOE to Read/Write) = 1 cycle */
942#define B1ST_2 0x00200000 /* B1 Setup Time (AOE to Read/Write) = 2 cycles */
943#define B1ST_3 0x00300000 /* B1 Setup Time (AOE to Read/Write) = 3 cycles */
944#define B1ST_4 0x00000000 /* B1 Setup Time (AOE to Read/Write) = 4 cycles */
945#define B1HT_1 0x00400000 /* B1 Hold Time (~Read/Write to ~AOE) = 1 cycle */
946#define B1HT_2 0x00800000 /* B1 Hold Time (~Read/Write to ~AOE) = 2 cycles */
947#define B1HT_3 0x00C00000 /* B1 Hold Time (~Read/Write to ~AOE) = 3 cycles */
948#define B1HT_0 0x00000000 /* B1 Hold Time (~Read/Write to ~AOE) = 0 cycles */
949#define B1RAT_1 0x01000000 /* B1 Read Access Time = 1 cycle */
950#define B1RAT_2 0x02000000 /* B1 Read Access Time = 2 cycles */
951#define B1RAT_3 0x03000000 /* B1 Read Access Time = 3 cycles */
952#define B1RAT_4 0x04000000 /* B1 Read Access Time = 4 cycles */
953#define B1RAT_5 0x05000000 /* B1 Read Access Time = 5 cycles */
954#define B1RAT_6 0x06000000 /* B1 Read Access Time = 6 cycles */
955#define B1RAT_7 0x07000000 /* B1 Read Access Time = 7 cycles */
956#define B1RAT_8 0x08000000 /* B1 Read Access Time = 8 cycles */
957#define B1RAT_9 0x09000000 /* B1 Read Access Time = 9 cycles */
958#define B1RAT_10 0x0A000000 /* B1 Read Access Time = 10 cycles */
959#define B1RAT_11 0x0B000000 /* B1 Read Access Time = 11 cycles */
960#define B1RAT_12 0x0C000000 /* B1 Read Access Time = 12 cycles */
961#define B1RAT_13 0x0D000000 /* B1 Read Access Time = 13 cycles */
962#define B1RAT_14 0x0E000000 /* B1 Read Access Time = 14 cycles */
963#define B1RAT_15 0x0F000000 /* B1 Read Access Time = 15 cycles */
964#define B1WAT_1 0x10000000 /* B1 Write Access Time = 1 cycle */
965#define B1WAT_2 0x20000000 /* B1 Write Access Time = 2 cycles */
966#define B1WAT_3 0x30000000 /* B1 Write Access Time = 3 cycles */
967#define B1WAT_4 0x40000000 /* B1 Write Access Time = 4 cycles */
968#define B1WAT_5 0x50000000 /* B1 Write Access Time = 5 cycles */
969#define B1WAT_6 0x60000000 /* B1 Write Access Time = 6 cycles */
970#define B1WAT_7 0x70000000 /* B1 Write Access Time = 7 cycles */
971#define B1WAT_8 0x80000000 /* B1 Write Access Time = 8 cycles */
972#define B1WAT_9 0x90000000 /* B1 Write Access Time = 9 cycles */
973#define B1WAT_10 0xA0000000 /* B1 Write Access Time = 10 cycles */
974#define B1WAT_11 0xB0000000 /* B1 Write Access Time = 11 cycles */
975#define B1WAT_12 0xC0000000 /* B1 Write Access Time = 12 cycles */
976#define B1WAT_13 0xD0000000 /* B1 Write Access Time = 13 cycles */
977#define B1WAT_14 0xE0000000 /* B1 Write Access Time = 14 cycles */
978#define B1WAT_15 0xF0000000 /* B1 Write Access Time = 15 cycles */
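/* Illustrative sketch: program bank 0 and bank 1 timing in one 32-bit write.
 * Note the *TT_4 and *ST_4 encodings are 0x0, so OR'ing them in is a no-op
 * that only documents the chosen (reset-default) timing. Accessor assumed. */
bfin_write_EBIU_AMBCTL0(B0RDYEN | B0RAT_8 | B0WAT_8 | B0ST_3 | B0HT_2 | B0TT_4 |
                        B1RAT_8 | B1WAT_8 | B1ST_3 | B1HT_2 | B1TT_4);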
979
980/* EBIU_AMBCTL1 Masks */
981#define B2RDYEN 0x00000001 /* Bank 2 (B2) RDY Enable */
982#define B2RDYPOL 0x00000002 /* B2 RDY Active High */
983#define B2TT_1 0x00000004 /* B2 Transition Time (Read to Write) = 1 cycle */
984#define B2TT_2 0x00000008 /* B2 Transition Time (Read to Write) = 2 cycles */
985#define B2TT_3 0x0000000C /* B2 Transition Time (Read to Write) = 3 cycles */
986#define B2TT_4 0x00000000 /* B2 Transition Time (Read to Write) = 4 cycles */
987#define B2ST_1 0x00000010 /* B2 Setup Time (AOE to Read/Write) = 1 cycle */
988#define B2ST_2 0x00000020 /* B2 Setup Time (AOE to Read/Write) = 2 cycles */
989#define B2ST_3 0x00000030 /* B2 Setup Time (AOE to Read/Write) = 3 cycles */
990#define B2ST_4 0x00000000 /* B2 Setup Time (AOE to Read/Write) = 4 cycles */
991#define B2HT_1 0x00000040 /* B2 Hold Time (~Read/Write to ~AOE) = 1 cycle */
992#define B2HT_2 0x00000080 /* B2 Hold Time (~Read/Write to ~AOE) = 2 cycles */
993#define B2HT_3 0x000000C0 /* B2 Hold Time (~Read/Write to ~AOE) = 3 cycles */
994#define B2HT_0 0x00000000 /* B2 Hold Time (~Read/Write to ~AOE) = 0 cycles */
995#define B2RAT_1 0x00000100 /* B2 Read Access Time = 1 cycle */
996#define B2RAT_2 0x00000200 /* B2 Read Access Time = 2 cycles */
997#define B2RAT_3 0x00000300 /* B2 Read Access Time = 3 cycles */
998#define B2RAT_4 0x00000400 /* B2 Read Access Time = 4 cycles */
999#define B2RAT_5 0x00000500 /* B2 Read Access Time = 5 cycles */
1000#define B2RAT_6 0x00000600 /* B2 Read Access Time = 6 cycles */
1001#define B2RAT_7 0x00000700 /* B2 Read Access Time = 7 cycles */
1002#define B2RAT_8 0x00000800 /* B2 Read Access Time = 8 cycles */
1003#define B2RAT_9 0x00000900 /* B2 Read Access Time = 9 cycles */
1004#define B2RAT_10 0x00000A00 /* B2 Read Access Time = 10 cycles */
1005#define B2RAT_11 0x00000B00 /* B2 Read Access Time = 11 cycles */
1006#define B2RAT_12 0x00000C00 /* B2 Read Access Time = 12 cycles */
1007#define B2RAT_13 0x00000D00 /* B2 Read Access Time = 13 cycles */
1008#define B2RAT_14 0x00000E00 /* B2 Read Access Time = 14 cycles */
1009#define B2RAT_15 0x00000F00 /* B2 Read Access Time = 15 cycles */
1010#define B2WAT_1 0x00001000 /* B2 Write Access Time = 1 cycle */
1011#define B2WAT_2 0x00002000 /* B2 Write Access Time = 2 cycles */
1012#define B2WAT_3 0x00003000 /* B2 Write Access Time = 3 cycles */
1013#define B2WAT_4 0x00004000 /* B2 Write Access Time = 4 cycles */
1014#define B2WAT_5 0x00005000 /* B2 Write Access Time = 5 cycles */
1015#define B2WAT_6 0x00006000 /* B2 Write Access Time = 6 cycles */
1016#define B2WAT_7 0x00007000 /* B2 Write Access Time = 7 cycles */
1017#define B2WAT_8 0x00008000 /* B2 Write Access Time = 8 cycles */
1018#define B2WAT_9 0x00009000 /* B2 Write Access Time = 9 cycles */
1019#define B2WAT_10 0x0000A000 /* B2 Write Access Time = 10 cycles */
1020#define B2WAT_11 0x0000B000 /* B2 Write Access Time = 11 cycles */
1021#define B2WAT_12 0x0000C000 /* B2 Write Access Time = 12 cycles */
1022#define B2WAT_13 0x0000D000 /* B2 Write Access Time = 13 cycles */
1023#define B2WAT_14 0x0000E000 /* B2 Write Access Time = 14 cycles */
1024#define B2WAT_15 0x0000F000 /* B2 Write Access Time = 15 cycles */
1025
1026#define B3RDYEN 0x00010000 /* Bank 3 (B3) RDY Enable */
1027#define B3RDYPOL 0x00020000 /* B3 RDY Active High */
1028#define B3TT_1 0x00040000 /* B3 Transition Time (Read to Write) = 1 cycle */
1029#define B3TT_2 0x00080000 /* B3 Transition Time (Read to Write) = 2 cycles */
1030#define B3TT_3 0x000C0000 /* B3 Transition Time (Read to Write) = 3 cycles */
1031#define B3TT_4 0x00000000 /* B3 Transition Time (Read to Write) = 4 cycles */
1032#define B3ST_1 0x00100000 /* B3 Setup Time (AOE to Read/Write) = 1 cycle */
1033#define B3ST_2 0x00200000 /* B3 Setup Time (AOE to Read/Write) = 2 cycles */
1034#define B3ST_3 0x00300000 /* B3 Setup Time (AOE to Read/Write) = 3 cycles */
1035#define B3ST_4 0x00000000 /* B3 Setup Time (AOE to Read/Write) = 4 cycles */
1036#define B3HT_1 0x00400000 /* B3 Hold Time (~Read/Write to ~AOE) = 1 cycle */
1037#define B3HT_2 0x00800000 /* B3 Hold Time (~Read/Write to ~AOE) = 2 cycles */
1038#define B3HT_3 0x00C00000 /* B3 Hold Time (~Read/Write to ~AOE) = 3 cycles */
1039#define B3HT_0 0x00000000 /* B3 Hold Time (~Read/Write to ~AOE) = 0 cycles */
1040#define B3RAT_1 0x01000000 /* B3 Read Access Time = 1 cycle */
1041#define B3RAT_2 0x02000000 /* B3 Read Access Time = 2 cycles */
1042#define B3RAT_3 0x03000000 /* B3 Read Access Time = 3 cycles */
1043#define B3RAT_4 0x04000000 /* B3 Read Access Time = 4 cycles */
1044#define B3RAT_5 0x05000000 /* B3 Read Access Time = 5 cycles */
1045#define B3RAT_6 0x06000000 /* B3 Read Access Time = 6 cycles */
1046#define B3RAT_7 0x07000000 /* B3 Read Access Time = 7 cycles */
1047#define B3RAT_8 0x08000000 /* B3 Read Access Time = 8 cycles */
1048#define B3RAT_9 0x09000000 /* B3 Read Access Time = 9 cycles */
1049#define B3RAT_10 0x0A000000 /* B3 Read Access Time = 10 cycles */
1050#define B3RAT_11 0x0B000000 /* B3 Read Access Time = 11 cycles */
1051#define B3RAT_12 0x0C000000 /* B3 Read Access Time = 12 cycles */
1052#define B3RAT_13 0x0D000000 /* B3 Read Access Time = 13 cycles */
1053#define B3RAT_14 0x0E000000 /* B3 Read Access Time = 14 cycles */
1054#define B3RAT_15 0x0F000000 /* B3 Read Access Time = 15 cycles */
1055#define B3WAT_1 0x10000000 /* B3 Write Access Time = 1 cycle */
1056#define B3WAT_2 0x20000000 /* B3 Write Access Time = 2 cycles */
1057#define B3WAT_3 0x30000000 /* B3 Write Access Time = 3 cycles */
1058#define B3WAT_4 0x40000000 /* B3 Write Access Time = 4 cycles */
1059#define B3WAT_5 0x50000000 /* B3 Write Access Time = 5 cycles */
1060#define B3WAT_6 0x60000000 /* B3 Write Access Time = 6 cycles */
1061#define B3WAT_7 0x70000000 /* B3 Write Access Time = 7 cycles */
1062#define B3WAT_8 0x80000000 /* B3 Write Access Time = 8 cycles */
1063#define B3WAT_9 0x90000000 /* B3 Write Access Time = 9 cycles */
1064#define B3WAT_10 0xA0000000 /* B3 Write Access Time = 10 cycles */
1065#define B3WAT_11 0xB0000000 /* B3 Write Access Time = 11 cycles */
1066#define B3WAT_12 0xC0000000 /* B3 Write Access Time = 12 cycles */
1067#define B3WAT_13 0xD0000000 /* B3 Write Access Time = 13 cycles */
1068#define B3WAT_14 0xE0000000 /* B3 Write Access Time = 14 cycles */
1069#define B3WAT_15 0xF0000000 /* B3 Write Access Time = 15 cycles */
1070
1071
1072/* ********************** SDRAM CONTROLLER MASKS **********************************************/
1073/* EBIU_SDGCTL Masks */
1074#define SCTLE 0x00000001 /* Enable SDRAM Signals */
1075#define CL_2 0x00000008 /* SDRAM CAS Latency = 2 cycles */
1076#define CL_3 0x0000000C /* SDRAM CAS Latency = 3 cycles */
1077#define PASR_ALL 0x00000000 /* All 4 SDRAM Banks Refreshed In Self-Refresh */
1078#define PASR_B0_B1 0x00000010 /* SDRAM Banks 0 and 1 Are Refreshed In Self-Refresh */
1079#define PASR_B0 0x00000020 /* Only SDRAM Bank 0 Is Refreshed In Self-Refresh */
1080#define TRAS_1 0x00000040 /* SDRAM tRAS = 1 cycle */
1081#define TRAS_2 0x00000080 /* SDRAM tRAS = 2 cycles */
1082#define TRAS_3 0x000000C0 /* SDRAM tRAS = 3 cycles */
1083#define TRAS_4 0x00000100 /* SDRAM tRAS = 4 cycles */
1084#define TRAS_5 0x00000140 /* SDRAM tRAS = 5 cycles */
1085#define TRAS_6 0x00000180 /* SDRAM tRAS = 6 cycles */
1086#define TRAS_7 0x000001C0 /* SDRAM tRAS = 7 cycles */
1087#define TRAS_8 0x00000200 /* SDRAM tRAS = 8 cycles */
1088#define TRAS_9 0x00000240 /* SDRAM tRAS = 9 cycles */
1089#define TRAS_10 0x00000280 /* SDRAM tRAS = 10 cycles */
1090#define TRAS_11 0x000002C0 /* SDRAM tRAS = 11 cycles */
1091#define TRAS_12 0x00000300 /* SDRAM tRAS = 12 cycles */
1092#define TRAS_13 0x00000340 /* SDRAM tRAS = 13 cycles */
1093#define TRAS_14 0x00000380 /* SDRAM tRAS = 14 cycles */
1094#define TRAS_15 0x000003C0 /* SDRAM tRAS = 15 cycles */
1095#define TRP_1 0x00000800 /* SDRAM tRP = 1 cycle */
1096#define TRP_2 0x00001000 /* SDRAM tRP = 2 cycles */
1097#define TRP_3 0x00001800 /* SDRAM tRP = 3 cycles */
1098#define TRP_4 0x00002000 /* SDRAM tRP = 4 cycles */
1099#define TRP_5 0x00002800 /* SDRAM tRP = 5 cycles */
1100#define TRP_6 0x00003000 /* SDRAM tRP = 6 cycles */
1101#define TRP_7 0x00003800 /* SDRAM tRP = 7 cycles */
1102#define TRCD_1 0x00008000 /* SDRAM tRCD = 1 cycle */
1103#define TRCD_2 0x00010000 /* SDRAM tRCD = 2 cycles */
1104#define TRCD_3 0x00018000 /* SDRAM tRCD = 3 cycles */
1105#define TRCD_4 0x00020000 /* SDRAM tRCD = 4 cycles */
1106#define TRCD_5 0x00028000 /* SDRAM tRCD = 5 cycles */
1107#define TRCD_6 0x00030000 /* SDRAM tRCD = 6 cycles */
1108#define TRCD_7 0x00038000 /* SDRAM tRCD = 7 cycles */
1109#define TWR_1 0x00080000 /* SDRAM tWR = 1 cycle */
1110#define TWR_2 0x00100000 /* SDRAM tWR = 2 cycles */
1111#define TWR_3 0x00180000 /* SDRAM tWR = 3 cycles */
1112#define PUPSD 0x00200000 /* Power-Up Start Delay (15 SCLK Cycles Delay) */
1113#define PSM 0x00400000 /* Power-Up Sequence (Mode Register Before/After* Refresh) */
1114#define PSS 0x00800000 /* Enable Power-Up Sequence on Next SDRAM Access */
1115#define SRFS 0x01000000 /* Enable SDRAM Self-Refresh Mode */
1116#define EBUFE 0x02000000 /* Enable External Buffering Timing */
1117#define FBBRW 0x04000000 /* Enable Fast Back-To-Back Read To Write */
1118#define EMREN 0x10000000 /* Extended Mode Register Enable */
1119#define TCSR 0x20000000 /* Temp-Compensated Self-Refresh Value (85/45* Deg C) */
1120#define CDDBG 0x40000000 /* Tristate SDRAM Controls During Bus Grant */
1121
1122/* EBIU_SDBCTL Masks */
1123#define EBE 0x0001 /* Enable SDRAM External Bank */
1124#define EBSZ_16 0x0000 /* SDRAM External Bank Size = 16MB */
1125#define EBSZ_32 0x0002 /* SDRAM External Bank Size = 32MB */
1126#define EBSZ_64 0x0004 /* SDRAM External Bank Size = 64MB */
1127#define EBSZ_128 0x0006 /* SDRAM External Bank Size = 128MB */
1128#define EBSZ_256 0x0008 /* SDRAM External Bank Size = 256MB */
1129#define EBSZ_512 0x000A /* SDRAM External Bank Size = 512MB */
1130#define EBCAW_8 0x0000 /* SDRAM External Bank Column Address Width = 8 Bits */
1131#define EBCAW_9 0x0010 /* SDRAM External Bank Column Address Width = 9 Bits */
1132#define EBCAW_10 0x0020 /* SDRAM External Bank Column Address Width = 10 Bits */
1133#define EBCAW_11 0x0030 /* SDRAM External Bank Column Address Width = 11 Bits */
1134
1135/* EBIU_SDSTAT Masks */
1136#define SDCI 0x0001 /* SDRAM Controller Idle */
1137#define SDSRA 0x0002 /* SDRAM Self-Refresh Active */
1138#define SDPUA 0x0004 /* SDRAM Power-Up Active */
1139#define SDRS 0x0008 /* SDRAM Will Power-Up On Next Access */
1140#define SDEASE 0x0010 /* SDRAM EAB Sticky Error Status */
1141#define BGSTAT 0x0020 /* Bus Grant Status */
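/* Illustrative bring-up sketch using the three EBIU SDRAM registers above;
 * the size and cycle counts depend entirely on the SDRAM part and SCLK, and
 * the bfin_{read,write}_EBIU_* accessors are assumed from the cdef header. */
bfin_write_EBIU_SDBCTL(EBE | EBSZ_64 | EBCAW_10);  /* 64MB bank, 10-bit columns */
bfin_write_EBIU_SDGCTL(SCTLE | CL_3 | TRAS_6 | TRP_3 | TRCD_3 | TWR_2);
while (!(bfin_read_EBIU_SDSTAT() & SDCI))          /* wait for controller idle */
	continue;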
1142
1143
1144/* ************************** DMA CONTROLLER MASKS ********************************/
1145
1146/* DMAx_PERIPHERAL_MAP, MDMA_yy_PERIPHERAL_MAP Masks */
1147#define CTYPE 0x0040 /* DMA Channel Type Indicator (Memory/Peripheral*) */
1148#define PMAP 0xF000 /* Peripheral Mapped To This Channel */
1149#define PMAP_PPI 0x0000 /* PPI Port DMA */
1150#define PMAP_EMACRX 0x1000 /* Ethernet Receive DMA */
1151#define PMAP_EMACTX 0x2000 /* Ethernet Transmit DMA */
1152#define PMAP_SPORT0RX 0x3000 /* SPORT0 Receive DMA */
1153#define PMAP_SPORT0TX 0x4000 /* SPORT0 Transmit DMA */
1154#define PMAP_SPORT1RX 0x5000 /* SPORT1 Receive DMA */
1155#define PMAP_SPORT1TX 0x6000 /* SPORT1 Transmit DMA */
1156#define PMAP_SPI 0x7000 /* SPI Port DMA */
1157#define PMAP_UART0RX 0x8000 /* UART0 Port Receive DMA */
1158#define PMAP_UART0TX 0x9000 /* UART0 Port Transmit DMA */
1159#define PMAP_UART1RX 0xA000 /* UART1 Port Receive DMA */
1160#define PMAP_UART1TX 0xB000 /* UART1 Port Transmit DMA */
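/* Illustrative sketch: decode which peripheral a DMA channel is bound to.
 * bfin_read_DMA0_PERIPHERAL_MAP() is assumed from the matching cdef header. */
unsigned short pmap = bfin_read_DMA0_PERIPHERAL_MAP() & PMAP;
if (pmap == PMAP_UART0RX) {
	/* this channel services UART0 receive */
}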
1161
1162/* ************ PARALLEL PERIPHERAL INTERFACE (PPI) MASKS *************/
1163/* PPI_CONTROL Masks */
1164#define PORT_EN 0x0001 /* PPI Port Enable */
1165#define PORT_DIR 0x0002 /* PPI Port Direction */
1166#define XFR_TYPE 0x000C /* PPI Transfer Type */
1167#define PORT_CFG 0x0030 /* PPI Port Configuration */
1168#define FLD_SEL 0x0040 /* PPI Active Field Select */
1169#define PACK_EN 0x0080 /* PPI Packing Mode */
1170#define DMA32 0x0100 /* PPI 32-bit DMA Enable */
1171#define SKIP_EN 0x0200 /* PPI Skip Element Enable */
1172#define SKIP_EO 0x0400 /* PPI Skip Even/Odd Elements */
1173#define DLEN_8 0x0000 /* Data Length = 8 Bits */
1174#define DLEN_10 0x0800 /* Data Length = 10 Bits */
1175#define DLEN_11 0x1000 /* Data Length = 11 Bits */
1176#define DLEN_12 0x1800 /* Data Length = 12 Bits */
1177#define DLEN_13 0x2000 /* Data Length = 13 Bits */
1178#define DLEN_14 0x2800 /* Data Length = 14 Bits */
1179#define DLEN_15 0x3000 /* Data Length = 15 Bits */
1180#define DLEN_16 0x3800 /* Data Length = 16 Bits */
1181#define DLENGTH 0x3800 /* PPI Data Length */
1182#define POLC 0x4000 /* PPI Clock Polarity */
1183#define POLS 0x8000 /* PPI Frame Sync Polarity */
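/* Illustrative sketch: PPI receive mode, 8-bit data (DLEN_8 is 0x0000),
 * with byte packing; PORT_DIR stays clear for input. Accessor assumed. */
bfin_write_PPI_CONTROL(PACK_EN | DLEN_8 | PORT_EN);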
1184
1185/* PPI_STATUS Masks */
1186#define FLD 0x0400 /* Field Indicator */
1187#define FT_ERR 0x0800 /* Frame Track Error */
1188#define OVR 0x1000 /* FIFO Overflow Error */
1189#define UNDR 0x2000 /* FIFO Underrun Error */
1190#define ERR_DET 0x4000 /* Error Detected Indicator */
1191#define ERR_NCOR 0x8000 /* Error Not Corrected Indicator */
1192
1193
1194/* ******************** TWO-WIRE INTERFACE (TWI) MASKS ***********************/
1195/* TWI_CLKDIV Macros (Use: *pTWI_CLKDIV = CLKLOW(x)|CLKHI(y); ) */
1196#define CLKLOW(x) ((x) & 0xFF) /* Periods Clock Is Held Low */
1197#define CLKHI(y) (((y)&0xFF)<<0x8) /* Periods Before New Clock Low */
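/* Illustrative sketch: with PRESCALE set for the 10MHz internal time
 * reference (below), a 100kHz SCL needs 100 reference periods per cycle,
 * split between the low and high phases. Accessor name assumed. */
bfin_write_TWI_CLKDIV(CLKLOW(50) | CLKHI(50));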
1198
1199/* TWI_PRESCALE Masks */
1200#define PRESCALE 0x007F /* SCLKs Per Internal Time Reference (10MHz) */
1201#define TWI_ENA 0x0080 /* TWI Enable */
1202#define SCCB 0x0200 /* SCCB Compatibility Enable */
1203
1204/* TWI_SLAVE_CTL Masks */
1205#define SEN 0x0001 /* Slave Enable */
1206#define SADD_LEN 0x0002 /* Slave Address Length */
1207#define STDVAL 0x0004 /* Slave Transmit Data Valid */
1208#define NAK 0x0008 /* NAK/ACK* Generated At Conclusion Of Transfer */
1209#define GEN 0x0010 /* General Call Address Matching Enabled */
1210
1211/* TWI_SLAVE_STAT Masks */
1212#define SDIR 0x0001 /* Slave Transfer Direction (Transmit/Receive*) */
1213#define GCALL 0x0002 /* General Call Indicator */
1214
1215/* TWI_MASTER_CTL Masks */
1216#define MEN 0x0001 /* Master Mode Enable */
1217#define MADD_LEN 0x0002 /* Master Address Length */
1218#define MDIR 0x0004 /* Master Transmit Direction (RX/TX*) */
1219#define FAST 0x0008 /* Use Fast Mode Timing Specs */
1220#define STOP 0x0010 /* Issue Stop Condition */
1221#define RSTART 0x0020 /* Repeat Start or Stop* At End Of Transfer */
1222#define DCNT 0x3FC0 /* Data Bytes To Transfer */
1223#define SDAOVR 0x4000 /* Serial Data Override */
1224#define SCLOVR 0x8000 /* Serial Clock Override */
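/* Illustrative sketch: start a 4-byte master transmit using fast-mode
 * timing. DCNT is a field at bits 13:6, so the byte count is shifted
 * into place and masked. Accessor name assumed. */
bfin_write_TWI_MASTER_CTL(((4 << 6) & DCNT) | FAST | MEN);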
1225
1226/* TWI_MASTER_STAT Masks */
1227#define MPROG 0x0001 /* Master Transfer In Progress */
1228#define LOSTARB 0x0002 /* Lost Arbitration Indicator (Xfer Aborted) */
1229#define ANAK 0x0004 /* Address Not Acknowledged */
1230#define DNAK 0x0008 /* Data Not Acknowledged */
1231#define BUFRDERR 0x0010 /* Buffer Read Error */
1232#define BUFWRERR 0x0020 /* Buffer Write Error */
1233#define SDASEN 0x0040 /* Serial Data Sense */
1234#define SCLSEN 0x0080 /* Serial Clock Sense */
1235#define BUSBUSY 0x0100 /* Bus Busy Indicator */
1236
1237/* TWI_INT_SRC and TWI_INT_ENABLE Masks */
1238#define SINIT 0x0001 /* Slave Transfer Initiated */
1239#define SCOMP 0x0002 /* Slave Transfer Complete */
1240#define SERR 0x0004 /* Slave Transfer Error */
1241#define SOVF 0x0008 /* Slave Overflow */
1242#define MCOMP 0x0010 /* Master Transfer Complete */
1243#define MERR 0x0020 /* Master Transfer Error */
1244#define XMTSERV 0x0040 /* Transmit FIFO Service */
1245#define RCVSERV 0x0080 /* Receive FIFO Service */
1246
1247/* TWI_FIFO_CTRL Masks */
1248#define XMTFLUSH 0x0001 /* Transmit Buffer Flush */
1249#define RCVFLUSH 0x0002 /* Receive Buffer Flush */
1250#define XMTINTLEN 0x0004 /* Transmit Buffer Interrupt Length */
1251#define RCVINTLEN 0x0008 /* Receive Buffer Interrupt Length */
1252
1253/* TWI_FIFO_STAT Masks */
1254#define XMTSTAT 0x0003 /* Transmit FIFO Status */
1255#define XMT_EMPTY 0x0000 /* Transmit FIFO Empty */
1256#define XMT_HALF 0x0001 /* Transmit FIFO Has 1 Byte To Write */
1257#define XMT_FULL 0x0003 /* Transmit FIFO Full (2 Bytes To Write) */
1258
1259#define RCVSTAT 0x000C /* Receive FIFO Status */
1260#define RCV_EMPTY 0x0000 /* Receive FIFO Empty */
1261#define RCV_HALF 0x0004 /* Receive FIFO Has 1 Byte To Read */
1262#define RCV_FULL 0x000C /* Receive FIFO Full (2 Bytes To Read) */
1263
1264
1265/* Omit CAN masks from defBF534.h */
1266
1267/* ******************* PIN CONTROL REGISTER MASKS ************************/
1268/* PORT_MUX Masks */
1269#define PJSE 0x0001 /* Port J SPI/SPORT Enable */
1270#define PJSE_SPORT 0x0000 /* Enable TFS0/DT0PRI */
1271#define PJSE_SPI 0x0001 /* Enable SPI_SSEL3:2 */
1272
1273#define PJCE(x) (((x)&0x3)<<1) /* Port J CAN/SPI/SPORT Enable */
1274#define PJCE_SPORT 0x0000 /* Enable DR0SEC/DT0SEC */
1275#define PJCE_CAN 0x0002 /* Enable CAN RX/TX */
1276#define PJCE_SPI 0x0004 /* Enable SPI_SSEL7 */
1277
1278#define PFDE 0x0008 /* Port F DMA Request Enable */
1279#define PFDE_UART 0x0000 /* Enable UART0 RX/TX */
1280#define PFDE_DMA 0x0008 /* Enable DMAR1:0 */
1281
1282#define PFTE 0x0010 /* Port F Timer Enable */
1283#define PFTE_UART 0x0000 /* Enable UART1 RX/TX */
1284#define PFTE_TIMER 0x0010 /* Enable TMR7:6 */
1285
1286#define PFS6E 0x0020 /* Port F SPI SSEL 6 Enable */
1287#define PFS6E_TIMER 0x0000 /* Enable TMR5 */
1288#define PFS6E_SPI 0x0020 /* Enable SPI_SSEL6 */
1289
1290#define PFS5E 0x0040 /* Port F SPI SSEL 5 Enable */
1291#define PFS5E_TIMER 0x0000 /* Enable TMR4 */
1292#define PFS5E_SPI 0x0040 /* Enable SPI_SSEL5 */
1293
1294#define PFS4E 0x0080 /* Port F SPI SSEL 4 Enable */
1295#define PFS4E_TIMER 0x0000 /* Enable TMR3 */
1296#define PFS4E_SPI 0x0080 /* Enable SPI_SSEL4 */
1297
1298#define PFFE 0x0100 /* Port F PPI Frame Sync Enable */
1299#define PFFE_TIMER 0x0000 /* Enable TMR2 */
1300#define PFFE_PPI 0x0100 /* Enable PPI FS3 */
1301
1302#define PGSE 0x0200 /* Port G SPORT1 Secondary Enable */
1303#define PGSE_PPI 0x0000 /* Enable PPI D9:8 */
1304#define PGSE_SPORT 0x0200 /* Enable DR1SEC/DT1SEC */
1305
1306#define PGRE 0x0400 /* Port G SPORT1 Receive Enable */
1307#define PGRE_PPI 0x0000 /* Enable PPI D12:10 */
1308#define PGRE_SPORT 0x0400 /* Enable DR1PRI/RFS1/RSCLK1 */
1309
1310#define PGTE 0x0800 /* Port G SPORT1 Transmit Enable */
1311#define PGTE_PPI 0x0000 /* Enable PPI D15:13 */
1312#define PGTE_SPORT 0x0800 /* Enable DT1PRI/TFS1/TSCLK1 */
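/* Illustrative sketch: route Port J to the SPI slave selects and the PF
 * pins to TMR7:6. This assumes the PJCE field is still at its reset value
 * of 0, since OR'ing into a multi-bit field only works from a cleared state. */
bfin_write_PORT_MUX(bfin_read_PORT_MUX() | PJCE_SPI | PFTE_TIMER);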
1313
1314
1315/* ****************** HANDSHAKE DMA (HDMA) MASKS *********************/
1316/* HDMAx_CTL Masks */
1317#define HMDMAEN 0x0001 /* Enable Handshake DMA 0/1 */
1318#define REP 0x0002 /* HDMA Request Polarity */
1319#define UTE 0x0004 /* Urgency Threshold Enable */
1320#define OIE 0x0010 /* Overflow Interrupt Enable */
1321#define BDIE 0x0020 /* Block Done Interrupt Enable */
1322#define MBDI 0x0040 /* Mask Block Done IRQ If Pending ECNT */
1323#define DRQ 0x0300 /* HDMA Request Type */
1324#define DRQ_NONE 0x0000 /* No Request */
1325#define DRQ_SINGLE 0x0100 /* Channels Request Single */
1326#define DRQ_MULTI 0x0200 /* Channels Request Multi (Default) */
1327#define DRQ_URGENT 0x0300 /* Channels Request Multi Urgent */
1328#define RBC 0x1000 /* Reload BCNT With IBCNT */
1329#define PS 0x2000 /* HDMA Pin Status */
1330#define OI 0x4000 /* Overflow Interrupt Generated */
1331#define BDI 0x8000 /* Block Done Interrupt Generated */
1332
1333/* entry addresses of the user-callable Boot ROM functions */
1334
1335#define _BOOTROM_RESET 0xEF000000
1336#define _BOOTROM_FINAL_INIT 0xEF000002
1337#define _BOOTROM_DO_MEMORY_DMA 0xEF000006
1338#define _BOOTROM_BOOT_DXE_FLASH 0xEF000008
1339#define _BOOTROM_BOOT_DXE_SPI 0xEF00000A
1340#define _BOOTROM_BOOT_DXE_TWI 0xEF00000C
1341#define _BOOTROM_GET_DXE_ADDRESS_FLASH 0xEF000010
1342#define _BOOTROM_GET_DXE_ADDRESS_SPI 0xEF000012
1343#define _BOOTROM_GET_DXE_ADDRESS_TWI 0xEF000014
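/* Illustrative sketch: the Boot ROM entries above are fixed code addresses
 * and are typically reached through a cast function pointer; the void/void
 * signature here is an assumption for the reset entry. */
void (*bfrom_reset)(void) = (void (*)(void))_BOOTROM_RESET;
bfrom_reset(); /* does not return */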
1344
1345/* Alternate Deprecated Macros Provided For Backwards Code Compatibility */
1346#define PGDE_UART PFDE_UART
1347#define PGDE_DMA PFDE_DMA
1348#define CKELOW SCKELOW
1349
1350/* ==== end from defBF534.h ==== */
1351
1352/* HOST Port Registers */
1353
1354#define HOST_CONTROL 0xffc03400 /* HOST Control Register */
1355#define HOST_STATUS 0xffc03404 /* HOST Status Register */
1356#define HOST_TIMEOUT 0xffc03408 /* HOST Acknowledge Mode Timeout Register */
1357
1358/* Counter Registers */
1359
1360#define CNT_CONFIG 0xffc03500 /* Configuration Register */
1361#define CNT_IMASK 0xffc03504 /* Interrupt Mask Register */
1362#define CNT_STATUS 0xffc03508 /* Status Register */
1363#define CNT_COMMAND 0xffc0350c /* Command Register */
1364#define CNT_DEBOUNCE 0xffc03510 /* Debounce Register */
1365#define CNT_COUNTER 0xffc03514 /* Counter Register */
1366#define CNT_MAX 0xffc03518 /* Maximal Count Register */
1367#define CNT_MIN 0xffc0351c /* Minimal Count Register */
1368
1369/* OTP/FUSE Registers */
1370
1371#define OTP_CONTROL 0xffc03600 /* OTP/Fuse Control Register */
1372#define OTP_BEN 0xffc03604 /* OTP/Fuse Byte Enable */
1373#define OTP_STATUS 0xffc03608 /* OTP/Fuse Status */
1374#define OTP_TIMING 0xffc0360c /* OTP/Fuse Access Timing */
1375
1376/* Security Registers */
1377
1378#define SECURE_SYSSWT 0xffc03620 /* Secure System Switches */
1379#define SECURE_CONTROL 0xffc03624 /* Secure Control */
1380#define SECURE_STATUS 0xffc03628 /* Secure Status */
1381
1382/* OTP Read/Write Data Buffer Registers */
1383
1384#define OTP_DATA0 0xffc03680 /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
1385#define OTP_DATA1 0xffc03684 /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
1386#define OTP_DATA2 0xffc03688 /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
1387#define OTP_DATA3 0xffc0368c /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
1388
1389/* NFC Registers */
1390
1391#define NFC_CTL 0xffc03700 /* NAND Control Register */
1392#define NFC_STAT 0xffc03704 /* NAND Status Register */
1393#define NFC_IRQSTAT 0xffc03708 /* NAND Interrupt Status Register */
1394#define NFC_IRQMASK 0xffc0370c /* NAND Interrupt Mask Register */
1395#define NFC_ECC0 0xffc03710 /* NAND ECC Register 0 */
1396#define NFC_ECC1 0xffc03714 /* NAND ECC Register 1 */
1397#define NFC_ECC2 0xffc03718 /* NAND ECC Register 2 */
1398#define NFC_ECC3 0xffc0371c /* NAND ECC Register 3 */
1399#define NFC_COUNT 0xffc03720 /* NAND ECC Count Register */
1400#define NFC_RST 0xffc03724 /* NAND ECC Reset Register */
1401#define NFC_PGCTL 0xffc03728 /* NAND Page Control Register */
1402#define NFC_READ 0xffc0372c /* NAND Read Data Register */
1403#define NFC_ADDR 0xffc03740 /* NAND Address Register */
1404#define NFC_CMD 0xffc03744 /* NAND Command Register */
1405#define NFC_DATA_WR 0xffc03748 /* NAND Data Write Register */
1406#define NFC_DATA_RD 0xffc0374c /* NAND Data Read Register */
1407
1408/* ********************************************************** */
1409/* SINGLE BIT MACRO PAIRS (bit mask and negated one) */
1410/* and MULTI BIT READ MACROS */
1411/* ********************************************************** */
1412
1413/* Bit masks for HOST_CONTROL */
1414
1415#define HOST_CNTR_HOST_EN 0x1 /* Host Enable */
1416#define HOST_CNTR_nHOST_EN 0x0
1417#define HOST_CNTR_HOST_END 0x2 /* Host Endianness */
1418#define HOST_CNTR_nHOST_END 0x0
1419#define HOST_CNTR_DATA_SIZE 0x4 /* Data Size */
1420#define HOST_CNTR_nDATA_SIZE 0x0
1421#define HOST_CNTR_HOST_RST 0x8 /* Host Reset */
1422#define HOST_CNTR_nHOST_RST 0x0
1423#define HOST_CNTR_HRDY_OVR 0x20 /* Host Ready Override */
1424#define HOST_CNTR_nHRDY_OVR 0x0
1425#define HOST_CNTR_INT_MODE 0x40 /* Interrupt Mode */
1426#define HOST_CNTR_nINT_MODE 0x0
1427#define HOST_CNTR_BT_EN 0x80 /* Bus Timeout Enable */
1428#define HOST_CNTR_nBT_EN 0x0
1429#define HOST_CNTR_EHW 0x100 /* Enable Host Write */
1430#define HOST_CNTR_nEHW 0x0
1431#define HOST_CNTR_EHR 0x200 /* Enable Host Read */
1432#define HOST_CNTR_nEHR 0x0
1433#define HOST_CNTR_BDR 0x400 /* Burst DMA Requests */
1434#define HOST_CNTR_nBDR 0x0
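/* Illustrative sketch: enable the host port in both directions with the
 * bus timeout active; bfin_write_HOST_CONTROL() is an assumed accessor
 * wrapping the HOST_CONTROL address defined earlier. */
bfin_write_HOST_CONTROL(HOST_CNTR_HOST_EN | HOST_CNTR_EHW |
                        HOST_CNTR_EHR | HOST_CNTR_BT_EN);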
1435
1436/* Bit masks for HOST_STATUS */
1437
1438#define HOST_STAT_READY 0x1 /* DMA Ready */
1439#define HOST_STAT_nREADY 0x0
1440#define HOST_STAT_FIFOFULL 0x2 /* FIFO Full */
1441#define HOST_STAT_nFIFOFULL 0x0
1442#define HOST_STAT_FIFOEMPTY 0x4 /* FIFO Empty */
1443#define HOST_STAT_nFIFOEMPTY 0x0
1444#define HOST_STAT_COMPLETE 0x8 /* DMA Complete */
1445#define HOST_STAT_nCOMPLETE 0x0
1446#define HOST_STAT_HSHK 0x10 /* Host Handshake */
1447#define HOST_STAT_nHSHK 0x0
1448#define HOST_STAT_TIMEOUT 0x20 /* Host Timeout */
1449#define HOST_STAT_nTIMEOUT 0x0
1450#define HOST_STAT_HIRQ 0x40 /* Host Interrupt Request */
1451#define HOST_STAT_nHIRQ 0x0
1452#define HOST_STAT_ALLOW_CNFG 0x80 /* Allow New Configuration */
1453#define HOST_STAT_nALLOW_CNFG 0x0
1454#define HOST_STAT_DMA_DIR 0x100 /* DMA Direction */
1455#define HOST_STAT_nDMA_DIR 0x0
1456#define HOST_STAT_BTE 0x200 /* Bus Timeout Enabled */
1457#define HOST_STAT_nBTE 0x0
1458#define HOST_STAT_HOSTRD_DONE 0x8000 /* Host Read Completion Interrupt */
1459#define HOST_STAT_nHOSTRD_DONE 0x0
1460
1461/* Bit masks for HOST_TIMEOUT */
1462
1463#define HOST_COUNT_TIMEOUT 0x7ff /* Host Timeout count */
1464
1465/* Bit masks for SECURE_SYSSWT */
1466
1467#define EMUDABL 0x1 /* Emulation Disable. */
1468#define nEMUDABL 0x0
1469#define RSTDABL 0x2 /* Reset Disable */
1470#define nRSTDABL 0x0
1471#define L1IDABL 0x1c /* L1 Instruction Memory Disable. */
1472#define L1DADABL 0xe0 /* L1 Data Bank A Memory Disable. */
1473#define L1DBDABL 0x700 /* L1 Data Bank B Memory Disable. */
1474#define DMA0OVR 0x800 /* DMA0 Memory Access Override */
1475#define nDMA0OVR 0x0
1476#define DMA1OVR 0x1000 /* DMA1 Memory Access Override */
1477#define nDMA1OVR 0x0
1478#define EMUOVR 0x4000 /* Emulation Override */
1479#define nEMUOVR 0x0
1480#define OTPSEN 0x8000 /* OTP Secrets Enable. */
1481#define nOTPSEN 0x0
1482#define L2DABL 0x70000 /* L2 Memory Disable. */
1483
1484/* Bit masks for SECURE_CONTROL */
1485
1486#define SECURE0 0x1 /* SECURE 0 */
1487#define nSECURE0 0x0
1488#define SECURE1 0x2 /* SECURE 1 */
1489#define nSECURE1 0x0
1490#define SECURE2 0x4 /* SECURE 2 */
1491#define nSECURE2 0x0
1492#define SECURE3 0x8 /* SECURE 3 */
1493#define nSECURE3 0x0
1494
1495/* Bit masks for SECURE_STATUS */
1496
1497#define SECMODE 0x3 /* Secured Mode Control State */
1498#define NMI 0x4 /* Non-Maskable Interrupt */
1499#define nNMI 0x0
1500#define AFVALID 0x8 /* Authentication Firmware Valid */
1501#define nAFVALID 0x0
1502#define AFEXIT 0x10 /* Authentication Firmware Exit */
1503#define nAFEXIT 0x0
1504#define SECSTAT 0xe0 /* Secure Status */
1505
1506#endif /* _DEF_BF52X_H */
diff --git a/arch/blackfin/mach-bf527/include/mach/gpio.h b/arch/blackfin/mach-bf527/include/mach/gpio.h
index f80c2995efdb..fba606b699c3 100644
--- a/arch/blackfin/mach-bf527/include/mach/gpio.h
+++ b/arch/blackfin/mach-bf527/include/mach/gpio.h
@@ -62,4 +62,8 @@
62#define PORT_G GPIO_PG0 62#define PORT_G GPIO_PG0
63#define PORT_H GPIO_PH0 63#define PORT_H GPIO_PH0
64 64
65#include <mach-common/ports-f.h>
66#include <mach-common/ports-g.h>
67#include <mach-common/ports-h.h>
68
65#endif /* _MACH_GPIO_H_ */ 69#endif /* _MACH_GPIO_H_ */
diff --git a/arch/blackfin/mach-bf527/include/mach/pll.h b/arch/blackfin/mach-bf527/include/mach/pll.h
index 24f1d7c02325..94cca674d835 100644
--- a/arch/blackfin/mach-bf527/include/mach/pll.h
+++ b/arch/blackfin/mach-bf527/include/mach/pll.h
@@ -1,63 +1 @@
1/* #include <mach-common/pll.h>
2 * Copyright 2007-2008 Analog Devices Inc.
3 *
4 * Licensed under the GPL-2 or later
5 */
6
7#ifndef _MACH_PLL_H
8#define _MACH_PLL_H
9
10#include <asm/blackfin.h>
11#include <asm/irqflags.h>
12
13/* Writing to PLL_CTL initiates a PLL relock sequence. */
14static __inline__ void bfin_write_PLL_CTL(unsigned int val)
15{
16 unsigned long flags, iwr0, iwr1;
17
18 if (val == bfin_read_PLL_CTL())
19 return;
20
21 flags = hard_local_irq_save();
22 /* Enable the PLL Wakeup bit in SIC IWR */
23 iwr0 = bfin_read32(SIC_IWR0);
24 iwr1 = bfin_read32(SIC_IWR1);
25 /* Only allow PPL Wakeup) */
26 bfin_write32(SIC_IWR0, IWR_ENABLE(0));
27 bfin_write32(SIC_IWR1, 0);
28
29 bfin_write16(PLL_CTL, val);
30 SSYNC();
31 asm("IDLE;");
32
33 bfin_write32(SIC_IWR0, iwr0);
34 bfin_write32(SIC_IWR1, iwr1);
35 hard_local_irq_restore(flags);
36}
37
38/* Writing to VR_CTL initiates a PLL relock sequence. */
39static __inline__ void bfin_write_VR_CTL(unsigned int val)
40{
41 unsigned long flags, iwr0, iwr1;
42
43 if (val == bfin_read_VR_CTL())
44 return;
45
46 flags = hard_local_irq_save();
47 /* Enable the PLL Wakeup bit in SIC IWR */
48 iwr0 = bfin_read32(SIC_IWR0);
49 iwr1 = bfin_read32(SIC_IWR1);
50 /* Only allow PPL Wakeup) */
51 bfin_write32(SIC_IWR0, IWR_ENABLE(0));
52 bfin_write32(SIC_IWR1, 0);
53
54 bfin_write16(VR_CTL, val);
55 SSYNC();
56 asm("IDLE;");
57
58 bfin_write32(SIC_IWR0, iwr0);
59 bfin_write32(SIC_IWR1, iwr1);
60 hard_local_irq_restore(flags);
61}
62
63#endif /* _MACH_PLL_H */
diff --git a/arch/blackfin/mach-bf533/boards/H8606.c b/arch/blackfin/mach-bf533/boards/H8606.c
index 2ce7b16faee1..d4bfcea56828 100644
--- a/arch/blackfin/mach-bf533/boards/H8606.c
+++ b/arch/blackfin/mach-bf533/boards/H8606.c
@@ -286,7 +286,7 @@ static struct resource bfin_uart0_resources[] = {
286 }, 286 },
287}; 287};
288 288
289unsigned short bfin_uart0_peripherals[] = { 289static unsigned short bfin_uart0_peripherals[] = {
290 P_UART0_TX, P_UART0_RX, 0 290 P_UART0_TX, P_UART0_RX, 0
291}; 291};
292 292
diff --git a/arch/blackfin/mach-bf533/boards/blackstamp.c b/arch/blackfin/mach-bf533/boards/blackstamp.c
index 20c102285bef..87b5af3693c1 100644
--- a/arch/blackfin/mach-bf533/boards/blackstamp.c
+++ b/arch/blackfin/mach-bf533/boards/blackstamp.c
@@ -25,7 +25,6 @@
25#include <asm/bfin5xx_spi.h> 25#include <asm/bfin5xx_spi.h>
26#include <asm/portmux.h> 26#include <asm/portmux.h>
27#include <asm/dpmc.h> 27#include <asm/dpmc.h>
28#include <mach/fio_flag.h>
29 28
30/* 29/*
31 * Name the Board for the /proc/cpuinfo 30 * Name the Board for the /proc/cpuinfo
@@ -225,7 +224,7 @@ static struct resource bfin_uart0_resources[] = {
225 }, 224 },
226}; 225};
227 226
228unsigned short bfin_uart0_peripherals[] = { 227static unsigned short bfin_uart0_peripherals[] = {
229 P_UART0_TX, P_UART0_RX, 0 228 P_UART0_TX, P_UART0_RX, 0
230}; 229};
231 230
@@ -290,9 +289,9 @@ static struct resource bfin_sport0_uart_resources[] = {
290 }, 289 },
291}; 290};
292 291
293unsigned short bfin_sport0_peripherals[] = { 292static unsigned short bfin_sport0_peripherals[] = {
294 P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, 293 P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
295 P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0 294 P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
296}; 295};
297 296
298static struct platform_device bfin_sport0_uart_device = { 297static struct platform_device bfin_sport0_uart_device = {
@@ -324,9 +323,9 @@ static struct resource bfin_sport1_uart_resources[] = {
324 }, 323 },
325}; 324};
326 325
327unsigned short bfin_sport1_peripherals[] = { 326static unsigned short bfin_sport1_peripherals[] = {
328 P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, 327 P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
329 P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0 328 P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
330}; 329};
331 330
332static struct platform_device bfin_sport1_uart_device = { 331static struct platform_device bfin_sport1_uart_device = {
@@ -476,10 +475,16 @@ static int __init blackstamp_init(void)
476 return ret; 475 return ret;
477 476
478#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) 477#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
479 /* setup BF533_STAMP CPLD to route AMS3 to Ethernet MAC */ 478 /*
480 bfin_write_FIO_DIR(bfin_read_FIO_DIR() | PF0); 479 * setup BF533_STAMP CPLD to route AMS3 to Ethernet MAC.
481 bfin_write_FIO_FLAG_S(PF0); 480 * the bfin-async-map driver takes care of flipping between
482 SSYNC(); 481 * flash and ethernet when necessary.
482 */
483 ret = gpio_request(GPIO_PF0, "enet_cpld");
484 if (!ret) {
485 gpio_direction_output(GPIO_PF0, 1);
486 gpio_free(GPIO_PF0);
487 }
483#endif 488#endif
484 489
485 spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info)); 490 spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
diff --git a/arch/blackfin/mach-bf533/boards/cm_bf533.c b/arch/blackfin/mach-bf533/boards/cm_bf533.c
index adbe62a81e25..4d5604eaa7c2 100644
--- a/arch/blackfin/mach-bf533/boards/cm_bf533.c
+++ b/arch/blackfin/mach-bf533/boards/cm_bf533.c
@@ -271,7 +271,7 @@ static struct resource bfin_uart0_resources[] = {
271 }, 271 },
272}; 272};
273 273
274unsigned short bfin_uart0_peripherals[] = { 274static unsigned short bfin_uart0_peripherals[] = {
275 P_UART0_TX, P_UART0_RX, 0 275 P_UART0_TX, P_UART0_RX, 0
276}; 276};
277 277
@@ -336,9 +336,9 @@ static struct resource bfin_sport0_uart_resources[] = {
336 }, 336 },
337}; 337};
338 338
339unsigned short bfin_sport0_peripherals[] = { 339static unsigned short bfin_sport0_peripherals[] = {
340 P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, 340 P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
341 P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0 341 P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
342}; 342};
343 343
344static struct platform_device bfin_sport0_uart_device = { 344static struct platform_device bfin_sport0_uart_device = {
@@ -370,9 +370,9 @@ static struct resource bfin_sport1_uart_resources[] = {
370 }, 370 },
371}; 371};
372 372
373unsigned short bfin_sport1_peripherals[] = { 373static unsigned short bfin_sport1_peripherals[] = {
374 P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, 374 P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
375 P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0 375 P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
376}; 376};
377 377
378static struct platform_device bfin_sport1_uart_device = { 378static struct platform_device bfin_sport1_uart_device = {
diff --git a/arch/blackfin/mach-bf533/boards/ezkit.c b/arch/blackfin/mach-bf533/boards/ezkit.c
index a1cb8e7c1010..b67b91d82242 100644
--- a/arch/blackfin/mach-bf533/boards/ezkit.c
+++ b/arch/blackfin/mach-bf533/boards/ezkit.c
@@ -349,7 +349,7 @@ static struct resource bfin_uart0_resources[] = {
349 }, 349 },
350}; 350};
351 351
352unsigned short bfin_uart0_peripherals[] = { 352static unsigned short bfin_uart0_peripherals[] = {
353 P_UART0_TX, P_UART0_RX, 0 353 P_UART0_TX, P_UART0_RX, 0
354}; 354};
355 355
diff --git a/arch/blackfin/mach-bf533/boards/ip0x.c b/arch/blackfin/mach-bf533/boards/ip0x.c
index 5ba4b02a12eb..f869a3711480 100644
--- a/arch/blackfin/mach-bf533/boards/ip0x.c
+++ b/arch/blackfin/mach-bf533/boards/ip0x.c
@@ -22,7 +22,6 @@
22#include <asm/dma.h> 22#include <asm/dma.h>
23#include <asm/bfin5xx_spi.h> 23#include <asm/bfin5xx_spi.h>
24#include <asm/portmux.h> 24#include <asm/portmux.h>
25#include <mach/fio_flag.h>
26 25
27/* 26/*
28 * Name the Board for the /proc/cpuinfo 27 * Name the Board for the /proc/cpuinfo
@@ -174,7 +173,7 @@ static struct resource bfin_uart0_resources[] = {
174 }, 173 },
175}; 174};
176 175
177unsigned short bfin_uart0_peripherals[] = { 176static unsigned short bfin_uart0_peripherals[] = {
178 P_UART0_TX, P_UART0_RX, 0 177 P_UART0_TX, P_UART0_RX, 0
179}; 178};
180 179
@@ -295,15 +294,7 @@ static int __init ip0x_init(void)
295 printk(KERN_INFO "%s(): registering device resources\n", __func__); 294 printk(KERN_INFO "%s(): registering device resources\n", __func__);
296 platform_add_devices(ip0x_devices, ARRAY_SIZE(ip0x_devices)); 295 platform_add_devices(ip0x_devices, ARRAY_SIZE(ip0x_devices));
297 296
298#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
299 for (i = 0; i < ARRAY_SIZE(bfin_spi_board_info); ++i) {
300 int j = 1 << bfin_spi_board_info[i].chip_select;
301 /* set spi cs to 1 */
302 bfin_write_FIO_DIR(bfin_read_FIO_DIR() | j);
303 bfin_write_FIO_FLAG_S(j);
304 }
305 spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info)); 297 spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
306#endif
307 298
308 return 0; 299 return 0;
309} 300}
diff --git a/arch/blackfin/mach-bf533/boards/stamp.c b/arch/blackfin/mach-bf533/boards/stamp.c
index b3b1cdea2703..43224ef00b8c 100644
--- a/arch/blackfin/mach-bf533/boards/stamp.c
+++ b/arch/blackfin/mach-bf533/boards/stamp.c
@@ -24,7 +24,6 @@
24#include <asm/reboot.h> 24#include <asm/reboot.h>
25#include <asm/portmux.h> 25#include <asm/portmux.h>
26#include <asm/dpmc.h> 26#include <asm/dpmc.h>
27#include <mach/fio_flag.h>
28 27
29/* 28/*
30 * Name the Board for the /proc/cpuinfo 29 * Name the Board for the /proc/cpuinfo
@@ -354,7 +353,7 @@ static struct resource bfin_uart0_resources[] = {
354 }, 353 },
355}; 354};
356 355
357unsigned short bfin_uart0_peripherals[] = { 356static unsigned short bfin_uart0_peripherals[] = {
358 P_UART0_TX, P_UART0_RX, 0 357 P_UART0_TX, P_UART0_RX, 0
359}; 358};
360 359
@@ -419,9 +418,9 @@ static struct resource bfin_sport0_uart_resources[] = {
419 }, 418 },
420}; 419};
421 420
422unsigned short bfin_sport0_peripherals[] = { 421static unsigned short bfin_sport0_peripherals[] = {
423 P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, 422 P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
424 P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0 423 P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
425}; 424};
426 425
427static struct platform_device bfin_sport0_uart_device = { 426static struct platform_device bfin_sport0_uart_device = {
@@ -453,9 +452,9 @@ static struct resource bfin_sport1_uart_resources[] = {
453 }, 452 },
454}; 453};
455 454
456unsigned short bfin_sport1_peripherals[] = { 455static unsigned short bfin_sport1_peripherals[] = {
457 P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, 456 P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
458 P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0 457 P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
459}; 458};
460 459
461static struct platform_device bfin_sport1_uart_device = { 460static struct platform_device bfin_sport1_uart_device = {
@@ -674,10 +673,16 @@ static int __init stamp_init(void)
674 return ret; 673 return ret;
675 674
676#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) 675#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
677 /* setup BF533_STAMP CPLD to route AMS3 to Ethernet MAC */ 676 /*
678 bfin_write_FIO_DIR(bfin_read_FIO_DIR() | PF0); 677 * setup BF533_STAMP CPLD to route AMS3 to Ethernet MAC.
679 bfin_write_FIO_FLAG_S(PF0); 678 * the bfin-async-map driver takes care of flipping between
680 SSYNC(); 679 * flash and ethernet when necessary.
680 */
681 ret = gpio_request(GPIO_PF0, "enet_cpld");
682 if (!ret) {
683 gpio_direction_output(GPIO_PF0, 1);
684 gpio_free(GPIO_PF0);
685 }
681#endif 686#endif
682 687
683 spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info)); 688 spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
@@ -713,7 +718,6 @@ void __init native_machine_early_platform_add_devices(void)
713void native_machine_restart(char *cmd) 718void native_machine_restart(char *cmd)
714{ 719{
715 /* workaround pull up on cpld / flash pin not being strong enough */ 720 /* workaround pull up on cpld / flash pin not being strong enough */
716 bfin_write_FIO_INEN(~PF0); 721 gpio_request(GPIO_PF0, "flash_cpld");
717 bfin_write_FIO_DIR(PF0); 722 gpio_direction_output(GPIO_PF0, 0);
718 bfin_write_FIO_FLAG_C(PF0);
719} 723}
diff --git a/arch/blackfin/mach-bf533/dma.c b/arch/blackfin/mach-bf533/dma.c
index 4a14a46a9a68..1f5988d43139 100644
--- a/arch/blackfin/mach-bf533/dma.c
+++ b/arch/blackfin/mach-bf533/dma.c
@@ -11,7 +11,7 @@
11#include <asm/blackfin.h> 11#include <asm/blackfin.h>
12#include <asm/dma.h> 12#include <asm/dma.h>
13 13
14struct dma_register *dma_io_base_addr[MAX_DMA_CHANNELS] = { 14struct dma_register * const dma_io_base_addr[MAX_DMA_CHANNELS] = {
15 (struct dma_register *) DMA0_NEXT_DESC_PTR, 15 (struct dma_register *) DMA0_NEXT_DESC_PTR,
16 (struct dma_register *) DMA1_NEXT_DESC_PTR, 16 (struct dma_register *) DMA1_NEXT_DESC_PTR,
17 (struct dma_register *) DMA2_NEXT_DESC_PTR, 17 (struct dma_register *) DMA2_NEXT_DESC_PTR,
diff --git a/arch/blackfin/mach-bf533/include/mach/bfin_serial.h b/arch/blackfin/mach-bf533/include/mach/bfin_serial.h
new file mode 100644
index 000000000000..08072c86d5dc
--- /dev/null
+++ b/arch/blackfin/mach-bf533/include/mach/bfin_serial.h
@@ -0,0 +1,14 @@
1/*
2 * mach/bfin_serial.h - Blackfin UART/Serial definitions
3 *
4 * Copyright 2006-2010 Analog Devices Inc.
5 *
6 * Licensed under the GPL-2 or later.
7 */
8
9#ifndef __BFIN_MACH_SERIAL_H__
10#define __BFIN_MACH_SERIAL_H__
11
12#define BFIN_UART_NR_PORTS 1
13
14#endif
diff --git a/arch/blackfin/mach-bf533/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf533/include/mach/bfin_serial_5xx.h
index 9e1f3defb6bc..45dcaa4f3e41 100644
--- a/arch/blackfin/mach-bf533/include/mach/bfin_serial_5xx.h
+++ b/arch/blackfin/mach-bf533/include/mach/bfin_serial_5xx.h
@@ -4,36 +4,9 @@
4 * Licensed under the GPL-2 or later 4 * Licensed under the GPL-2 or later
5 */ 5 */
6 6
7#include <linux/serial.h>
8#include <asm/dma.h> 7#include <asm/dma.h>
9#include <asm/portmux.h> 8#include <asm/portmux.h>
10 9
11#define UART_GET_CHAR(uart) bfin_read16(((uart)->port.membase + OFFSET_RBR))
12#define UART_GET_DLL(uart) bfin_read16(((uart)->port.membase + OFFSET_DLL))
13#define UART_GET_IER(uart) bfin_read16(((uart)->port.membase + OFFSET_IER))
14#define UART_GET_DLH(uart) bfin_read16(((uart)->port.membase + OFFSET_DLH))
15#define UART_GET_IIR(uart) bfin_read16(((uart)->port.membase + OFFSET_IIR))
16#define UART_GET_LCR(uart) bfin_read16(((uart)->port.membase + OFFSET_LCR))
17#define UART_GET_GCTL(uart) bfin_read16(((uart)->port.membase + OFFSET_GCTL))
18
19#define UART_PUT_CHAR(uart,v) bfin_write16(((uart)->port.membase + OFFSET_THR),v)
20#define UART_PUT_DLL(uart,v) bfin_write16(((uart)->port.membase + OFFSET_DLL),v)
21#define UART_PUT_IER(uart,v) bfin_write16(((uart)->port.membase + OFFSET_IER),v)
22#define UART_SET_IER(uart,v) UART_PUT_IER(uart, UART_GET_IER(uart) | (v))
23#define UART_CLEAR_IER(uart,v) UART_PUT_IER(uart, UART_GET_IER(uart) & ~(v))
24#define UART_PUT_DLH(uart,v) bfin_write16(((uart)->port.membase + OFFSET_DLH),v)
25#define UART_PUT_LCR(uart,v) bfin_write16(((uart)->port.membase + OFFSET_LCR),v)
26#define UART_PUT_GCTL(uart,v) bfin_write16(((uart)->port.membase + OFFSET_GCTL),v)
27
28#define UART_SET_DLAB(uart) do { UART_PUT_LCR(uart, UART_GET_LCR(uart) | DLAB); SSYNC(); } while (0)
29#define UART_CLEAR_DLAB(uart) do { UART_PUT_LCR(uart, UART_GET_LCR(uart) & ~DLAB); SSYNC(); } while (0)
30
31#define UART_GET_CTS(x) gpio_get_value(x->cts_pin)
32#define UART_DISABLE_RTS(x) gpio_set_value(x->rts_pin, 1)
33#define UART_ENABLE_RTS(x) gpio_set_value(x->rts_pin, 0)
34#define UART_ENABLE_INTS(x, v) UART_PUT_IER(x, v)
35#define UART_DISABLE_INTS(x) UART_PUT_IER(x, 0)
36
37#ifdef CONFIG_BFIN_UART0_CTSRTS 10#ifdef CONFIG_BFIN_UART0_CTSRTS
38# define CONFIG_SERIAL_BFIN_CTSRTS 11# define CONFIG_SERIAL_BFIN_CTSRTS
39# ifndef CONFIG_UART0_CTS_PIN 12# ifndef CONFIG_UART0_CTS_PIN
@@ -44,51 +17,6 @@
44# endif 17# endif
45#endif 18#endif
46 19
47#define BFIN_UART_TX_FIFO_SIZE 2
48
49struct bfin_serial_port {
50 struct uart_port port;
51 unsigned int old_status;
52 int status_irq;
53 unsigned int lsr;
54#ifdef CONFIG_SERIAL_BFIN_DMA
55 int tx_done;
56 int tx_count;
57 struct circ_buf rx_dma_buf;
58 struct timer_list rx_dma_timer;
59 int rx_dma_nrows;
60 unsigned int tx_dma_channel;
61 unsigned int rx_dma_channel;
62 struct work_struct tx_dma_workqueue;
63#else
64# if ANOMALY_05000363
65 unsigned int anomaly_threshold;
66# endif
67#endif
68#ifdef CONFIG_SERIAL_BFIN_CTSRTS
69 struct timer_list cts_timer;
70 int cts_pin;
71 int rts_pin;
72#endif
73};
74
75/* The hardware clears the LSR bits upon read, so we need to cache
76 * some of the more fun bits in software so they don't get lost
77 * when checking the LSR in other code paths (TX).
78 */
79static inline unsigned int UART_GET_LSR(struct bfin_serial_port *uart)
80{
81 unsigned int lsr = bfin_read16(uart->port.membase + OFFSET_LSR);
82 uart->lsr |= (lsr & (BI|FE|PE|OE));
83 return lsr | uart->lsr;
84}
85
86static inline void UART_CLEAR_LSR(struct bfin_serial_port *uart)
87{
88 uart->lsr = 0;
89 bfin_write16(uart->port.membase + OFFSET_LSR, -1);
90}
91
92struct bfin_serial_res { 20struct bfin_serial_res {
93 unsigned long uart_base_addr; 21 unsigned long uart_base_addr;
94 int uart_irq; 22 int uart_irq;
@@ -120,3 +48,5 @@ struct bfin_serial_res bfin_serial_resource[] = {
120}; 48};
121 49
122#define DRIVER_NAME "bfin-uart" 50#define DRIVER_NAME "bfin-uart"
51
52#include <asm/bfin_serial.h>
diff --git a/arch/blackfin/mach-bf533/include/mach/blackfin.h b/arch/blackfin/mach-bf533/include/mach/blackfin.h
index f4bd6df5d968..e366207fbf12 100644
--- a/arch/blackfin/mach-bf533/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf533/include/mach/blackfin.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Copyright 2005-2009 Analog Devices Inc. 2 * Copyright 2005-2010 Analog Devices Inc.
3 * 3 *
4 * Licensed under the GPL-2 or later 4 * Licensed under the GPL-2 or later.
5 */ 5 */
6 6
7#ifndef _MACH_BLACKFIN_H_ 7#ifndef _MACH_BLACKFIN_H_
@@ -10,26 +10,14 @@
10#define BF533_FAMILY 10#define BF533_FAMILY
11 11
12#include "bf533.h" 12#include "bf533.h"
13#include "defBF532.h"
14#include "anomaly.h" 13#include "anomaly.h"
15 14
16#if !defined(__ASSEMBLY__) 15#include <asm/def_LPBlackfin.h>
17#include "cdefBF532.h" 16#include "defBF532.h"
18#endif
19
20#define BFIN_UART_NR_PORTS 1
21 17
22#define OFFSET_THR 0x00 /* Transmit Holding register */ 18#ifndef __ASSEMBLY__
23#define OFFSET_RBR 0x00 /* Receive Buffer register */ 19# include <asm/cdef_LPBlackfin.h>
24#define OFFSET_DLL 0x00 /* Divisor Latch (Low-Byte) */ 20# include "cdefBF532.h"
25#define OFFSET_IER 0x04 /* Interrupt Enable Register */ 21#endif
26#define OFFSET_DLH 0x04 /* Divisor Latch (High-Byte) */
27#define OFFSET_IIR 0x08 /* Interrupt Identification Register */
28#define OFFSET_LCR 0x0C /* Line Control Register */
29#define OFFSET_MCR 0x10 /* Modem Control Register */
30#define OFFSET_LSR 0x14 /* Line Status Register */
31#define OFFSET_MSR 0x18 /* Modem Status Register */
32#define OFFSET_SCR 0x1C /* SCR Scratch Register */
33#define OFFSET_GCTL 0x24 /* Global Control Register */
34 22
35#endif /* _MACH_BLACKFIN_H_ */ 23#endif
diff --git a/arch/blackfin/mach-bf533/include/mach/cdefBF532.h b/arch/blackfin/mach-bf533/include/mach/cdefBF532.h
index 401e524f5321..fd0cbe4df21a 100644
--- a/arch/blackfin/mach-bf533/include/mach/cdefBF532.h
+++ b/arch/blackfin/mach-bf533/include/mach/cdefBF532.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2005-2008 Analog Devices Inc. 2 * Copyright 2005-2010 Analog Devices Inc.
3 * 3 *
4 * Licensed under the GPL-2 or later 4 * Licensed under the GPL-2 or later
5 */ 5 */
@@ -7,9 +7,6 @@
7#ifndef _CDEF_BF532_H 7#ifndef _CDEF_BF532_H
8#define _CDEF_BF532_H 8#define _CDEF_BF532_H
9 9
10/*include core specific register pointer definitions*/
11#include <asm/cdef_LPBlackfin.h>
12
13/* Clock and System Control (0xFFC0 0400-0xFFC0 07FF) */ 10/* Clock and System Control (0xFFC0 0400-0xFFC0 07FF) */
14#define bfin_read_PLL_CTL() bfin_read16(PLL_CTL) 11#define bfin_read_PLL_CTL() bfin_read16(PLL_CTL)
15#define bfin_read_PLL_STAT() bfin_read16(PLL_STAT) 12#define bfin_read_PLL_STAT() bfin_read16(PLL_STAT)
@@ -66,16 +63,10 @@
66#define bfin_write_RTC_PREN(val) bfin_write16(RTC_PREN,val) 63#define bfin_write_RTC_PREN(val) bfin_write16(RTC_PREN,val)
67 64
68/* DMA Traffic controls */ 65/* DMA Traffic controls */
69#define bfin_read_DMA_TCPER() bfin_read16(DMA_TCPER) 66#define bfin_read_DMAC_TC_PER() bfin_read16(DMAC_TC_PER)
70#define bfin_write_DMA_TCPER(val) bfin_write16(DMA_TCPER,val) 67#define bfin_write_DMAC_TC_PER(val) bfin_write16(DMAC_TC_PER,val)
71#define bfin_read_DMA_TCCNT() bfin_read16(DMA_TCCNT) 68#define bfin_read_DMAC_TC_CNT() bfin_read16(DMAC_TC_CNT)
72#define bfin_write_DMA_TCCNT(val) bfin_write16(DMA_TCCNT,val) 69#define bfin_write_DMAC_TC_CNT(val) bfin_write16(DMAC_TC_CNT,val)
73
74/* Alternate deprecated register names (below) provided for backwards code compatibility */
75#define bfin_read_DMA_TC_PER() bfin_read16(DMA_TC_PER)
76#define bfin_write_DMA_TC_PER(val) bfin_write16(DMA_TC_PER,val)
77#define bfin_read_DMA_TC_CNT() bfin_read16(DMA_TC_CNT)
78#define bfin_write_DMA_TC_CNT(val) bfin_write16(DMA_TC_CNT,val)
79 70
80/* General Purpose IO (0xFFC0 2400-0xFFC0 27FF) */ 71/* General Purpose IO (0xFFC0 2400-0xFFC0 27FF) */
81#define bfin_read_FIO_DIR() bfin_read16(FIO_DIR) 72#define bfin_read_FIO_DIR() bfin_read16(FIO_DIR)
@@ -105,6 +96,47 @@
105#define bfin_read_FIO_MASKB_T() bfin_read16(FIO_MASKB_T) 96#define bfin_read_FIO_MASKB_T() bfin_read16(FIO_MASKB_T)
106#define bfin_write_FIO_MASKB_T(val) bfin_write16(FIO_MASKB_T,val) 97#define bfin_write_FIO_MASKB_T(val) bfin_write16(FIO_MASKB_T,val)
107 98
99#if ANOMALY_05000311
100/* Keep at the CPP expansion to avoid circular header dependency loops */
101#define BFIN_WRITE_FIO_FLAG(name, val) \
102 do { \
103 unsigned long __flags; \
104 __flags = hard_local_irq_save(); \
105 bfin_write16(FIO_FLAG_##name, val); \
106 bfin_read_CHIPID(); \
107 hard_local_irq_restore(__flags); \
108 } while (0)
109#define bfin_write_FIO_FLAG_D(val) BFIN_WRITE_FIO_FLAG(D, val)
110#define bfin_write_FIO_FLAG_C(val) BFIN_WRITE_FIO_FLAG(C, val)
111#define bfin_write_FIO_FLAG_S(val) BFIN_WRITE_FIO_FLAG(S, val)
112#define bfin_write_FIO_FLAG_T(val) BFIN_WRITE_FIO_FLAG(T, val)
113
114#define BFIN_READ_FIO_FLAG(name) \
115 ({ \
116 unsigned long __flags; \
117 u16 __ret; \
118 __flags = hard_local_irq_save(); \
119 __ret = bfin_read16(FIO_FLAG_##name); \
120 bfin_read_CHIPID(); \
121 hard_local_irq_restore(__flags); \
122 __ret; \
123 })
124#define bfin_read_FIO_FLAG_D() BFIN_READ_FIO_FLAG(D)
125#define bfin_read_FIO_FLAG_C() BFIN_READ_FIO_FLAG(C)
126#define bfin_read_FIO_FLAG_S() BFIN_READ_FIO_FLAG(S)
127#define bfin_read_FIO_FLAG_T() BFIN_READ_FIO_FLAG(T)
128
129#else
130#define bfin_write_FIO_FLAG_D(val) bfin_write16(FIO_FLAG_D, val)
131#define bfin_write_FIO_FLAG_C(val) bfin_write16(FIO_FLAG_C, val)
132#define bfin_write_FIO_FLAG_S(val) bfin_write16(FIO_FLAG_S, val)
133#define bfin_write_FIO_FLAG_T(val) bfin_write16(FIO_FLAG_T, val)
134#define bfin_read_FIO_FLAG_D() bfin_read16(FIO_FLAG_D)
135#define bfin_read_FIO_FLAG_C() bfin_read16(FIO_FLAG_C)
136#define bfin_read_FIO_FLAG_S() bfin_read16(FIO_FLAG_S)
137#define bfin_read_FIO_FLAG_T() bfin_read16(FIO_FLAG_T)
138#endif
139
108/* DMA Controller */ 140/* DMA Controller */
109#define bfin_read_DMA0_CONFIG() bfin_read16(DMA0_CONFIG) 141#define bfin_read_DMA0_CONFIG() bfin_read16(DMA0_CONFIG)
110#define bfin_write_DMA0_CONFIG(val) bfin_write16(DMA0_CONFIG,val) 142#define bfin_write_DMA0_CONFIG(val) bfin_write16(DMA0_CONFIG,val)
@@ -647,7 +679,4 @@
647#define bfin_read_PPI_FRAME() bfin_read16(PPI_FRAME) 679#define bfin_read_PPI_FRAME() bfin_read16(PPI_FRAME)
648#define bfin_write_PPI_FRAME(val) bfin_write16(PPI_FRAME,val) 680#define bfin_write_PPI_FRAME(val) bfin_write16(PPI_FRAME,val)
649 681
650/* These need to be last due to the cdef/linux inter-dependencies */
651#include <asm/irq.h>
652
653#endif /* _CDEF_BF532_H */ 682#endif /* _CDEF_BF532_H */
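The FIO_FLAG accessors added above route every flag-register access through an IRQ-protected write followed by a CHIPID read whenever ANOMALY_05000311 (erroneous flag-pin operations under specific sequences) applies. A minimal caller sketch, assuming the PF bit masks from this family's headers; the function name is hypothetical and not part of the patch:

static void example_pulse_pf3(void)
{
	/* set PF3; the accessor pads the MMR write with a CHIPID read */
	bfin_write_FIO_FLAG_S(PF3);
	SSYNC();
	/* clear PF3 through the same anomaly-safe path */
	bfin_write_FIO_FLAG_C(PF3);
}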
diff --git a/arch/blackfin/mach-bf533/include/mach/defBF532.h b/arch/blackfin/mach-bf533/include/mach/defBF532.h
index 3adb0b44e597..2376d5393511 100644
--- a/arch/blackfin/mach-bf533/include/mach/defBF532.h
+++ b/arch/blackfin/mach-bf533/include/mach/defBF532.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * System & MMR bit and Address definitions for ADSP-BF532 2 * System & MMR bit and Address definitions for ADSP-BF532
3 * 3 *
4 * Copyright 2005-2008 Analog Devices Inc. 4 * Copyright 2005-2010 Analog Devices Inc.
5 * 5 *
6 * Licensed under the ADI BSD license or the GPL-2 (or later) 6 * Licensed under the ADI BSD license or the GPL-2 (or later)
7 */ 7 */
@@ -9,9 +9,6 @@
9#ifndef _DEF_BF532_H 9#ifndef _DEF_BF532_H
10#define _DEF_BF532_H 10#define _DEF_BF532_H
11 11
12/* include all Core registers and bit definitions */
13#include <asm/def_LPBlackfin.h>
14
15/*********************************************************************************** */ 12/*********************************************************************************** */
16/* System MMR Register Map */ 13/* System MMR Register Map */
17/*********************************************************************************** */ 14/*********************************************************************************** */
@@ -182,12 +179,8 @@
182#define EBIU_SDSTAT 0xFFC00A1C /* SDRAM Status Register */ 179#define EBIU_SDSTAT 0xFFC00A1C /* SDRAM Status Register */
183 180
184/* DMA Traffic controls */ 181/* DMA Traffic controls */
185#define DMA_TC_PER 0xFFC00B0C /* Traffic Control Periods Register */ 182#define DMAC_TC_PER 0xFFC00B0C /* Traffic Control Periods Register */
186#define DMA_TC_CNT 0xFFC00B10 /* Traffic Control Current Counts Register */ 183#define DMAC_TC_CNT 0xFFC00B10 /* Traffic Control Current Counts Register */
187
188/* Alternate deprecated register names (below) provided for backwards code compatibility */
189#define DMA_TCPER 0xFFC00B0C /* Traffic Control Periods Register */
190#define DMA_TCCNT 0xFFC00B10 /* Traffic Control Current Counts Register */
191 184
192/* DMA Controller (0xFFC00C00 - 0xFFC00FFF) */ 185/* DMA Controller (0xFFC00C00 - 0xFFC00FFF) */
193#define DMA0_CONFIG 0xFFC00C08 /* DMA Channel 0 Configuration Register */ 186#define DMA0_CONFIG 0xFFC00C08 /* DMA Channel 0 Configuration Register */
@@ -432,83 +425,6 @@
432#define IWR_ENABLE(x) (1 << (x)) /* Wakeup Enable Peripheral #x */ 425#define IWR_ENABLE(x) (1 << (x)) /* Wakeup Enable Peripheral #x */
433#define IWR_DISABLE(x) (0xFFFFFFFF ^ (1 << (x))) /* Wakeup Disable Peripheral #x */ 426#define IWR_DISABLE(x) (0xFFFFFFFF ^ (1 << (x))) /* Wakeup Disable Peripheral #x */
434 427
435/* ***************************** UART CONTROLLER MASKS ********************** */
436
437/* UART_LCR Register */
438
439#define DLAB 0x80
440#define SB 0x40
441#define STP 0x20
442#define EPS 0x10
443#define PEN 0x08
444#define STB 0x04
445#define WLS(x) ((x-5) & 0x03)
446
447#define DLAB_P 0x07
448#define SB_P 0x06
449#define STP_P 0x05
450#define EPS_P 0x04
451#define PEN_P 0x03
452#define STB_P 0x02
453#define WLS_P1 0x01
454#define WLS_P0 0x00
455
456/* UART_MCR Register */
457#define LOOP_ENA 0x10
458#define LOOP_ENA_P 0x04
459
460/* UART_LSR Register */
461#define TEMT 0x40
462#define THRE 0x20
463#define BI 0x10
464#define FE 0x08
465#define PE 0x04
466#define OE 0x02
467#define DR 0x01
468
469#define TEMP_P 0x06
470#define THRE_P 0x05
471#define BI_P 0x04
472#define FE_P 0x03
473#define PE_P 0x02
474#define OE_P 0x01
475#define DR_P 0x00
476
477/* UART_IER Register */
478#define ELSI 0x04
479#define ETBEI 0x02
480#define ERBFI 0x01
481
482#define ELSI_P 0x02
483#define ETBEI_P 0x01
484#define ERBFI_P 0x00
485
486/* UART_IIR Register */
487#define STATUS(x) ((x << 1) & 0x06)
488#define NINT 0x01
489#define STATUS_P1 0x02
490#define STATUS_P0 0x01
491#define NINT_P 0x00
492#define IIR_TX_READY 0x02 /* UART_THR empty */
493#define IIR_RX_READY 0x04 /* Receive data ready */
494#define IIR_LINE_CHANGE 0x06 /* Receive line status */
495#define IIR_STATUS 0x06
496
497/* UART_GCTL Register */
498#define FFE 0x20
499#define FPE 0x10
500#define RPOLC 0x08
501#define TPOLC 0x04
502#define IREN 0x02
503#define UCEN 0x01
504
505#define FFE_P 0x05
506#define FPE_P 0x04
507#define RPOLC_P 0x03
508#define TPOLC_P 0x02
509#define IREN_P 0x01
510#define UCEN_P 0x00
511
512/* ********* PARALLEL PERIPHERAL INTERFACE (PPI) MASKS **************** */ 428/* ********* PARALLEL PERIPHERAL INTERFACE (PPI) MASKS **************** */
513 429
514/* PPI_CONTROL Masks */ 430/* PPI_CONTROL Masks */
@@ -643,44 +559,6 @@
643#define ERR_TYP_P0 0x0E 559#define ERR_TYP_P0 0x0E
644#define ERR_TYP_P1 0x0F 560#define ERR_TYP_P1 0x0F
645 561
646/* ****************** PROGRAMMABLE FLAG MASKS ********************* */
647
648/* General Purpose IO (0xFFC00700 - 0xFFC007FF) Masks */
649#define PF0 0x0001
650#define PF1 0x0002
651#define PF2 0x0004
652#define PF3 0x0008
653#define PF4 0x0010
654#define PF5 0x0020
655#define PF6 0x0040
656#define PF7 0x0080
657#define PF8 0x0100
658#define PF9 0x0200
659#define PF10 0x0400
660#define PF11 0x0800
661#define PF12 0x1000
662#define PF13 0x2000
663#define PF14 0x4000
664#define PF15 0x8000
665
666/* General Purpose IO (0xFFC00700 - 0xFFC007FF) BIT POSITIONS */
667#define PF0_P 0
668#define PF1_P 1
669#define PF2_P 2
670#define PF3_P 3
671#define PF4_P 4
672#define PF5_P 5
673#define PF6_P 6
674#define PF7_P 7
675#define PF8_P 8
676#define PF9_P 9
677#define PF10_P 10
678#define PF11_P 11
679#define PF12_P 12
680#define PF13_P 13
681#define PF14_P 14
682#define PF15_P 15
683
684/* ********************* ASYNCHRONOUS MEMORY CONTROLLER MASKS ************* */ 562/* ********************* ASYNCHRONOUS MEMORY CONTROLLER MASKS ************* */
685 563
686/* AMGCTL Masks */ 564/* AMGCTL Masks */
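With the deprecated DMA_TCPER/DMA_TCCNT aliases removed and the registers renamed to DMAC_TC_PER/DMAC_TC_CNT, out-of-tree users must move to the new accessors. A hypothetical call site showing the change, for illustration only:

static u16 example_read_tc_period(void)
{
	/* formerly bfin_read_DMA_TC_PER() or bfin_read_DMA_TCPER() */
	return bfin_read_DMAC_TC_PER();
}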
diff --git a/arch/blackfin/mach-bf533/include/mach/fio_flag.h b/arch/blackfin/mach-bf533/include/mach/fio_flag.h
deleted file mode 100644
index d0bfba0b083b..000000000000
--- a/arch/blackfin/mach-bf533/include/mach/fio_flag.h
+++ /dev/null
@@ -1,55 +0,0 @@
1/*
2 * Copyright 2005-2008 Analog Devices Inc.
3 *
4 * Licensed under the GPL-2 or later
5 */
6
7#ifndef _MACH_FIO_FLAG_H
8#define _MACH_FIO_FLAG_H
9
10#include <asm/blackfin.h>
11#include <asm/irqflags.h>
12
13#if ANOMALY_05000311
14#define BFIN_WRITE_FIO_FLAG(name) \
15static inline void bfin_write_FIO_FLAG_##name(unsigned short val) \
16{ \
17 unsigned long flags; \
18 flags = hard_local_irq_save(); \
19 bfin_write16(FIO_FLAG_##name, val); \
20 bfin_read_CHIPID(); \
21 hard_local_irq_restore(flags); \
22}
23BFIN_WRITE_FIO_FLAG(D)
24BFIN_WRITE_FIO_FLAG(C)
25BFIN_WRITE_FIO_FLAG(S)
26BFIN_WRITE_FIO_FLAG(T)
27
28#define BFIN_READ_FIO_FLAG(name) \
29static inline u16 bfin_read_FIO_FLAG_##name(void) \
30{ \
31 unsigned long flags; \
32 u16 ret; \
33 flags = hard_local_irq_save(); \
34 ret = bfin_read16(FIO_FLAG_##name); \
35 bfin_read_CHIPID(); \
36 hard_local_irq_restore(flags); \
37 return ret; \
38}
39BFIN_READ_FIO_FLAG(D)
40BFIN_READ_FIO_FLAG(C)
41BFIN_READ_FIO_FLAG(S)
42BFIN_READ_FIO_FLAG(T)
43
44#else
45#define bfin_write_FIO_FLAG_D(val) bfin_write16(FIO_FLAG_D, val)
46#define bfin_write_FIO_FLAG_C(val) bfin_write16(FIO_FLAG_C, val)
47#define bfin_write_FIO_FLAG_S(val) bfin_write16(FIO_FLAG_S, val)
48#define bfin_write_FIO_FLAG_T(val) bfin_write16(FIO_FLAG_T, val)
49#define bfin_read_FIO_FLAG_T() bfin_read16(FIO_FLAG_T)
50#define bfin_read_FIO_FLAG_C() bfin_read16(FIO_FLAG_C)
51#define bfin_read_FIO_FLAG_S() bfin_read16(FIO_FLAG_S)
52#define bfin_read_FIO_FLAG_D() bfin_read16(FIO_FLAG_D)
53#endif
54
55#endif /* _MACH_FIO_FLAG_H */
diff --git a/arch/blackfin/mach-bf533/include/mach/gpio.h b/arch/blackfin/mach-bf533/include/mach/gpio.h
index e02416db4b00..cce4f8fb3785 100644
--- a/arch/blackfin/mach-bf533/include/mach/gpio.h
+++ b/arch/blackfin/mach-bf533/include/mach/gpio.h
@@ -28,4 +28,6 @@
28 28
29#define PORT_F GPIO_PF0 29#define PORT_F GPIO_PF0
30 30
31#include <mach-common/ports-f.h>
32
31#endif /* _MACH_GPIO_H_ */ 33#endif /* _MACH_GPIO_H_ */
diff --git a/arch/blackfin/mach-bf533/include/mach/pll.h b/arch/blackfin/mach-bf533/include/mach/pll.h
index 169c106d0edb..94cca674d835 100644
--- a/arch/blackfin/mach-bf533/include/mach/pll.h
+++ b/arch/blackfin/mach-bf533/include/mach/pll.h
@@ -1,57 +1 @@
1/* #include <mach-common/pll.h>
2 * Copyright 2005-2008 Analog Devices Inc.
3 *
4 * Licensed under the GPL-2 or later
5 */
6
7#ifndef _MACH_PLL_H
8#define _MACH_PLL_H
9
10#include <asm/blackfin.h>
11#include <asm/irqflags.h>
12
13/* Writing to PLL_CTL initiates a PLL relock sequence. */
14static __inline__ void bfin_write_PLL_CTL(unsigned int val)
15{
16 unsigned long flags, iwr;
17
18 if (val == bfin_read_PLL_CTL())
19 return;
20
21 flags = hard_local_irq_save();
22 /* Enable the PLL Wakeup bit in SIC IWR */
23 iwr = bfin_read32(SIC_IWR);
24	/* Only allow PLL Wakeup */
25 bfin_write32(SIC_IWR, IWR_ENABLE(0));
26
27 bfin_write16(PLL_CTL, val);
28 SSYNC();
29 asm("IDLE;");
30
31 bfin_write32(SIC_IWR, iwr);
32 hard_local_irq_restore(flags);
33}
34
35/* Writing to VR_CTL initiates a PLL relock sequence. */
36static __inline__ void bfin_write_VR_CTL(unsigned int val)
37{
38 unsigned long flags, iwr;
39
40 if (val == bfin_read_VR_CTL())
41 return;
42
43 flags = hard_local_irq_save();
44 /* Enable the PLL Wakeup bit in SIC IWR */
45 iwr = bfin_read32(SIC_IWR);
46	/* Only allow PLL Wakeup */
47 bfin_write32(SIC_IWR, IWR_ENABLE(0));
48
49 bfin_write16(VR_CTL, val);
50 SSYNC();
51 asm("IDLE;");
52
53 bfin_write32(SIC_IWR, iwr);
54 hard_local_irq_restore(flags);
55}
56
57#endif /* _MACH_PLL_H */
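The per-machine pll.h bodies collapse into the shared <mach-common/pll.h>; the code removed above documents the relock sequence (restrict SIC_IWR to the PLL wakeup, write the register, SSYNC, then IDLE until the PLL relocks). A hypothetical caller sketch, assuming MSEL occupies PLL_CTL bits 14:9 as on other BF53x parts:

static void example_set_msel(unsigned int msel)
{
	unsigned int ctl = bfin_read_PLL_CTL();

	/* bfin_write_PLL_CTL() performs the relock/IDLE sequence itself */
	ctl = (ctl & ~0x7E00) | ((msel & 0x3F) << 9);
	bfin_write_PLL_CTL(ctl);
}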
diff --git a/arch/blackfin/mach-bf537/boards/Kconfig b/arch/blackfin/mach-bf537/boards/Kconfig
index 44132fda63be..a44bf3a1816e 100644
--- a/arch/blackfin/mach-bf537/boards/Kconfig
+++ b/arch/blackfin/mach-bf537/boards/Kconfig
@@ -39,4 +39,10 @@ config CAMSIG_MINOTAUR
39 help 39 help
40 Board support package for CSP Minotaur 40 Board support package for CSP Minotaur
41 41
42config DNP5370
43 bool "SSV Dil/NetPC DNP/5370"
44 depends on BF537
45 help
46 Board support package for the DNP/5370 DIL64 module
47
42endchoice 48endchoice
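The new choice entry makes the DNP/5370 selectable alongside the other BF537 boards. A hypothetical .config fragment enabling it (illustrative, not part of the patch):

CONFIG_BF537=y
CONFIG_DNP5370=y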
diff --git a/arch/blackfin/mach-bf537/boards/Makefile b/arch/blackfin/mach-bf537/boards/Makefile
index 7e6aa4e5b205..fe42258fe1f4 100644
--- a/arch/blackfin/mach-bf537/boards/Makefile
+++ b/arch/blackfin/mach-bf537/boards/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_BFIN537_BLUETECHNIX_CM_U) += cm_bf537u.o
8obj-$(CONFIG_BFIN537_BLUETECHNIX_TCM) += tcm_bf537.o 8obj-$(CONFIG_BFIN537_BLUETECHNIX_TCM) += tcm_bf537.o
9obj-$(CONFIG_PNAV10) += pnav10.o 9obj-$(CONFIG_PNAV10) += pnav10.o
10obj-$(CONFIG_CAMSIG_MINOTAUR) += minotaur.o 10obj-$(CONFIG_CAMSIG_MINOTAUR) += minotaur.o
11obj-$(CONFIG_DNP5370) += dnp5370.o
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537e.c b/arch/blackfin/mach-bf537/boards/cm_bf537e.c
index 836698c4ee54..2c776e188a94 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537e.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537e.c
@@ -373,7 +373,7 @@ static struct resource bfin_uart0_resources[] = {
373#endif 373#endif
374}; 374};
375 375
376unsigned short bfin_uart0_peripherals[] = { 376static unsigned short bfin_uart0_peripherals[] = {
377 P_UART0_TX, P_UART0_RX, 0 377 P_UART0_TX, P_UART0_RX, 0
378}; 378};
379 379
@@ -434,7 +434,7 @@ static struct resource bfin_uart1_resources[] = {
434#endif 434#endif
435}; 435};
436 436
437unsigned short bfin_uart1_peripherals[] = { 437static unsigned short bfin_uart1_peripherals[] = {
438 P_UART1_TX, P_UART1_RX, 0 438 P_UART1_TX, P_UART1_RX, 0
439}; 439};
440 440
@@ -545,9 +545,9 @@ static struct resource bfin_sport0_uart_resources[] = {
545 }, 545 },
546}; 546};
547 547
548unsigned short bfin_sport0_peripherals[] = { 548static unsigned short bfin_sport0_peripherals[] = {
549 P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, 549 P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
550 P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0 550 P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
551}; 551};
552 552
553static struct platform_device bfin_sport0_uart_device = { 553static struct platform_device bfin_sport0_uart_device = {
@@ -579,9 +579,9 @@ static struct resource bfin_sport1_uart_resources[] = {
579 }, 579 },
580}; 580};
581 581
582unsigned short bfin_sport1_peripherals[] = { 582static unsigned short bfin_sport1_peripherals[] = {
583 P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, 583 P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
584 P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0 584 P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
585}; 585};
586 586
587static struct platform_device bfin_sport1_uart_device = { 587static struct platform_device bfin_sport1_uart_device = {
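Two changes recur across the board files in this series: the peripheral pin lists lose external linkage (static), and the SPORT lists stop claiming the unused secondary data pins (P_SPORTx_DRSEC/DTSEC). A sketch of the resulting pattern, assuming peripheral_request_list() consumes these zero-terminated arrays:

static unsigned short example_sport_peripherals[] = {
	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0 /* secondary pins left unclaimed */
};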
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537u.c b/arch/blackfin/mach-bf537/boards/cm_bf537u.c
index 2a85670273cb..085661175ec7 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537u.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537u.c
@@ -356,7 +356,7 @@ static struct resource bfin_uart0_resources[] = {
356 }, 356 },
357}; 357};
358 358
359unsigned short bfin_uart0_peripherals[] = { 359static unsigned short bfin_uart0_peripherals[] = {
360 P_UART0_TX, P_UART0_RX, 0 360 P_UART0_TX, P_UART0_RX, 0
361}; 361};
362 362
@@ -399,7 +399,7 @@ static struct resource bfin_uart1_resources[] = {
399 }, 399 },
400}; 400};
401 401
402unsigned short bfin_uart1_peripherals[] = { 402static unsigned short bfin_uart1_peripherals[] = {
403 P_UART1_TX, P_UART1_RX, 0 403 P_UART1_TX, P_UART1_RX, 0
404}; 404};
405 405
@@ -510,9 +510,9 @@ static struct resource bfin_sport0_uart_resources[] = {
510 }, 510 },
511}; 511};
512 512
513unsigned short bfin_sport0_peripherals[] = { 513static unsigned short bfin_sport0_peripherals[] = {
514 P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, 514 P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
515 P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0 515 P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
516}; 516};
517 517
518static struct platform_device bfin_sport0_uart_device = { 518static struct platform_device bfin_sport0_uart_device = {
@@ -544,9 +544,9 @@ static struct resource bfin_sport1_uart_resources[] = {
544 }, 544 },
545}; 545};
546 546
547unsigned short bfin_sport1_peripherals[] = { 547static unsigned short bfin_sport1_peripherals[] = {
548 P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, 548 P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
549 P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0 549 P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
550}; 550};
551 551
552static struct platform_device bfin_sport1_uart_device = { 552static struct platform_device bfin_sport1_uart_device = {
diff --git a/arch/blackfin/mach-bf537/boards/dnp5370.c b/arch/blackfin/mach-bf537/boards/dnp5370.c
new file mode 100644
index 000000000000..e1e9ea02ad89
--- /dev/null
+++ b/arch/blackfin/mach-bf537/boards/dnp5370.c
@@ -0,0 +1,418 @@
1/*
2 * This is the configuration for SSV Dil/NetPC DNP/5370 board.
3 *
4 * DIL module: http://www.dilnetpc.com/dnp0086.htm
5 * SK28 (starter kit): http://www.dilnetpc.com/dnp0088.htm
6 *
7 * Copyright 2010 3ality Digital Systems
8 * Copyright 2005 National ICT Australia (NICTA)
9 * Copyright 2004-2006 Analog Devices Inc.
10 *
11 * Licensed under the GPL-2 or later.
12 */
13
14#include <linux/device.h>
15#include <linux/kernel.h>
16#include <linux/platform_device.h>
17#include <linux/io.h>
18#include <linux/mtd/mtd.h>
19#include <linux/mtd/nand.h>
20#include <linux/mtd/partitions.h>
21#include <linux/mtd/plat-ram.h>
22#include <linux/mtd/physmap.h>
23#include <linux/spi/spi.h>
24#include <linux/spi/flash.h>
25#include <linux/irq.h>
26#include <linux/interrupt.h>
27#include <linux/i2c.h>
28#include <linux/spi/mmc_spi.h>
29#include <linux/phy.h>
30#include <asm/dma.h>
31#include <asm/bfin5xx_spi.h>
32#include <asm/reboot.h>
33#include <asm/portmux.h>
34#include <asm/dpmc.h>
35
36/*
37 * Name the board for /proc/cpuinfo
38 */
39const char bfin_board_name[] = "DNP/5370";
40#define FLASH_MAC 0x202f0000
41#define CONFIG_MTD_PHYSMAP_LEN 0x300000
42
43#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
44static struct platform_device rtc_device = {
45 .name = "rtc-bfin",
46 .id = -1,
47};
48#endif
49
50#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
51#include <linux/bfin_mac.h>
52static const unsigned short bfin_mac_peripherals[] = P_RMII0;
53
54static struct bfin_phydev_platform_data bfin_phydev_data[] = {
55 {
56 .addr = 1,
57 .irq = PHY_POLL, /* IRQ_MAC_PHYINT */
58 },
59};
60
61static struct bfin_mii_bus_platform_data bfin_mii_bus_data = {
62 .phydev_number = 1,
63 .phydev_data = bfin_phydev_data,
64 .phy_mode = PHY_INTERFACE_MODE_RMII,
65 .mac_peripherals = bfin_mac_peripherals,
66};
67
68static struct platform_device bfin_mii_bus = {
69 .name = "bfin_mii_bus",
70 .dev = {
71 .platform_data = &bfin_mii_bus_data,
72 }
73};
74
75static struct platform_device bfin_mac_device = {
76 .name = "bfin_mac",
77 .dev = {
78 .platform_data = &bfin_mii_bus,
79 }
80};
81#endif
82
83#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
84static struct mtd_partition asmb_flash_partitions[] = {
85 {
86 .name = "bootloader(nor)",
87 .size = 0x30000,
88 .offset = 0,
89 }, {
90 .name = "linux kernel and rootfs(nor)",
91 .size = 0x300000 - 0x30000 - 0x10000,
92 .offset = MTDPART_OFS_APPEND,
93 }, {
94 .name = "MAC address(nor)",
95 .size = 0x10000,
96 .offset = MTDPART_OFS_APPEND,
97 .mask_flags = MTD_WRITEABLE,
98 }
99};
100
101static struct physmap_flash_data asmb_flash_data = {
102 .width = 1,
103 .parts = asmb_flash_partitions,
104 .nr_parts = ARRAY_SIZE(asmb_flash_partitions),
105};
106
107static struct resource asmb_flash_resource = {
108 .start = 0x20000000,
109 .end = 0x202fffff,
110 .flags = IORESOURCE_MEM,
111};
112
113/* 4 MB NOR flash attached to async memory banks 0-2 (1 MB each),
114 * therefore only 3 MB are visible.
115 */
116static struct platform_device asmb_flash_device = {
117 .name = "physmap-flash",
118 .id = 0,
119 .dev = {
120 .platform_data = &asmb_flash_data,
121 },
122 .num_resources = 1,
123 .resource = &asmb_flash_resource,
124};
125#endif
126
127#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
128
129#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
130
131#define MMC_SPI_CARD_DETECT_INT IRQ_PF5
132
133static int bfin_mmc_spi_init(struct device *dev,
134 irqreturn_t (*detect_int)(int, void *), void *data)
135{
136 return request_irq(MMC_SPI_CARD_DETECT_INT, detect_int,
137 IRQF_TRIGGER_FALLING, "mmc-spi-detect", data);
138}
139
140static void bfin_mmc_spi_exit(struct device *dev, void *data)
141{
142 free_irq(MMC_SPI_CARD_DETECT_INT, data);
143}
144
145static struct bfin5xx_spi_chip mmc_spi_chip_info = {
146	.enable_dma = 0, /* no DMA transfers with this chip */
147 .bits_per_word = 8,
148};
149
150static struct mmc_spi_platform_data bfin_mmc_spi_pdata = {
151 .init = bfin_mmc_spi_init,
152 .exit = bfin_mmc_spi_exit,
153 .detect_delay = 100, /* msecs */
154};
155#endif
156
157#if defined(CONFIG_MTD_DATAFLASH) || defined(CONFIG_MTD_DATAFLASH_MODULE)
158/* This mapping is for the at45db642; it has a 1056-byte page size,
159 * so partition size and offset should be page aligned.
160 */
161static struct mtd_partition bfin_spi_dataflash_partitions[] = {
162 {
163 .name = "JFFS2 dataflash(nor)",
164#ifdef CONFIG_MTD_PAGESIZE_1024
165 .offset = 0x40000,
166 .size = 0x7C0000,
167#else
168 .offset = 0x0,
169 .size = 0x840000,
170#endif
171 }
172};
173
174static struct flash_platform_data bfin_spi_dataflash_data = {
175 .name = "mtd_dataflash",
176 .parts = bfin_spi_dataflash_partitions,
177 .nr_parts = ARRAY_SIZE(bfin_spi_dataflash_partitions),
178 .type = "mtd_dataflash",
179};
180
181static struct bfin5xx_spi_chip spi_dataflash_chip_info = {
182	.enable_dma = 0, /* no DMA transfers with this chip */
183 .bits_per_word = 8,
184};
185#endif
186
187static struct spi_board_info bfin_spi_board_info[] __initdata = {
188/* SD/MMC card reader on the SPI bus */
189#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
190 {
191 .modalias = "mmc_spi",
192 .max_speed_hz = 20000000,
193 .bus_num = 0,
194 .chip_select = 1,
195 .platform_data = &bfin_mmc_spi_pdata,
196 .controller_data = &mmc_spi_chip_info,
197 .mode = SPI_MODE_3,
198 },
199#endif
200
201/* 8 MB Atmel NOR flash chip on the SPI bus */
202#if defined(CONFIG_MTD_DATAFLASH) || defined(CONFIG_MTD_DATAFLASH_MODULE)
203 {
204 .modalias = "mtd_dataflash",
205 .max_speed_hz = 16700000,
206 .bus_num = 0,
207 .chip_select = 2,
208 .platform_data = &bfin_spi_dataflash_data,
209 .controller_data = &spi_dataflash_chip_info,
210 .mode = SPI_MODE_3, /* SPI_CPHA and SPI_CPOL */
211 },
212#endif
213};
214
215/* SPI controller data */
216/* SPI (0) */
217static struct resource bfin_spi0_resource[] = {
218 [0] = {
219 .start = SPI0_REGBASE,
220 .end = SPI0_REGBASE + 0xFF,
221 .flags = IORESOURCE_MEM,
222 },
223 [1] = {
224 .start = CH_SPI,
225 .end = CH_SPI,
226 .flags = IORESOURCE_DMA,
227 },
228 [2] = {
229 .start = IRQ_SPI,
230 .end = IRQ_SPI,
231 .flags = IORESOURCE_IRQ,
232 },
233};
234
235static struct bfin5xx_spi_master spi_bfin_master_info = {
236 .num_chipselect = 8,
237	.enable_dma = 1, /* master can perform DMA transfers */
238 .pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0},
239};
240
241static struct platform_device spi_bfin_master_device = {
242 .name = "bfin-spi",
243 .id = 0, /* Bus number */
244 .num_resources = ARRAY_SIZE(bfin_spi0_resource),
245 .resource = bfin_spi0_resource,
246 .dev = {
247 .platform_data = &spi_bfin_master_info, /* Passed to driver */
248 },
249};
250#endif
251
252#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
253#ifdef CONFIG_SERIAL_BFIN_UART0
254static struct resource bfin_uart0_resources[] = {
255 {
256 .start = UART0_THR,
257 .end = UART0_GCTL+2,
258 .flags = IORESOURCE_MEM,
259 },
260 {
261 .start = IRQ_UART0_RX,
262 .end = IRQ_UART0_RX+1,
263 .flags = IORESOURCE_IRQ,
264 },
265 {
266 .start = IRQ_UART0_ERROR,
267 .end = IRQ_UART0_ERROR,
268 .flags = IORESOURCE_IRQ,
269 },
270 {
271 .start = CH_UART0_TX,
272 .end = CH_UART0_TX,
273 .flags = IORESOURCE_DMA,
274 },
275 {
276 .start = CH_UART0_RX,
277 .end = CH_UART0_RX,
278 .flags = IORESOURCE_DMA,
279 },
280};
281
282static unsigned short bfin_uart0_peripherals[] = {
283 P_UART0_TX, P_UART0_RX, 0
284};
285
286static struct platform_device bfin_uart0_device = {
287 .name = "bfin-uart",
288 .id = 0,
289 .num_resources = ARRAY_SIZE(bfin_uart0_resources),
290 .resource = bfin_uart0_resources,
291 .dev = {
292 .platform_data = &bfin_uart0_peripherals, /* Passed to driver */
293 },
294};
295#endif
296
297#ifdef CONFIG_SERIAL_BFIN_UART1
298static struct resource bfin_uart1_resources[] = {
299 {
300 .start = UART1_THR,
301 .end = UART1_GCTL+2,
302 .flags = IORESOURCE_MEM,
303 },
304 {
305 .start = IRQ_UART1_RX,
306 .end = IRQ_UART1_RX+1,
307 .flags = IORESOURCE_IRQ,
308 },
309 {
310 .start = IRQ_UART1_ERROR,
311 .end = IRQ_UART1_ERROR,
312 .flags = IORESOURCE_IRQ,
313 },
314 {
315 .start = CH_UART1_TX,
316 .end = CH_UART1_TX,
317 .flags = IORESOURCE_DMA,
318 },
319 {
320 .start = CH_UART1_RX,
321 .end = CH_UART1_RX,
322 .flags = IORESOURCE_DMA,
323 },
324};
325
326static unsigned short bfin_uart1_peripherals[] = {
327 P_UART1_TX, P_UART1_RX, 0
328};
329
330static struct platform_device bfin_uart1_device = {
331 .name = "bfin-uart",
332 .id = 1,
333 .num_resources = ARRAY_SIZE(bfin_uart1_resources),
334 .resource = bfin_uart1_resources,
335 .dev = {
336 .platform_data = &bfin_uart1_peripherals, /* Passed to driver */
337 },
338};
339#endif
340#endif
341
342#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
343static struct resource bfin_twi0_resource[] = {
344 [0] = {
345 .start = TWI0_REGBASE,
346 .end = TWI0_REGBASE + 0xff,
347 .flags = IORESOURCE_MEM,
348 },
349 [1] = {
350 .start = IRQ_TWI,
351 .end = IRQ_TWI,
352 .flags = IORESOURCE_IRQ,
353 },
354};
355
356static struct platform_device i2c_bfin_twi_device = {
357 .name = "i2c-bfin-twi",
358 .id = 0,
359 .num_resources = ARRAY_SIZE(bfin_twi0_resource),
360 .resource = bfin_twi0_resource,
361};
362#endif
363
364static struct platform_device *dnp5370_devices[] __initdata = {
365
366#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
367#ifdef CONFIG_SERIAL_BFIN_UART0
368 &bfin_uart0_device,
369#endif
370#ifdef CONFIG_SERIAL_BFIN_UART1
371 &bfin_uart1_device,
372#endif
373#endif
374
375#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
376 &asmb_flash_device,
377#endif
378
379#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
380 &bfin_mii_bus,
381 &bfin_mac_device,
382#endif
383
384#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
385 &spi_bfin_master_device,
386#endif
387
388#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
389 &i2c_bfin_twi_device,
390#endif
391
392#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
393 &rtc_device,
394#endif
395
396};
397
398static int __init dnp5370_init(void)
399{
400 printk(KERN_INFO "DNP/5370: registering device resources\n");
401 platform_add_devices(dnp5370_devices, ARRAY_SIZE(dnp5370_devices));
402 printk(KERN_INFO "DNP/5370: registering %zu SPI slave devices\n",
403 ARRAY_SIZE(bfin_spi_board_info));
404 spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
405 printk(KERN_INFO "DNP/5370: MAC %pM\n", (void *)FLASH_MAC);
406 return 0;
407}
408arch_initcall(dnp5370_init);
409
410/*
411 * The MAC address is currently stored in flash by U-Boot
412 */
413void bfin_get_ether_addr(char *addr)
414{
415 *(u32 *)(&(addr[0])) = bfin_read32(FLASH_MAC);
416 *(u16 *)(&(addr[4])) = bfin_read16(FLASH_MAC + 4);
417}
418EXPORT_SYMBOL(bfin_get_ether_addr);
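bfin_get_ether_addr() above copies the six MAC bytes with one 32-bit and one 16-bit flash read; this works because the bytes sit consecutively at FLASH_MAC and Blackfin's little-endian stores preserve their order. An equivalent byte-wise sketch (an assumption for illustration, not in the patch):

static void example_get_ether_addr(char *addr)
{
	int i;

	for (i = 0; i < 6; i++)
		addr[i] = bfin_read8(FLASH_MAC + i);
}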
diff --git a/arch/blackfin/mach-bf537/boards/minotaur.c b/arch/blackfin/mach-bf537/boards/minotaur.c
index 49800518412c..bfb3671a78da 100644
--- a/arch/blackfin/mach-bf537/boards/minotaur.c
+++ b/arch/blackfin/mach-bf537/boards/minotaur.c
@@ -263,7 +263,7 @@ static struct resource bfin_uart0_resources[] = {
263 }, 263 },
264}; 264};
265 265
266unsigned short bfin_uart0_peripherals[] = { 266static unsigned short bfin_uart0_peripherals[] = {
267 P_UART0_TX, P_UART0_RX, 0 267 P_UART0_TX, P_UART0_RX, 0
268}; 268};
269 269
@@ -306,7 +306,7 @@ static struct resource bfin_uart1_resources[] = {
306 }, 306 },
307}; 307};
308 308
309unsigned short bfin_uart1_peripherals[] = { 309static unsigned short bfin_uart1_peripherals[] = {
310 P_UART1_TX, P_UART1_RX, 0 310 P_UART1_TX, P_UART1_RX, 0
311}; 311};
312 312
@@ -419,9 +419,9 @@ static struct resource bfin_sport0_uart_resources[] = {
419 }, 419 },
420}; 420};
421 421
422unsigned short bfin_sport0_peripherals[] = { 422static unsigned short bfin_sport0_peripherals[] = {
423 P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, 423 P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
424 P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0 424 P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
425}; 425};
426 426
427static struct platform_device bfin_sport0_uart_device = { 427static struct platform_device bfin_sport0_uart_device = {
@@ -453,9 +453,9 @@ static struct resource bfin_sport1_uart_resources[] = {
453 }, 453 },
454}; 454};
455 455
456unsigned short bfin_sport1_peripherals[] = { 456static unsigned short bfin_sport1_peripherals[] = {
457 P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, 457 P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
458 P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0 458 P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
459}; 459};
460 460
461static struct platform_device bfin_sport1_uart_device = { 461static struct platform_device bfin_sport1_uart_device = {
diff --git a/arch/blackfin/mach-bf537/boards/pnav10.c b/arch/blackfin/mach-bf537/boards/pnav10.c
index b95807894e25..9389f03e3b0a 100644
--- a/arch/blackfin/mach-bf537/boards/pnav10.c
+++ b/arch/blackfin/mach-bf537/boards/pnav10.c
@@ -367,7 +367,7 @@ static struct resource bfin_uart0_resources[] = {
367 }, 367 },
368}; 368};
369 369
370unsigned short bfin_uart0_peripherals[] = { 370static unsigned short bfin_uart0_peripherals[] = {
371 P_UART0_TX, P_UART0_RX, 0 371 P_UART0_TX, P_UART0_RX, 0
372}; 372};
373 373
@@ -410,7 +410,7 @@ static struct resource bfin_uart1_resources[] = {
410 }, 410 },
411}; 411};
412 412
413unsigned short bfin_uart1_peripherals[] = { 413static unsigned short bfin_uart1_peripherals[] = {
414 P_UART1_TX, P_UART1_RX, 0 414 P_UART1_TX, P_UART1_RX, 0
415}; 415};
416 416
diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c
index 3aa344ce8e52..2c69785a7bbe 100644
--- a/arch/blackfin/mach-bf537/boards/stamp.c
+++ b/arch/blackfin/mach-bf537/boards/stamp.c
@@ -289,7 +289,7 @@ static struct platform_device isp1362_hcd_device = {
289#endif 289#endif
290 290
291#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE) 291#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE)
292unsigned short bfin_can_peripherals[] = { 292static unsigned short bfin_can_peripherals[] = {
293 P_CAN0_RX, P_CAN0_TX, 0 293 P_CAN0_RX, P_CAN0_TX, 0
294}; 294};
295 295
@@ -693,7 +693,7 @@ static struct bfin5xx_spi_chip ad2s90_spi_chip_info = {
693#endif 693#endif
694 694
695#if defined(CONFIG_AD2S120X) || defined(CONFIG_AD2S120X_MODULE) 695#if defined(CONFIG_AD2S120X) || defined(CONFIG_AD2S120X_MODULE)
696unsigned short ad2s120x_platform_data[] = { 696static unsigned short ad2s120x_platform_data[] = {
697 /* used as SAMPLE and RDVEL */ 697 /* used as SAMPLE and RDVEL */
698 GPIO_PF5, GPIO_PF6, 0 698 GPIO_PF5, GPIO_PF6, 0
699}; 699};
@@ -705,7 +705,7 @@ static struct bfin5xx_spi_chip ad2s120x_spi_chip_info = {
705#endif 705#endif
706 706
707#if defined(CONFIG_AD2S1210) || defined(CONFIG_AD2S1210_MODULE) 707#if defined(CONFIG_AD2S1210) || defined(CONFIG_AD2S1210_MODULE)
708unsigned short ad2s1210_platform_data[] = { 708static unsigned short ad2s1210_platform_data[] = {
709 /* use as SAMPLE, A0, A1 */ 709 /* use as SAMPLE, A0, A1 */
710 GPIO_PF7, GPIO_PF8, GPIO_PF9, 710 GPIO_PF7, GPIO_PF8, GPIO_PF9,
711# if defined(CONFIG_AD2S1210_GPIO_INPUT) || defined(CONFIG_AD2S1210_GPIO_OUTPUT) 711# if defined(CONFIG_AD2S1210_GPIO_INPUT) || defined(CONFIG_AD2S1210_GPIO_OUTPUT)
@@ -1717,7 +1717,7 @@ static struct resource bfin_uart0_resources[] = {
1717#endif 1717#endif
1718}; 1718};
1719 1719
1720unsigned short bfin_uart0_peripherals[] = { 1720static unsigned short bfin_uart0_peripherals[] = {
1721 P_UART0_TX, P_UART0_RX, 0 1721 P_UART0_TX, P_UART0_RX, 0
1722}; 1722};
1723 1723
@@ -1760,7 +1760,7 @@ static struct resource bfin_uart1_resources[] = {
1760 }, 1760 },
1761}; 1761};
1762 1762
1763unsigned short bfin_uart1_peripherals[] = { 1763static unsigned short bfin_uart1_peripherals[] = {
1764 P_UART1_TX, P_UART1_RX, 0 1764 P_UART1_TX, P_UART1_RX, 0
1765}; 1765};
1766 1766
@@ -2447,9 +2447,9 @@ static struct resource bfin_sport0_uart_resources[] = {
2447 }, 2447 },
2448}; 2448};
2449 2449
2450unsigned short bfin_sport0_peripherals[] = { 2450static unsigned short bfin_sport0_peripherals[] = {
2451 P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, 2451 P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
2452 P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0 2452 P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
2453}; 2453};
2454 2454
2455static struct platform_device bfin_sport0_uart_device = { 2455static struct platform_device bfin_sport0_uart_device = {
@@ -2481,9 +2481,9 @@ static struct resource bfin_sport1_uart_resources[] = {
2481 }, 2481 },
2482}; 2482};
2483 2483
2484unsigned short bfin_sport1_peripherals[] = { 2484static unsigned short bfin_sport1_peripherals[] = {
2485 P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, 2485 P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
2486 P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0 2486 P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
2487}; 2487};
2488 2488
2489static struct platform_device bfin_sport1_uart_device = { 2489static struct platform_device bfin_sport1_uart_device = {
diff --git a/arch/blackfin/mach-bf537/boards/tcm_bf537.c b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
index 31498add1a42..0761b201abca 100644
--- a/arch/blackfin/mach-bf537/boards/tcm_bf537.c
+++ b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
@@ -356,7 +356,7 @@ static struct resource bfin_uart0_resources[] = {
356 }, 356 },
357}; 357};
358 358
359unsigned short bfin_uart0_peripherals[] = { 359static unsigned short bfin_uart0_peripherals[] = {
360 P_UART0_TX, P_UART0_RX, 0 360 P_UART0_TX, P_UART0_RX, 0
361}; 361};
362 362
@@ -399,7 +399,7 @@ static struct resource bfin_uart1_resources[] = {
399 }, 399 },
400}; 400};
401 401
402unsigned short bfin_uart1_peripherals[] = { 402static unsigned short bfin_uart1_peripherals[] = {
403 P_UART1_TX, P_UART1_RX, 0 403 P_UART1_TX, P_UART1_RX, 0
404}; 404};
405 405
@@ -512,9 +512,9 @@ static struct resource bfin_sport0_uart_resources[] = {
512 }, 512 },
513}; 513};
514 514
515unsigned short bfin_sport0_peripherals[] = { 515static unsigned short bfin_sport0_peripherals[] = {
516 P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, 516 P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
517 P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0 517 P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
518}; 518};
519 519
520static struct platform_device bfin_sport0_uart_device = { 520static struct platform_device bfin_sport0_uart_device = {
@@ -546,9 +546,9 @@ static struct resource bfin_sport1_uart_resources[] = {
546 }, 546 },
547}; 547};
548 548
549unsigned short bfin_sport1_peripherals[] = { 549static unsigned short bfin_sport1_peripherals[] = {
550 P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, 550 P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
551 P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0 551 P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
552}; 552};
553 553
554static struct platform_device bfin_sport1_uart_device = { 554static struct platform_device bfin_sport1_uart_device = {
diff --git a/arch/blackfin/mach-bf537/dma.c b/arch/blackfin/mach-bf537/dma.c
index 5c8c4ed517bb..5c62e99c9fac 100644
--- a/arch/blackfin/mach-bf537/dma.c
+++ b/arch/blackfin/mach-bf537/dma.c
@@ -11,7 +11,7 @@
11#include <asm/blackfin.h> 11#include <asm/blackfin.h>
12#include <asm/dma.h> 12#include <asm/dma.h>
13 13
14struct dma_register *dma_io_base_addr[MAX_DMA_CHANNELS] = { 14struct dma_register * const dma_io_base_addr[MAX_DMA_CHANNELS] = {
15 (struct dma_register *) DMA0_NEXT_DESC_PTR, 15 (struct dma_register *) DMA0_NEXT_DESC_PTR,
16 (struct dma_register *) DMA1_NEXT_DESC_PTR, 16 (struct dma_register *) DMA1_NEXT_DESC_PTR,
17 (struct dma_register *) DMA2_NEXT_DESC_PTR, 17 (struct dma_register *) DMA2_NEXT_DESC_PTR,
diff --git a/arch/blackfin/mach-bf537/include/mach/bfin_serial.h b/arch/blackfin/mach-bf537/include/mach/bfin_serial.h
new file mode 100644
index 000000000000..00c603fe8218
--- /dev/null
+++ b/arch/blackfin/mach-bf537/include/mach/bfin_serial.h
@@ -0,0 +1,14 @@
1/*
2 * mach/bfin_serial.h - Blackfin UART/Serial definitions
3 *
4 * Copyright 2006-2010 Analog Devices Inc.
5 *
6 * Licensed under the GPL-2 or later.
7 */
8
9#ifndef __BFIN_MACH_SERIAL_H__
10#define __BFIN_MACH_SERIAL_H__
11
12#define BFIN_UART_NR_PORTS 2
13
14#endif
diff --git a/arch/blackfin/mach-bf537/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf537/include/mach/bfin_serial_5xx.h
index 635c91c526a3..3e955dba8951 100644
--- a/arch/blackfin/mach-bf537/include/mach/bfin_serial_5xx.h
+++ b/arch/blackfin/mach-bf537/include/mach/bfin_serial_5xx.h
@@ -4,36 +4,9 @@
4 * Licensed under the GPL-2 or later 4 * Licensed under the GPL-2 or later
5 */ 5 */
6 6
7#include <linux/serial.h>
8#include <asm/dma.h> 7#include <asm/dma.h>
9#include <asm/portmux.h> 8#include <asm/portmux.h>
10 9
11#define UART_GET_CHAR(uart) bfin_read16(((uart)->port.membase + OFFSET_RBR))
12#define UART_GET_DLL(uart) bfin_read16(((uart)->port.membase + OFFSET_DLL))
13#define UART_GET_IER(uart) bfin_read16(((uart)->port.membase + OFFSET_IER))
14#define UART_GET_DLH(uart) bfin_read16(((uart)->port.membase + OFFSET_DLH))
15#define UART_GET_IIR(uart) bfin_read16(((uart)->port.membase + OFFSET_IIR))
16#define UART_GET_LCR(uart) bfin_read16(((uart)->port.membase + OFFSET_LCR))
17#define UART_GET_GCTL(uart) bfin_read16(((uart)->port.membase + OFFSET_GCTL))
18
19#define UART_PUT_CHAR(uart,v) bfin_write16(((uart)->port.membase + OFFSET_THR),v)
20#define UART_PUT_DLL(uart,v) bfin_write16(((uart)->port.membase + OFFSET_DLL),v)
21#define UART_PUT_IER(uart,v) bfin_write16(((uart)->port.membase + OFFSET_IER),v)
22#define UART_SET_IER(uart,v) UART_PUT_IER(uart, UART_GET_IER(uart) | (v))
23#define UART_CLEAR_IER(uart,v) UART_PUT_IER(uart, UART_GET_IER(uart) & ~(v))
24#define UART_PUT_DLH(uart,v) bfin_write16(((uart)->port.membase + OFFSET_DLH),v)
25#define UART_PUT_LCR(uart,v) bfin_write16(((uart)->port.membase + OFFSET_LCR),v)
26#define UART_PUT_GCTL(uart,v) bfin_write16(((uart)->port.membase + OFFSET_GCTL),v)
27
28#define UART_SET_DLAB(uart) do { UART_PUT_LCR(uart, UART_GET_LCR(uart) | DLAB); SSYNC(); } while (0)
29#define UART_CLEAR_DLAB(uart) do { UART_PUT_LCR(uart, UART_GET_LCR(uart) & ~DLAB); SSYNC(); } while (0)
30
31#define UART_GET_CTS(x) gpio_get_value(x->cts_pin)
32#define UART_DISABLE_RTS(x) gpio_set_value(x->rts_pin, 1)
33#define UART_ENABLE_RTS(x) gpio_set_value(x->rts_pin, 0)
34#define UART_ENABLE_INTS(x, v) UART_PUT_IER(x, v)
35#define UART_DISABLE_INTS(x) UART_PUT_IER(x, 0)
36
37#if defined(CONFIG_BFIN_UART0_CTSRTS) || defined(CONFIG_BFIN_UART1_CTSRTS) 10#if defined(CONFIG_BFIN_UART0_CTSRTS) || defined(CONFIG_BFIN_UART1_CTSRTS)
38# define CONFIG_SERIAL_BFIN_CTSRTS 11# define CONFIG_SERIAL_BFIN_CTSRTS
39 12
@@ -54,49 +27,6 @@
54# endif 27# endif
55#endif 28#endif
56 29
57#define BFIN_UART_TX_FIFO_SIZE 2
58
59/*
60 * The pin configuration is different from schematic
61 */
62struct bfin_serial_port {
63 struct uart_port port;
64 unsigned int old_status;
65 int status_irq;
66 unsigned int lsr;
67#ifdef CONFIG_SERIAL_BFIN_DMA
68 int tx_done;
69 int tx_count;
70 struct circ_buf rx_dma_buf;
71 struct timer_list rx_dma_timer;
72 int rx_dma_nrows;
73 unsigned int tx_dma_channel;
74 unsigned int rx_dma_channel;
75 struct work_struct tx_dma_workqueue;
76#endif
77#ifdef CONFIG_SERIAL_BFIN_CTSRTS
78 int cts_pin;
79 int rts_pin;
80#endif
81};
82
83/* The hardware clears the LSR bits upon read, so we need to cache
84 * some of the more fun bits in software so they don't get lost
85 * when checking the LSR in other code paths (TX).
86 */
87static inline unsigned int UART_GET_LSR(struct bfin_serial_port *uart)
88{
89 unsigned int lsr = bfin_read16(uart->port.membase + OFFSET_LSR);
90 uart->lsr |= (lsr & (BI|FE|PE|OE));
91 return lsr | uart->lsr;
92}
93
94static inline void UART_CLEAR_LSR(struct bfin_serial_port *uart)
95{
96 uart->lsr = 0;
97 bfin_write16(uart->port.membase + OFFSET_LSR, -1);
98}
99
100struct bfin_serial_res { 30struct bfin_serial_res {
101 unsigned long uart_base_addr; 31 unsigned long uart_base_addr;
102 int uart_irq; 32 int uart_irq;
@@ -145,3 +75,5 @@ struct bfin_serial_res bfin_serial_resource[] = {
145}; 75};
146 76
147#define DRIVER_NAME "bfin-uart" 77#define DRIVER_NAME "bfin-uart"
78
79#include <asm/bfin_serial.h>
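The bfin_serial_port structure and the LSR helpers move from this per-machine header into the shared <asm/bfin_serial.h> now included at the end. The caching in the removed UART_GET_LSR() exists because hardware clears the LSR error bits on read; without it, a TX-path poll like the hypothetical sketch below would consume RX errors before the RX path could report them:

static void example_wait_tx_ready(struct bfin_serial_port *uart)
{
	/* safe: UART_GET_LSR() re-ORs the cached BI/FE/PE/OE bits */
	while (!(UART_GET_LSR(uart) & THRE))
		cpu_relax();
}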
diff --git a/arch/blackfin/mach-bf537/include/mach/blackfin.h b/arch/blackfin/mach-bf537/include/mach/blackfin.h
index a12d4b6a221d..baa096fc724a 100644
--- a/arch/blackfin/mach-bf537/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf537/include/mach/blackfin.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Copyright 2005-2009 Analog Devices Inc. 2 * Copyright 2005-2010 Analog Devices Inc.
3 * 3 *
4 * Licensed under the GPL-2 or later 4 * Licensed under the GPL-2 or later.
5 */ 5 */
6 6
7#ifndef _MACH_BLACKFIN_H_ 7#ifndef _MACH_BLACKFIN_H_
@@ -10,34 +10,24 @@
10#define BF537_FAMILY 10#define BF537_FAMILY
11 11
12#include "bf537.h" 12#include "bf537.h"
13#include "defBF534.h"
14#include "anomaly.h" 13#include "anomaly.h"
15 14
15#include <asm/def_LPBlackfin.h>
16#ifdef CONFIG_BF534
17# include "defBF534.h"
18#endif
16#if defined(CONFIG_BF537) || defined(CONFIG_BF536) 19#if defined(CONFIG_BF537) || defined(CONFIG_BF536)
17#include "defBF537.h" 20# include "defBF537.h"
18#endif 21#endif
19 22
20#if !defined(__ASSEMBLY__) 23#if !defined(__ASSEMBLY__)
21#include "cdefBF534.h" 24# include <asm/cdef_LPBlackfin.h>
22 25# ifdef CONFIG_BF534
23#if defined(CONFIG_BF537) || defined(CONFIG_BF536) 26# include "cdefBF534.h"
24#include "cdefBF537.h" 27# endif
28# if defined(CONFIG_BF537) || defined(CONFIG_BF536)
29# include "cdefBF537.h"
30# endif
25#endif 31#endif
26#endif
27
28#define BFIN_UART_NR_PORTS 2
29
30#define OFFSET_THR 0x00 /* Transmit Holding register */
31#define OFFSET_RBR 0x00 /* Receive Buffer register */
32#define OFFSET_DLL 0x00 /* Divisor Latch (Low-Byte) */
33#define OFFSET_IER 0x04 /* Interrupt Enable Register */
34#define OFFSET_DLH 0x04 /* Divisor Latch (High-Byte) */
35#define OFFSET_IIR 0x08 /* Interrupt Identification Register */
36#define OFFSET_LCR 0x0C /* Line Control Register */
37#define OFFSET_MCR 0x10 /* Modem Control Register */
38#define OFFSET_LSR 0x14 /* Line Status Register */
39#define OFFSET_MSR 0x18 /* Modem Status Register */
40#define OFFSET_SCR 0x1C /* SCR Scratch Register */
41#define OFFSET_GCTL 0x24 /* Global Control Register */
42 32
43#endif 33#endif
diff --git a/arch/blackfin/mach-bf537/include/mach/cdefBF534.h b/arch/blackfin/mach-bf537/include/mach/cdefBF534.h
index fbeb35e14135..563ede907336 100644
--- a/arch/blackfin/mach-bf537/include/mach/cdefBF534.h
+++ b/arch/blackfin/mach-bf537/include/mach/cdefBF534.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2005-2008 Analog Devices Inc. 2 * Copyright 2005-2010 Analog Devices Inc.
3 * 3 *
4 * Licensed under the GPL-2 or later 4 * Licensed under the GPL-2 or later
5 */ 5 */
@@ -7,14 +7,6 @@
7#ifndef _CDEF_BF534_H 7#ifndef _CDEF_BF534_H
8#define _CDEF_BF534_H 8#define _CDEF_BF534_H
9 9
10#include <asm/blackfin.h>
11
12/* Include all Core registers and bit definitions */
13#include "defBF534.h"
14
15/* Include core specific register pointer definitions */
16#include <asm/cdef_LPBlackfin.h>
17
18/* Clock and System Control (0xFFC00000 - 0xFFC000FF) */ 10/* Clock and System Control (0xFFC00000 - 0xFFC000FF) */
19#define bfin_read_PLL_CTL() bfin_read16(PLL_CTL) 11#define bfin_read_PLL_CTL() bfin_read16(PLL_CTL)
20#define bfin_read_PLL_DIV() bfin_read16(PLL_DIV) 12#define bfin_read_PLL_DIV() bfin_read16(PLL_DIV)
@@ -355,16 +347,10 @@
355#define bfin_write_EBIU_SDSTAT(val) bfin_write16(EBIU_SDSTAT,val) 347#define bfin_write_EBIU_SDSTAT(val) bfin_write16(EBIU_SDSTAT,val)
356 348
357/* DMA Traffic Control Registers */ 349/* DMA Traffic Control Registers */
358#define bfin_read_DMA_TC_PER() bfin_read16(DMA_TC_PER) 350#define bfin_read_DMAC_TC_PER() bfin_read16(DMAC_TC_PER)
359#define bfin_write_DMA_TC_PER(val) bfin_write16(DMA_TC_PER,val) 351#define bfin_write_DMAC_TC_PER(val) bfin_write16(DMAC_TC_PER,val)
360#define bfin_read_DMA_TC_CNT() bfin_read16(DMA_TC_CNT) 352#define bfin_read_DMAC_TC_CNT() bfin_read16(DMAC_TC_CNT)
361#define bfin_write_DMA_TC_CNT(val) bfin_write16(DMA_TC_CNT,val) 353#define bfin_write_DMAC_TC_CNT(val) bfin_write16(DMAC_TC_CNT,val)
362
363/* Alternate deprecated register names (below) provided for backwards code compatibility */
364#define bfin_read_DMA_TCPER() bfin_read16(DMA_TCPER)
365#define bfin_write_DMA_TCPER(val) bfin_write16(DMA_TCPER,val)
366#define bfin_read_DMA_TCCNT() bfin_read16(DMA_TCCNT)
367#define bfin_write_DMA_TCCNT(val) bfin_write16(DMA_TCCNT,val)
368 354
369/* DMA Controller */ 355/* DMA Controller */
370#define bfin_read_DMA0_CONFIG() bfin_read16(DMA0_CONFIG) 356#define bfin_read_DMA0_CONFIG() bfin_read16(DMA0_CONFIG)
@@ -1747,7 +1733,4 @@
1747#define bfin_read_HMDMA1_BCOUNT() bfin_read16(HMDMA1_BCOUNT) 1733#define bfin_read_HMDMA1_BCOUNT() bfin_read16(HMDMA1_BCOUNT)
1748#define bfin_write_HMDMA1_BCOUNT(val) bfin_write16(HMDMA1_BCOUNT,val) 1734#define bfin_write_HMDMA1_BCOUNT(val) bfin_write16(HMDMA1_BCOUNT,val)
1749 1735
1750/* These need to be last due to the cdef/linux inter-dependencies */
1751#include <asm/irq.h>
1752
1753#endif /* _CDEF_BF534_H */ 1736#endif /* _CDEF_BF534_H */
diff --git a/arch/blackfin/mach-bf537/include/mach/cdefBF537.h b/arch/blackfin/mach-bf537/include/mach/cdefBF537.h
index 9363c3990421..19ec21ea150a 100644
--- a/arch/blackfin/mach-bf537/include/mach/cdefBF537.h
+++ b/arch/blackfin/mach-bf537/include/mach/cdefBF537.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2005-2008 Analog Devices Inc. 2 * Copyright 2005-2010 Analog Devices Inc.
3 * 3 *
4 * Licensed under the GPL-2 or later 4 * Licensed under the GPL-2 or later
5 */ 5 */
@@ -10,9 +10,6 @@
10/* Include MMRs Common to BF534 */ 10/* Include MMRs Common to BF534 */
11#include "cdefBF534.h" 11#include "cdefBF534.h"
12 12
13/* Include all Core registers and bit definitions */
14#include "defBF537.h"
15
16/* Include Macro "Defines" For EMAC (Unique to BF536/BF537) */ 13/* Include Macro "Defines" For EMAC (Unique to BF536/BF537) */
17/* 10/100 Ethernet Controller (0xFFC03000 - 0xFFC031FF) */ 14/* 10/100 Ethernet Controller (0xFFC03000 - 0xFFC031FF) */
18#define bfin_read_EMAC_OPMODE() bfin_read32(EMAC_OPMODE) 15#define bfin_read_EMAC_OPMODE() bfin_read32(EMAC_OPMODE)
diff --git a/arch/blackfin/mach-bf537/include/mach/defBF534.h b/arch/blackfin/mach-bf537/include/mach/defBF534.h
index 0323e6bacdae..32529a03b266 100644
--- a/arch/blackfin/mach-bf537/include/mach/defBF534.h
+++ b/arch/blackfin/mach-bf537/include/mach/defBF534.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2005-2008 Analog Devices Inc. 2 * Copyright 2005-2010 Analog Devices Inc.
3 * 3 *
4 * Licensed under the ADI BSD license or the GPL-2 (or later) 4 * Licensed under the ADI BSD license or the GPL-2 (or later)
5 */ 5 */
@@ -7,9 +7,6 @@
7#ifndef _DEF_BF534_H 7#ifndef _DEF_BF534_H
8#define _DEF_BF534_H 8#define _DEF_BF534_H
9 9
10/* Include all Core registers and bit definitions */
11#include <asm/def_LPBlackfin.h>
12
13/************************************************************************************ 10/************************************************************************************
14** System MMR Register Map 11** System MMR Register Map
15*************************************************************************************/ 12*************************************************************************************/
@@ -193,12 +190,8 @@
193#define EBIU_SDSTAT 0xFFC00A1C /* SDRAM Status Register */ 190#define EBIU_SDSTAT 0xFFC00A1C /* SDRAM Status Register */
194 191
195/* DMA Traffic Control Registers */ 192/* DMA Traffic Control Registers */
196#define DMA_TC_PER 0xFFC00B0C /* Traffic Control Periods Register */ 193#define DMAC_TC_PER 0xFFC00B0C /* Traffic Control Periods Register */
197#define DMA_TC_CNT 0xFFC00B10 /* Traffic Control Current Counts Register */ 194#define DMAC_TC_CNT 0xFFC00B10 /* Traffic Control Current Counts Register */
198
199/* Alternate deprecated register names (below) provided for backwards code compatibility */
200#define DMA_TCPER 0xFFC00B0C /* Traffic Control Periods Register */
201#define DMA_TCCNT 0xFFC00B10 /* Traffic Control Current Counts Register */
202 195
203/* DMA Controller (0xFFC00C00 - 0xFFC00FFF) */ 196/* DMA Controller (0xFFC00C00 - 0xFFC00FFF) */
204#define DMA0_NEXT_DESC_PTR 0xFFC00C00 /* DMA Channel 0 Next Descriptor Pointer Register */ 197#define DMA0_NEXT_DESC_PTR 0xFFC00C00 /* DMA Channel 0 Next Descriptor Pointer Register */
@@ -1029,48 +1022,6 @@
1029#define IWR_ENABLE(x) (1 << ((x)&0x1F)) /* Wakeup Enable Peripheral #x */ 1022#define IWR_ENABLE(x) (1 << ((x)&0x1F)) /* Wakeup Enable Peripheral #x */
1030#define IWR_DISABLE(x) (0xFFFFFFFF ^ (1 << ((x)&0x1F))) /* Wakeup Disable Peripheral #x */ 1023#define IWR_DISABLE(x) (0xFFFFFFFF ^ (1 << ((x)&0x1F))) /* Wakeup Disable Peripheral #x */
1031 1024
1032/* ************** UART CONTROLLER MASKS *************************/
1033/* UARTx_LCR Masks */
1034#define WLS(x) (((x)-5) & 0x03) /* Word Length Select */
1035#define STB 0x04 /* Stop Bits */
1036#define PEN 0x08 /* Parity Enable */
1037#define EPS 0x10 /* Even Parity Select */
1038#define STP 0x20 /* Stick Parity */
1039#define SB 0x40 /* Set Break */
1040#define DLAB 0x80 /* Divisor Latch Access */
1041
1042/* UARTx_MCR Mask */
1043#define LOOP_ENA 0x10 /* Loopback Mode Enable */
1044#define LOOP_ENA_P 0x04
1045/* UARTx_LSR Masks */
1046#define DR 0x01 /* Data Ready */
1047#define OE 0x02 /* Overrun Error */
1048#define PE 0x04 /* Parity Error */
1049#define FE 0x08 /* Framing Error */
1050#define BI 0x10 /* Break Interrupt */
1051#define THRE 0x20 /* THR Empty */
1052#define TEMT 0x40 /* TSR and UART_THR Empty */
1053
1054/* UARTx_IER Masks */
1055#define ERBFI 0x01 /* Enable Receive Buffer Full Interrupt */
1056#define ETBEI 0x02 /* Enable Transmit Buffer Empty Interrupt */
1057#define ELSI 0x04 /* Enable RX Status Interrupt */
1058
1059/* UARTx_IIR Masks */
1060#define NINT 0x01 /* Pending Interrupt */
1061#define IIR_TX_READY 0x02 /* UART_THR empty */
1062#define IIR_RX_READY 0x04 /* Receive data ready */
1063#define IIR_LINE_CHANGE 0x06 /* Receive line status */
1064#define IIR_STATUS 0x06
1065
1066/* UARTx_GCTL Masks */
1067#define UCEN 0x01 /* Enable UARTx Clocks */
1068#define IREN 0x02 /* Enable IrDA Mode */
1069#define TPOLC 0x04 /* IrDA TX Polarity Change */
1070#define RPOLC 0x08 /* IrDA RX Polarity Change */
1071#define FPE 0x10 /* Force Parity Error On Transmit */
1072#define FFE 0x20 /* Force Framing Error On Transmit */
1073
1074/* **************** GENERAL PURPOSE TIMER MASKS **********************/ 1025/* **************** GENERAL PURPOSE TIMER MASKS **********************/
1075/* TIMER_ENABLE Masks */ 1026/* TIMER_ENABLE Masks */
1076#define TIMEN0 0x0001 /* Enable Timer 0 */ 1027#define TIMEN0 0x0001 /* Enable Timer 0 */
@@ -1141,62 +1092,6 @@
1141#define EMU_RUN 0x0200 /* Emulation Behavior Select */ 1092#define EMU_RUN 0x0200 /* Emulation Behavior Select */
1142#define ERR_TYP 0xC000 /* Error Type */ 1093#define ERR_TYP 0xC000 /* Error Type */
1143 1094
1144/* ****************** GPIO PORTS F, G, H MASKS ***********************/
1145/* General Purpose IO (0xFFC00700 - 0xFFC007FF) Masks */
1146/* Port F Masks */
1147#define PF0 0x0001
1148#define PF1 0x0002
1149#define PF2 0x0004
1150#define PF3 0x0008
1151#define PF4 0x0010
1152#define PF5 0x0020
1153#define PF6 0x0040
1154#define PF7 0x0080
1155#define PF8 0x0100
1156#define PF9 0x0200
1157#define PF10 0x0400
1158#define PF11 0x0800
1159#define PF12 0x1000
1160#define PF13 0x2000
1161#define PF14 0x4000
1162#define PF15 0x8000
1163
1164/* Port G Masks */
1165#define PG0 0x0001
1166#define PG1 0x0002
1167#define PG2 0x0004
1168#define PG3 0x0008
1169#define PG4 0x0010
1170#define PG5 0x0020
1171#define PG6 0x0040
1172#define PG7 0x0080
1173#define PG8 0x0100
1174#define PG9 0x0200
1175#define PG10 0x0400
1176#define PG11 0x0800
1177#define PG12 0x1000
1178#define PG13 0x2000
1179#define PG14 0x4000
1180#define PG15 0x8000
1181
1182/* Port H Masks */
1183#define PH0 0x0001
1184#define PH1 0x0002
1185#define PH2 0x0004
1186#define PH3 0x0008
1187#define PH4 0x0010
1188#define PH5 0x0020
1189#define PH6 0x0040
1190#define PH7 0x0080
1191#define PH8 0x0100
1192#define PH9 0x0200
1193#define PH10 0x0400
1194#define PH11 0x0800
1195#define PH12 0x1000
1196#define PH13 0x2000
1197#define PH14 0x4000
1198#define PH15 0x8000
1199
1200/* ********************* ASYNCHRONOUS MEMORY CONTROLLER MASKS *************************/ 1095/* ********************* ASYNCHRONOUS MEMORY CONTROLLER MASKS *************************/
1201/* EBIU_AMGCTL Masks */ 1096/* EBIU_AMGCTL Masks */
1202#define AMCKEN 0x0001 /* Enable CLKOUT */ 1097#define AMCKEN 0x0001 /* Enable CLKOUT */
diff --git a/arch/blackfin/mach-bf537/include/mach/defBF537.h b/arch/blackfin/mach-bf537/include/mach/defBF537.h
index 8cb5d5cf0c94..3d471d752684 100644
--- a/arch/blackfin/mach-bf537/include/mach/defBF537.h
+++ b/arch/blackfin/mach-bf537/include/mach/defBF537.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2005-2008 Analog Devices Inc. 2 * Copyright 2005-2010 Analog Devices Inc.
3 * 3 *
4 * Licensed under the ADI BSD license or the GPL-2 (or later) 4 * Licensed under the ADI BSD license or the GPL-2 (or later)
5 */ 5 */
@@ -7,9 +7,6 @@
7#ifndef _DEF_BF537_H 7#ifndef _DEF_BF537_H
8#define _DEF_BF537_H 8#define _DEF_BF537_H
9 9
10/* Include all Core registers and bit definitions */
11#include <asm/cdef_LPBlackfin.h>
12
13/* Include all MMR and bit defines common to BF534 */ 10/* Include all MMR and bit defines common to BF534 */
14#include "defBF534.h" 11#include "defBF534.h"
15 12
diff --git a/arch/blackfin/mach-bf537/include/mach/gpio.h b/arch/blackfin/mach-bf537/include/mach/gpio.h
index f80c2995efdb..fba606b699c3 100644
--- a/arch/blackfin/mach-bf537/include/mach/gpio.h
+++ b/arch/blackfin/mach-bf537/include/mach/gpio.h
@@ -62,4 +62,8 @@
62#define PORT_G GPIO_PG0 62#define PORT_G GPIO_PG0
63#define PORT_H GPIO_PH0 63#define PORT_H GPIO_PH0
64 64
65#include <mach-common/ports-f.h>
66#include <mach-common/ports-g.h>
67#include <mach-common/ports-h.h>
68
65#endif /* _MACH_GPIO_H_ */ 69#endif /* _MACH_GPIO_H_ */
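mach/gpio.h now supplies the port bit definitions through the shared <mach-common/ports-*.h> headers rather than relying on defBF534.h, so existing users of the PF/PG/PH masks keep compiling unchanged. A trivial sketch (hypothetical names):

#include <mach/gpio.h>

static const unsigned short example_led_mask = PF5 | PF6; /* still resolves, now via ports-f.h */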
diff --git a/arch/blackfin/mach-bf537/include/mach/pll.h b/arch/blackfin/mach-bf537/include/mach/pll.h
index 169c106d0edb..94cca674d835 100644
--- a/arch/blackfin/mach-bf537/include/mach/pll.h
+++ b/arch/blackfin/mach-bf537/include/mach/pll.h
@@ -1,57 +1 @@
1/* #include <mach-common/pll.h>
2 * Copyright 2005-2008 Analog Devices Inc.
3 *
4 * Licensed under the GPL-2 or later
5 */
6
7#ifndef _MACH_PLL_H
8#define _MACH_PLL_H
9
10#include <asm/blackfin.h>
11#include <asm/irqflags.h>
12
13/* Writing to PLL_CTL initiates a PLL relock sequence. */
14static __inline__ void bfin_write_PLL_CTL(unsigned int val)
15{
16 unsigned long flags, iwr;
17
18 if (val == bfin_read_PLL_CTL())
19 return;
20
21 flags = hard_local_irq_save();
22 /* Enable the PLL Wakeup bit in SIC IWR */
23 iwr = bfin_read32(SIC_IWR);
24	/* Only allow PLL Wakeup */
25 bfin_write32(SIC_IWR, IWR_ENABLE(0));
26
27 bfin_write16(PLL_CTL, val);
28 SSYNC();
29 asm("IDLE;");
30
31 bfin_write32(SIC_IWR, iwr);
32 hard_local_irq_restore(flags);
33}
34
35/* Writing to VR_CTL initiates a PLL relock sequence. */
36static __inline__ void bfin_write_VR_CTL(unsigned int val)
37{
38 unsigned long flags, iwr;
39
40 if (val == bfin_read_VR_CTL())
41 return;
42
43 flags = hard_local_irq_save();
44 /* Enable the PLL Wakeup bit in SIC IWR */
45 iwr = bfin_read32(SIC_IWR);
46	/* Only allow PLL Wakeup */
47 bfin_write32(SIC_IWR, IWR_ENABLE(0));
48
49 bfin_write16(VR_CTL, val);
50 SSYNC();
51 asm("IDLE;");
52
53 bfin_write32(SIC_IWR, iwr);
54 hard_local_irq_restore(flags);
55}
56
57#endif /* _MACH_PLL_H */
diff --git a/arch/blackfin/mach-bf538/boards/ezkit.c b/arch/blackfin/mach-bf538/boards/ezkit.c
index c6fb0a52f849..e61424ef35eb 100644
--- a/arch/blackfin/mach-bf538/boards/ezkit.c
+++ b/arch/blackfin/mach-bf538/boards/ezkit.c
@@ -82,7 +82,7 @@ static struct resource bfin_uart0_resources[] = {
82#endif 82#endif
83}; 83};
84 84
85unsigned short bfin_uart0_peripherals[] = { 85static unsigned short bfin_uart0_peripherals[] = {
86 P_UART0_TX, P_UART0_RX, 0 86 P_UART0_TX, P_UART0_RX, 0
87}; 87};
88 88
@@ -125,7 +125,7 @@ static struct resource bfin_uart1_resources[] = {
125 }, 125 },
126}; 126};
127 127
128unsigned short bfin_uart1_peripherals[] = { 128static unsigned short bfin_uart1_peripherals[] = {
129 P_UART1_TX, P_UART1_RX, 0 129 P_UART1_TX, P_UART1_RX, 0
130}; 130};
131 131
@@ -168,7 +168,7 @@ static struct resource bfin_uart2_resources[] = {
168 }, 168 },
169}; 169};
170 170
171unsigned short bfin_uart2_peripherals[] = { 171static unsigned short bfin_uart2_peripherals[] = {
172 P_UART2_TX, P_UART2_RX, 0 172 P_UART2_TX, P_UART2_RX, 0
173}; 173};
174 174
@@ -282,9 +282,9 @@ static struct resource bfin_sport0_uart_resources[] = {
282 }, 282 },
283}; 283};
284 284
285unsigned short bfin_sport0_peripherals[] = { 285static unsigned short bfin_sport0_peripherals[] = {
286 P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, 286 P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
287 P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0 287 P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
288}; 288};
289 289
290static struct platform_device bfin_sport0_uart_device = { 290static struct platform_device bfin_sport0_uart_device = {
@@ -316,9 +316,9 @@ static struct resource bfin_sport1_uart_resources[] = {
316 }, 316 },
317}; 317};
318 318
319unsigned short bfin_sport1_peripherals[] = { 319static unsigned short bfin_sport1_peripherals[] = {
320 P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, 320 P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
321 P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0 321 P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
322}; 322};
323 323
324static struct platform_device bfin_sport1_uart_device = { 324static struct platform_device bfin_sport1_uart_device = {
@@ -350,7 +350,7 @@ static struct resource bfin_sport2_uart_resources[] = {
350 }, 350 },
351}; 351};
352 352
353unsigned short bfin_sport2_peripherals[] = { 353static unsigned short bfin_sport2_peripherals[] = {
354 P_SPORT2_TFS, P_SPORT2_DTPRI, P_SPORT2_TSCLK, P_SPORT2_RFS, 354 P_SPORT2_TFS, P_SPORT2_DTPRI, P_SPORT2_TSCLK, P_SPORT2_RFS,
355 P_SPORT2_DRPRI, P_SPORT2_RSCLK, P_SPORT2_DRSEC, P_SPORT2_DTSEC, 0 355 P_SPORT2_DRPRI, P_SPORT2_RSCLK, P_SPORT2_DRSEC, P_SPORT2_DTSEC, 0
356}; 356};
@@ -384,7 +384,7 @@ static struct resource bfin_sport3_uart_resources[] = {
384 }, 384 },
385}; 385};
386 386
387unsigned short bfin_sport3_peripherals[] = { 387static unsigned short bfin_sport3_peripherals[] = {
388 P_SPORT3_TFS, P_SPORT3_DTPRI, P_SPORT3_TSCLK, P_SPORT3_RFS, 388 P_SPORT3_TFS, P_SPORT3_DTPRI, P_SPORT3_TSCLK, P_SPORT3_RFS,
389 P_SPORT3_DRPRI, P_SPORT3_RSCLK, P_SPORT3_DRSEC, P_SPORT3_DTSEC, 0 389 P_SPORT3_DRPRI, P_SPORT3_RSCLK, P_SPORT3_DRSEC, P_SPORT3_DTSEC, 0
390}; 390};
@@ -402,7 +402,7 @@ static struct platform_device bfin_sport3_uart_device = {
402#endif 402#endif
403 403
404#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE) 404#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE)
405unsigned short bfin_can_peripherals[] = { 405static unsigned short bfin_can_peripherals[] = {
406 P_CAN0_RX, P_CAN0_TX, 0 406 P_CAN0_RX, P_CAN0_TX, 0
407}; 407};
408 408
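
The ezkit.c hunks above make the board pin lists static (each is reached only through its device's platform_data) and drop the secondary SPORT0/SPORT1 data and frame-sync pins. A sketch of how a driver consumes such a 0-terminated list, assuming the usual Blackfin portmux API:

/* Hypothetical consumer of a board pin list handed over via
 * platform_data; peripheral_request_list() claims every entry up to
 * the terminating 0, or fails as a whole. */
#include <linux/platform_device.h>
#include <asm/portmux.h>

static int claim_board_pins(struct platform_device *pdev)
{
	unsigned short *pins = pdev->dev.platform_data;

	if (peripheral_request_list(pins, "bfin-sport-uart"))
		return -EBUSY;
	return 0;
}
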
diff --git a/arch/blackfin/mach-bf538/dma.c b/arch/blackfin/mach-bf538/dma.c
index 5dc022589214..cce8ef5a5cec 100644
--- a/arch/blackfin/mach-bf538/dma.c
+++ b/arch/blackfin/mach-bf538/dma.c
@@ -11,7 +11,7 @@
11#include <asm/blackfin.h> 11#include <asm/blackfin.h>
12#include <asm/dma.h> 12#include <asm/dma.h>
13 13
14struct dma_register *dma_io_base_addr[MAX_DMA_CHANNELS] = { 14struct dma_register * const dma_io_base_addr[MAX_DMA_CHANNELS] = {
15 (struct dma_register *) DMA0_NEXT_DESC_PTR, 15 (struct dma_register *) DMA0_NEXT_DESC_PTR,
16 (struct dma_register *) DMA1_NEXT_DESC_PTR, 16 (struct dma_register *) DMA1_NEXT_DESC_PTR,
17 (struct dma_register *) DMA2_NEXT_DESC_PTR, 17 (struct dma_register *) DMA2_NEXT_DESC_PTR,
@@ -32,14 +32,14 @@ struct dma_register *dma_io_base_addr[MAX_DMA_CHANNELS] = {
32 (struct dma_register *) DMA17_NEXT_DESC_PTR, 32 (struct dma_register *) DMA17_NEXT_DESC_PTR,
33 (struct dma_register *) DMA18_NEXT_DESC_PTR, 33 (struct dma_register *) DMA18_NEXT_DESC_PTR,
34 (struct dma_register *) DMA19_NEXT_DESC_PTR, 34 (struct dma_register *) DMA19_NEXT_DESC_PTR,
35 (struct dma_register *) MDMA0_D0_NEXT_DESC_PTR, 35 (struct dma_register *) MDMA_D0_NEXT_DESC_PTR,
36 (struct dma_register *) MDMA0_S0_NEXT_DESC_PTR, 36 (struct dma_register *) MDMA_S0_NEXT_DESC_PTR,
37 (struct dma_register *) MDMA0_D1_NEXT_DESC_PTR, 37 (struct dma_register *) MDMA_D1_NEXT_DESC_PTR,
38 (struct dma_register *) MDMA0_S1_NEXT_DESC_PTR, 38 (struct dma_register *) MDMA_S1_NEXT_DESC_PTR,
39 (struct dma_register *) MDMA1_D0_NEXT_DESC_PTR, 39 (struct dma_register *) MDMA_D2_NEXT_DESC_PTR,
40 (struct dma_register *) MDMA1_S0_NEXT_DESC_PTR, 40 (struct dma_register *) MDMA_S2_NEXT_DESC_PTR,
41 (struct dma_register *) MDMA1_D1_NEXT_DESC_PTR, 41 (struct dma_register *) MDMA_D3_NEXT_DESC_PTR,
42 (struct dma_register *) MDMA1_S1_NEXT_DESC_PTR, 42 (struct dma_register *) MDMA_S3_NEXT_DESC_PTR,
43}; 43};
44EXPORT_SYMBOL(dma_io_base_addr); 44EXPORT_SYMBOL(dma_io_base_addr);
45 45
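
Two changes above: the channel lookup table becomes * const (its pointers are fixed at build time), and the BF538's memory-DMA registers move from the two-block MDMA0_*/MDMA1_* names to the flat MDMA_D0..D3/S0..S3 scheme used elsewhere in the arch. A sketch of how the table is consumed:

/* Hypothetical lookup: fetch a channel's MMR block through the
 * now-const table and write its config ('cfg' field name assumed). */
#include <asm/dma.h>

static void kick_channel(unsigned int ch, unsigned short config)
{
	struct dma_register *regs = dma_io_base_addr[ch];

	bfin_write16(&regs->cfg, config);
}
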
diff --git a/arch/blackfin/mach-bf538/include/mach/bfin_serial.h b/arch/blackfin/mach-bf538/include/mach/bfin_serial.h
new file mode 100644
index 000000000000..c66e2760aad3
--- /dev/null
+++ b/arch/blackfin/mach-bf538/include/mach/bfin_serial.h
@@ -0,0 +1,14 @@
1/*
2 * mach/bfin_serial.h - Blackfin UART/Serial definitions
3 *
4 * Copyright 2006-2010 Analog Devices Inc.
5 *
6 * Licensed under the GPL-2 or later.
7 */
8
9#ifndef __BFIN_MACH_SERIAL_H__
10#define __BFIN_MACH_SERIAL_H__
11
12#define BFIN_UART_NR_PORTS 3
13
14#endif
diff --git a/arch/blackfin/mach-bf538/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf538/include/mach/bfin_serial_5xx.h
index 5c148142f041..beb502e9cb33 100644
--- a/arch/blackfin/mach-bf538/include/mach/bfin_serial_5xx.h
+++ b/arch/blackfin/mach-bf538/include/mach/bfin_serial_5xx.h
@@ -4,36 +4,9 @@
4 * Licensed under the GPL-2 or later. 4 * Licensed under the GPL-2 or later.
5 */ 5 */
6 6
7#include <linux/serial.h>
8#include <asm/dma.h> 7#include <asm/dma.h>
9#include <asm/portmux.h> 8#include <asm/portmux.h>
10 9
11#define UART_GET_CHAR(uart) bfin_read16(((uart)->port.membase + OFFSET_RBR))
12#define UART_GET_DLL(uart) bfin_read16(((uart)->port.membase + OFFSET_DLL))
13#define UART_GET_IER(uart) bfin_read16(((uart)->port.membase + OFFSET_IER))
14#define UART_GET_DLH(uart) bfin_read16(((uart)->port.membase + OFFSET_DLH))
15#define UART_GET_IIR(uart) bfin_read16(((uart)->port.membase + OFFSET_IIR))
16#define UART_GET_LCR(uart) bfin_read16(((uart)->port.membase + OFFSET_LCR))
17#define UART_GET_GCTL(uart) bfin_read16(((uart)->port.membase + OFFSET_GCTL))
18
19#define UART_PUT_CHAR(uart, v) bfin_write16(((uart)->port.membase + OFFSET_THR), v)
20#define UART_PUT_DLL(uart, v) bfin_write16(((uart)->port.membase + OFFSET_DLL), v)
21#define UART_PUT_IER(uart, v) bfin_write16(((uart)->port.membase + OFFSET_IER), v)
22#define UART_SET_IER(uart, v) UART_PUT_IER(uart, UART_GET_IER(uart) | (v))
23#define UART_CLEAR_IER(uart, v) UART_PUT_IER(uart, UART_GET_IER(uart) & ~(v))
24#define UART_PUT_DLH(uart, v) bfin_write16(((uart)->port.membase + OFFSET_DLH), v)
25#define UART_PUT_LCR(uart, v) bfin_write16(((uart)->port.membase + OFFSET_LCR), v)
26#define UART_PUT_GCTL(uart, v) bfin_write16(((uart)->port.membase + OFFSET_GCTL), v)
27
28#define UART_SET_DLAB(uart) do { UART_PUT_LCR(uart, UART_GET_LCR(uart) | DLAB); SSYNC(); } while (0)
29#define UART_CLEAR_DLAB(uart) do { UART_PUT_LCR(uart, UART_GET_LCR(uart) & ~DLAB); SSYNC(); } while (0)
30
31#define UART_GET_CTS(x) gpio_get_value(x->cts_pin)
32#define UART_DISABLE_RTS(x) gpio_set_value(x->rts_pin, 1)
33#define UART_ENABLE_RTS(x) gpio_set_value(x->rts_pin, 0)
34#define UART_ENABLE_INTS(x, v) UART_PUT_IER(x, v)
35#define UART_DISABLE_INTS(x) UART_PUT_IER(x, 0)
36
37#if defined(CONFIG_BFIN_UART0_CTSRTS) || defined(CONFIG_BFIN_UART1_CTSRTS) 10#if defined(CONFIG_BFIN_UART0_CTSRTS) || defined(CONFIG_BFIN_UART1_CTSRTS)
38# define CONFIG_SERIAL_BFIN_CTSRTS 11# define CONFIG_SERIAL_BFIN_CTSRTS
39 12
@@ -54,50 +27,6 @@
54# endif 27# endif
55#endif 28#endif
56 29
57#define BFIN_UART_TX_FIFO_SIZE 2
58
59/*
60 * The pin configuration differs from the schematic
61 */
62struct bfin_serial_port {
63 struct uart_port port;
64 unsigned int old_status;
65 int status_irq;
66 unsigned int lsr;
67#ifdef CONFIG_SERIAL_BFIN_DMA
68 int tx_done;
69 int tx_count;
70 struct circ_buf rx_dma_buf;
71 struct timer_list rx_dma_timer;
72 int rx_dma_nrows;
73 unsigned int tx_dma_channel;
74 unsigned int rx_dma_channel;
75 struct work_struct tx_dma_workqueue;
76#endif
77#ifdef CONFIG_SERIAL_BFIN_CTSRTS
78 struct timer_list cts_timer;
79 int cts_pin;
80 int rts_pin;
81#endif
82};
83
84/* The hardware clears the LSR bits upon read, so we need to cache
85 * some of the more fun bits in software so they don't get lost
86 * when checking the LSR in other code paths (TX).
87 */
88static inline unsigned int UART_GET_LSR(struct bfin_serial_port *uart)
89{
90 unsigned int lsr = bfin_read16(uart->port.membase + OFFSET_LSR);
91 uart->lsr |= (lsr & (BI|FE|PE|OE));
92 return lsr | uart->lsr;
93}
94
95static inline void UART_CLEAR_LSR(struct bfin_serial_port *uart)
96{
97 uart->lsr = 0;
98 bfin_write16(uart->port.membase + OFFSET_LSR, -1);
99}
100
101struct bfin_serial_res { 30struct bfin_serial_res {
102 unsigned long uart_base_addr; 31 unsigned long uart_base_addr;
103 int uart_irq; 32 int uart_irq;
@@ -160,3 +89,5 @@ struct bfin_serial_res bfin_serial_resource[] = {
160}; 89};
161 90
162#define DRIVER_NAME "bfin-uart" 91#define DRIVER_NAME "bfin-uart"
92
93#include <asm/bfin_serial.h>
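
The accessor macros and struct bfin_serial_port deleted above migrate into the shared <asm/bfin_serial.h> pulled in at the end of the file. The one subtle piece is the LSR cache: the hardware clears line-status bits on read, so sticky error bits must be latched in software. A sketch of that pattern as the shared header is expected to keep it:

/* LSR is clear-on-read, so error bits are held in software until
 * another code path (e.g. TX) has seen them; mirrors the removed
 * UART_GET_LSR()/UART_CLEAR_LSR() pair. */
static inline unsigned int uart_get_lsr(struct bfin_serial_port *uart)
{
	unsigned int lsr = bfin_read16(uart->port.membase + OFFSET_LSR);

	uart->lsr |= lsr & (BI | FE | PE | OE);
	return lsr | uart->lsr;
}
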
diff --git a/arch/blackfin/mach-bf538/include/mach/blackfin.h b/arch/blackfin/mach-bf538/include/mach/blackfin.h
index 08b5eabb1ed5..791d08400cf0 100644
--- a/arch/blackfin/mach-bf538/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf538/include/mach/blackfin.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2008-2009 Analog Devices Inc. 2 * Copyright 2008-2010 Analog Devices Inc.
3 * 3 *
4 * Licensed under the GPL-2 or later. 4 * Licensed under the GPL-2 or later.
5 */ 5 */
@@ -10,31 +10,24 @@
10#define BF538_FAMILY 10#define BF538_FAMILY
11 11
12#include "bf538.h" 12#include "bf538.h"
13#include "defBF539.h"
14#include "anomaly.h" 13#include "anomaly.h"
15 14
16 15#include <asm/def_LPBlackfin.h>
17#if !defined(__ASSEMBLY__) 16#ifdef CONFIG_BF538
18#include "cdefBF538.h" 17# include "defBF538.h"
19
20#if defined(CONFIG_BF539)
21#include "cdefBF539.h"
22#endif 18#endif
19#ifdef CONFIG_BF539
20# include "defBF539.h"
23#endif 21#endif
24 22
25#define BFIN_UART_NR_PORTS 3 23#ifndef __ASSEMBLY__
26 24# include <asm/cdef_LPBlackfin.h>
27#define OFFSET_THR 0x00 /* Transmit Holding register */ 25# ifdef CONFIG_BF538
28#define OFFSET_RBR 0x00 /* Receive Buffer register */ 26# include "cdefBF538.h"
29#define OFFSET_DLL 0x00 /* Divisor Latch (Low-Byte) */ 27# endif
30#define OFFSET_IER 0x04 /* Interrupt Enable Register */ 28# ifdef CONFIG_BF539
31#define OFFSET_DLH 0x04 /* Divisor Latch (High-Byte) */ 29# include "cdefBF539.h"
32#define OFFSET_IIR 0x08 /* Interrupt Identification Register */ 30# endif
33#define OFFSET_LCR 0x0C /* Line Control Register */ 31#endif
34#define OFFSET_MCR 0x10 /* Modem Control Register */
35#define OFFSET_LSR 0x14 /* Line Status Register */
36#define OFFSET_MSR 0x18 /* Modem Status Register */
37#define OFFSET_SCR 0x1C /* SCR Scratch Register */
38#define OFFSET_GCTL 0x24 /* Global Control Register */
39 32
40#endif 33#endif
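
The rework above layers the headers: generic core definitions come from <asm/def_LPBlackfin.h> and <asm/cdef_LPBlackfin.h>, while the part-specific defBF538.h/defBF539.h and cdefBF538.h/cdefBF539.h are keyed off CONFIG_BF538/CONFIG_BF539 instead of always dragging in the BF539 superset; the UART port count and OFFSET_* constants move out to the serial headers. A sketch of what a CONFIG_BF539 build now sees:

/* With CONFIG_BF539 set, mach/blackfin.h exposes both the BF538
 * common accessors and the BF539-only MXVR ones (sketch). */
#include <mach/blackfin.h>

static unsigned short mxvr_config(void)
{
	return bfin_read_MXVR_CONFIG();
}
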
diff --git a/arch/blackfin/mach-bf538/include/mach/cdefBF538.h b/arch/blackfin/mach-bf538/include/mach/cdefBF538.h
index 085b06b8c0a5..f6a56792180b 100644
--- a/arch/blackfin/mach-bf538/include/mach/cdefBF538.h
+++ b/arch/blackfin/mach-bf538/include/mach/cdefBF538.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2008-2009 Analog Devices Inc. 2 * Copyright 2008-2010 Analog Devices Inc.
3 * 3 *
4 * Licensed under the GPL-2 or later. 4 * Licensed under the GPL-2 or later.
5 */ 5 */
@@ -7,14 +7,6 @@
7#ifndef _CDEF_BF538_H 7#ifndef _CDEF_BF538_H
8#define _CDEF_BF538_H 8#define _CDEF_BF538_H
9 9
10#include <asm/blackfin.h>
11
12/*include all Core registers and bit definitions*/
13#include "defBF539.h"
14
15/*include core specific register pointer definitions*/
16#include <asm/cdef_LPBlackfin.h>
17
18#define bfin_writePTR(addr, val) bfin_write32(addr, val) 10#define bfin_writePTR(addr, val) bfin_write32(addr, val)
19 11
20#define bfin_read_PLL_CTL() bfin_read16(PLL_CTL) 12#define bfin_read_PLL_CTL() bfin_read16(PLL_CTL)
@@ -487,10 +479,10 @@
487#define bfin_write_EBIU_SDRRC(val) bfin_write16(EBIU_SDRRC, val) 479#define bfin_write_EBIU_SDRRC(val) bfin_write16(EBIU_SDRRC, val)
488#define bfin_read_EBIU_SDSTAT() bfin_read16(EBIU_SDSTAT) 480#define bfin_read_EBIU_SDSTAT() bfin_read16(EBIU_SDSTAT)
489#define bfin_write_EBIU_SDSTAT(val) bfin_write16(EBIU_SDSTAT, val) 481#define bfin_write_EBIU_SDSTAT(val) bfin_write16(EBIU_SDSTAT, val)
490#define bfin_read_DMA0_TC_PER() bfin_read16(DMA0_TC_PER) 482#define bfin_read_DMAC0_TC_PER() bfin_read16(DMAC0_TC_PER)
491#define bfin_write_DMA0_TC_PER(val) bfin_write16(DMA0_TC_PER, val) 483#define bfin_write_DMAC0_TC_PER(val) bfin_write16(DMAC0_TC_PER, val)
492#define bfin_read_DMA0_TC_CNT() bfin_read16(DMA0_TC_CNT) 484#define bfin_read_DMAC0_TC_CNT() bfin_read16(DMAC0_TC_CNT)
493#define bfin_write_DMA0_TC_CNT(val) bfin_write16(DMA0_TC_CNT, val) 485#define bfin_write_DMAC0_TC_CNT(val) bfin_write16(DMAC0_TC_CNT, val)
494#define bfin_read_DMA0_NEXT_DESC_PTR() bfin_readPTR(DMA0_NEXT_DESC_PTR) 486#define bfin_read_DMA0_NEXT_DESC_PTR() bfin_readPTR(DMA0_NEXT_DESC_PTR)
495#define bfin_write_DMA0_NEXT_DESC_PTR(val) bfin_writePTR(DMA0_NEXT_DESC_PTR, val) 487#define bfin_write_DMA0_NEXT_DESC_PTR(val) bfin_writePTR(DMA0_NEXT_DESC_PTR, val)
496#define bfin_read_DMA0_START_ADDR() bfin_readPTR(DMA0_START_ADDR) 488#define bfin_read_DMA0_START_ADDR() bfin_readPTR(DMA0_START_ADDR)
@@ -699,10 +691,10 @@
699#define bfin_write_DMA7_CURR_X_COUNT(val) bfin_write16(DMA7_CURR_X_COUNT, val) 691#define bfin_write_DMA7_CURR_X_COUNT(val) bfin_write16(DMA7_CURR_X_COUNT, val)
700#define bfin_read_DMA7_CURR_Y_COUNT() bfin_read16(DMA7_CURR_Y_COUNT) 692#define bfin_read_DMA7_CURR_Y_COUNT() bfin_read16(DMA7_CURR_Y_COUNT)
701#define bfin_write_DMA7_CURR_Y_COUNT(val) bfin_write16(DMA7_CURR_Y_COUNT, val) 693#define bfin_write_DMA7_CURR_Y_COUNT(val) bfin_write16(DMA7_CURR_Y_COUNT, val)
702#define bfin_read_DMA1_TC_PER() bfin_read16(DMA1_TC_PER) 694#define bfin_read_DMAC1_TC_PER() bfin_read16(DMAC1_TC_PER)
703#define bfin_write_DMA1_TC_PER(val) bfin_write16(DMA1_TC_PER, val) 695#define bfin_write_DMAC1_TC_PER(val) bfin_write16(DMAC1_TC_PER, val)
704#define bfin_read_DMA1_TC_CNT() bfin_read16(DMA1_TC_CNT) 696#define bfin_read_DMAC1_TC_CNT() bfin_read16(DMAC1_TC_CNT)
705#define bfin_write_DMA1_TC_CNT(val) bfin_write16(DMA1_TC_CNT, val) 697#define bfin_write_DMAC1_TC_CNT(val) bfin_write16(DMAC1_TC_CNT, val)
706#define bfin_read_DMA8_NEXT_DESC_PTR() bfin_readPTR(DMA8_NEXT_DESC_PTR) 698#define bfin_read_DMA8_NEXT_DESC_PTR() bfin_readPTR(DMA8_NEXT_DESC_PTR)
707#define bfin_write_DMA8_NEXT_DESC_PTR(val) bfin_writePTR(DMA8_NEXT_DESC_PTR, val) 699#define bfin_write_DMA8_NEXT_DESC_PTR(val) bfin_writePTR(DMA8_NEXT_DESC_PTR, val)
708#define bfin_read_DMA8_START_ADDR() bfin_readPTR(DMA8_START_ADDR) 700#define bfin_read_DMA8_START_ADDR() bfin_readPTR(DMA8_START_ADDR)
@@ -1015,273 +1007,214 @@
1015#define bfin_write_DMA19_CURR_X_COUNT(val) bfin_write16(DMA19_CURR_X_COUNT, val) 1007#define bfin_write_DMA19_CURR_X_COUNT(val) bfin_write16(DMA19_CURR_X_COUNT, val)
1016#define bfin_read_DMA19_CURR_Y_COUNT() bfin_read16(DMA19_CURR_Y_COUNT) 1008#define bfin_read_DMA19_CURR_Y_COUNT() bfin_read16(DMA19_CURR_Y_COUNT)
1017#define bfin_write_DMA19_CURR_Y_COUNT(val) bfin_write16(DMA19_CURR_Y_COUNT, val) 1009#define bfin_write_DMA19_CURR_Y_COUNT(val) bfin_write16(DMA19_CURR_Y_COUNT, val)
1018#define bfin_read_MDMA0_D0_NEXT_DESC_PTR() bfin_readPTR(MDMA0_D0_NEXT_DESC_PTR) 1010#define bfin_read_MDMA_D0_NEXT_DESC_PTR() bfin_readPTR(MDMA_D0_NEXT_DESC_PTR)
1019#define bfin_write_MDMA0_D0_NEXT_DESC_PTR(val) bfin_writePTR(MDMA0_D0_NEXT_DESC_PTR, val) 1011#define bfin_write_MDMA_D0_NEXT_DESC_PTR(val) bfin_writePTR(MDMA_D0_NEXT_DESC_PTR, val)
1020#define bfin_read_MDMA0_D0_START_ADDR() bfin_readPTR(MDMA0_D0_START_ADDR) 1012#define bfin_read_MDMA_D0_START_ADDR() bfin_readPTR(MDMA_D0_START_ADDR)
1021#define bfin_write_MDMA0_D0_START_ADDR(val) bfin_writePTR(MDMA0_D0_START_ADDR, val) 1013#define bfin_write_MDMA_D0_START_ADDR(val) bfin_writePTR(MDMA_D0_START_ADDR, val)
1022#define bfin_read_MDMA0_D0_CONFIG() bfin_read16(MDMA0_D0_CONFIG) 1014#define bfin_read_MDMA_D0_CONFIG() bfin_read16(MDMA_D0_CONFIG)
1023#define bfin_write_MDMA0_D0_CONFIG(val) bfin_write16(MDMA0_D0_CONFIG, val) 1015#define bfin_write_MDMA_D0_CONFIG(val) bfin_write16(MDMA_D0_CONFIG, val)
1024#define bfin_read_MDMA0_D0_X_COUNT() bfin_read16(MDMA0_D0_X_COUNT) 1016#define bfin_read_MDMA_D0_X_COUNT() bfin_read16(MDMA_D0_X_COUNT)
1025#define bfin_write_MDMA0_D0_X_COUNT(val) bfin_write16(MDMA0_D0_X_COUNT, val) 1017#define bfin_write_MDMA_D0_X_COUNT(val) bfin_write16(MDMA_D0_X_COUNT, val)
1026#define bfin_read_MDMA0_D0_X_MODIFY() bfin_read16(MDMA0_D0_X_MODIFY) 1018#define bfin_read_MDMA_D0_X_MODIFY() bfin_read16(MDMA_D0_X_MODIFY)
1027#define bfin_write_MDMA0_D0_X_MODIFY(val) bfin_write16(MDMA0_D0_X_MODIFY, val) 1019#define bfin_write_MDMA_D0_X_MODIFY(val) bfin_write16(MDMA_D0_X_MODIFY, val)
1028#define bfin_read_MDMA0_D0_Y_COUNT() bfin_read16(MDMA0_D0_Y_COUNT) 1020#define bfin_read_MDMA_D0_Y_COUNT() bfin_read16(MDMA_D0_Y_COUNT)
1029#define bfin_write_MDMA0_D0_Y_COUNT(val) bfin_write16(MDMA0_D0_Y_COUNT, val) 1021#define bfin_write_MDMA_D0_Y_COUNT(val) bfin_write16(MDMA_D0_Y_COUNT, val)
1030#define bfin_read_MDMA0_D0_Y_MODIFY() bfin_read16(MDMA0_D0_Y_MODIFY) 1022#define bfin_read_MDMA_D0_Y_MODIFY() bfin_read16(MDMA_D0_Y_MODIFY)
1031#define bfin_write_MDMA0_D0_Y_MODIFY(val) bfin_write16(MDMA0_D0_Y_MODIFY, val) 1023#define bfin_write_MDMA_D0_Y_MODIFY(val) bfin_write16(MDMA_D0_Y_MODIFY, val)
1032#define bfin_read_MDMA0_D0_CURR_DESC_PTR() bfin_readPTR(MDMA0_D0_CURR_DESC_PTR) 1024#define bfin_read_MDMA_D0_CURR_DESC_PTR() bfin_readPTR(MDMA_D0_CURR_DESC_PTR)
1033#define bfin_write_MDMA0_D0_CURR_DESC_PTR(val) bfin_writePTR(MDMA0_D0_CURR_DESC_PTR, val) 1025#define bfin_write_MDMA_D0_CURR_DESC_PTR(val) bfin_writePTR(MDMA_D0_CURR_DESC_PTR, val)
1034#define bfin_read_MDMA0_D0_CURR_ADDR() bfin_readPTR(MDMA0_D0_CURR_ADDR) 1026#define bfin_read_MDMA_D0_CURR_ADDR() bfin_readPTR(MDMA_D0_CURR_ADDR)
1035#define bfin_write_MDMA0_D0_CURR_ADDR(val) bfin_writePTR(MDMA0_D0_CURR_ADDR, val) 1027#define bfin_write_MDMA_D0_CURR_ADDR(val) bfin_writePTR(MDMA_D0_CURR_ADDR, val)
1036#define bfin_read_MDMA0_D0_IRQ_STATUS() bfin_read16(MDMA0_D0_IRQ_STATUS) 1028#define bfin_read_MDMA_D0_IRQ_STATUS() bfin_read16(MDMA_D0_IRQ_STATUS)
1037#define bfin_write_MDMA0_D0_IRQ_STATUS(val) bfin_write16(MDMA0_D0_IRQ_STATUS, val) 1029#define bfin_write_MDMA_D0_IRQ_STATUS(val) bfin_write16(MDMA_D0_IRQ_STATUS, val)
1038#define bfin_read_MDMA0_D0_PERIPHERAL_MAP() bfin_read16(MDMA0_D0_PERIPHERAL_MAP) 1030#define bfin_read_MDMA_D0_PERIPHERAL_MAP() bfin_read16(MDMA_D0_PERIPHERAL_MAP)
1039#define bfin_write_MDMA0_D0_PERIPHERAL_MAP(val) bfin_write16(MDMA0_D0_PERIPHERAL_MAP, val) 1031#define bfin_write_MDMA_D0_PERIPHERAL_MAP(val) bfin_write16(MDMA_D0_PERIPHERAL_MAP, val)
1040#define bfin_read_MDMA0_D0_CURR_X_COUNT() bfin_read16(MDMA0_D0_CURR_X_COUNT) 1032#define bfin_read_MDMA_D0_CURR_X_COUNT() bfin_read16(MDMA_D0_CURR_X_COUNT)
1041#define bfin_write_MDMA0_D0_CURR_X_COUNT(val) bfin_write16(MDMA0_D0_CURR_X_COUNT, val) 1033#define bfin_write_MDMA_D0_CURR_X_COUNT(val) bfin_write16(MDMA_D0_CURR_X_COUNT, val)
1042#define bfin_read_MDMA0_D0_CURR_Y_COUNT() bfin_read16(MDMA0_D0_CURR_Y_COUNT) 1034#define bfin_read_MDMA_D0_CURR_Y_COUNT() bfin_read16(MDMA_D0_CURR_Y_COUNT)
1043#define bfin_write_MDMA0_D0_CURR_Y_COUNT(val) bfin_write16(MDMA0_D0_CURR_Y_COUNT, val) 1035#define bfin_write_MDMA_D0_CURR_Y_COUNT(val) bfin_write16(MDMA_D0_CURR_Y_COUNT, val)
1044#define bfin_read_MDMA0_S0_NEXT_DESC_PTR() bfin_readPTR(MDMA0_S0_NEXT_DESC_PTR) 1036#define bfin_read_MDMA_S0_NEXT_DESC_PTR() bfin_readPTR(MDMA_S0_NEXT_DESC_PTR)
1045#define bfin_write_MDMA0_S0_NEXT_DESC_PTR(val) bfin_writePTR(MDMA0_S0_NEXT_DESC_PTR, val) 1037#define bfin_write_MDMA_S0_NEXT_DESC_PTR(val) bfin_writePTR(MDMA_S0_NEXT_DESC_PTR, val)
1046#define bfin_read_MDMA0_S0_START_ADDR() bfin_readPTR(MDMA0_S0_START_ADDR) 1038#define bfin_read_MDMA_S0_START_ADDR() bfin_readPTR(MDMA_S0_START_ADDR)
1047#define bfin_write_MDMA0_S0_START_ADDR(val) bfin_writePTR(MDMA0_S0_START_ADDR, val) 1039#define bfin_write_MDMA_S0_START_ADDR(val) bfin_writePTR(MDMA_S0_START_ADDR, val)
1048#define bfin_read_MDMA0_S0_CONFIG() bfin_read16(MDMA0_S0_CONFIG) 1040#define bfin_read_MDMA_S0_CONFIG() bfin_read16(MDMA_S0_CONFIG)
1049#define bfin_write_MDMA0_S0_CONFIG(val) bfin_write16(MDMA0_S0_CONFIG, val) 1041#define bfin_write_MDMA_S0_CONFIG(val) bfin_write16(MDMA_S0_CONFIG, val)
1050#define bfin_read_MDMA0_S0_X_COUNT() bfin_read16(MDMA0_S0_X_COUNT) 1042#define bfin_read_MDMA_S0_X_COUNT() bfin_read16(MDMA_S0_X_COUNT)
1051#define bfin_write_MDMA0_S0_X_COUNT(val) bfin_write16(MDMA0_S0_X_COUNT, val) 1043#define bfin_write_MDMA_S0_X_COUNT(val) bfin_write16(MDMA_S0_X_COUNT, val)
1052#define bfin_read_MDMA0_S0_X_MODIFY() bfin_read16(MDMA0_S0_X_MODIFY) 1044#define bfin_read_MDMA_S0_X_MODIFY() bfin_read16(MDMA_S0_X_MODIFY)
1053#define bfin_write_MDMA0_S0_X_MODIFY(val) bfin_write16(MDMA0_S0_X_MODIFY, val) 1045#define bfin_write_MDMA_S0_X_MODIFY(val) bfin_write16(MDMA_S0_X_MODIFY, val)
1054#define bfin_read_MDMA0_S0_Y_COUNT() bfin_read16(MDMA0_S0_Y_COUNT) 1046#define bfin_read_MDMA_S0_Y_COUNT() bfin_read16(MDMA_S0_Y_COUNT)
1055#define bfin_write_MDMA0_S0_Y_COUNT(val) bfin_write16(MDMA0_S0_Y_COUNT, val) 1047#define bfin_write_MDMA_S0_Y_COUNT(val) bfin_write16(MDMA_S0_Y_COUNT, val)
1056#define bfin_read_MDMA0_S0_Y_MODIFY() bfin_read16(MDMA0_S0_Y_MODIFY) 1048#define bfin_read_MDMA_S0_Y_MODIFY() bfin_read16(MDMA_S0_Y_MODIFY)
1057#define bfin_write_MDMA0_S0_Y_MODIFY(val) bfin_write16(MDMA0_S0_Y_MODIFY, val) 1049#define bfin_write_MDMA_S0_Y_MODIFY(val) bfin_write16(MDMA_S0_Y_MODIFY, val)
1058#define bfin_read_MDMA0_S0_CURR_DESC_PTR() bfin_readPTR(MDMA0_S0_CURR_DESC_PTR) 1050#define bfin_read_MDMA_S0_CURR_DESC_PTR() bfin_readPTR(MDMA_S0_CURR_DESC_PTR)
1059#define bfin_write_MDMA0_S0_CURR_DESC_PTR(val) bfin_writePTR(MDMA0_S0_CURR_DESC_PTR, val) 1051#define bfin_write_MDMA_S0_CURR_DESC_PTR(val) bfin_writePTR(MDMA_S0_CURR_DESC_PTR, val)
1060#define bfin_read_MDMA0_S0_CURR_ADDR() bfin_readPTR(MDMA0_S0_CURR_ADDR) 1052#define bfin_read_MDMA_S0_CURR_ADDR() bfin_readPTR(MDMA_S0_CURR_ADDR)
1061#define bfin_write_MDMA0_S0_CURR_ADDR(val) bfin_writePTR(MDMA0_S0_CURR_ADDR, val) 1053#define bfin_write_MDMA_S0_CURR_ADDR(val) bfin_writePTR(MDMA_S0_CURR_ADDR, val)
1062#define bfin_read_MDMA0_S0_IRQ_STATUS() bfin_read16(MDMA0_S0_IRQ_STATUS) 1054#define bfin_read_MDMA_S0_IRQ_STATUS() bfin_read16(MDMA_S0_IRQ_STATUS)
1063#define bfin_write_MDMA0_S0_IRQ_STATUS(val) bfin_write16(MDMA0_S0_IRQ_STATUS, val) 1055#define bfin_write_MDMA_S0_IRQ_STATUS(val) bfin_write16(MDMA_S0_IRQ_STATUS, val)
1064#define bfin_read_MDMA0_S0_PERIPHERAL_MAP() bfin_read16(MDMA0_S0_PERIPHERAL_MAP) 1056#define bfin_read_MDMA_S0_PERIPHERAL_MAP() bfin_read16(MDMA_S0_PERIPHERAL_MAP)
1065#define bfin_write_MDMA0_S0_PERIPHERAL_MAP(val) bfin_write16(MDMA0_S0_PERIPHERAL_MAP, val) 1057#define bfin_write_MDMA_S0_PERIPHERAL_MAP(val) bfin_write16(MDMA_S0_PERIPHERAL_MAP, val)
1066#define bfin_read_MDMA0_S0_CURR_X_COUNT() bfin_read16(MDMA0_S0_CURR_X_COUNT) 1058#define bfin_read_MDMA_S0_CURR_X_COUNT() bfin_read16(MDMA_S0_CURR_X_COUNT)
1067#define bfin_write_MDMA0_S0_CURR_X_COUNT(val) bfin_write16(MDMA0_S0_CURR_X_COUNT, val) 1059#define bfin_write_MDMA_S0_CURR_X_COUNT(val) bfin_write16(MDMA_S0_CURR_X_COUNT, val)
1068#define bfin_read_MDMA0_S0_CURR_Y_COUNT() bfin_read16(MDMA0_S0_CURR_Y_COUNT) 1060#define bfin_read_MDMA_S0_CURR_Y_COUNT() bfin_read16(MDMA_S0_CURR_Y_COUNT)
1069#define bfin_write_MDMA0_S0_CURR_Y_COUNT(val) bfin_write16(MDMA0_S0_CURR_Y_COUNT, val) 1061#define bfin_write_MDMA_S0_CURR_Y_COUNT(val) bfin_write16(MDMA_S0_CURR_Y_COUNT, val)
1070#define bfin_read_MDMA0_D1_NEXT_DESC_PTR() bfin_readPTR(MDMA0_D1_NEXT_DESC_PTR) 1062#define bfin_read_MDMA_D1_NEXT_DESC_PTR() bfin_readPTR(MDMA_D1_NEXT_DESC_PTR)
1071#define bfin_write_MDMA0_D1_NEXT_DESC_PTR(val) bfin_writePTR(MDMA0_D1_NEXT_DESC_PTR, val) 1063#define bfin_write_MDMA_D1_NEXT_DESC_PTR(val) bfin_writePTR(MDMA_D1_NEXT_DESC_PTR, val)
1072#define bfin_read_MDMA0_D1_START_ADDR() bfin_readPTR(MDMA0_D1_START_ADDR) 1064#define bfin_read_MDMA_D1_START_ADDR() bfin_readPTR(MDMA_D1_START_ADDR)
1073#define bfin_write_MDMA0_D1_START_ADDR(val) bfin_writePTR(MDMA0_D1_START_ADDR, val) 1065#define bfin_write_MDMA_D1_START_ADDR(val) bfin_writePTR(MDMA_D1_START_ADDR, val)
1074#define bfin_read_MDMA0_D1_CONFIG() bfin_read16(MDMA0_D1_CONFIG) 1066#define bfin_read_MDMA_D1_CONFIG() bfin_read16(MDMA_D1_CONFIG)
1075#define bfin_write_MDMA0_D1_CONFIG(val) bfin_write16(MDMA0_D1_CONFIG, val) 1067#define bfin_write_MDMA_D1_CONFIG(val) bfin_write16(MDMA_D1_CONFIG, val)
1076#define bfin_read_MDMA0_D1_X_COUNT() bfin_read16(MDMA0_D1_X_COUNT) 1068#define bfin_read_MDMA_D1_X_COUNT() bfin_read16(MDMA_D1_X_COUNT)
1077#define bfin_write_MDMA0_D1_X_COUNT(val) bfin_write16(MDMA0_D1_X_COUNT, val) 1069#define bfin_write_MDMA_D1_X_COUNT(val) bfin_write16(MDMA_D1_X_COUNT, val)
1078#define bfin_read_MDMA0_D1_X_MODIFY() bfin_read16(MDMA0_D1_X_MODIFY) 1070#define bfin_read_MDMA_D1_X_MODIFY() bfin_read16(MDMA_D1_X_MODIFY)
1079#define bfin_write_MDMA0_D1_X_MODIFY(val) bfin_write16(MDMA0_D1_X_MODIFY, val) 1071#define bfin_write_MDMA_D1_X_MODIFY(val) bfin_write16(MDMA_D1_X_MODIFY, val)
1080#define bfin_read_MDMA0_D1_Y_COUNT() bfin_read16(MDMA0_D1_Y_COUNT) 1072#define bfin_read_MDMA_D1_Y_COUNT() bfin_read16(MDMA_D1_Y_COUNT)
1081#define bfin_write_MDMA0_D1_Y_COUNT(val) bfin_write16(MDMA0_D1_Y_COUNT, val) 1073#define bfin_write_MDMA_D1_Y_COUNT(val) bfin_write16(MDMA_D1_Y_COUNT, val)
1082#define bfin_read_MDMA0_D1_Y_MODIFY() bfin_read16(MDMA0_D1_Y_MODIFY) 1074#define bfin_read_MDMA_D1_Y_MODIFY() bfin_read16(MDMA_D1_Y_MODIFY)
1083#define bfin_write_MDMA0_D1_Y_MODIFY(val) bfin_write16(MDMA0_D1_Y_MODIFY, val) 1075#define bfin_write_MDMA_D1_Y_MODIFY(val) bfin_write16(MDMA_D1_Y_MODIFY, val)
1084#define bfin_read_MDMA0_D1_CURR_DESC_PTR() bfin_readPTR(MDMA0_D1_CURR_DESC_PTR) 1076#define bfin_read_MDMA_D1_CURR_DESC_PTR() bfin_readPTR(MDMA_D1_CURR_DESC_PTR)
1085#define bfin_write_MDMA0_D1_CURR_DESC_PTR(val) bfin_writePTR(MDMA0_D1_CURR_DESC_PTR, val) 1077#define bfin_write_MDMA_D1_CURR_DESC_PTR(val) bfin_writePTR(MDMA_D1_CURR_DESC_PTR, val)
1086#define bfin_read_MDMA0_D1_CURR_ADDR() bfin_readPTR(MDMA0_D1_CURR_ADDR) 1078#define bfin_read_MDMA_D1_CURR_ADDR() bfin_readPTR(MDMA_D1_CURR_ADDR)
1087#define bfin_write_MDMA0_D1_CURR_ADDR(val) bfin_writePTR(MDMA0_D1_CURR_ADDR, val) 1079#define bfin_write_MDMA_D1_CURR_ADDR(val) bfin_writePTR(MDMA_D1_CURR_ADDR, val)
1088#define bfin_read_MDMA0_D1_IRQ_STATUS() bfin_read16(MDMA0_D1_IRQ_STATUS) 1080#define bfin_read_MDMA_D1_IRQ_STATUS() bfin_read16(MDMA_D1_IRQ_STATUS)
1089#define bfin_write_MDMA0_D1_IRQ_STATUS(val) bfin_write16(MDMA0_D1_IRQ_STATUS, val) 1081#define bfin_write_MDMA_D1_IRQ_STATUS(val) bfin_write16(MDMA_D1_IRQ_STATUS, val)
1090#define bfin_read_MDMA0_D1_PERIPHERAL_MAP() bfin_read16(MDMA0_D1_PERIPHERAL_MAP) 1082#define bfin_read_MDMA_D1_PERIPHERAL_MAP() bfin_read16(MDMA_D1_PERIPHERAL_MAP)
1091#define bfin_write_MDMA0_D1_PERIPHERAL_MAP(val) bfin_write16(MDMA0_D1_PERIPHERAL_MAP, val) 1083#define bfin_write_MDMA_D1_PERIPHERAL_MAP(val) bfin_write16(MDMA_D1_PERIPHERAL_MAP, val)
1092#define bfin_read_MDMA0_D1_CURR_X_COUNT() bfin_read16(MDMA0_D1_CURR_X_COUNT) 1084#define bfin_read_MDMA_D1_CURR_X_COUNT() bfin_read16(MDMA_D1_CURR_X_COUNT)
1093#define bfin_write_MDMA0_D1_CURR_X_COUNT(val) bfin_write16(MDMA0_D1_CURR_X_COUNT, val) 1085#define bfin_write_MDMA_D1_CURR_X_COUNT(val) bfin_write16(MDMA_D1_CURR_X_COUNT, val)
1094#define bfin_read_MDMA0_D1_CURR_Y_COUNT() bfin_read16(MDMA0_D1_CURR_Y_COUNT) 1086#define bfin_read_MDMA_D1_CURR_Y_COUNT() bfin_read16(MDMA_D1_CURR_Y_COUNT)
1095#define bfin_write_MDMA0_D1_CURR_Y_COUNT(val) bfin_write16(MDMA0_D1_CURR_Y_COUNT, val) 1087#define bfin_write_MDMA_D1_CURR_Y_COUNT(val) bfin_write16(MDMA_D1_CURR_Y_COUNT, val)
1096#define bfin_read_MDMA0_S1_NEXT_DESC_PTR() bfin_readPTR(MDMA0_S1_NEXT_DESC_PTR) 1088#define bfin_read_MDMA_S1_NEXT_DESC_PTR() bfin_readPTR(MDMA_S1_NEXT_DESC_PTR)
1097#define bfin_write_MDMA0_S1_NEXT_DESC_PTR(val) bfin_writePTR(MDMA0_S1_NEXT_DESC_PTR, val) 1089#define bfin_write_MDMA_S1_NEXT_DESC_PTR(val) bfin_writePTR(MDMA_S1_NEXT_DESC_PTR, val)
1098#define bfin_read_MDMA0_S1_START_ADDR() bfin_readPTR(MDMA0_S1_START_ADDR) 1090#define bfin_read_MDMA_S1_START_ADDR() bfin_readPTR(MDMA_S1_START_ADDR)
1099#define bfin_write_MDMA0_S1_START_ADDR(val) bfin_writePTR(MDMA0_S1_START_ADDR, val) 1091#define bfin_write_MDMA_S1_START_ADDR(val) bfin_writePTR(MDMA_S1_START_ADDR, val)
1100#define bfin_read_MDMA0_S1_CONFIG() bfin_read16(MDMA0_S1_CONFIG) 1092#define bfin_read_MDMA_S1_CONFIG() bfin_read16(MDMA_S1_CONFIG)
1101#define bfin_write_MDMA0_S1_CONFIG(val) bfin_write16(MDMA0_S1_CONFIG, val) 1093#define bfin_write_MDMA_S1_CONFIG(val) bfin_write16(MDMA_S1_CONFIG, val)
1102#define bfin_read_MDMA0_S1_X_COUNT() bfin_read16(MDMA0_S1_X_COUNT) 1094#define bfin_read_MDMA_S1_X_COUNT() bfin_read16(MDMA_S1_X_COUNT)
1103#define bfin_write_MDMA0_S1_X_COUNT(val) bfin_write16(MDMA0_S1_X_COUNT, val) 1095#define bfin_write_MDMA_S1_X_COUNT(val) bfin_write16(MDMA_S1_X_COUNT, val)
1104#define bfin_read_MDMA0_S1_X_MODIFY() bfin_read16(MDMA0_S1_X_MODIFY) 1096#define bfin_read_MDMA_S1_X_MODIFY() bfin_read16(MDMA_S1_X_MODIFY)
1105#define bfin_write_MDMA0_S1_X_MODIFY(val) bfin_write16(MDMA0_S1_X_MODIFY, val) 1097#define bfin_write_MDMA_S1_X_MODIFY(val) bfin_write16(MDMA_S1_X_MODIFY, val)
1106#define bfin_read_MDMA0_S1_Y_COUNT() bfin_read16(MDMA0_S1_Y_COUNT) 1098#define bfin_read_MDMA_S1_Y_COUNT() bfin_read16(MDMA_S1_Y_COUNT)
1107#define bfin_write_MDMA0_S1_Y_COUNT(val) bfin_write16(MDMA0_S1_Y_COUNT, val) 1099#define bfin_write_MDMA_S1_Y_COUNT(val) bfin_write16(MDMA_S1_Y_COUNT, val)
1108#define bfin_read_MDMA0_S1_Y_MODIFY() bfin_read16(MDMA0_S1_Y_MODIFY) 1100#define bfin_read_MDMA_S1_Y_MODIFY() bfin_read16(MDMA_S1_Y_MODIFY)
1109#define bfin_write_MDMA0_S1_Y_MODIFY(val) bfin_write16(MDMA0_S1_Y_MODIFY, val) 1101#define bfin_write_MDMA_S1_Y_MODIFY(val) bfin_write16(MDMA_S1_Y_MODIFY, val)
1110#define bfin_read_MDMA0_S1_CURR_DESC_PTR() bfin_readPTR(MDMA0_S1_CURR_DESC_PTR) 1102#define bfin_read_MDMA_S1_CURR_DESC_PTR() bfin_readPTR(MDMA_S1_CURR_DESC_PTR)
1111#define bfin_write_MDMA0_S1_CURR_DESC_PTR(val) bfin_writePTR(MDMA0_S1_CURR_DESC_PTR, val) 1103#define bfin_write_MDMA_S1_CURR_DESC_PTR(val) bfin_writePTR(MDMA_S1_CURR_DESC_PTR, val)
1112#define bfin_read_MDMA0_S1_CURR_ADDR() bfin_readPTR(MDMA0_S1_CURR_ADDR) 1104#define bfin_read_MDMA_S1_CURR_ADDR() bfin_readPTR(MDMA_S1_CURR_ADDR)
1113#define bfin_write_MDMA0_S1_CURR_ADDR(val) bfin_writePTR(MDMA0_S1_CURR_ADDR, val) 1105#define bfin_write_MDMA_S1_CURR_ADDR(val) bfin_writePTR(MDMA_S1_CURR_ADDR, val)
1114#define bfin_read_MDMA0_S1_IRQ_STATUS() bfin_read16(MDMA0_S1_IRQ_STATUS) 1106#define bfin_read_MDMA_S1_IRQ_STATUS() bfin_read16(MDMA_S1_IRQ_STATUS)
1115#define bfin_write_MDMA0_S1_IRQ_STATUS(val) bfin_write16(MDMA0_S1_IRQ_STATUS, val) 1107#define bfin_write_MDMA_S1_IRQ_STATUS(val) bfin_write16(MDMA_S1_IRQ_STATUS, val)
1116#define bfin_read_MDMA0_S1_PERIPHERAL_MAP() bfin_read16(MDMA0_S1_PERIPHERAL_MAP) 1108#define bfin_read_MDMA_S1_PERIPHERAL_MAP() bfin_read16(MDMA_S1_PERIPHERAL_MAP)
1117#define bfin_write_MDMA0_S1_PERIPHERAL_MAP(val) bfin_write16(MDMA0_S1_PERIPHERAL_MAP, val) 1109#define bfin_write_MDMA_S1_PERIPHERAL_MAP(val) bfin_write16(MDMA_S1_PERIPHERAL_MAP, val)
1118#define bfin_read_MDMA0_S1_CURR_X_COUNT() bfin_read16(MDMA0_S1_CURR_X_COUNT) 1110#define bfin_read_MDMA_S1_CURR_X_COUNT() bfin_read16(MDMA_S1_CURR_X_COUNT)
1119#define bfin_write_MDMA0_S1_CURR_X_COUNT(val) bfin_write16(MDMA0_S1_CURR_X_COUNT, val) 1111#define bfin_write_MDMA_S1_CURR_X_COUNT(val) bfin_write16(MDMA_S1_CURR_X_COUNT, val)
1120#define bfin_read_MDMA0_S1_CURR_Y_COUNT() bfin_read16(MDMA0_S1_CURR_Y_COUNT) 1112#define bfin_read_MDMA_S1_CURR_Y_COUNT() bfin_read16(MDMA_S1_CURR_Y_COUNT)
1121#define bfin_write_MDMA0_S1_CURR_Y_COUNT(val) bfin_write16(MDMA0_S1_CURR_Y_COUNT, val) 1113#define bfin_write_MDMA_S1_CURR_Y_COUNT(val) bfin_write16(MDMA_S1_CURR_Y_COUNT, val)
1122#define bfin_read_MDMA1_D0_NEXT_DESC_PTR() bfin_readPTR(MDMA1_D0_NEXT_DESC_PTR) 1114#define bfin_read_MDMA_D2_NEXT_DESC_PTR() bfin_readPTR(MDMA_D2_NEXT_DESC_PTR)
1123#define bfin_write_MDMA1_D0_NEXT_DESC_PTR(val) bfin_writePTR(MDMA1_D0_NEXT_DESC_PTR, val) 1115#define bfin_write_MDMA_D2_NEXT_DESC_PTR(val) bfin_writePTR(MDMA_D2_NEXT_DESC_PTR, val)
1124#define bfin_read_MDMA1_D0_START_ADDR() bfin_readPTR(MDMA1_D0_START_ADDR) 1116#define bfin_read_MDMA_D2_START_ADDR() bfin_readPTR(MDMA_D2_START_ADDR)
1125#define bfin_write_MDMA1_D0_START_ADDR(val) bfin_writePTR(MDMA1_D0_START_ADDR, val) 1117#define bfin_write_MDMA_D2_START_ADDR(val) bfin_writePTR(MDMA_D2_START_ADDR, val)
1126#define bfin_read_MDMA1_D0_CONFIG() bfin_read16(MDMA1_D0_CONFIG) 1118#define bfin_read_MDMA_D2_CONFIG() bfin_read16(MDMA_D2_CONFIG)
1127#define bfin_write_MDMA1_D0_CONFIG(val) bfin_write16(MDMA1_D0_CONFIG, val) 1119#define bfin_write_MDMA_D2_CONFIG(val) bfin_write16(MDMA_D2_CONFIG, val)
1128#define bfin_read_MDMA1_D0_X_COUNT() bfin_read16(MDMA1_D0_X_COUNT) 1120#define bfin_read_MDMA_D2_X_COUNT() bfin_read16(MDMA_D2_X_COUNT)
1129#define bfin_write_MDMA1_D0_X_COUNT(val) bfin_write16(MDMA1_D0_X_COUNT, val) 1121#define bfin_write_MDMA_D2_X_COUNT(val) bfin_write16(MDMA_D2_X_COUNT, val)
1130#define bfin_read_MDMA1_D0_X_MODIFY() bfin_read16(MDMA1_D0_X_MODIFY) 1122#define bfin_read_MDMA_D2_X_MODIFY() bfin_read16(MDMA_D2_X_MODIFY)
1131#define bfin_write_MDMA1_D0_X_MODIFY(val) bfin_write16(MDMA1_D0_X_MODIFY, val) 1123#define bfin_write_MDMA_D2_X_MODIFY(val) bfin_write16(MDMA_D2_X_MODIFY, val)
1132#define bfin_read_MDMA1_D0_Y_COUNT() bfin_read16(MDMA1_D0_Y_COUNT) 1124#define bfin_read_MDMA_D2_Y_COUNT() bfin_read16(MDMA_D2_Y_COUNT)
1133#define bfin_write_MDMA1_D0_Y_COUNT(val) bfin_write16(MDMA1_D0_Y_COUNT, val) 1125#define bfin_write_MDMA_D2_Y_COUNT(val) bfin_write16(MDMA_D2_Y_COUNT, val)
1134#define bfin_read_MDMA1_D0_Y_MODIFY() bfin_read16(MDMA1_D0_Y_MODIFY) 1126#define bfin_read_MDMA_D2_Y_MODIFY() bfin_read16(MDMA_D2_Y_MODIFY)
1135#define bfin_write_MDMA1_D0_Y_MODIFY(val) bfin_write16(MDMA1_D0_Y_MODIFY, val) 1127#define bfin_write_MDMA_D2_Y_MODIFY(val) bfin_write16(MDMA_D2_Y_MODIFY, val)
1136#define bfin_read_MDMA1_D0_CURR_DESC_PTR() bfin_readPTR(MDMA1_D0_CURR_DESC_PTR) 1128#define bfin_read_MDMA_D2_CURR_DESC_PTR() bfin_readPTR(MDMA_D2_CURR_DESC_PTR)
1137#define bfin_write_MDMA1_D0_CURR_DESC_PTR(val) bfin_writePTR(MDMA1_D0_CURR_DESC_PTR, val) 1129#define bfin_write_MDMA_D2_CURR_DESC_PTR(val) bfin_writePTR(MDMA_D2_CURR_DESC_PTR, val)
1138#define bfin_read_MDMA1_D0_CURR_ADDR() bfin_readPTR(MDMA1_D0_CURR_ADDR) 1130#define bfin_read_MDMA_D2_CURR_ADDR() bfin_readPTR(MDMA_D2_CURR_ADDR)
1139#define bfin_write_MDMA1_D0_CURR_ADDR(val) bfin_writePTR(MDMA1_D0_CURR_ADDR, val) 1131#define bfin_write_MDMA_D2_CURR_ADDR(val) bfin_writePTR(MDMA_D2_CURR_ADDR, val)
1140#define bfin_read_MDMA1_D0_IRQ_STATUS() bfin_read16(MDMA1_D0_IRQ_STATUS) 1132#define bfin_read_MDMA_D2_IRQ_STATUS() bfin_read16(MDMA_D2_IRQ_STATUS)
1141#define bfin_write_MDMA1_D0_IRQ_STATUS(val) bfin_write16(MDMA1_D0_IRQ_STATUS, val) 1133#define bfin_write_MDMA_D2_IRQ_STATUS(val) bfin_write16(MDMA_D2_IRQ_STATUS, val)
1142#define bfin_read_MDMA1_D0_PERIPHERAL_MAP() bfin_read16(MDMA1_D0_PERIPHERAL_MAP) 1134#define bfin_read_MDMA_D2_PERIPHERAL_MAP() bfin_read16(MDMA_D2_PERIPHERAL_MAP)
1143#define bfin_write_MDMA1_D0_PERIPHERAL_MAP(val) bfin_write16(MDMA1_D0_PERIPHERAL_MAP, val) 1135#define bfin_write_MDMA_D2_PERIPHERAL_MAP(val) bfin_write16(MDMA_D2_PERIPHERAL_MAP, val)
1144#define bfin_read_MDMA1_D0_CURR_X_COUNT() bfin_read16(MDMA1_D0_CURR_X_COUNT) 1136#define bfin_read_MDMA_D2_CURR_X_COUNT() bfin_read16(MDMA_D2_CURR_X_COUNT)
1145#define bfin_write_MDMA1_D0_CURR_X_COUNT(val) bfin_write16(MDMA1_D0_CURR_X_COUNT, val) 1137#define bfin_write_MDMA_D2_CURR_X_COUNT(val) bfin_write16(MDMA_D2_CURR_X_COUNT, val)
1146#define bfin_read_MDMA1_D0_CURR_Y_COUNT() bfin_read16(MDMA1_D0_CURR_Y_COUNT) 1138#define bfin_read_MDMA_D2_CURR_Y_COUNT() bfin_read16(MDMA_D2_CURR_Y_COUNT)
1147#define bfin_write_MDMA1_D0_CURR_Y_COUNT(val) bfin_write16(MDMA1_D0_CURR_Y_COUNT, val) 1139#define bfin_write_MDMA_D2_CURR_Y_COUNT(val) bfin_write16(MDMA_D2_CURR_Y_COUNT, val)
1148#define bfin_read_MDMA1_S0_NEXT_DESC_PTR() bfin_readPTR(MDMA1_S0_NEXT_DESC_PTR) 1140#define bfin_read_MDMA_S2_NEXT_DESC_PTR() bfin_readPTR(MDMA_S2_NEXT_DESC_PTR)
1149#define bfin_write_MDMA1_S0_NEXT_DESC_PTR(val) bfin_writePTR(MDMA1_S0_NEXT_DESC_PTR, val) 1141#define bfin_write_MDMA_S2_NEXT_DESC_PTR(val) bfin_writePTR(MDMA_S2_NEXT_DESC_PTR, val)
1150#define bfin_read_MDMA1_S0_START_ADDR() bfin_readPTR(MDMA1_S0_START_ADDR) 1142#define bfin_read_MDMA_S2_START_ADDR() bfin_readPTR(MDMA_S2_START_ADDR)
1151#define bfin_write_MDMA1_S0_START_ADDR(val) bfin_writePTR(MDMA1_S0_START_ADDR, val) 1143#define bfin_write_MDMA_S2_START_ADDR(val) bfin_writePTR(MDMA_S2_START_ADDR, val)
1152#define bfin_read_MDMA1_S0_CONFIG() bfin_read16(MDMA1_S0_CONFIG) 1144#define bfin_read_MDMA_S2_CONFIG() bfin_read16(MDMA_S2_CONFIG)
1153#define bfin_write_MDMA1_S0_CONFIG(val) bfin_write16(MDMA1_S0_CONFIG, val) 1145#define bfin_write_MDMA_S2_CONFIG(val) bfin_write16(MDMA_S2_CONFIG, val)
1154#define bfin_read_MDMA1_S0_X_COUNT() bfin_read16(MDMA1_S0_X_COUNT) 1146#define bfin_read_MDMA_S2_X_COUNT() bfin_read16(MDMA_S2_X_COUNT)
1155#define bfin_write_MDMA1_S0_X_COUNT(val) bfin_write16(MDMA1_S0_X_COUNT, val) 1147#define bfin_write_MDMA_S2_X_COUNT(val) bfin_write16(MDMA_S2_X_COUNT, val)
1156#define bfin_read_MDMA1_S0_X_MODIFY() bfin_read16(MDMA1_S0_X_MODIFY) 1148#define bfin_read_MDMA_S2_X_MODIFY() bfin_read16(MDMA_S2_X_MODIFY)
1157#define bfin_write_MDMA1_S0_X_MODIFY(val) bfin_write16(MDMA1_S0_X_MODIFY, val) 1149#define bfin_write_MDMA_S2_X_MODIFY(val) bfin_write16(MDMA_S2_X_MODIFY, val)
1158#define bfin_read_MDMA1_S0_Y_COUNT() bfin_read16(MDMA1_S0_Y_COUNT) 1150#define bfin_read_MDMA_S2_Y_COUNT() bfin_read16(MDMA_S2_Y_COUNT)
1159#define bfin_write_MDMA1_S0_Y_COUNT(val) bfin_write16(MDMA1_S0_Y_COUNT, val) 1151#define bfin_write_MDMA_S2_Y_COUNT(val) bfin_write16(MDMA_S2_Y_COUNT, val)
1160#define bfin_read_MDMA1_S0_Y_MODIFY() bfin_read16(MDMA1_S0_Y_MODIFY) 1152#define bfin_read_MDMA_S2_Y_MODIFY() bfin_read16(MDMA_S2_Y_MODIFY)
1161#define bfin_write_MDMA1_S0_Y_MODIFY(val) bfin_write16(MDMA1_S0_Y_MODIFY, val) 1153#define bfin_write_MDMA_S2_Y_MODIFY(val) bfin_write16(MDMA_S2_Y_MODIFY, val)
1162#define bfin_read_MDMA1_S0_CURR_DESC_PTR() bfin_readPTR(MDMA1_S0_CURR_DESC_PTR) 1154#define bfin_read_MDMA_S2_CURR_DESC_PTR() bfin_readPTR(MDMA_S2_CURR_DESC_PTR)
1163#define bfin_write_MDMA1_S0_CURR_DESC_PTR(val) bfin_writePTR(MDMA1_S0_CURR_DESC_PTR, val) 1155#define bfin_write_MDMA_S2_CURR_DESC_PTR(val) bfin_writePTR(MDMA_S2_CURR_DESC_PTR, val)
1164#define bfin_read_MDMA1_S0_CURR_ADDR() bfin_readPTR(MDMA1_S0_CURR_ADDR) 1156#define bfin_read_MDMA_S2_CURR_ADDR() bfin_readPTR(MDMA_S2_CURR_ADDR)
1165#define bfin_write_MDMA1_S0_CURR_ADDR(val) bfin_writePTR(MDMA1_S0_CURR_ADDR, val) 1157#define bfin_write_MDMA_S2_CURR_ADDR(val) bfin_writePTR(MDMA_S2_CURR_ADDR, val)
1166#define bfin_read_MDMA1_S0_IRQ_STATUS() bfin_read16(MDMA1_S0_IRQ_STATUS) 1158#define bfin_read_MDMA_S2_IRQ_STATUS() bfin_read16(MDMA_S2_IRQ_STATUS)
1167#define bfin_write_MDMA1_S0_IRQ_STATUS(val) bfin_write16(MDMA1_S0_IRQ_STATUS, val) 1159#define bfin_write_MDMA_S2_IRQ_STATUS(val) bfin_write16(MDMA_S2_IRQ_STATUS, val)
1168#define bfin_read_MDMA1_S0_PERIPHERAL_MAP() bfin_read16(MDMA1_S0_PERIPHERAL_MAP) 1160#define bfin_read_MDMA_S2_PERIPHERAL_MAP() bfin_read16(MDMA_S2_PERIPHERAL_MAP)
1169#define bfin_write_MDMA1_S0_PERIPHERAL_MAP(val) bfin_write16(MDMA1_S0_PERIPHERAL_MAP, val) 1161#define bfin_write_MDMA_S2_PERIPHERAL_MAP(val) bfin_write16(MDMA_S2_PERIPHERAL_MAP, val)
1170#define bfin_read_MDMA1_S0_CURR_X_COUNT() bfin_read16(MDMA1_S0_CURR_X_COUNT) 1162#define bfin_read_MDMA_S2_CURR_X_COUNT() bfin_read16(MDMA_S2_CURR_X_COUNT)
1171#define bfin_write_MDMA1_S0_CURR_X_COUNT(val) bfin_write16(MDMA1_S0_CURR_X_COUNT, val) 1163#define bfin_write_MDMA_S2_CURR_X_COUNT(val) bfin_write16(MDMA_S2_CURR_X_COUNT, val)
1172#define bfin_read_MDMA1_S0_CURR_Y_COUNT() bfin_read16(MDMA1_S0_CURR_Y_COUNT) 1164#define bfin_read_MDMA_S2_CURR_Y_COUNT() bfin_read16(MDMA_S2_CURR_Y_COUNT)
1173#define bfin_write_MDMA1_S0_CURR_Y_COUNT(val) bfin_write16(MDMA1_S0_CURR_Y_COUNT, val) 1165#define bfin_write_MDMA_S2_CURR_Y_COUNT(val) bfin_write16(MDMA_S2_CURR_Y_COUNT, val)
1174#define bfin_read_MDMA1_D1_NEXT_DESC_PTR() bfin_readPTR(MDMA1_D1_NEXT_DESC_PTR) 1166#define bfin_read_MDMA_D3_NEXT_DESC_PTR() bfin_readPTR(MDMA_D3_NEXT_DESC_PTR)
1175#define bfin_write_MDMA1_D1_NEXT_DESC_PTR(val) bfin_writePTR(MDMA1_D1_NEXT_DESC_PTR, val) 1167#define bfin_write_MDMA_D3_NEXT_DESC_PTR(val) bfin_writePTR(MDMA_D3_NEXT_DESC_PTR, val)
1176#define bfin_read_MDMA1_D1_START_ADDR() bfin_readPTR(MDMA1_D1_START_ADDR) 1168#define bfin_read_MDMA_D3_START_ADDR() bfin_readPTR(MDMA_D3_START_ADDR)
1177#define bfin_write_MDMA1_D1_START_ADDR(val) bfin_writePTR(MDMA1_D1_START_ADDR, val) 1169#define bfin_write_MDMA_D3_START_ADDR(val) bfin_writePTR(MDMA_D3_START_ADDR, val)
1178#define bfin_read_MDMA1_D1_CONFIG() bfin_read16(MDMA1_D1_CONFIG) 1170#define bfin_read_MDMA_D3_CONFIG() bfin_read16(MDMA_D3_CONFIG)
1179#define bfin_write_MDMA1_D1_CONFIG(val) bfin_write16(MDMA1_D1_CONFIG, val) 1171#define bfin_write_MDMA_D3_CONFIG(val) bfin_write16(MDMA_D3_CONFIG, val)
1180#define bfin_read_MDMA1_D1_X_COUNT() bfin_read16(MDMA1_D1_X_COUNT) 1172#define bfin_read_MDMA_D3_X_COUNT() bfin_read16(MDMA_D3_X_COUNT)
1181#define bfin_write_MDMA1_D1_X_COUNT(val) bfin_write16(MDMA1_D1_X_COUNT, val) 1173#define bfin_write_MDMA_D3_X_COUNT(val) bfin_write16(MDMA_D3_X_COUNT, val)
1182#define bfin_read_MDMA1_D1_X_MODIFY() bfin_read16(MDMA1_D1_X_MODIFY) 1174#define bfin_read_MDMA_D3_X_MODIFY() bfin_read16(MDMA_D3_X_MODIFY)
1183#define bfin_write_MDMA1_D1_X_MODIFY(val) bfin_write16(MDMA1_D1_X_MODIFY, val) 1175#define bfin_write_MDMA_D3_X_MODIFY(val) bfin_write16(MDMA_D3_X_MODIFY, val)
1184#define bfin_read_MDMA1_D1_Y_COUNT() bfin_read16(MDMA1_D1_Y_COUNT) 1176#define bfin_read_MDMA_D3_Y_COUNT() bfin_read16(MDMA_D3_Y_COUNT)
1185#define bfin_write_MDMA1_D1_Y_COUNT(val) bfin_write16(MDMA1_D1_Y_COUNT, val) 1177#define bfin_write_MDMA_D3_Y_COUNT(val) bfin_write16(MDMA_D3_Y_COUNT, val)
1186#define bfin_read_MDMA1_D1_Y_MODIFY() bfin_read16(MDMA1_D1_Y_MODIFY) 1178#define bfin_read_MDMA_D3_Y_MODIFY() bfin_read16(MDMA_D3_Y_MODIFY)
1187#define bfin_write_MDMA1_D1_Y_MODIFY(val) bfin_write16(MDMA1_D1_Y_MODIFY, val) 1179#define bfin_write_MDMA_D3_Y_MODIFY(val) bfin_write16(MDMA_D3_Y_MODIFY, val)
1188#define bfin_read_MDMA1_D1_CURR_DESC_PTR() bfin_readPTR(MDMA1_D1_CURR_DESC_PTR) 1180#define bfin_read_MDMA_D3_CURR_DESC_PTR() bfin_readPTR(MDMA_D3_CURR_DESC_PTR)
1189#define bfin_write_MDMA1_D1_CURR_DESC_PTR(val) bfin_writePTR(MDMA1_D1_CURR_DESC_PTR, val) 1181#define bfin_write_MDMA_D3_CURR_DESC_PTR(val) bfin_writePTR(MDMA_D3_CURR_DESC_PTR, val)
1190#define bfin_read_MDMA1_D1_CURR_ADDR() bfin_readPTR(MDMA1_D1_CURR_ADDR) 1182#define bfin_read_MDMA_D3_CURR_ADDR() bfin_readPTR(MDMA_D3_CURR_ADDR)
1191#define bfin_write_MDMA1_D1_CURR_ADDR(val) bfin_writePTR(MDMA1_D1_CURR_ADDR, val) 1183#define bfin_write_MDMA_D3_CURR_ADDR(val) bfin_writePTR(MDMA_D3_CURR_ADDR, val)
1192#define bfin_read_MDMA1_D1_IRQ_STATUS() bfin_read16(MDMA1_D1_IRQ_STATUS) 1184#define bfin_read_MDMA_D3_IRQ_STATUS() bfin_read16(MDMA_D3_IRQ_STATUS)
1193#define bfin_write_MDMA1_D1_IRQ_STATUS(val) bfin_write16(MDMA1_D1_IRQ_STATUS, val) 1185#define bfin_write_MDMA_D3_IRQ_STATUS(val) bfin_write16(MDMA_D3_IRQ_STATUS, val)
1194#define bfin_read_MDMA1_D1_PERIPHERAL_MAP() bfin_read16(MDMA1_D1_PERIPHERAL_MAP) 1186#define bfin_read_MDMA_D3_PERIPHERAL_MAP() bfin_read16(MDMA_D3_PERIPHERAL_MAP)
1195#define bfin_write_MDMA1_D1_PERIPHERAL_MAP(val) bfin_write16(MDMA1_D1_PERIPHERAL_MAP, val) 1187#define bfin_write_MDMA_D3_PERIPHERAL_MAP(val) bfin_write16(MDMA_D3_PERIPHERAL_MAP, val)
1196#define bfin_read_MDMA1_D1_CURR_X_COUNT() bfin_read16(MDMA1_D1_CURR_X_COUNT) 1188#define bfin_read_MDMA_D3_CURR_X_COUNT() bfin_read16(MDMA_D3_CURR_X_COUNT)
1197#define bfin_write_MDMA1_D1_CURR_X_COUNT(val) bfin_write16(MDMA1_D1_CURR_X_COUNT, val) 1189#define bfin_write_MDMA_D3_CURR_X_COUNT(val) bfin_write16(MDMA_D3_CURR_X_COUNT, val)
1198#define bfin_read_MDMA1_D1_CURR_Y_COUNT() bfin_read16(MDMA1_D1_CURR_Y_COUNT) 1190#define bfin_read_MDMA_D3_CURR_Y_COUNT() bfin_read16(MDMA_D3_CURR_Y_COUNT)
1199#define bfin_write_MDMA1_D1_CURR_Y_COUNT(val) bfin_write16(MDMA1_D1_CURR_Y_COUNT, val) 1191#define bfin_write_MDMA_D3_CURR_Y_COUNT(val) bfin_write16(MDMA_D3_CURR_Y_COUNT, val)
1200#define bfin_read_MDMA1_S1_NEXT_DESC_PTR() bfin_readPTR(MDMA1_S1_NEXT_DESC_PTR) 1192#define bfin_read_MDMA_S3_NEXT_DESC_PTR() bfin_readPTR(MDMA_S3_NEXT_DESC_PTR)
1201#define bfin_write_MDMA1_S1_NEXT_DESC_PTR(val) bfin_writePTR(MDMA1_S1_NEXT_DESC_PTR, val) 1193#define bfin_write_MDMA_S3_NEXT_DESC_PTR(val) bfin_writePTR(MDMA_S3_NEXT_DESC_PTR, val)
1202#define bfin_read_MDMA1_S1_START_ADDR() bfin_readPTR(MDMA1_S1_START_ADDR) 1194#define bfin_read_MDMA_S3_START_ADDR() bfin_readPTR(MDMA_S3_START_ADDR)
1203#define bfin_write_MDMA1_S1_START_ADDR(val) bfin_writePTR(MDMA1_S1_START_ADDR, val) 1195#define bfin_write_MDMA_S3_START_ADDR(val) bfin_writePTR(MDMA_S3_START_ADDR, val)
1204#define bfin_read_MDMA1_S1_CONFIG() bfin_read16(MDMA1_S1_CONFIG) 1196#define bfin_read_MDMA_S3_CONFIG() bfin_read16(MDMA_S3_CONFIG)
1205#define bfin_write_MDMA1_S1_CONFIG(val) bfin_write16(MDMA1_S1_CONFIG, val) 1197#define bfin_write_MDMA_S3_CONFIG(val) bfin_write16(MDMA_S3_CONFIG, val)
1206#define bfin_read_MDMA1_S1_X_COUNT() bfin_read16(MDMA1_S1_X_COUNT) 1198#define bfin_read_MDMA_S3_X_COUNT() bfin_read16(MDMA_S3_X_COUNT)
1207#define bfin_write_MDMA1_S1_X_COUNT(val) bfin_write16(MDMA1_S1_X_COUNT, val) 1199#define bfin_write_MDMA_S3_X_COUNT(val) bfin_write16(MDMA_S3_X_COUNT, val)
1208#define bfin_read_MDMA1_S1_X_MODIFY() bfin_read16(MDMA1_S1_X_MODIFY) 1200#define bfin_read_MDMA_S3_X_MODIFY() bfin_read16(MDMA_S3_X_MODIFY)
1209#define bfin_write_MDMA1_S1_X_MODIFY(val) bfin_write16(MDMA1_S1_X_MODIFY, val) 1201#define bfin_write_MDMA_S3_X_MODIFY(val) bfin_write16(MDMA_S3_X_MODIFY, val)
1210#define bfin_read_MDMA1_S1_Y_COUNT() bfin_read16(MDMA1_S1_Y_COUNT) 1202#define bfin_read_MDMA_S3_Y_COUNT() bfin_read16(MDMA_S3_Y_COUNT)
1211#define bfin_write_MDMA1_S1_Y_COUNT(val) bfin_write16(MDMA1_S1_Y_COUNT, val) 1203#define bfin_write_MDMA_S3_Y_COUNT(val) bfin_write16(MDMA_S3_Y_COUNT, val)
1212#define bfin_read_MDMA1_S1_Y_MODIFY() bfin_read16(MDMA1_S1_Y_MODIFY) 1204#define bfin_read_MDMA_S3_Y_MODIFY() bfin_read16(MDMA_S3_Y_MODIFY)
1213#define bfin_write_MDMA1_S1_Y_MODIFY(val) bfin_write16(MDMA1_S1_Y_MODIFY, val) 1205#define bfin_write_MDMA_S3_Y_MODIFY(val) bfin_write16(MDMA_S3_Y_MODIFY, val)
1214#define bfin_read_MDMA1_S1_CURR_DESC_PTR() bfin_readPTR(MDMA1_S1_CURR_DESC_PTR) 1206#define bfin_read_MDMA_S3_CURR_DESC_PTR() bfin_readPTR(MDMA_S3_CURR_DESC_PTR)
1215#define bfin_write_MDMA1_S1_CURR_DESC_PTR(val) bfin_writePTR(MDMA1_S1_CURR_DESC_PTR, val) 1207#define bfin_write_MDMA_S3_CURR_DESC_PTR(val) bfin_writePTR(MDMA_S3_CURR_DESC_PTR, val)
1216#define bfin_read_MDMA1_S1_CURR_ADDR() bfin_readPTR(MDMA1_S1_CURR_ADDR) 1208#define bfin_read_MDMA_S3_CURR_ADDR() bfin_readPTR(MDMA_S3_CURR_ADDR)
1217#define bfin_write_MDMA1_S1_CURR_ADDR(val) bfin_writePTR(MDMA1_S1_CURR_ADDR, val) 1209#define bfin_write_MDMA_S3_CURR_ADDR(val) bfin_writePTR(MDMA_S3_CURR_ADDR, val)
1218#define bfin_read_MDMA1_S1_IRQ_STATUS() bfin_read16(MDMA1_S1_IRQ_STATUS) 1210#define bfin_read_MDMA_S3_IRQ_STATUS() bfin_read16(MDMA_S3_IRQ_STATUS)
1219#define bfin_write_MDMA1_S1_IRQ_STATUS(val) bfin_write16(MDMA1_S1_IRQ_STATUS, val) 1211#define bfin_write_MDMA_S3_IRQ_STATUS(val) bfin_write16(MDMA_S3_IRQ_STATUS, val)
1220#define bfin_read_MDMA1_S1_PERIPHERAL_MAP() bfin_read16(MDMA1_S1_PERIPHERAL_MAP) 1212#define bfin_read_MDMA_S3_PERIPHERAL_MAP() bfin_read16(MDMA_S3_PERIPHERAL_MAP)
1221#define bfin_write_MDMA1_S1_PERIPHERAL_MAP(val) bfin_write16(MDMA1_S1_PERIPHERAL_MAP, val) 1213#define bfin_write_MDMA_S3_PERIPHERAL_MAP(val) bfin_write16(MDMA_S3_PERIPHERAL_MAP, val)
1222#define bfin_read_MDMA1_S1_CURR_X_COUNT() bfin_read16(MDMA1_S1_CURR_X_COUNT) 1214#define bfin_read_MDMA_S3_CURR_X_COUNT() bfin_read16(MDMA_S3_CURR_X_COUNT)
1223#define bfin_write_MDMA1_S1_CURR_X_COUNT(val) bfin_write16(MDMA1_S1_CURR_X_COUNT, val) 1215#define bfin_write_MDMA_S3_CURR_X_COUNT(val) bfin_write16(MDMA_S3_CURR_X_COUNT, val)
1224#define bfin_read_MDMA1_S1_CURR_Y_COUNT() bfin_read16(MDMA1_S1_CURR_Y_COUNT) 1216#define bfin_read_MDMA_S3_CURR_Y_COUNT() bfin_read16(MDMA_S3_CURR_Y_COUNT)
1225#define bfin_write_MDMA1_S1_CURR_Y_COUNT(val) bfin_write16(MDMA1_S1_CURR_Y_COUNT, val) 1217#define bfin_write_MDMA_S3_CURR_Y_COUNT(val) bfin_write16(MDMA_S3_CURR_Y_COUNT, val)
1226
1227#define bfin_read_MDMA_S0_CONFIG() bfin_read_MDMA0_S0_CONFIG()
1228#define bfin_write_MDMA_S0_CONFIG(val) bfin_write_MDMA0_S0_CONFIG(val)
1229#define bfin_read_MDMA_S0_IRQ_STATUS() bfin_read_MDMA0_S0_IRQ_STATUS()
1230#define bfin_write_MDMA_S0_IRQ_STATUS(val) bfin_write_MDMA0_S0_IRQ_STATUS(val)
1231#define bfin_read_MDMA_S0_X_MODIFY() bfin_read_MDMA0_S0_X_MODIFY()
1232#define bfin_write_MDMA_S0_X_MODIFY(val) bfin_write_MDMA0_S0_X_MODIFY(val)
1233#define bfin_read_MDMA_S0_Y_MODIFY() bfin_read_MDMA0_S0_Y_MODIFY()
1234#define bfin_write_MDMA_S0_Y_MODIFY(val) bfin_write_MDMA0_S0_Y_MODIFY(val)
1235#define bfin_read_MDMA_S0_X_COUNT() bfin_read_MDMA0_S0_X_COUNT()
1236#define bfin_write_MDMA_S0_X_COUNT(val) bfin_write_MDMA0_S0_X_COUNT(val)
1237#define bfin_read_MDMA_S0_Y_COUNT() bfin_read_MDMA0_S0_Y_COUNT()
1238#define bfin_write_MDMA_S0_Y_COUNT(val) bfin_write_MDMA0_S0_Y_COUNT(val)
1239#define bfin_read_MDMA_S0_START_ADDR() bfin_read_MDMA0_S0_START_ADDR()
1240#define bfin_write_MDMA_S0_START_ADDR(val) bfin_write_MDMA0_S0_START_ADDR(val)
1241#define bfin_read_MDMA_D0_CONFIG() bfin_read_MDMA0_D0_CONFIG()
1242#define bfin_write_MDMA_D0_CONFIG(val) bfin_write_MDMA0_D0_CONFIG(val)
1243#define bfin_read_MDMA_D0_IRQ_STATUS() bfin_read_MDMA0_D0_IRQ_STATUS()
1244#define bfin_write_MDMA_D0_IRQ_STATUS(val) bfin_write_MDMA0_D0_IRQ_STATUS(val)
1245#define bfin_read_MDMA_D0_X_MODIFY() bfin_read_MDMA0_D0_X_MODIFY()
1246#define bfin_write_MDMA_D0_X_MODIFY(val) bfin_write_MDMA0_D0_X_MODIFY(val)
1247#define bfin_read_MDMA_D0_Y_MODIFY() bfin_read_MDMA0_D0_Y_MODIFY()
1248#define bfin_write_MDMA_D0_Y_MODIFY(val) bfin_write_MDMA0_D0_Y_MODIFY(val)
1249#define bfin_read_MDMA_D0_X_COUNT() bfin_read_MDMA0_D0_X_COUNT()
1250#define bfin_write_MDMA_D0_X_COUNT(val) bfin_write_MDMA0_D0_X_COUNT(val)
1251#define bfin_read_MDMA_D0_Y_COUNT() bfin_read_MDMA0_D0_Y_COUNT()
1252#define bfin_write_MDMA_D0_Y_COUNT(val) bfin_write_MDMA0_D0_Y_COUNT(val)
1253#define bfin_read_MDMA_D0_START_ADDR() bfin_read_MDMA0_D0_START_ADDR()
1254#define bfin_write_MDMA_D0_START_ADDR(val) bfin_write_MDMA0_D0_START_ADDR(val)
1255
1256#define bfin_read_MDMA_S1_CONFIG() bfin_read_MDMA0_S1_CONFIG()
1257#define bfin_write_MDMA_S1_CONFIG(val) bfin_write_MDMA0_S1_CONFIG(val)
1258#define bfin_read_MDMA_S1_IRQ_STATUS() bfin_read_MDMA0_S1_IRQ_STATUS()
1259#define bfin_write_MDMA_S1_IRQ_STATUS(val) bfin_write_MDMA0_S1_IRQ_STATUS(val)
1260#define bfin_read_MDMA_S1_X_MODIFY() bfin_read_MDMA0_S1_X_MODIFY()
1261#define bfin_write_MDMA_S1_X_MODIFY(val) bfin_write_MDMA0_S1_X_MODIFY(val)
1262#define bfin_read_MDMA_S1_Y_MODIFY() bfin_read_MDMA0_S1_Y_MODIFY()
1263#define bfin_write_MDMA_S1_Y_MODIFY(val) bfin_write_MDMA0_S1_Y_MODIFY(val)
1264#define bfin_read_MDMA_S1_X_COUNT() bfin_read_MDMA0_S1_X_COUNT()
1265#define bfin_write_MDMA_S1_X_COUNT(val) bfin_write_MDMA0_S1_X_COUNT(val)
1266#define bfin_read_MDMA_S1_Y_COUNT() bfin_read_MDMA0_S1_Y_COUNT()
1267#define bfin_write_MDMA_S1_Y_COUNT(val) bfin_write_MDMA0_S1_Y_COUNT(val)
1268#define bfin_read_MDMA_S1_START_ADDR() bfin_read_MDMA0_S1_START_ADDR()
1269#define bfin_write_MDMA_S1_START_ADDR(val) bfin_write_MDMA0_S1_START_ADDR(val)
1270#define bfin_read_MDMA_D1_CONFIG() bfin_read_MDMA0_D1_CONFIG()
1271#define bfin_write_MDMA_D1_CONFIG(val) bfin_write_MDMA0_D1_CONFIG(val)
1272#define bfin_read_MDMA_D1_IRQ_STATUS() bfin_read_MDMA0_D1_IRQ_STATUS()
1273#define bfin_write_MDMA_D1_IRQ_STATUS(val) bfin_write_MDMA0_D1_IRQ_STATUS(val)
1274#define bfin_read_MDMA_D1_X_MODIFY() bfin_read_MDMA0_D1_X_MODIFY()
1275#define bfin_write_MDMA_D1_X_MODIFY(val) bfin_write_MDMA0_D1_X_MODIFY(val)
1276#define bfin_read_MDMA_D1_Y_MODIFY() bfin_read_MDMA0_D1_Y_MODIFY()
1277#define bfin_write_MDMA_D1_Y_MODIFY(val) bfin_write_MDMA0_D1_Y_MODIFY(val)
1278#define bfin_read_MDMA_D1_X_COUNT() bfin_read_MDMA0_D1_X_COUNT()
1279#define bfin_write_MDMA_D1_X_COUNT(val) bfin_write_MDMA0_D1_X_COUNT(val)
1280#define bfin_read_MDMA_D1_Y_COUNT() bfin_read_MDMA0_D1_Y_COUNT()
1281#define bfin_write_MDMA_D1_Y_COUNT(val) bfin_write_MDMA0_D1_Y_COUNT(val)
1282#define bfin_read_MDMA_D1_START_ADDR() bfin_read_MDMA0_D1_START_ADDR()
1283#define bfin_write_MDMA_D1_START_ADDR(val) bfin_write_MDMA0_D1_START_ADDR(val)
1284
1285#define bfin_read_PPI_CONTROL() bfin_read16(PPI_CONTROL) 1218#define bfin_read_PPI_CONTROL() bfin_read16(PPI_CONTROL)
1286#define bfin_write_PPI_CONTROL(val) bfin_write16(PPI_CONTROL, val) 1219#define bfin_write_PPI_CONTROL(val) bfin_write16(PPI_CONTROL, val)
1287#define bfin_read_PPI_STATUS() bfin_read16(PPI_STATUS) 1220#define bfin_read_PPI_STATUS() bfin_read16(PPI_STATUS)
@@ -2024,7 +1957,4 @@
2024#define bfin_read_CAN_MB31_ID1() bfin_read16(CAN_MB31_ID1) 1957#define bfin_read_CAN_MB31_ID1() bfin_read16(CAN_MB31_ID1)
2025#define bfin_write_CAN_MB31_ID1(val) bfin_write16(CAN_MB31_ID1, val) 1958#define bfin_write_CAN_MB31_ID1(val) bfin_write16(CAN_MB31_ID1, val)
2026 1959
2027/* These need to be last due to the cdef/linux inter-dependencies */
2028#include <asm/irq.h>
2029
2030#endif 1960#endif
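
With the MMRs renamed at the source (flat MDMA_* numbering, DMAC0/DMAC1 traffic-control registers), the long MDMA_*-onto-MDMA0_* compatibility block is dropped and the header no longer needs the trailing <asm/irq.h> include. Callers use the canonical accessors directly, e.g.:

/* Hypothetical caller using the renamed accessors: program the DMAC1
 * traffic-control period and enable memory-DMA stream 3 (DMAEN is the
 * standard DMA config enable bit). */
static void mdma3_start(unsigned short period)
{
	bfin_write_DMAC1_TC_PER(period);
	bfin_write_MDMA_S3_CONFIG(DMAEN);
}
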
diff --git a/arch/blackfin/mach-bf538/include/mach/cdefBF539.h b/arch/blackfin/mach-bf538/include/mach/cdefBF539.h
index 198c4bbc8e5d..acc15f3aba38 100644
--- a/arch/blackfin/mach-bf538/include/mach/cdefBF539.h
+++ b/arch/blackfin/mach-bf538/include/mach/cdefBF539.h
@@ -1,6 +1,7 @@
1/* DO NOT EDIT THIS FILE 1/*
2 * Automatically generated by generate-cdef-headers.xsl 2 * Copyright 2008-2010 Analog Devices Inc.
3 * DO NOT EDIT THIS FILE 3 *
4 * Licensed under the GPL-2 or later.
4 */ 5 */
5 6
6#ifndef _CDEF_BF539_H 7#ifndef _CDEF_BF539_H
@@ -9,7 +10,6 @@
9/* Include MMRs Common to BF538 */ 10/* Include MMRs Common to BF538 */
10#include "cdefBF538.h" 11#include "cdefBF538.h"
11 12
12
13#define bfin_read_MXVR_CONFIG() bfin_read16(MXVR_CONFIG) 13#define bfin_read_MXVR_CONFIG() bfin_read16(MXVR_CONFIG)
14#define bfin_write_MXVR_CONFIG(val) bfin_write16(MXVR_CONFIG, val) 14#define bfin_write_MXVR_CONFIG(val) bfin_write16(MXVR_CONFIG, val)
15#define bfin_read_MXVR_PLL_CTL_0() bfin_read32(MXVR_PLL_CTL_0) 15#define bfin_read_MXVR_PLL_CTL_0() bfin_read32(MXVR_PLL_CTL_0)
diff --git a/arch/blackfin/mach-bf538/include/mach/defBF538.h b/arch/blackfin/mach-bf538/include/mach/defBF538.h
new file mode 100644
index 000000000000..d27f81d6c4b1
--- /dev/null
+++ b/arch/blackfin/mach-bf538/include/mach/defBF538.h
@@ -0,0 +1,1825 @@
1/*
2 * Copyright 2008-2010 Analog Devices Inc.
3 *
4 * Licensed under the ADI BSD license or the GPL-2 (or later)
5 */
6
7#ifndef _DEF_BF538_H
8#define _DEF_BF538_H
9
10/* Clock/Regulator Control (0xFFC00000 - 0xFFC000FF) */
11#define PLL_CTL 0xFFC00000 /* PLL Control register (16-bit) */
12#define PLL_DIV 0xFFC00004 /* PLL Divide Register (16-bit) */
13#define VR_CTL 0xFFC00008 /* Voltage Regulator Control Register (16-bit) */
14#define PLL_STAT 0xFFC0000C /* PLL Status register (16-bit) */
15#define PLL_LOCKCNT 0xFFC00010 /* PLL Lock Count register (16-bit) */
16#define CHIPID 0xFFC00014 /* Chip ID Register */
17
18/* CHIPID Masks */
19#define CHIPID_VERSION 0xF0000000
20#define CHIPID_FAMILY 0x0FFFF000
21#define CHIPID_MANUFACTURE 0x00000FFE
22
23/* System Interrupt Controller (0xFFC00100 - 0xFFC001FF) */
24#define SWRST 0xFFC00100 /* Software Reset Register (16-bit) */
25#define SYSCR 0xFFC00104 /* System Configuration register */
26#define SIC_RVECT 0xFFC00108
27#define SIC_IMASK0 0xFFC0010C /* Interrupt Mask Register */
28#define SIC_IAR0 0xFFC00110 /* Interrupt Assignment Register 0 */
29#define SIC_IAR1 0xFFC00114 /* Interrupt Assignment Register 1 */
30#define SIC_IAR2 0xFFC00118 /* Interrupt Assignment Register 2 */
31#define SIC_IAR3 0xFFC0011C /* Interrupt Assignment Register 3 */
32#define SIC_ISR0 0xFFC00120 /* Interrupt Status Register */
33#define SIC_IWR0 0xFFC00124 /* Interrupt Wakeup Register */
34#define SIC_IMASK1 0xFFC00128 /* Interrupt Mask Register 1 */
35#define SIC_ISR1 0xFFC0012C /* Interrupt Status Register 1 */
36#define SIC_IWR1 0xFFC00130 /* Interrupt Wakeup Register 1 */
37#define SIC_IAR4 0xFFC00134 /* Interrupt Assignment Register 4 */
38#define SIC_IAR5 0xFFC00138 /* Interrupt Assignment Register 5 */
39#define SIC_IAR6 0xFFC0013C /* Interrupt Assignment Register 6 */
40
41
42/* Watchdog Timer (0xFFC00200 - 0xFFC002FF) */
43#define WDOG_CTL 0xFFC00200 /* Watchdog Control Register */
44#define WDOG_CNT 0xFFC00204 /* Watchdog Count Register */
45#define WDOG_STAT 0xFFC00208 /* Watchdog Status Register */
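
/*
 * Illustrative: on Blackfin watchdogs a write of any value to WDOG_STAT
 * reloads the down-counter from WDOG_CNT, so "kicking" the dog is a
 * single store.  A minimal sketch, assuming 32-bit count registers:
 */
static inline void bf538_wdog_ping(void)
{
	bfin_write32(WDOG_STAT, 0);	/* value ignored; the write reloads */
}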
46
47
48/* Real Time Clock (0xFFC00300 - 0xFFC003FF) */
49#define RTC_STAT 0xFFC00300 /* RTC Status Register */
50#define RTC_ICTL 0xFFC00304 /* RTC Interrupt Control Register */
51#define RTC_ISTAT 0xFFC00308 /* RTC Interrupt Status Register */
52#define RTC_SWCNT 0xFFC0030C /* RTC Stopwatch Count Register */
53#define RTC_ALARM 0xFFC00310 /* RTC Alarm Time Register */
54#define RTC_FAST 0xFFC00314 /* RTC Prescaler Enable Register */
55#define RTC_PREN 0xFFC00314 /* RTC Prescaler Enable Register (alternate macro) */
56
57
58/* UART0 Controller (0xFFC00400 - 0xFFC004FF) */
59#define UART0_THR 0xFFC00400 /* Transmit Holding register */
60#define UART0_RBR 0xFFC00400 /* Receive Buffer register */
61#define UART0_DLL 0xFFC00400 /* Divisor Latch (Low-Byte) */
62#define UART0_IER 0xFFC00404 /* Interrupt Enable Register */
63#define UART0_DLH 0xFFC00404 /* Divisor Latch (High-Byte) */
64#define UART0_IIR 0xFFC00408 /* Interrupt Identification Register */
65#define UART0_LCR 0xFFC0040C /* Line Control Register */
66#define UART0_MCR 0xFFC00410 /* Modem Control Register */
67#define UART0_LSR 0xFFC00414 /* Line Status Register */
68#define UART0_SCR 0xFFC0041C /* SCR Scratch Register */
69#define UART0_GCTL 0xFFC00424 /* Global Control Register */
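
/*
 * Note that UART0_THR, UART0_RBR and UART0_DLL share address 0xFFC00400:
 * which register a 16-bit access reaches depends on the access direction
 * and on the divisor-latch access bit in UART0_LCR, 16550-style.  Hedged
 * sketch, assuming DLAB is bit 7 of LCR:
 */
static inline void uart0_set_divisor(unsigned short divisor)
{
	unsigned short lcr = bfin_read16(UART0_LCR);

	bfin_write16(UART0_LCR, lcr | 0x80);		/* DLAB=1: DLL/DLH visible */
	bfin_write16(UART0_DLL, divisor & 0xff);
	bfin_write16(UART0_DLH, divisor >> 8);
	bfin_write16(UART0_LCR, lcr);			/* DLAB=0: THR/RBR again */
}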
70
71
72/* SPI0 Controller (0xFFC00500 - 0xFFC005FF) */
73
74#define SPI0_CTL 0xFFC00500 /* SPI0 Control Register */
75#define SPI0_FLG 0xFFC00504 /* SPI0 Flag register */
76#define SPI0_STAT 0xFFC00508 /* SPI0 Status register */
77#define SPI0_TDBR 0xFFC0050C /* SPI0 Transmit Data Buffer Register */
78#define SPI0_RDBR 0xFFC00510 /* SPI0 Receive Data Buffer Register */
79#define SPI0_BAUD 0xFFC00514 /* SPI0 Baud rate Register */
80#define SPI0_SHADOW 0xFFC00518 /* SPI0_RDBR Shadow Register */
81#define SPI0_REGBASE SPI0_CTL
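
/*
 * Hedged sketch of a polled SPI0 word exchange.  RXS below is an assumed
 * bit position in SPI0_STAT (receive buffer full); take it from the
 * companion bit definitions rather than from this value:
 */
#define SPI_STAT_RXS	0x0020	/* assumption: RX data buffer status bit */

static inline unsigned short spi0_xfer(unsigned short out)
{
	bfin_write16(SPI0_TDBR, out);			/* queue outgoing word */
	while (!(bfin_read16(SPI0_STAT) & SPI_STAT_RXS))
		;					/* wait for the reply */
	return bfin_read16(SPI0_RDBR);			/* reading RDBR clears RXS */
}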
82
83
84/* TIMER 0, 1, 2 Registers (0xFFC00600 - 0xFFC006FF) */
85#define TIMER0_CONFIG 0xFFC00600 /* Timer 0 Configuration Register */
86#define TIMER0_COUNTER 0xFFC00604 /* Timer 0 Counter Register */
87#define TIMER0_PERIOD 0xFFC00608 /* Timer 0 Period Register */
88#define TIMER0_WIDTH 0xFFC0060C /* Timer 0 Width Register */
89
90#define TIMER1_CONFIG 0xFFC00610 /* Timer 1 Configuration Register */
91#define TIMER1_COUNTER 0xFFC00614 /* Timer 1 Counter Register */
92#define TIMER1_PERIOD 0xFFC00618 /* Timer 1 Period Register */
93#define TIMER1_WIDTH 0xFFC0061C /* Timer 1 Width Register */
94
95#define TIMER2_CONFIG 0xFFC00620 /* Timer 2 Configuration Register */
96#define TIMER2_COUNTER 0xFFC00624 /* Timer 2 Counter Register */
97#define TIMER2_PERIOD 0xFFC00628 /* Timer 2 Period Register */
98#define TIMER2_WIDTH 0xFFC0062C /* Timer 2 Width Register */
99
100#define TIMER_ENABLE 0xFFC00640 /* Timer Enable Register */
101#define TIMER_DISABLE 0xFFC00644 /* Timer Disable Register */
102#define TIMER_STATUS 0xFFC00648 /* Timer Status Register */
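
/*
 * Illustrative: TIMER_ENABLE and TIMER_DISABLE act on write-1 bits, one
 * per timer, so timers start and stop without read-modify-write races.
 * Assumes bit N maps to TIMERN:
 */
static inline void bf538_timer_start(unsigned int t)	/* t = 0, 1 or 2 */
{
	bfin_write16(TIMER_ENABLE, 1 << t);
}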
103
104
105/* Programmable Flags (0xFFC00700 - 0xFFC007FF) */
106#define FIO_FLAG_D 0xFFC00700 /* Flag Mask to directly specify state of pins */
107#define FIO_FLAG_C 0xFFC00704 /* Peripheral Interrupt Flag Register (clear) */
108#define FIO_FLAG_S 0xFFC00708 /* Peripheral Interrupt Flag Register (set) */
109#define FIO_FLAG_T 0xFFC0070C /* Flag Mask to directly toggle state of pins */
110#define FIO_MASKA_D 0xFFC00710 /* Flag Mask Interrupt A Register (set directly) */
111#define FIO_MASKA_C 0xFFC00714 /* Flag Mask Interrupt A Register (clear) */
112#define FIO_MASKA_S 0xFFC00718 /* Flag Mask Interrupt A Register (set) */
113#define FIO_MASKA_T 0xFFC0071C /* Flag Mask Interrupt A Register (toggle) */
114#define FIO_MASKB_D 0xFFC00720 /* Flag Mask Interrupt B Register (set directly) */
115#define FIO_MASKB_C 0xFFC00724 /* Flag Mask Interrupt B Register (clear) */
116#define FIO_MASKB_S 0xFFC00728 /* Flag Mask Interrupt B Register (set) */
117#define FIO_MASKB_T 0xFFC0072C /* Flag Mask Interrupt B Register (toggle) */
118#define FIO_DIR 0xFFC00730 /* Peripheral Flag Direction Register */
119#define FIO_POLAR 0xFFC00734 /* Flag Source Polarity Register */
120#define FIO_EDGE 0xFFC00738 /* Flag Source Sensitivity Register */
121#define FIO_BOTH 0xFFC0073C /* Flag Set on BOTH Edges Register */
122#define FIO_INEN 0xFFC00740 /* Flag Input Enable Register */
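
/*
 * Illustrative: the _S/_C/_T variants set, clear or toggle individual
 * flag pins with a single write-1 store instead of a read-modify-write
 * on FIO_FLAG_D.  Hypothetical helper:
 */
static inline void fio_flag_set(unsigned int pin)
{
	bfin_write16(FIO_FLAG_S, 1 << pin);	/* only the written 1-bits act */
}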
123
124
125/* SPORT0 Controller (0xFFC00800 - 0xFFC008FF) */
126#define SPORT0_TCR1 0xFFC00800 /* SPORT0 Transmit Configuration 1 Register */
127#define SPORT0_TCR2 0xFFC00804 /* SPORT0 Transmit Configuration 2 Register */
128#define SPORT0_TCLKDIV 0xFFC00808 /* SPORT0 Transmit Clock Divider */
129#define SPORT0_TFSDIV 0xFFC0080C /* SPORT0 Transmit Frame Sync Divider */
130#define SPORT0_TX 0xFFC00810 /* SPORT0 TX Data Register */
131#define SPORT0_RX 0xFFC00818 /* SPORT0 RX Data Register */
132#define SPORT0_RCR1 0xFFC00820 /* SPORT0 Receive Configuration 1 Register */
133#define SPORT0_RCR2 0xFFC00824 /* SPORT0 Receive Configuration 2 Register */
134#define SPORT0_RCLKDIV 0xFFC00828 /* SPORT0 Receive Clock Divider */
135#define SPORT0_RFSDIV 0xFFC0082C /* SPORT0 Receive Frame Sync Divider */
136#define SPORT0_STAT 0xFFC00830 /* SPORT0 Status Register */
137#define SPORT0_CHNL 0xFFC00834 /* SPORT0 Current Channel Register */
138#define SPORT0_MCMC1 0xFFC00838 /* SPORT0 Multi-Channel Configuration Register 1 */
139#define SPORT0_MCMC2 0xFFC0083C /* SPORT0 Multi-Channel Configuration Register 2 */
140#define SPORT0_MTCS0 0xFFC00840 /* SPORT0 Multi-Channel Transmit Select Register 0 */
141#define SPORT0_MTCS1 0xFFC00844 /* SPORT0 Multi-Channel Transmit Select Register 1 */
142#define SPORT0_MTCS2 0xFFC00848 /* SPORT0 Multi-Channel Transmit Select Register 2 */
143#define SPORT0_MTCS3 0xFFC0084C /* SPORT0 Multi-Channel Transmit Select Register 3 */
144#define SPORT0_MRCS0 0xFFC00850 /* SPORT0 Multi-Channel Receive Select Register 0 */
145#define SPORT0_MRCS1 0xFFC00854 /* SPORT0 Multi-Channel Receive Select Register 1 */
146#define SPORT0_MRCS2 0xFFC00858 /* SPORT0 Multi-Channel Receive Select Register 2 */
147#define SPORT0_MRCS3 0xFFC0085C /* SPORT0 Multi-Channel Receive Select Register 3 */
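
/*
 * Hedged sketch: a blocking SPORT0 transmit.  TXF (transmit FIFO full)
 * is an assumed SPORT0_STAT bit; use the real bit definitions in
 * preference to the value below:
 */
#define SPORT_STAT_TXF	0x0008	/* assumption: TX FIFO full flag */

static inline void sport0_tx(unsigned int word)
{
	while (bfin_read16(SPORT0_STAT) & SPORT_STAT_TXF)
		;				/* wait for FIFO space */
	bfin_write32(SPORT0_TX, word);		/* cdef treats SPORT0_TX as 32-bit */
}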
148
149
150/* SPORT1 Controller (0xFFC00900 - 0xFFC009FF) */
151#define SPORT1_TCR1 0xFFC00900 /* SPORT1 Transmit Configuration 1 Register */
152#define SPORT1_TCR2 0xFFC00904 /* SPORT1 Transmit Configuration 2 Register */
153#define SPORT1_TCLKDIV 0xFFC00908 /* SPORT1 Transmit Clock Divider */
154#define SPORT1_TFSDIV 0xFFC0090C /* SPORT1 Transmit Frame Sync Divider */
155#define SPORT1_TX 0xFFC00910 /* SPORT1 TX Data Register */
156#define SPORT1_RX 0xFFC00918 /* SPORT1 RX Data Register */
157#define SPORT1_RCR1 0xFFC00920 /* SPORT1 Receive Configuration 1 Register */
158#define SPORT1_RCR2 0xFFC00924 /* SPORT1 Receive Configuration 2 Register */
159#define SPORT1_RCLKDIV 0xFFC00928 /* SPORT1 Receive Clock Divider */
160#define SPORT1_RFSDIV 0xFFC0092C /* SPORT1 Receive Frame Sync Divider */
161#define SPORT1_STAT 0xFFC00930 /* SPORT1 Status Register */
162#define SPORT1_CHNL 0xFFC00934 /* SPORT1 Current Channel Register */
163#define SPORT1_MCMC1 0xFFC00938 /* SPORT1 Multi-Channel Configuration Register 1 */
164#define SPORT1_MCMC2 0xFFC0093C /* SPORT1 Multi-Channel Configuration Register 2 */
165#define SPORT1_MTCS0 0xFFC00940 /* SPORT1 Multi-Channel Transmit Select Register 0 */
166#define SPORT1_MTCS1 0xFFC00944 /* SPORT1 Multi-Channel Transmit Select Register 1 */
167#define SPORT1_MTCS2 0xFFC00948 /* SPORT1 Multi-Channel Transmit Select Register 2 */
168#define SPORT1_MTCS3 0xFFC0094C /* SPORT1 Multi-Channel Transmit Select Register 3 */
169#define SPORT1_MRCS0 0xFFC00950 /* SPORT1 Multi-Channel Receive Select Register 0 */
170#define SPORT1_MRCS1 0xFFC00954 /* SPORT1 Multi-Channel Receive Select Register 1 */
171#define SPORT1_MRCS2 0xFFC00958 /* SPORT1 Multi-Channel Receive Select Register 2 */
172#define SPORT1_MRCS3 0xFFC0095C /* SPORT1 Multi-Channel Receive Select Register 3 */
173
174
175/* External Bus Interface Unit (0xFFC00A00 - 0xFFC00AFF) */
176/* Asynchronous Memory Controller */
177#define EBIU_AMGCTL 0xFFC00A00 /* Asynchronous Memory Global Control Register */
178#define EBIU_AMBCTL0 0xFFC00A04 /* Asynchronous Memory Bank Control Register 0 */
179#define EBIU_AMBCTL1 0xFFC00A08 /* Asynchronous Memory Bank Control Register 1 */
180
181/* SDRAM Controller */
182#define EBIU_SDGCTL 0xFFC00A10 /* SDRAM Global Control Register */
183#define EBIU_SDBCTL 0xFFC00A14 /* SDRAM Bank Control Register */
184#define EBIU_SDRRC 0xFFC00A18 /* SDRAM Refresh Rate Control Register */
185#define EBIU_SDSTAT 0xFFC00A1C /* SDRAM Status Register */
186
187
188
189/* DMA Controller 0 Traffic Control Registers (0xFFC00B00 - 0xFFC00BFF) */
190
191#define DMAC0_TC_PER 0xFFC00B0C /* DMA Controller 0 Traffic Control Periods Register */
192#define DMAC0_TC_CNT 0xFFC00B10 /* DMA Controller 0 Traffic Control Current Counts Register */
193
194
195
196/* DMA Controller 0 (0xFFC00C00 - 0xFFC00FFF) */
197
198#define DMA0_NEXT_DESC_PTR 0xFFC00C00 /* DMA Channel 0 Next Descriptor Pointer Register */
199#define DMA0_START_ADDR 0xFFC00C04 /* DMA Channel 0 Start Address Register */
200#define DMA0_CONFIG 0xFFC00C08 /* DMA Channel 0 Configuration Register */
201#define DMA0_X_COUNT 0xFFC00C10 /* DMA Channel 0 X Count Register */
202#define DMA0_X_MODIFY 0xFFC00C14 /* DMA Channel 0 X Modify Register */
203#define DMA0_Y_COUNT 0xFFC00C18 /* DMA Channel 0 Y Count Register */
204#define DMA0_Y_MODIFY 0xFFC00C1C /* DMA Channel 0 Y Modify Register */
205#define DMA0_CURR_DESC_PTR 0xFFC00C20 /* DMA Channel 0 Current Descriptor Pointer Register */
206#define DMA0_CURR_ADDR 0xFFC00C24 /* DMA Channel 0 Current Address Register */
207#define DMA0_IRQ_STATUS 0xFFC00C28 /* DMA Channel 0 Interrupt/Status Register */
208#define DMA0_PERIPHERAL_MAP 0xFFC00C2C /* DMA Channel 0 Peripheral Map Register */
209#define DMA0_CURR_X_COUNT 0xFFC00C30 /* DMA Channel 0 Current X Count Register */
210#define DMA0_CURR_Y_COUNT 0xFFC00C38 /* DMA Channel 0 Current Y Count Register */
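
/*
 * Hedged sketch: a simple register-mode (non-descriptor) 1-D receive on
 * channel 0.  The config bits below are assumptions taken from the usual
 * Blackfin DMA bit definitions, not from this header:
 */
#define DMA_CFG_DMAEN		0x0001	/* assumption: channel enable */
#define DMA_CFG_WNR		0x0002	/* assumption: write (to memory) */
#define DMA_CFG_WDSIZE_16	0x0004	/* assumption: 16-bit element size */

static void dma0_start_rx(void *buf, unsigned short nwords)
{
	bfin_write32(DMA0_START_ADDR, (unsigned long)buf);
	bfin_write16(DMA0_X_COUNT, nwords);		/* elements to move */
	bfin_write16(DMA0_X_MODIFY, 2);			/* 16-bit stride */
	bfin_write16(DMA0_CONFIG,
		     DMA_CFG_DMAEN | DMA_CFG_WNR | DMA_CFG_WDSIZE_16);
}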
211
212#define DMA1_NEXT_DESC_PTR 0xFFC00C40 /* DMA Channel 1 Next Descriptor Pointer Register */
213#define DMA1_START_ADDR 0xFFC00C44 /* DMA Channel 1 Start Address Register */
214#define DMA1_CONFIG 0xFFC00C48 /* DMA Channel 1 Configuration Register */
215#define DMA1_X_COUNT 0xFFC00C50 /* DMA Channel 1 X Count Register */
216#define DMA1_X_MODIFY 0xFFC00C54 /* DMA Channel 1 X Modify Register */
217#define DMA1_Y_COUNT 0xFFC00C58 /* DMA Channel 1 Y Count Register */
218#define DMA1_Y_MODIFY 0xFFC00C5C /* DMA Channel 1 Y Modify Register */
219#define DMA1_CURR_DESC_PTR 0xFFC00C60 /* DMA Channel 1 Current Descriptor Pointer Register */
220#define DMA1_CURR_ADDR 0xFFC00C64 /* DMA Channel 1 Current Address Register */
221#define DMA1_IRQ_STATUS 0xFFC00C68 /* DMA Channel 1 Interrupt/Status Register */
222#define DMA1_PERIPHERAL_MAP 0xFFC00C6C /* DMA Channel 1 Peripheral Map Register */
223#define DMA1_CURR_X_COUNT 0xFFC00C70 /* DMA Channel 1 Current X Count Register */
224#define DMA1_CURR_Y_COUNT 0xFFC00C78 /* DMA Channel 1 Current Y Count Register */
225
226#define DMA2_NEXT_DESC_PTR 0xFFC00C80 /* DMA Channel 2 Next Descriptor Pointer Register */
227#define DMA2_START_ADDR 0xFFC00C84 /* DMA Channel 2 Start Address Register */
228#define DMA2_CONFIG 0xFFC00C88 /* DMA Channel 2 Configuration Register */
229#define DMA2_X_COUNT 0xFFC00C90 /* DMA Channel 2 X Count Register */
230#define DMA2_X_MODIFY 0xFFC00C94 /* DMA Channel 2 X Modify Register */
231#define DMA2_Y_COUNT 0xFFC00C98 /* DMA Channel 2 Y Count Register */
232#define DMA2_Y_MODIFY 0xFFC00C9C /* DMA Channel 2 Y Modify Register */
233#define DMA2_CURR_DESC_PTR 0xFFC00CA0 /* DMA Channel 2 Current Descriptor Pointer Register */
234#define DMA2_CURR_ADDR 0xFFC00CA4 /* DMA Channel 2 Current Address Register */
235#define DMA2_IRQ_STATUS 0xFFC00CA8 /* DMA Channel 2 Interrupt/Status Register */
236#define DMA2_PERIPHERAL_MAP 0xFFC00CAC /* DMA Channel 2 Peripheral Map Register */
237#define DMA2_CURR_X_COUNT 0xFFC00CB0 /* DMA Channel 2 Current X Count Register */
238#define DMA2_CURR_Y_COUNT 0xFFC00CB8 /* DMA Channel 2 Current Y Count Register */
239
240#define DMA3_NEXT_DESC_PTR 0xFFC00CC0 /* DMA Channel 3 Next Descriptor Pointer Register */
241#define DMA3_START_ADDR 0xFFC00CC4 /* DMA Channel 3 Start Address Register */
242#define DMA3_CONFIG 0xFFC00CC8 /* DMA Channel 3 Configuration Register */
243#define DMA3_X_COUNT 0xFFC00CD0 /* DMA Channel 3 X Count Register */
244#define DMA3_X_MODIFY 0xFFC00CD4 /* DMA Channel 3 X Modify Register */
245#define DMA3_Y_COUNT 0xFFC00CD8 /* DMA Channel 3 Y Count Register */
246#define DMA3_Y_MODIFY 0xFFC00CDC /* DMA Channel 3 Y Modify Register */
247#define DMA3_CURR_DESC_PTR 0xFFC00CE0 /* DMA Channel 3 Current Descriptor Pointer Register */
248#define DMA3_CURR_ADDR 0xFFC00CE4 /* DMA Channel 3 Current Address Register */
249#define DMA3_IRQ_STATUS 0xFFC00CE8 /* DMA Channel 3 Interrupt/Status Register */
250#define DMA3_PERIPHERAL_MAP 0xFFC00CEC /* DMA Channel 3 Peripheral Map Register */
251#define DMA3_CURR_X_COUNT 0xFFC00CF0 /* DMA Channel 3 Current X Count Register */
252#define DMA3_CURR_Y_COUNT 0xFFC00CF8 /* DMA Channel 3 Current Y Count Register */
253
254#define DMA4_NEXT_DESC_PTR 0xFFC00D00 /* DMA Channel 4 Next Descriptor Pointer Register */
255#define DMA4_START_ADDR 0xFFC00D04 /* DMA Channel 4 Start Address Register */
256#define DMA4_CONFIG 0xFFC00D08 /* DMA Channel 4 Configuration Register */
257#define DMA4_X_COUNT 0xFFC00D10 /* DMA Channel 4 X Count Register */
258#define DMA4_X_MODIFY 0xFFC00D14 /* DMA Channel 4 X Modify Register */
259#define DMA4_Y_COUNT 0xFFC00D18 /* DMA Channel 4 Y Count Register */
260#define DMA4_Y_MODIFY 0xFFC00D1C /* DMA Channel 4 Y Modify Register */
261#define DMA4_CURR_DESC_PTR 0xFFC00D20 /* DMA Channel 4 Current Descriptor Pointer Register */
262#define DMA4_CURR_ADDR 0xFFC00D24 /* DMA Channel 4 Current Address Register */
263#define DMA4_IRQ_STATUS 0xFFC00D28 /* DMA Channel 4 Interrupt/Status Register */
264#define DMA4_PERIPHERAL_MAP 0xFFC00D2C /* DMA Channel 4 Peripheral Map Register */
265#define DMA4_CURR_X_COUNT 0xFFC00D30 /* DMA Channel 4 Current X Count Register */
266#define DMA4_CURR_Y_COUNT 0xFFC00D38 /* DMA Channel 4 Current Y Count Register */
267
268#define DMA5_NEXT_DESC_PTR 0xFFC00D40 /* DMA Channel 5 Next Descriptor Pointer Register */
269#define DMA5_START_ADDR 0xFFC00D44 /* DMA Channel 5 Start Address Register */
270#define DMA5_CONFIG 0xFFC00D48 /* DMA Channel 5 Configuration Register */
271#define DMA5_X_COUNT 0xFFC00D50 /* DMA Channel 5 X Count Register */
272#define DMA5_X_MODIFY 0xFFC00D54 /* DMA Channel 5 X Modify Register */
273#define DMA5_Y_COUNT 0xFFC00D58 /* DMA Channel 5 Y Count Register */
274#define DMA5_Y_MODIFY 0xFFC00D5C /* DMA Channel 5 Y Modify Register */
275#define DMA5_CURR_DESC_PTR 0xFFC00D60 /* DMA Channel 5 Current Descriptor Pointer Register */
276#define DMA5_CURR_ADDR 0xFFC00D64 /* DMA Channel 5 Current Address Register */
277#define DMA5_IRQ_STATUS 0xFFC00D68 /* DMA Channel 5 Interrupt/Status Register */
278#define DMA5_PERIPHERAL_MAP 0xFFC00D6C /* DMA Channel 5 Peripheral Map Register */
279#define DMA5_CURR_X_COUNT 0xFFC00D70 /* DMA Channel 5 Current X Count Register */
280#define DMA5_CURR_Y_COUNT 0xFFC00D78 /* DMA Channel 5 Current Y Count Register */
281
282#define DMA6_NEXT_DESC_PTR 0xFFC00D80 /* DMA Channel 6 Next Descriptor Pointer Register */
283#define DMA6_START_ADDR 0xFFC00D84 /* DMA Channel 6 Start Address Register */
284#define DMA6_CONFIG 0xFFC00D88 /* DMA Channel 6 Configuration Register */
285#define DMA6_X_COUNT 0xFFC00D90 /* DMA Channel 6 X Count Register */
286#define DMA6_X_MODIFY 0xFFC00D94 /* DMA Channel 6 X Modify Register */
287#define DMA6_Y_COUNT 0xFFC00D98 /* DMA Channel 6 Y Count Register */
288#define DMA6_Y_MODIFY 0xFFC00D9C /* DMA Channel 6 Y Modify Register */
289#define DMA6_CURR_DESC_PTR 0xFFC00DA0 /* DMA Channel 6 Current Descriptor Pointer Register */
290#define DMA6_CURR_ADDR 0xFFC00DA4 /* DMA Channel 6 Current Address Register */
291#define DMA6_IRQ_STATUS 0xFFC00DA8 /* DMA Channel 6 Interrupt/Status Register */
292#define DMA6_PERIPHERAL_MAP 0xFFC00DAC /* DMA Channel 6 Peripheral Map Register */
293#define DMA6_CURR_X_COUNT 0xFFC00DB0 /* DMA Channel 6 Current X Count Register */
294#define DMA6_CURR_Y_COUNT 0xFFC00DB8 /* DMA Channel 6 Current Y Count Register */
295
296#define DMA7_NEXT_DESC_PTR 0xFFC00DC0 /* DMA Channel 7 Next Descriptor Pointer Register */
297#define DMA7_START_ADDR 0xFFC00DC4 /* DMA Channel 7 Start Address Register */
298#define DMA7_CONFIG 0xFFC00DC8 /* DMA Channel 7 Configuration Register */
299#define DMA7_X_COUNT 0xFFC00DD0 /* DMA Channel 7 X Count Register */
300#define DMA7_X_MODIFY 0xFFC00DD4 /* DMA Channel 7 X Modify Register */
301#define DMA7_Y_COUNT 0xFFC00DD8 /* DMA Channel 7 Y Count Register */
302#define DMA7_Y_MODIFY 0xFFC00DDC /* DMA Channel 7 Y Modify Register */
303#define DMA7_CURR_DESC_PTR 0xFFC00DE0 /* DMA Channel 7 Current Descriptor Pointer Register */
304#define DMA7_CURR_ADDR 0xFFC00DE4 /* DMA Channel 7 Current Address Register */
305#define DMA7_IRQ_STATUS 0xFFC00DE8 /* DMA Channel 7 Interrupt/Status Register */
306#define DMA7_PERIPHERAL_MAP 0xFFC00DEC /* DMA Channel 7 Peripheral Map Register */
307#define DMA7_CURR_X_COUNT 0xFFC00DF0 /* DMA Channel 7 Current X Count Register */
308#define DMA7_CURR_Y_COUNT 0xFFC00DF8 /* DMA Channel 7 Current Y Count Register */
309
310#define MDMA_D0_NEXT_DESC_PTR 0xFFC00E00 /* MemDMA0 Stream 0 Destination Next Descriptor Pointer Register */
311#define MDMA_D0_START_ADDR 0xFFC00E04 /* MemDMA0 Stream 0 Destination Start Address Register */
312#define MDMA_D0_CONFIG 0xFFC00E08 /* MemDMA0 Stream 0 Destination Configuration Register */
313#define MDMA_D0_X_COUNT 0xFFC00E10 /* MemDMA0 Stream 0 Destination X Count Register */
314#define MDMA_D0_X_MODIFY 0xFFC00E14 /* MemDMA0 Stream 0 Destination X Modify Register */
315#define MDMA_D0_Y_COUNT 0xFFC00E18 /* MemDMA0 Stream 0 Destination Y Count Register */
316#define MDMA_D0_Y_MODIFY 0xFFC00E1C /* MemDMA0 Stream 0 Destination Y Modify Register */
317#define MDMA_D0_CURR_DESC_PTR 0xFFC00E20 /* MemDMA0 Stream 0 Destination Current Descriptor Pointer Register */
318#define MDMA_D0_CURR_ADDR 0xFFC00E24 /* MemDMA0 Stream 0 Destination Current Address Register */
319#define MDMA_D0_IRQ_STATUS 0xFFC00E28 /* MemDMA0 Stream 0 Destination Interrupt/Status Register */
320#define MDMA_D0_PERIPHERAL_MAP 0xFFC00E2C /* MemDMA0 Stream 0 Destination Peripheral Map Register */
321#define MDMA_D0_CURR_X_COUNT 0xFFC00E30 /* MemDMA0 Stream 0 Destination Current X Count Register */
322#define MDMA_D0_CURR_Y_COUNT 0xFFC00E38 /* MemDMA0 Stream 0 Destination Current Y Count Register */
323
324#define MDMA_S0_NEXT_DESC_PTR 0xFFC00E40 /* MemDMA0 Stream 0 Source Next Descriptor Pointer Register */
325#define MDMA_S0_START_ADDR 0xFFC00E44 /* MemDMA0 Stream 0 Source Start Address Register */
326#define MDMA_S0_CONFIG 0xFFC00E48 /* MemDMA0 Stream 0 Source Configuration Register */
327#define MDMA_S0_X_COUNT 0xFFC00E50 /* MemDMA0 Stream 0 Source X Count Register */
328#define MDMA_S0_X_MODIFY 0xFFC00E54 /* MemDMA0 Stream 0 Source X Modify Register */
329#define MDMA_S0_Y_COUNT 0xFFC00E58 /* MemDMA0 Stream 0 Source Y Count Register */
330#define MDMA_S0_Y_MODIFY 0xFFC00E5C /* MemDMA0 Stream 0 Source Y Modify Register */
331#define MDMA_S0_CURR_DESC_PTR 0xFFC00E60 /* MemDMA0 Stream 0 Source Current Descriptor Pointer Register */
332#define MDMA_S0_CURR_ADDR 0xFFC00E64 /* MemDMA0 Stream 0 Source Current Address Register */
333#define MDMA_S0_IRQ_STATUS 0xFFC00E68 /* MemDMA0 Stream 0 Source Interrupt/Status Register */
334#define MDMA_S0_PERIPHERAL_MAP 0xFFC00E6C /* MemDMA0 Stream 0 Source Peripheral Map Register */
335#define MDMA_S0_CURR_X_COUNT 0xFFC00E70 /* MemDMA0 Stream 0 Source Current X Count Register */
336#define MDMA_S0_CURR_Y_COUNT 0xFFC00E78 /* MemDMA0 Stream 0 Source Current Y Count Register */
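
/*
 * Hedged sketch: MemDMA0 stream 0 copies memory to memory by programming
 * the source (MDMA_S0_*) and destination (MDMA_D0_*) halves and enabling
 * both.  Reuses the assumed config bits from the DMA0 sketch above:
 */
static void mdma0_copy16(void *dst, const void *src, unsigned short nwords)
{
	bfin_write32(MDMA_S0_START_ADDR, (unsigned long)src);
	bfin_write16(MDMA_S0_X_COUNT, nwords);
	bfin_write16(MDMA_S0_X_MODIFY, 2);
	bfin_write32(MDMA_D0_START_ADDR, (unsigned long)dst);
	bfin_write16(MDMA_D0_X_COUNT, nwords);
	bfin_write16(MDMA_D0_X_MODIFY, 2);
	bfin_write16(MDMA_S0_CONFIG, DMA_CFG_DMAEN | DMA_CFG_WDSIZE_16);
	bfin_write16(MDMA_D0_CONFIG,
		     DMA_CFG_DMAEN | DMA_CFG_WNR | DMA_CFG_WDSIZE_16);
}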
337
338#define MDMA_D1_NEXT_DESC_PTR 0xFFC00E80 /* MemDMA0 Stream 1 Destination Next Descriptor Pointer Register */
339#define MDMA_D1_START_ADDR 0xFFC00E84 /* MemDMA0 Stream 1 Destination Start Address Register */
340#define MDMA_D1_CONFIG 0xFFC00E88 /* MemDMA0 Stream 1 Destination Configuration Register */
341#define MDMA_D1_X_COUNT 0xFFC00E90 /* MemDMA0 Stream 1 Destination X Count Register */
342#define MDMA_D1_X_MODIFY 0xFFC00E94 /* MemDMA0 Stream 1 Destination X Modify Register */
343#define MDMA_D1_Y_COUNT 0xFFC00E98 /* MemDMA0 Stream 1 Destination Y Count Register */
344#define MDMA_D1_Y_MODIFY 0xFFC00E9C /* MemDMA0 Stream 1 Destination Y Modify Register */
345#define MDMA_D1_CURR_DESC_PTR 0xFFC00EA0 /* MemDMA0 Stream 1 Destination Current Descriptor Pointer Register */
346#define MDMA_D1_CURR_ADDR 0xFFC00EA4 /* MemDMA0 Stream 1 Destination Current Address Register */
347#define MDMA_D1_IRQ_STATUS 0xFFC00EA8 /* MemDMA0 Stream 1 Destination Interrupt/Status Register */
348#define MDMA_D1_PERIPHERAL_MAP 0xFFC00EAC /* MemDMA0 Stream 1 Destination Peripheral Map Register */
349#define MDMA_D1_CURR_X_COUNT 0xFFC00EB0 /* MemDMA0 Stream 1 Destination Current X Count Register */
350#define MDMA_D1_CURR_Y_COUNT 0xFFC00EB8 /* MemDMA0 Stream 1 Destination Current Y Count Register */
351
352#define MDMA_S1_NEXT_DESC_PTR 0xFFC00EC0 /* MemDMA0 Stream 1 Source Next Descriptor Pointer Register */
353#define MDMA_S1_START_ADDR 0xFFC00EC4 /* MemDMA0 Stream 1 Source Start Address Register */
354#define MDMA_S1_CONFIG 0xFFC00EC8 /* MemDMA0 Stream 1 Source Configuration Register */
355#define MDMA_S1_X_COUNT 0xFFC00ED0 /* MemDMA0 Stream 1 Source X Count Register */
356#define MDMA_S1_X_MODIFY 0xFFC00ED4 /* MemDMA0 Stream 1 Source X Modify Register */
357#define MDMA_S1_Y_COUNT 0xFFC00ED8 /* MemDMA0 Stream 1 Source Y Count Register */
358#define MDMA_S1_Y_MODIFY 0xFFC00EDC /* MemDMA0 Stream 1 Source Y Modify Register */
359#define MDMA_S1_CURR_DESC_PTR 0xFFC00EE0 /* MemDMA0 Stream 1 Source Current Descriptor Pointer Register */
360#define MDMA_S1_CURR_ADDR 0xFFC00EE4 /* MemDMA0 Stream 1 Source Current Address Register */
361#define MDMA_S1_IRQ_STATUS 0xFFC00EE8 /* MemDMA0 Stream 1 Source Interrupt/Status Register */
362#define MDMA_S1_PERIPHERAL_MAP 0xFFC00EEC /* MemDMA0 Stream 1 Source Peripheral Map Register */
363#define MDMA_S1_CURR_X_COUNT 0xFFC00EF0 /* MemDMA0 Stream 1 Source Current X Count Register */
364#define MDMA_S1_CURR_Y_COUNT 0xFFC00EF8 /* MemDMA0 Stream 1 Source Current Y Count Register */
365
366
367/* Parallel Peripheral Interface (PPI) (0xFFC01000 - 0xFFC010FF) */
368#define PPI_CONTROL 0xFFC01000 /* PPI Control Register */
369#define PPI_STATUS 0xFFC01004 /* PPI Status Register */
370#define PPI_COUNT 0xFFC01008 /* PPI Transfer Count Register */
371#define PPI_DELAY 0xFFC0100C /* PPI Delay Count Register */
372#define PPI_FRAME 0xFFC01010 /* PPI Frame Length Register */
373
374
375/* Two-Wire Interface 0 (0xFFC01400 - 0xFFC014FF) */
376#define TWI0_CLKDIV 0xFFC01400 /* Serial Clock Divider Register */
377#define TWI0_CONTROL 0xFFC01404 /* TWI0 Master Internal Time Reference Register */
378#define TWI0_SLAVE_CTL 0xFFC01408 /* Slave Mode Control Register */
379#define TWI0_SLAVE_STAT 0xFFC0140C /* Slave Mode Status Register */
380#define TWI0_SLAVE_ADDR 0xFFC01410 /* Slave Mode Address Register */
381#define TWI0_MASTER_CTL 0xFFC01414 /* Master Mode Control Register */
382#define TWI0_MASTER_STAT 0xFFC01418 /* Master Mode Status Register */
383#define TWI0_MASTER_ADDR 0xFFC0141C /* Master Mode Address Register */
384#define TWI0_INT_STAT 0xFFC01420 /* TWI0 Master Interrupt Register */
385#define TWI0_INT_MASK 0xFFC01424 /* TWI0 Master Interrupt Mask Register */
386#define TWI0_FIFO_CTL 0xFFC01428 /* FIFO Control Register */
387#define TWI0_FIFO_STAT 0xFFC0142C /* FIFO Status Register */
388#define TWI0_XMT_DATA8 0xFFC01480 /* FIFO Transmit Data Single Byte Register */
389#define TWI0_XMT_DATA16 0xFFC01484 /* FIFO Transmit Data Double Byte Register */
390#define TWI0_RCV_DATA8 0xFFC01488 /* FIFO Receive Data Single Byte Register */
391#define TWI0_RCV_DATA16 0xFFC0148C /* FIFO Receive Data Double Byte Register */
392
393#define TWI0_REGBASE TWI0_CLKDIV
394
395/* the following are for backwards compatibility */
396#define TWI0_PRESCALE TWI0_CONTROL
397#define TWI0_INT_SRC TWI0_INT_STAT
398#define TWI0_INT_ENABLE TWI0_INT_MASK
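
/*
 * Illustrative, with assumed field packing: TWI0_CLKDIV holds the SCL
 * low time in its low byte and the SCL high time in the next byte, in
 * ticks of the internal time reference set up via TWI0_CONTROL.
 */
static inline void twi0_set_clkdiv(unsigned char lo_ticks, unsigned char hi_ticks)
{
	bfin_write16(TWI0_CLKDIV, lo_ticks | (hi_ticks << 8));
}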
399
400
401/* General-Purpose Ports (0xFFC01500 - 0xFFC015FF) */
402
403/* GPIO Port C Register Names */
404#define PORTCIO_FER 0xFFC01500 /* GPIO Pin Port C Configuration Register */
405#define PORTCIO 0xFFC01510 /* GPIO Pin Port C Data Register */
406#define PORTCIO_CLEAR 0xFFC01520 /* Clear GPIO Pin Port C Register */
407#define PORTCIO_SET 0xFFC01530 /* Set GPIO Pin Port C Register */
408#define PORTCIO_TOGGLE 0xFFC01540 /* Toggle GPIO Pin Port C Register */
409#define PORTCIO_DIR 0xFFC01550 /* GPIO Pin Port C Direction Register */
410#define PORTCIO_INEN 0xFFC01560 /* GPIO Pin Port C Input Enable Register */
411
412/* GPIO Port D Register Names */
413#define PORTDIO_FER 0xFFC01504 /* GPIO Pin Port D Configuration Register */
414#define PORTDIO 0xFFC01514 /* GPIO Pin Port D Data Register */
415#define PORTDIO_CLEAR 0xFFC01524 /* Clear GPIO Pin Port D Register */
416#define PORTDIO_SET 0xFFC01534 /* Set GPIO Pin Port D Register */
417#define PORTDIO_TOGGLE 0xFFC01544 /* Toggle GPIO Pin Port D Register */
418#define PORTDIO_DIR 0xFFC01554 /* GPIO Pin Port D Direction Register */
419#define PORTDIO_INEN 0xFFC01564 /* GPIO Pin Port D Input Enable Register */
420
421/* GPIO Port E Register Names */
422#define PORTEIO_FER 0xFFC01508 /* GPIO Pin Port E Configuration Register */
423#define PORTEIO 0xFFC01518 /* GPIO Pin Port E Data Register */
424#define PORTEIO_CLEAR 0xFFC01528 /* Clear GPIO Pin Port E Register */
425#define PORTEIO_SET 0xFFC01538 /* Set GPIO Pin Port E Register */
426#define PORTEIO_TOGGLE 0xFFC01548 /* Toggle GPIO Pin Port E Register */
427#define PORTEIO_DIR 0xFFC01558 /* GPIO Pin Port E Direction Register */
428#define PORTEIO_INEN 0xFFC01568 /* GPIO Pin Port E Input Enable Register */
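
/*
 * Illustrative: ports C, D and E repeat one register layout at a 4-byte
 * stride, and the SET/CLEAR registers act on written 1-bits only.
 * Hypothetical helper for port C:
 */
static inline void portc_drive(unsigned int pin, int high)
{
	bfin_write16(PORTCIO_DIR, bfin_read16(PORTCIO_DIR) | (1 << pin));
	bfin_write16(high ? PORTCIO_SET : PORTCIO_CLEAR, 1 << pin);
}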
429
430/* DMA Controller 1 Traffic Control Registers (0xFFC01B00 - 0xFFC01BFF) */
431
432#define DMAC1_TC_PER 0xFFC01B0C /* DMA Controller 1 Traffic Control Periods Register */
433#define DMAC1_TC_CNT 0xFFC01B10 /* DMA Controller 1 Traffic Control Current Counts Register */
434
435
436
437/* DMA Controller 1 (0xFFC01C00 - 0xFFC01FFF) */
438#define DMA8_NEXT_DESC_PTR 0xFFC01C00 /* DMA Channel 8 Next Descriptor Pointer Register */
439#define DMA8_START_ADDR 0xFFC01C04 /* DMA Channel 8 Start Address Register */
440#define DMA8_CONFIG 0xFFC01C08 /* DMA Channel 8 Configuration Register */
441#define DMA8_X_COUNT 0xFFC01C10 /* DMA Channel 8 X Count Register */
442#define DMA8_X_MODIFY 0xFFC01C14 /* DMA Channel 8 X Modify Register */
443#define DMA8_Y_COUNT 0xFFC01C18 /* DMA Channel 8 Y Count Register */
444#define DMA8_Y_MODIFY 0xFFC01C1C /* DMA Channel 8 Y Modify Register */
445#define DMA8_CURR_DESC_PTR 0xFFC01C20 /* DMA Channel 8 Current Descriptor Pointer Register */
446#define DMA8_CURR_ADDR 0xFFC01C24 /* DMA Channel 8 Current Address Register */
447#define DMA8_IRQ_STATUS 0xFFC01C28 /* DMA Channel 8 Interrupt/Status Register */
448#define DMA8_PERIPHERAL_MAP 0xFFC01C2C /* DMA Channel 8 Peripheral Map Register */
449#define DMA8_CURR_X_COUNT 0xFFC01C30 /* DMA Channel 8 Current X Count Register */
450#define DMA8_CURR_Y_COUNT 0xFFC01C38 /* DMA Channel 8 Current Y Count Register */
451
452#define DMA9_NEXT_DESC_PTR 0xFFC01C40 /* DMA Channel 9 Next Descriptor Pointer Register */
453#define DMA9_START_ADDR 0xFFC01C44 /* DMA Channel 9 Start Address Register */
454#define DMA9_CONFIG 0xFFC01C48 /* DMA Channel 9 Configuration Register */
455#define DMA9_X_COUNT 0xFFC01C50 /* DMA Channel 9 X Count Register */
456#define DMA9_X_MODIFY 0xFFC01C54 /* DMA Channel 9 X Modify Register */
457#define DMA9_Y_COUNT 0xFFC01C58 /* DMA Channel 9 Y Count Register */
458#define DMA9_Y_MODIFY 0xFFC01C5C /* DMA Channel 9 Y Modify Register */
459#define DMA9_CURR_DESC_PTR 0xFFC01C60 /* DMA Channel 9 Current Descriptor Pointer Register */
460#define DMA9_CURR_ADDR 0xFFC01C64 /* DMA Channel 9 Current Address Register */
461#define DMA9_IRQ_STATUS 0xFFC01C68 /* DMA Channel 9 Interrupt/Status Register */
462#define DMA9_PERIPHERAL_MAP 0xFFC01C6C /* DMA Channel 9 Peripheral Map Register */
463#define DMA9_CURR_X_COUNT 0xFFC01C70 /* DMA Channel 9 Current X Count Register */
464#define DMA9_CURR_Y_COUNT 0xFFC01C78 /* DMA Channel 9 Current Y Count Register */
465
466#define DMA10_NEXT_DESC_PTR 0xFFC01C80 /* DMA Channel 10 Next Descriptor Pointer Register */
467#define DMA10_START_ADDR 0xFFC01C84 /* DMA Channel 10 Start Address Register */
468#define DMA10_CONFIG 0xFFC01C88 /* DMA Channel 10 Configuration Register */
469#define DMA10_X_COUNT 0xFFC01C90 /* DMA Channel 10 X Count Register */
470#define DMA10_X_MODIFY 0xFFC01C94 /* DMA Channel 10 X Modify Register */
471#define DMA10_Y_COUNT 0xFFC01C98 /* DMA Channel 10 Y Count Register */
472#define DMA10_Y_MODIFY 0xFFC01C9C /* DMA Channel 10 Y Modify Register */
473#define DMA10_CURR_DESC_PTR 0xFFC01CA0 /* DMA Channel 10 Current Descriptor Pointer Register */
474#define DMA10_CURR_ADDR 0xFFC01CA4 /* DMA Channel 10 Current Address Register */
475#define DMA10_IRQ_STATUS 0xFFC01CA8 /* DMA Channel 10 Interrupt/Status Register */
476#define DMA10_PERIPHERAL_MAP 0xFFC01CAC /* DMA Channel 10 Peripheral Map Register */
477#define DMA10_CURR_X_COUNT 0xFFC01CB0 /* DMA Channel 10 Current X Count Register */
478#define DMA10_CURR_Y_COUNT 0xFFC01CB8 /* DMA Channel 10 Current Y Count Register */
479
480#define DMA11_NEXT_DESC_PTR 0xFFC01CC0 /* DMA Channel 11 Next Descriptor Pointer Register */
481#define DMA11_START_ADDR 0xFFC01CC4 /* DMA Channel 11 Start Address Register */
482#define DMA11_CONFIG 0xFFC01CC8 /* DMA Channel 11 Configuration Register */
483#define DMA11_X_COUNT 0xFFC01CD0 /* DMA Channel 11 X Count Register */
484#define DMA11_X_MODIFY 0xFFC01CD4 /* DMA Channel 11 X Modify Register */
485#define DMA11_Y_COUNT 0xFFC01CD8 /* DMA Channel 11 Y Count Register */
486#define DMA11_Y_MODIFY 0xFFC01CDC /* DMA Channel 11 Y Modify Register */
487#define DMA11_CURR_DESC_PTR 0xFFC01CE0 /* DMA Channel 11 Current Descriptor Pointer Register */
488#define DMA11_CURR_ADDR 0xFFC01CE4 /* DMA Channel 11 Current Address Register */
489#define DMA11_IRQ_STATUS 0xFFC01CE8 /* DMA Channel 11 Interrupt/Status Register */
490#define DMA11_PERIPHERAL_MAP 0xFFC01CEC /* DMA Channel 11 Peripheral Map Register */
491#define DMA11_CURR_X_COUNT 0xFFC01CF0 /* DMA Channel 11 Current X Count Register */
492#define DMA11_CURR_Y_COUNT 0xFFC01CF8 /* DMA Channel 11 Current Y Count Register */
493
494#define DMA12_NEXT_DESC_PTR 0xFFC01D00 /* DMA Channel 12 Next Descriptor Pointer Register */
495#define DMA12_START_ADDR 0xFFC01D04 /* DMA Channel 12 Start Address Register */
496#define DMA12_CONFIG 0xFFC01D08 /* DMA Channel 12 Configuration Register */
497#define DMA12_X_COUNT 0xFFC01D10 /* DMA Channel 12 X Count Register */
498#define DMA12_X_MODIFY 0xFFC01D14 /* DMA Channel 12 X Modify Register */
499#define DMA12_Y_COUNT 0xFFC01D18 /* DMA Channel 12 Y Count Register */
500#define DMA12_Y_MODIFY 0xFFC01D1C /* DMA Channel 12 Y Modify Register */
501#define DMA12_CURR_DESC_PTR 0xFFC01D20 /* DMA Channel 12 Current Descriptor Pointer Register */
502#define DMA12_CURR_ADDR 0xFFC01D24 /* DMA Channel 12 Current Address Register */
503#define DMA12_IRQ_STATUS 0xFFC01D28 /* DMA Channel 12 Interrupt/Status Register */
504#define DMA12_PERIPHERAL_MAP 0xFFC01D2C /* DMA Channel 12 Peripheral Map Register */
505#define DMA12_CURR_X_COUNT 0xFFC01D30 /* DMA Channel 12 Current X Count Register */
506#define DMA12_CURR_Y_COUNT 0xFFC01D38 /* DMA Channel 12 Current Y Count Register */
507
508#define DMA13_NEXT_DESC_PTR 0xFFC01D40 /* DMA Channel 13 Next Descriptor Pointer Register */
509#define DMA13_START_ADDR 0xFFC01D44 /* DMA Channel 13 Start Address Register */
510#define DMA13_CONFIG 0xFFC01D48 /* DMA Channel 13 Configuration Register */
511#define DMA13_X_COUNT 0xFFC01D50 /* DMA Channel 13 X Count Register */
512#define DMA13_X_MODIFY 0xFFC01D54 /* DMA Channel 13 X Modify Register */
513#define DMA13_Y_COUNT 0xFFC01D58 /* DMA Channel 13 Y Count Register */
514#define DMA13_Y_MODIFY 0xFFC01D5C /* DMA Channel 13 Y Modify Register */
515#define DMA13_CURR_DESC_PTR 0xFFC01D60 /* DMA Channel 13 Current Descriptor Pointer Register */
516#define DMA13_CURR_ADDR 0xFFC01D64 /* DMA Channel 13 Current Address Register */
517#define DMA13_IRQ_STATUS 0xFFC01D68 /* DMA Channel 13 Interrupt/Status Register */
518#define DMA13_PERIPHERAL_MAP 0xFFC01D6C /* DMA Channel 13 Peripheral Map Register */
519#define DMA13_CURR_X_COUNT 0xFFC01D70 /* DMA Channel 13 Current X Count Register */
520#define DMA13_CURR_Y_COUNT 0xFFC01D78 /* DMA Channel 13 Current Y Count Register */
521
522#define DMA14_NEXT_DESC_PTR 0xFFC01D80 /* DMA Channel 14 Next Descriptor Pointer Register */
523#define DMA14_START_ADDR 0xFFC01D84 /* DMA Channel 14 Start Address Register */
524#define DMA14_CONFIG 0xFFC01D88 /* DMA Channel 14 Configuration Register */
525#define DMA14_X_COUNT 0xFFC01D90 /* DMA Channel 14 X Count Register */
526#define DMA14_X_MODIFY 0xFFC01D94 /* DMA Channel 14 X Modify Register */
527#define DMA14_Y_COUNT 0xFFC01D98 /* DMA Channel 14 Y Count Register */
528#define DMA14_Y_MODIFY 0xFFC01D9C /* DMA Channel 14 Y Modify Register */
529#define DMA14_CURR_DESC_PTR 0xFFC01DA0 /* DMA Channel 14 Current Descriptor Pointer Register */
530#define DMA14_CURR_ADDR 0xFFC01DA4 /* DMA Channel 14 Current Address Register */
531#define DMA14_IRQ_STATUS 0xFFC01DA8 /* DMA Channel 14 Interrupt/Status Register */
532#define DMA14_PERIPHERAL_MAP 0xFFC01DAC /* DMA Channel 14 Peripheral Map Register */
533#define DMA14_CURR_X_COUNT 0xFFC01DB0 /* DMA Channel 14 Current X Count Register */
534#define DMA14_CURR_Y_COUNT 0xFFC01DB8 /* DMA Channel 14 Current Y Count Register */
535
536#define DMA15_NEXT_DESC_PTR 0xFFC01DC0 /* DMA Channel 15 Next Descriptor Pointer Register */
537#define DMA15_START_ADDR 0xFFC01DC4 /* DMA Channel 15 Start Address Register */
538#define DMA15_CONFIG 0xFFC01DC8 /* DMA Channel 15 Configuration Register */
539#define DMA15_X_COUNT 0xFFC01DD0 /* DMA Channel 15 X Count Register */
540#define DMA15_X_MODIFY 0xFFC01DD4 /* DMA Channel 15 X Modify Register */
541#define DMA15_Y_COUNT 0xFFC01DD8 /* DMA Channel 15 Y Count Register */
542#define DMA15_Y_MODIFY 0xFFC01DDC /* DMA Channel 15 Y Modify Register */
543#define DMA15_CURR_DESC_PTR 0xFFC01DE0 /* DMA Channel 15 Current Descriptor Pointer Register */
544#define DMA15_CURR_ADDR 0xFFC01DE4 /* DMA Channel 15 Current Address Register */
545#define DMA15_IRQ_STATUS 0xFFC01DE8 /* DMA Channel 15 Interrupt/Status Register */
546#define DMA15_PERIPHERAL_MAP 0xFFC01DEC /* DMA Channel 15 Peripheral Map Register */
547#define DMA15_CURR_X_COUNT 0xFFC01DF0 /* DMA Channel 15 Current X Count Register */
548#define DMA15_CURR_Y_COUNT 0xFFC01DF8 /* DMA Channel 15 Current Y Count Register */
549
550#define DMA16_NEXT_DESC_PTR 0xFFC01E00 /* DMA Channel 16 Next Descriptor Pointer Register */
551#define DMA16_START_ADDR 0xFFC01E04 /* DMA Channel 16 Start Address Register */
552#define DMA16_CONFIG 0xFFC01E08 /* DMA Channel 16 Configuration Register */
553#define DMA16_X_COUNT 0xFFC01E10 /* DMA Channel 16 X Count Register */
554#define DMA16_X_MODIFY 0xFFC01E14 /* DMA Channel 16 X Modify Register */
555#define DMA16_Y_COUNT 0xFFC01E18 /* DMA Channel 16 Y Count Register */
556#define DMA16_Y_MODIFY 0xFFC01E1C /* DMA Channel 16 Y Modify Register */
557#define DMA16_CURR_DESC_PTR 0xFFC01E20 /* DMA Channel 16 Current Descriptor Pointer Register */
558#define DMA16_CURR_ADDR 0xFFC01E24 /* DMA Channel 16 Current Address Register */
559#define DMA16_IRQ_STATUS 0xFFC01E28 /* DMA Channel 16 Interrupt/Status Register */
560#define DMA16_PERIPHERAL_MAP 0xFFC01E2C /* DMA Channel 16 Peripheral Map Register */
561#define DMA16_CURR_X_COUNT 0xFFC01E30 /* DMA Channel 16 Current X Count Register */
562#define DMA16_CURR_Y_COUNT 0xFFC01E38 /* DMA Channel 16 Current Y Count Register */
563
564#define DMA17_NEXT_DESC_PTR 0xFFC01E40 /* DMA Channel 17 Next Descriptor Pointer Register */
565#define DMA17_START_ADDR 0xFFC01E44 /* DMA Channel 17 Start Address Register */
566#define DMA17_CONFIG 0xFFC01E48 /* DMA Channel 17 Configuration Register */
567#define DMA17_X_COUNT 0xFFC01E50 /* DMA Channel 17 X Count Register */
568#define DMA17_X_MODIFY 0xFFC01E54 /* DMA Channel 17 X Modify Register */
569#define DMA17_Y_COUNT 0xFFC01E58 /* DMA Channel 17 Y Count Register */
570#define DMA17_Y_MODIFY 0xFFC01E5C /* DMA Channel 17 Y Modify Register */
571#define DMA17_CURR_DESC_PTR 0xFFC01E60 /* DMA Channel 17 Current Descriptor Pointer Register */
572#define DMA17_CURR_ADDR 0xFFC01E64 /* DMA Channel 17 Current Address Register */
573#define DMA17_IRQ_STATUS 0xFFC01E68 /* DMA Channel 17 Interrupt/Status Register */
574#define DMA17_PERIPHERAL_MAP 0xFFC01E6C /* DMA Channel 17 Peripheral Map Register */
575#define DMA17_CURR_X_COUNT 0xFFC01E70 /* DMA Channel 17 Current X Count Register */
576#define DMA17_CURR_Y_COUNT 0xFFC01E78 /* DMA Channel 17 Current Y Count Register */
577
578#define DMA18_NEXT_DESC_PTR 0xFFC01E80 /* DMA Channel 18 Next Descriptor Pointer Register */
579#define DMA18_START_ADDR 0xFFC01E84 /* DMA Channel 18 Start Address Register */
580#define DMA18_CONFIG 0xFFC01E88 /* DMA Channel 18 Configuration Register */
581#define DMA18_X_COUNT 0xFFC01E90 /* DMA Channel 18 X Count Register */
582#define DMA18_X_MODIFY 0xFFC01E94 /* DMA Channel 18 X Modify Register */
583#define DMA18_Y_COUNT 0xFFC01E98 /* DMA Channel 18 Y Count Register */
584#define DMA18_Y_MODIFY 0xFFC01E9C /* DMA Channel 18 Y Modify Register */
585#define DMA18_CURR_DESC_PTR 0xFFC01EA0 /* DMA Channel 18 Current Descriptor Pointer Register */
586#define DMA18_CURR_ADDR 0xFFC01EA4 /* DMA Channel 18 Current Address Register */
587#define DMA18_IRQ_STATUS 0xFFC01EA8 /* DMA Channel 18 Interrupt/Status Register */
588#define DMA18_PERIPHERAL_MAP 0xFFC01EAC /* DMA Channel 18 Peripheral Map Register */
589#define DMA18_CURR_X_COUNT 0xFFC01EB0 /* DMA Channel 18 Current X Count Register */
590#define DMA18_CURR_Y_COUNT 0xFFC01EB8 /* DMA Channel 18 Current Y Count Register */
591
592#define DMA19_NEXT_DESC_PTR 0xFFC01EC0 /* DMA Channel 19 Next Descriptor Pointer Register */
593#define DMA19_START_ADDR 0xFFC01EC4 /* DMA Channel 19 Start Address Register */
594#define DMA19_CONFIG 0xFFC01EC8 /* DMA Channel 19 Configuration Register */
595#define DMA19_X_COUNT 0xFFC01ED0 /* DMA Channel 19 X Count Register */
596#define DMA19_X_MODIFY 0xFFC01ED4 /* DMA Channel 19 X Modify Register */
597#define DMA19_Y_COUNT 0xFFC01ED8 /* DMA Channel 19 Y Count Register */
598#define DMA19_Y_MODIFY 0xFFC01EDC /* DMA Channel 19 Y Modify Register */
599#define DMA19_CURR_DESC_PTR 0xFFC01EE0 /* DMA Channel 19 Current Descriptor Pointer Register */
600#define DMA19_CURR_ADDR 0xFFC01EE4 /* DMA Channel 19 Current Address Register */
601#define DMA19_IRQ_STATUS 0xFFC01EE8 /* DMA Channel 19 Interrupt/Status Register */
602#define DMA19_PERIPHERAL_MAP 0xFFC01EEC /* DMA Channel 19 Peripheral Map Register */
603#define DMA19_CURR_X_COUNT 0xFFC01EF0 /* DMA Channel 19 Current X Count Register */
604#define DMA19_CURR_Y_COUNT 0xFFC01EF8 /* DMA Channel 19 Current Y Count Register */
605
606#define MDMA_D2_NEXT_DESC_PTR 0xFFC01F00 /* MemDMA1 Stream 0 Destination Next Descriptor Pointer Register */
607#define MDMA_D2_START_ADDR 0xFFC01F04 /* MemDMA1 Stream 0 Destination Start Address Register */
608#define MDMA_D2_CONFIG 0xFFC01F08 /* MemDMA1 Stream 0 Destination Configuration Register */
609#define MDMA_D2_X_COUNT 0xFFC01F10 /* MemDMA1 Stream 0 Destination X Count Register */
610#define MDMA_D2_X_MODIFY 0xFFC01F14 /* MemDMA1 Stream 0 Destination X Modify Register */
611#define MDMA_D2_Y_COUNT 0xFFC01F18 /* MemDMA1 Stream 0 Destination Y Count Register */
612#define MDMA_D2_Y_MODIFY 0xFFC01F1C /* MemDMA1 Stream 0 Destination Y Modify Register */
613#define MDMA_D2_CURR_DESC_PTR 0xFFC01F20 /* MemDMA1 Stream 0 Destination Current Descriptor Pointer Register */
614#define MDMA_D2_CURR_ADDR 0xFFC01F24 /* MemDMA1 Stream 0 Destination Current Address Register */
615#define MDMA_D2_IRQ_STATUS 0xFFC01F28 /* MemDMA1 Stream 0 Destination Interrupt/Status Register */
616#define MDMA_D2_PERIPHERAL_MAP 0xFFC01F2C /* MemDMA1 Stream 0 Destination Peripheral Map Register */
617#define MDMA_D2_CURR_X_COUNT 0xFFC01F30 /* MemDMA1 Stream 0 Destination Current X Count Register */
618#define MDMA_D2_CURR_Y_COUNT 0xFFC01F38 /* MemDMA1 Stream 0 Destination Current Y Count Register */
619
620#define MDMA_S2_NEXT_DESC_PTR 0xFFC01F40 /* MemDMA1 Stream 0 Source Next Descriptor Pointer Register */
621#define MDMA_S2_START_ADDR 0xFFC01F44 /* MemDMA1 Stream 0 Source Start Address Register */
622#define MDMA_S2_CONFIG 0xFFC01F48 /* MemDMA1 Stream 0 Source Configuration Register */
623#define MDMA_S2_X_COUNT 0xFFC01F50 /* MemDMA1 Stream 0 Source X Count Register */
624#define MDMA_S2_X_MODIFY 0xFFC01F54 /* MemDMA1 Stream 0 Source X Modify Register */
625#define MDMA_S2_Y_COUNT 0xFFC01F58 /* MemDMA1 Stream 0 Source Y Count Register */
626#define MDMA_S2_Y_MODIFY 0xFFC01F5C /* MemDMA1 Stream 0 Source Y Modify Register */
627#define MDMA_S2_CURR_DESC_PTR 0xFFC01F60 /* MemDMA1 Stream 0 Source Current Descriptor Pointer Register */
628#define MDMA_S2_CURR_ADDR 0xFFC01F64 /* MemDMA1 Stream 0 Source Current Address Register */
629#define MDMA_S2_IRQ_STATUS 0xFFC01F68 /* MemDMA1 Stream 0 Source Interrupt/Status Register */
630#define MDMA_S2_PERIPHERAL_MAP 0xFFC01F6C /* MemDMA1 Stream 0 Source Peripheral Map Register */
631#define MDMA_S2_CURR_X_COUNT 0xFFC01F70 /* MemDMA1 Stream 0 Source Current X Count Register */
632#define MDMA_S2_CURR_Y_COUNT 0xFFC01F78 /* MemDMA1 Stream 0 Source Current Y Count Register */
633
634#define MDMA_D3_NEXT_DESC_PTR 0xFFC01F80 /* MemDMA1 Stream 1 Destination Next Descriptor Pointer Register */
635#define MDMA_D3_START_ADDR 0xFFC01F84 /* MemDMA1 Stream 1 Destination Start Address Register */
636#define MDMA_D3_CONFIG 0xFFC01F88 /* MemDMA1 Stream 1 Destination Configuration Register */
637#define MDMA_D3_X_COUNT 0xFFC01F90 /* MemDMA1 Stream 1 Destination X Count Register */
638#define MDMA_D3_X_MODIFY 0xFFC01F94 /* MemDMA1 Stream 1 Destination X Modify Register */
639#define MDMA_D3_Y_COUNT 0xFFC01F98 /* MemDMA1 Stream 1 Destination Y Count Register */
640#define MDMA_D3_Y_MODIFY 0xFFC01F9C /* MemDMA1 Stream 1 Destination Y Modify Register */
641#define MDMA_D3_CURR_DESC_PTR 0xFFC01FA0 /* MemDMA1 Stream 1 Destination Current Descriptor Pointer Register */
642#define MDMA_D3_CURR_ADDR 0xFFC01FA4 /* MemDMA1 Stream 1 Destination Current Address Register */
643#define MDMA_D3_IRQ_STATUS 0xFFC01FA8 /* MemDMA1 Stream 1 Destination Interrupt/Status Register */
644#define MDMA_D3_PERIPHERAL_MAP 0xFFC01FAC /* MemDMA1 Stream 1 Destination Peripheral Map Register */
645#define MDMA_D3_CURR_X_COUNT 0xFFC01FB0 /* MemDMA1 Stream 1 Destination Current X Count Register */
646#define MDMA_D3_CURR_Y_COUNT 0xFFC01FB8 /* MemDMA1 Stream 1 Destination Current Y Count Register */
647
648#define MDMA_S3_NEXT_DESC_PTR 0xFFC01FC0 /* MemDMA1 Stream 1 Source Next Descriptor Pointer Register */
649#define MDMA_S3_START_ADDR 0xFFC01FC4 /* MemDMA1 Stream 1 Source Start Address Register */
650#define MDMA_S3_CONFIG 0xFFC01FC8 /* MemDMA1 Stream 1 Source Configuration Register */
651#define MDMA_S3_X_COUNT 0xFFC01FD0 /* MemDMA1 Stream 1 Source X Count Register */
652#define MDMA_S3_X_MODIFY 0xFFC01FD4 /* MemDMA1 Stream 1 Source X Modify Register */
653#define MDMA_S3_Y_COUNT 0xFFC01FD8 /* MemDMA1 Stream 1 Source Y Count Register */
654#define MDMA_S3_Y_MODIFY 0xFFC01FDC /* MemDMA1 Stream 1 Source Y Modify Register */
655#define MDMA_S3_CURR_DESC_PTR 0xFFC01FE0 /* MemDMA1 Stream 1 Source Current Descriptor Pointer Register */
656#define MDMA_S3_CURR_ADDR 0xFFC01FE4 /* MemDMA1 Stream 1 Source Current Address Register */
657#define MDMA_S3_IRQ_STATUS 0xFFC01FE8 /* MemDMA1 Stream 1 Source Interrupt/Status Register */
658#define MDMA_S3_PERIPHERAL_MAP 0xFFC01FEC /* MemDMA1 Stream 1 Source Peripheral Map Register */
659#define MDMA_S3_CURR_X_COUNT 0xFFC01FF0 /* MemDMA1 Stream 1 Source Current X Count Register */
660#define MDMA_S3_CURR_Y_COUNT 0xFFC01FF8 /* MemDMA1 Stream 1 Source Current Y Count Register */
661
662
663/* UART1 Controller (0xFFC02000 - 0xFFC020FF) */
664#define UART1_THR 0xFFC02000 /* Transmit Holding register */
665#define UART1_RBR 0xFFC02000 /* Receive Buffer register */
666#define UART1_DLL 0xFFC02000 /* Divisor Latch (Low-Byte) */
667#define UART1_IER 0xFFC02004 /* Interrupt Enable Register */
668#define UART1_DLH 0xFFC02004 /* Divisor Latch (High-Byte) */
669#define UART1_IIR 0xFFC02008 /* Interrupt Identification Register */
670#define UART1_LCR 0xFFC0200C /* Line Control Register */
671#define UART1_MCR 0xFFC02010 /* Modem Control Register */
672#define UART1_LSR 0xFFC02014 /* Line Status Register */
673#define UART1_SCR 0xFFC0201C /* SCR Scratch Register */
674#define UART1_GCTL 0xFFC02024 /* Global Control Register */
675
676
677/* UART2 Controller (0xFFC02100 - 0xFFC021FF) */
678#define UART2_THR 0xFFC02100 /* Transmit Holding register */
679#define UART2_RBR 0xFFC02100 /* Receive Buffer register */
680#define UART2_DLL 0xFFC02100 /* Divisor Latch (Low-Byte) */
681#define UART2_IER 0xFFC02104 /* Interrupt Enable Register */
682#define UART2_DLH 0xFFC02104 /* Divisor Latch (High-Byte) */
683#define UART2_IIR 0xFFC02108 /* Interrupt Identification Register */
684#define UART2_LCR 0xFFC0210C /* Line Control Register */
685#define UART2_MCR 0xFFC02110 /* Modem Control Register */
686#define UART2_LSR 0xFFC02114 /* Line Status Register */
687#define UART2_SCR 0xFFC0211C /* SCR Scratch Register */
688#define UART2_GCTL 0xFFC02124 /* Global Control Register */
689
690
691/* Two-Wire Interface 1 (0xFFC02200 - 0xFFC022FF) */
692#define TWI1_CLKDIV 0xFFC02200 /* Serial Clock Divider Register */
693#define TWI1_CONTROL 0xFFC02204 /* TWI1 Master Internal Time Reference Register */
694#define TWI1_SLAVE_CTL 0xFFC02208 /* Slave Mode Control Register */
695#define TWI1_SLAVE_STAT 0xFFC0220C /* Slave Mode Status Register */
696#define TWI1_SLAVE_ADDR 0xFFC02210 /* Slave Mode Address Register */
697#define TWI1_MASTER_CTL 0xFFC02214 /* Master Mode Control Register */
698#define TWI1_MASTER_STAT 0xFFC02218 /* Master Mode Status Register */
699#define TWI1_MASTER_ADDR 0xFFC0221C /* Master Mode Address Register */
700#define TWI1_INT_STAT 0xFFC02220 /* TWI1 Master Interrupt Register */
701#define TWI1_INT_MASK 0xFFC02224 /* TWI1 Master Interrupt Mask Register */
702#define TWI1_FIFO_CTL 0xFFC02228 /* FIFO Control Register */
703#define TWI1_FIFO_STAT 0xFFC0222C /* FIFO Status Register */
704#define TWI1_XMT_DATA8 0xFFC02280 /* FIFO Transmit Data Single Byte Register */
705#define TWI1_XMT_DATA16 0xFFC02284 /* FIFO Transmit Data Double Byte Register */
706#define TWI1_RCV_DATA8 0xFFC02288 /* FIFO Receive Data Single Byte Register */
707#define TWI1_RCV_DATA16 0xFFC0228C /* FIFO Receive Data Double Byte Register */
708#define TWI1_REGBASE TWI1_CLKDIV
709
710
711/* the following are for backwards compatibility */
712#define TWI1_PRESCALE TWI1_CONTROL
713#define TWI1_INT_SRC TWI1_INT_STAT
714#define TWI1_INT_ENABLE TWI1_INT_MASK
715
716
717/* SPI1 Controller (0xFFC02300 - 0xFFC023FF) */
718#define SPI1_CTL 0xFFC02300 /* SPI1 Control Register */
719#define SPI1_FLG 0xFFC02304 /* SPI1 Flag register */
720#define SPI1_STAT 0xFFC02308 /* SPI1 Status register */
721#define SPI1_TDBR 0xFFC0230C /* SPI1 Transmit Data Buffer Register */
722#define SPI1_RDBR 0xFFC02310 /* SPI1 Receive Data Buffer Register */
723#define SPI1_BAUD 0xFFC02314 /* SPI1 Baud rate Register */
724#define SPI1_SHADOW 0xFFC02318 /* SPI1_RDBR Shadow Register */
725#define SPI1_REGBASE SPI1_CTL
726
727/* SPI2 Controller (0xFFC02400 - 0xFFC024FF) */
728#define SPI2_CTL 0xFFC02400 /* SPI2 Control Register */
729#define SPI2_FLG 0xFFC02404 /* SPI2 Flag register */
730#define SPI2_STAT 0xFFC02408 /* SPI2 Status register */
731#define SPI2_TDBR 0xFFC0240C /* SPI2 Transmit Data Buffer Register */
732#define SPI2_RDBR 0xFFC02410 /* SPI2 Receive Data Buffer Register */
733#define SPI2_BAUD 0xFFC02414 /* SPI2 Baud rate Register */
734#define SPI2_SHADOW 0xFFC02418 /* SPI2_RDBR Shadow Register */
735#define SPI2_REGBASE SPI2_CTL
736
737/* SPORT2 Controller (0xFFC02500 - 0xFFC025FF) */
738#define SPORT2_TCR1 0xFFC02500 /* SPORT2 Transmit Configuration 1 Register */
739#define SPORT2_TCR2 0xFFC02504 /* SPORT2 Transmit Configuration 2 Register */
740#define SPORT2_TCLKDIV 0xFFC02508 /* SPORT2 Transmit Clock Divider */
741#define SPORT2_TFSDIV 0xFFC0250C /* SPORT2 Transmit Frame Sync Divider */
742#define SPORT2_TX 0xFFC02510 /* SPORT2 TX Data Register */
743#define SPORT2_RX 0xFFC02518 /* SPORT2 RX Data Register */
744#define SPORT2_RCR1 0xFFC02520 /* SPORT2 Receive Configuration 1 Register */
745#define SPORT2_RCR2 0xFFC02524 /* SPORT2 Receive Configuration 2 Register */
746#define SPORT2_RCLKDIV 0xFFC02528 /* SPORT2 Receive Clock Divider */
747#define SPORT2_RFSDIV 0xFFC0252C /* SPORT2 Receive Frame Sync Divider */
748#define SPORT2_STAT 0xFFC02530 /* SPORT2 Status Register */
749#define SPORT2_CHNL 0xFFC02534 /* SPORT2 Current Channel Register */
750#define SPORT2_MCMC1 0xFFC02538 /* SPORT2 Multi-Channel Configuration Register 1 */
751#define SPORT2_MCMC2 0xFFC0253C /* SPORT2 Multi-Channel Configuration Register 2 */
752#define SPORT2_MTCS0 0xFFC02540 /* SPORT2 Multi-Channel Transmit Select Register 0 */
753#define SPORT2_MTCS1 0xFFC02544 /* SPORT2 Multi-Channel Transmit Select Register 1 */
754#define SPORT2_MTCS2 0xFFC02548 /* SPORT2 Multi-Channel Transmit Select Register 2 */
755#define SPORT2_MTCS3 0xFFC0254C /* SPORT2 Multi-Channel Transmit Select Register 3 */
756#define SPORT2_MRCS0 0xFFC02550 /* SPORT2 Multi-Channel Receive Select Register 0 */
757#define SPORT2_MRCS1 0xFFC02554 /* SPORT2 Multi-Channel Receive Select Register 1 */
758#define SPORT2_MRCS2 0xFFC02558 /* SPORT2 Multi-Channel Receive Select Register 2 */
759#define SPORT2_MRCS3 0xFFC0255C /* SPORT2 Multi-Channel Receive Select Register 3 */
760
761
762/* SPORT3 Controller (0xFFC02600 - 0xFFC026FF) */
763#define SPORT3_TCR1 0xFFC02600 /* SPORT3 Transmit Configuration 1 Register */
764#define SPORT3_TCR2 0xFFC02604 /* SPORT3 Transmit Configuration 2 Register */
765#define SPORT3_TCLKDIV 0xFFC02608 /* SPORT3 Transmit Clock Divider */
766#define SPORT3_TFSDIV 0xFFC0260C /* SPORT3 Transmit Frame Sync Divider */
767#define SPORT3_TX 0xFFC02610 /* SPORT3 TX Data Register */
768#define SPORT3_RX 0xFFC02618 /* SPORT3 RX Data Register */
769#define SPORT3_RCR1 0xFFC02620 /* SPORT3 Receive Configuration 1 Register */
770#define SPORT3_RCR2 0xFFC02624 /* SPORT3 Receive Configuration 2 Register */
771#define SPORT3_RCLKDIV 0xFFC02628 /* SPORT3 Receive Clock Divider */
772#define SPORT3_RFSDIV 0xFFC0262C /* SPORT3 Receive Frame Sync Divider */
773#define SPORT3_STAT 0xFFC02630 /* SPORT3 Status Register */
774#define SPORT3_CHNL 0xFFC02634 /* SPORT3 Current Channel Register */
775#define SPORT3_MCMC1 0xFFC02638 /* SPORT3 Multi-Channel Configuration Register 1 */
776#define SPORT3_MCMC2 0xFFC0263C /* SPORT3 Multi-Channel Configuration Register 2 */
777#define SPORT3_MTCS0 0xFFC02640 /* SPORT3 Multi-Channel Transmit Select Register 0 */
778#define SPORT3_MTCS1 0xFFC02644 /* SPORT3 Multi-Channel Transmit Select Register 1 */
779#define SPORT3_MTCS2 0xFFC02648 /* SPORT3 Multi-Channel Transmit Select Register 2 */
780#define SPORT3_MTCS3 0xFFC0264C /* SPORT3 Multi-Channel Transmit Select Register 3 */
781#define SPORT3_MRCS0 0xFFC02650 /* SPORT3 Multi-Channel Receive Select Register 0 */
782#define SPORT3_MRCS1 0xFFC02654 /* SPORT3 Multi-Channel Receive Select Register 1 */
783#define SPORT3_MRCS2 0xFFC02658 /* SPORT3 Multi-Channel Receive Select Register 2 */
784#define SPORT3_MRCS3 0xFFC0265C /* SPORT3 Multi-Channel Receive Select Register 3 */
785
786
787/* CAN Controller (0xFFC02A00 - 0xFFC02FFF) */
788/* For Mailboxes 0-15 */
789#define CAN_MC1 0xFFC02A00 /* Mailbox config reg 1 */
790#define CAN_MD1 0xFFC02A04 /* Mailbox direction reg 1 */
791#define CAN_TRS1 0xFFC02A08 /* Transmit Request Set reg 1 */
792#define CAN_TRR1 0xFFC02A0C /* Transmit Request Reset reg 1 */
793#define CAN_TA1 0xFFC02A10 /* Transmit Acknowledge reg 1 */
794#define CAN_AA1 0xFFC02A14 /* Transmit Abort Acknowledge reg 1 */
795#define CAN_RMP1 0xFFC02A18 /* Receive Message Pending reg 1 */
796#define CAN_RML1 0xFFC02A1C /* Receive Message Lost reg 1 */
797#define CAN_MBTIF1 0xFFC02A20 /* Mailbox Transmit Interrupt Flag reg 1 */
798#define CAN_MBRIF1 0xFFC02A24 /* Mailbox Receive Interrupt Flag reg 1 */
799#define CAN_MBIM1 0xFFC02A28 /* Mailbox Interrupt Mask reg 1 */
800#define CAN_RFH1 0xFFC02A2C /* Remote Frame Handling reg 1 */
801#define CAN_OPSS1 0xFFC02A30 /* Overwrite Protection Single Shot Xmission reg 1 */
802
803/* For Mailboxes 16-31 */
804#define CAN_MC2 0xFFC02A40 /* Mailbox config reg 2 */
805#define CAN_MD2 0xFFC02A44 /* Mailbox direction reg 2 */
806#define CAN_TRS2 0xFFC02A48 /* Transmit Request Set reg 2 */
807#define CAN_TRR2 0xFFC02A4C /* Transmit Request Reset reg 2 */
808#define CAN_TA2 0xFFC02A50 /* Transmit Acknowledge reg 2 */
809#define CAN_AA2 0xFFC02A54 /* Transmit Abort Acknowledge reg 2 */
810#define CAN_RMP2 0xFFC02A58 /* Receive Message Pending reg 2 */
811#define CAN_RML2 0xFFC02A5C /* Receive Message Lost reg 2 */
812#define CAN_MBTIF2 0xFFC02A60 /* Mailbox Transmit Interrupt Flag reg 2 */
813#define CAN_MBRIF2 0xFFC02A64 /* Mailbox Receive Interrupt Flag reg 2 */
814#define CAN_MBIM2 0xFFC02A68 /* Mailbox Interrupt Mask reg 2 */
815#define CAN_RFH2 0xFFC02A6C /* Remote Frame Handling reg 2 */
816#define CAN_OPSS2 0xFFC02A70 /* Overwrite Protection Single Shot Xmission reg 2 */
817
818#define CAN_CLOCK 0xFFC02A80 /* Bit Timing Configuration register 0 */
819#define CAN_TIMING 0xFFC02A84 /* Bit Timing Configuration register 1 */
820
821#define CAN_DEBUG 0xFFC02A88 /* Debug Register */
822/* the following is for backwards compatibility */
823#define CAN_CNF CAN_DEBUG
824
825#define CAN_STATUS 0xFFC02A8C /* Global Status Register */
826#define CAN_CEC 0xFFC02A90 /* Error Counter Register */
827#define CAN_GIS 0xFFC02A94 /* Global Interrupt Status Register */
828#define CAN_GIM 0xFFC02A98 /* Global Interrupt Mask Register */
829#define CAN_GIF 0xFFC02A9C /* Global Interrupt Flag Register */
830#define CAN_CONTROL 0xFFC02AA0 /* Master Control Register */
831#define CAN_INTR 0xFFC02AA4 /* Interrupt Pending Register */
832#define CAN_MBTD 0xFFC02AAC /* Mailbox Temporary Disable Feature */
833#define CAN_EWR 0xFFC02AB0 /* Programmable Warning Level */
834#define CAN_ESR 0xFFC02AB4 /* Error Status Register */
835#define CAN_UCCNT 0xFFC02AC4 /* Universal Counter */
836#define CAN_UCRC 0xFFC02AC8 /* Universal Counter Reload/Capture Register */
837#define CAN_UCCNF 0xFFC02ACC /* Universal Counter Configuration Register */
838
839/* Mailbox Acceptance Masks */
840#define CAN_AM00L 0xFFC02B00 /* Mailbox 0 Low Acceptance Mask */
841#define CAN_AM00H 0xFFC02B04 /* Mailbox 0 High Acceptance Mask */
842#define CAN_AM01L 0xFFC02B08 /* Mailbox 1 Low Acceptance Mask */
843#define CAN_AM01H 0xFFC02B0C /* Mailbox 1 High Acceptance Mask */
844#define CAN_AM02L 0xFFC02B10 /* Mailbox 2 Low Acceptance Mask */
845#define CAN_AM02H 0xFFC02B14 /* Mailbox 2 High Acceptance Mask */
846#define CAN_AM03L 0xFFC02B18 /* Mailbox 3 Low Acceptance Mask */
847#define CAN_AM03H 0xFFC02B1C /* Mailbox 3 High Acceptance Mask */
848#define CAN_AM04L 0xFFC02B20 /* Mailbox 4 Low Acceptance Mask */
849#define CAN_AM04H 0xFFC02B24 /* Mailbox 4 High Acceptance Mask */
850#define CAN_AM05L 0xFFC02B28 /* Mailbox 5 Low Acceptance Mask */
851#define CAN_AM05H 0xFFC02B2C /* Mailbox 5 High Acceptance Mask */
852#define CAN_AM06L 0xFFC02B30 /* Mailbox 6 Low Acceptance Mask */
853#define CAN_AM06H 0xFFC02B34 /* Mailbox 6 High Acceptance Mask */
854#define CAN_AM07L 0xFFC02B38 /* Mailbox 7 Low Acceptance Mask */
855#define CAN_AM07H 0xFFC02B3C /* Mailbox 7 High Acceptance Mask */
856#define CAN_AM08L 0xFFC02B40 /* Mailbox 8 Low Acceptance Mask */
857#define CAN_AM08H 0xFFC02B44 /* Mailbox 8 High Acceptance Mask */
858#define CAN_AM09L 0xFFC02B48 /* Mailbox 9 Low Acceptance Mask */
859#define CAN_AM09H 0xFFC02B4C /* Mailbox 9 High Acceptance Mask */
860#define CAN_AM10L 0xFFC02B50 /* Mailbox 10 Low Acceptance Mask */
861#define CAN_AM10H 0xFFC02B54 /* Mailbox 10 High Acceptance Mask */
862#define CAN_AM11L 0xFFC02B58 /* Mailbox 11 Low Acceptance Mask */
863#define CAN_AM11H 0xFFC02B5C /* Mailbox 11 High Acceptance Mask */
864#define CAN_AM12L 0xFFC02B60 /* Mailbox 12 Low Acceptance Mask */
865#define CAN_AM12H 0xFFC02B64 /* Mailbox 12 High Acceptance Mask */
866#define CAN_AM13L 0xFFC02B68 /* Mailbox 13 Low Acceptance Mask */
867#define CAN_AM13H 0xFFC02B6C /* Mailbox 13 High Acceptance Mask */
868#define CAN_AM14L 0xFFC02B70 /* Mailbox 14 Low Acceptance Mask */
869#define CAN_AM14H 0xFFC02B74 /* Mailbox 14 High Acceptance Mask */
870#define CAN_AM15L 0xFFC02B78 /* Mailbox 15 Low Acceptance Mask */
871#define CAN_AM15H 0xFFC02B7C /* Mailbox 15 High Acceptance Mask */
872
873#define CAN_AM16L 0xFFC02B80 /* Mailbox 16 Low Acceptance Mask */
874#define CAN_AM16H 0xFFC02B84 /* Mailbox 16 High Acceptance Mask */
875#define CAN_AM17L 0xFFC02B88 /* Mailbox 17 Low Acceptance Mask */
876#define CAN_AM17H 0xFFC02B8C /* Mailbox 17 High Acceptance Mask */
877#define CAN_AM18L 0xFFC02B90 /* Mailbox 18 Low Acceptance Mask */
878#define CAN_AM18H 0xFFC02B94 /* Mailbox 18 High Acceptance Mask */
879#define CAN_AM19L 0xFFC02B98 /* Mailbox 19 Low Acceptance Mask */
880#define CAN_AM19H 0xFFC02B9C /* Mailbox 19 High Acceptance Mask */
881#define CAN_AM20L 0xFFC02BA0 /* Mailbox 20 Low Acceptance Mask */
882#define CAN_AM20H 0xFFC02BA4 /* Mailbox 20 High Acceptance Mask */
883#define CAN_AM21L 0xFFC02BA8 /* Mailbox 21 Low Acceptance Mask */
884#define CAN_AM21H 0xFFC02BAC /* Mailbox 21 High Acceptance Mask */
885#define CAN_AM22L 0xFFC02BB0 /* Mailbox 22 Low Acceptance Mask */
886#define CAN_AM22H 0xFFC02BB4 /* Mailbox 22 High Acceptance Mask */
887#define CAN_AM23L 0xFFC02BB8 /* Mailbox 23 Low Acceptance Mask */
888#define CAN_AM23H 0xFFC02BBC /* Mailbox 23 High Acceptance Mask */
889#define CAN_AM24L 0xFFC02BC0 /* Mailbox 24 Low Acceptance Mask */
890#define CAN_AM24H 0xFFC02BC4 /* Mailbox 24 High Acceptance Mask */
891#define CAN_AM25L 0xFFC02BC8 /* Mailbox 25 Low Acceptance Mask */
892#define CAN_AM25H 0xFFC02BCC /* Mailbox 25 High Acceptance Mask */
893#define CAN_AM26L 0xFFC02BD0 /* Mailbox 26 Low Acceptance Mask */
894#define CAN_AM26H 0xFFC02BD4 /* Mailbox 26 High Acceptance Mask */
895#define CAN_AM27L 0xFFC02BD8 /* Mailbox 27 Low Acceptance Mask */
896#define CAN_AM27H 0xFFC02BDC /* Mailbox 27 High Acceptance Mask */
897#define CAN_AM28L 0xFFC02BE0 /* Mailbox 28 Low Acceptance Mask */
898#define CAN_AM28H 0xFFC02BE4 /* Mailbox 28 High Acceptance Mask */
899#define CAN_AM29L 0xFFC02BE8 /* Mailbox 29 Low Acceptance Mask */
900#define CAN_AM29H 0xFFC02BEC /* Mailbox 29 High Acceptance Mask */
901#define CAN_AM30L 0xFFC02BF0 /* Mailbox 30 Low Acceptance Mask */
902#define CAN_AM30H 0xFFC02BF4 /* Mailbox 30 High Acceptance Mask */
903#define CAN_AM31L 0xFFC02BF8 /* Mailbox 31 Low Acceptance Mask */
904#define CAN_AM31H 0xFFC02BFC /* Mailbox 31 High Acceptance Mask */
905
906/* CAN Acceptance Mask Macros */
907#define CAN_AM_L(x) (CAN_AM00L+((x)*0x8))
908#define CAN_AM_H(x) (CAN_AM00H+((x)*0x8))
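/* Usage sketch (editor's illustration, not part of the original header): the
 * CAN_AM_L()/CAN_AM_H() macros step through the mask pairs in 0x8 strides, so
 * a driver can program any mailbox's acceptance mask without naming the
 * CAN_AMxxL/CAN_AMxxH registers individually, e.g. with a volatile MMIO cast:
 *
 *   *(volatile unsigned short *)CAN_AM_L(5) = 0x07FF;  // low mask word
 *   *(volatile unsigned short *)CAN_AM_H(5) = 0x0000;  // high mask word
 */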
909
910/* Mailbox Registers */
911#define CAN_MB00_DATA0 0xFFC02C00 /* Mailbox 0 Data Word 0 [15:0] Register */
912#define CAN_MB00_DATA1 0xFFC02C04 /* Mailbox 0 Data Word 1 [31:16] Register */
913#define CAN_MB00_DATA2 0xFFC02C08 /* Mailbox 0 Data Word 2 [47:32] Register */
914#define CAN_MB00_DATA3 0xFFC02C0C /* Mailbox 0 Data Word 3 [63:48] Register */
915#define CAN_MB00_LENGTH 0xFFC02C10 /* Mailbox 0 Data Length Code Register */
916#define CAN_MB00_TIMESTAMP 0xFFC02C14 /* Mailbox 0 Time Stamp Value Register */
917#define CAN_MB00_ID0 0xFFC02C18 /* Mailbox 0 Identifier Low Register */
918#define CAN_MB00_ID1 0xFFC02C1C /* Mailbox 0 Identifier High Register */
919
920#define CAN_MB01_DATA0 0xFFC02C20 /* Mailbox 1 Data Word 0 [15:0] Register */
921#define CAN_MB01_DATA1 0xFFC02C24 /* Mailbox 1 Data Word 1 [31:16] Register */
922#define CAN_MB01_DATA2 0xFFC02C28 /* Mailbox 1 Data Word 2 [47:32] Register */
923#define CAN_MB01_DATA3 0xFFC02C2C /* Mailbox 1 Data Word 3 [63:48] Register */
924#define CAN_MB01_LENGTH 0xFFC02C30 /* Mailbox 1 Data Length Code Register */
925#define CAN_MB01_TIMESTAMP 0xFFC02C34 /* Mailbox 1 Time Stamp Value Register */
926#define CAN_MB01_ID0 0xFFC02C38 /* Mailbox 1 Identifier Low Register */
927#define CAN_MB01_ID1 0xFFC02C3C /* Mailbox 1 Identifier High Register */
928
929#define CAN_MB02_DATA0 0xFFC02C40 /* Mailbox 2 Data Word 0 [15:0] Register */
930#define CAN_MB02_DATA1 0xFFC02C44 /* Mailbox 2 Data Word 1 [31:16] Register */
931#define CAN_MB02_DATA2 0xFFC02C48 /* Mailbox 2 Data Word 2 [47:32] Register */
932#define CAN_MB02_DATA3 0xFFC02C4C /* Mailbox 2 Data Word 3 [63:48] Register */
933#define CAN_MB02_LENGTH 0xFFC02C50 /* Mailbox 2 Data Length Code Register */
934#define CAN_MB02_TIMESTAMP 0xFFC02C54 /* Mailbox 2 Time Stamp Value Register */
935#define CAN_MB02_ID0 0xFFC02C58 /* Mailbox 2 Identifier Low Register */
936#define CAN_MB02_ID1 0xFFC02C5C /* Mailbox 2 Identifier High Register */
937
938#define CAN_MB03_DATA0 0xFFC02C60 /* Mailbox 3 Data Word 0 [15:0] Register */
939#define CAN_MB03_DATA1 0xFFC02C64 /* Mailbox 3 Data Word 1 [31:16] Register */
940#define CAN_MB03_DATA2 0xFFC02C68 /* Mailbox 3 Data Word 2 [47:32] Register */
941#define CAN_MB03_DATA3 0xFFC02C6C /* Mailbox 3 Data Word 3 [63:48] Register */
942#define CAN_MB03_LENGTH 0xFFC02C70 /* Mailbox 3 Data Length Code Register */
943#define CAN_MB03_TIMESTAMP 0xFFC02C74 /* Mailbox 3 Time Stamp Value Register */
944#define CAN_MB03_ID0 0xFFC02C78 /* Mailbox 3 Identifier Low Register */
945#define CAN_MB03_ID1 0xFFC02C7C /* Mailbox 3 Identifier High Register */
946
947#define CAN_MB04_DATA0 0xFFC02C80 /* Mailbox 4 Data Word 0 [15:0] Register */
948#define CAN_MB04_DATA1 0xFFC02C84 /* Mailbox 4 Data Word 1 [31:16] Register */
949#define CAN_MB04_DATA2 0xFFC02C88 /* Mailbox 4 Data Word 2 [47:32] Register */
950#define CAN_MB04_DATA3 0xFFC02C8C /* Mailbox 4 Data Word 3 [63:48] Register */
951#define CAN_MB04_LENGTH 0xFFC02C90 /* Mailbox 4 Data Length Code Register */
952#define CAN_MB04_TIMESTAMP 0xFFC02C94 /* Mailbox 4 Time Stamp Value Register */
953#define CAN_MB04_ID0 0xFFC02C98 /* Mailbox 4 Identifier Low Register */
954#define CAN_MB04_ID1 0xFFC02C9C /* Mailbox 4 Identifier High Register */
955
956#define CAN_MB05_DATA0 0xFFC02CA0 /* Mailbox 5 Data Word 0 [15:0] Register */
957#define CAN_MB05_DATA1 0xFFC02CA4 /* Mailbox 5 Data Word 1 [31:16] Register */
958#define CAN_MB05_DATA2 0xFFC02CA8 /* Mailbox 5 Data Word 2 [47:32] Register */
959#define CAN_MB05_DATA3 0xFFC02CAC /* Mailbox 5 Data Word 3 [63:48] Register */
960#define CAN_MB05_LENGTH 0xFFC02CB0 /* Mailbox 5 Data Length Code Register */
961#define CAN_MB05_TIMESTAMP 0xFFC02CB4 /* Mailbox 5 Time Stamp Value Register */
962#define CAN_MB05_ID0 0xFFC02CB8 /* Mailbox 5 Identifier Low Register */
963#define CAN_MB05_ID1 0xFFC02CBC /* Mailbox 5 Identifier High Register */
964
965#define CAN_MB06_DATA0 0xFFC02CC0 /* Mailbox 6 Data Word 0 [15:0] Register */
966#define CAN_MB06_DATA1 0xFFC02CC4 /* Mailbox 6 Data Word 1 [31:16] Register */
967#define CAN_MB06_DATA2 0xFFC02CC8 /* Mailbox 6 Data Word 2 [47:32] Register */
968#define CAN_MB06_DATA3 0xFFC02CCC /* Mailbox 6 Data Word 3 [63:48] Register */
969#define CAN_MB06_LENGTH 0xFFC02CD0 /* Mailbox 6 Data Length Code Register */
970#define CAN_MB06_TIMESTAMP 0xFFC02CD4 /* Mailbox 6 Time Stamp Value Register */
971#define CAN_MB06_ID0 0xFFC02CD8 /* Mailbox 6 Identifier Low Register */
972#define CAN_MB06_ID1 0xFFC02CDC /* Mailbox 6 Identifier High Register */
973
974#define CAN_MB07_DATA0 0xFFC02CE0 /* Mailbox 7 Data Word 0 [15:0] Register */
975#define CAN_MB07_DATA1 0xFFC02CE4 /* Mailbox 7 Data Word 1 [31:16] Register */
976#define CAN_MB07_DATA2 0xFFC02CE8 /* Mailbox 7 Data Word 2 [47:32] Register */
977#define CAN_MB07_DATA3 0xFFC02CEC /* Mailbox 7 Data Word 3 [63:48] Register */
978#define CAN_MB07_LENGTH 0xFFC02CF0 /* Mailbox 7 Data Length Code Register */
979#define CAN_MB07_TIMESTAMP 0xFFC02CF4 /* Mailbox 7 Time Stamp Value Register */
980#define CAN_MB07_ID0 0xFFC02CF8 /* Mailbox 7 Identifier Low Register */
981#define CAN_MB07_ID1 0xFFC02CFC /* Mailbox 7 Identifier High Register */
982
983#define CAN_MB08_DATA0 0xFFC02D00 /* Mailbox 8 Data Word 0 [15:0] Register */
984#define CAN_MB08_DATA1 0xFFC02D04 /* Mailbox 8 Data Word 1 [31:16] Register */
985#define CAN_MB08_DATA2 0xFFC02D08 /* Mailbox 8 Data Word 2 [47:32] Register */
986#define CAN_MB08_DATA3 0xFFC02D0C /* Mailbox 8 Data Word 3 [63:48] Register */
987#define CAN_MB08_LENGTH 0xFFC02D10 /* Mailbox 8 Data Length Code Register */
988#define CAN_MB08_TIMESTAMP 0xFFC02D14 /* Mailbox 8 Time Stamp Value Register */
989#define CAN_MB08_ID0 0xFFC02D18 /* Mailbox 8 Identifier Low Register */
990#define CAN_MB08_ID1 0xFFC02D1C /* Mailbox 8 Identifier High Register */
991
992#define CAN_MB09_DATA0 0xFFC02D20 /* Mailbox 9 Data Word 0 [15:0] Register */
993#define CAN_MB09_DATA1 0xFFC02D24 /* Mailbox 9 Data Word 1 [31:16] Register */
994#define CAN_MB09_DATA2 0xFFC02D28 /* Mailbox 9 Data Word 2 [47:32] Register */
995#define CAN_MB09_DATA3 0xFFC02D2C /* Mailbox 9 Data Word 3 [63:48] Register */
996#define CAN_MB09_LENGTH 0xFFC02D30 /* Mailbox 9 Data Length Code Register */
997#define CAN_MB09_TIMESTAMP 0xFFC02D34 /* Mailbox 9 Time Stamp Value Register */
998#define CAN_MB09_ID0 0xFFC02D38 /* Mailbox 9 Identifier Low Register */
999#define CAN_MB09_ID1 0xFFC02D3C /* Mailbox 9 Identifier High Register */
1000
1001#define CAN_MB10_DATA0 0xFFC02D40 /* Mailbox 10 Data Word 0 [15:0] Register */
1002#define CAN_MB10_DATA1 0xFFC02D44 /* Mailbox 10 Data Word 1 [31:16] Register */
1003#define CAN_MB10_DATA2 0xFFC02D48 /* Mailbox 10 Data Word 2 [47:32] Register */
1004#define CAN_MB10_DATA3 0xFFC02D4C /* Mailbox 10 Data Word 3 [63:48] Register */
1005#define CAN_MB10_LENGTH 0xFFC02D50 /* Mailbox 10 Data Length Code Register */
1006#define CAN_MB10_TIMESTAMP 0xFFC02D54 /* Mailbox 10 Time Stamp Value Register */
1007#define CAN_MB10_ID0 0xFFC02D58 /* Mailbox 10 Identifier Low Register */
1008#define CAN_MB10_ID1 0xFFC02D5C /* Mailbox 10 Identifier High Register */
1009
1010#define CAN_MB11_DATA0 0xFFC02D60 /* Mailbox 11 Data Word 0 [15:0] Register */
1011#define CAN_MB11_DATA1 0xFFC02D64 /* Mailbox 11 Data Word 1 [31:16] Register */
1012#define CAN_MB11_DATA2 0xFFC02D68 /* Mailbox 11 Data Word 2 [47:32] Register */
1013#define CAN_MB11_DATA3 0xFFC02D6C /* Mailbox 11 Data Word 3 [63:48] Register */
1014#define CAN_MB11_LENGTH 0xFFC02D70 /* Mailbox 11 Data Length Code Register */
1015#define CAN_MB11_TIMESTAMP 0xFFC02D74 /* Mailbox 11 Time Stamp Value Register */
1016#define CAN_MB11_ID0 0xFFC02D78 /* Mailbox 11 Identifier Low Register */
1017#define CAN_MB11_ID1 0xFFC02D7C /* Mailbox 11 Identifier High Register */
1018
1019#define CAN_MB12_DATA0 0xFFC02D80 /* Mailbox 12 Data Word 0 [15:0] Register */
1020#define CAN_MB12_DATA1 0xFFC02D84 /* Mailbox 12 Data Word 1 [31:16] Register */
1021#define CAN_MB12_DATA2 0xFFC02D88 /* Mailbox 12 Data Word 2 [47:32] Register */
1022#define CAN_MB12_DATA3 0xFFC02D8C /* Mailbox 12 Data Word 3 [63:48] Register */
1023#define CAN_MB12_LENGTH 0xFFC02D90 /* Mailbox 12 Data Length Code Register */
1024#define CAN_MB12_TIMESTAMP 0xFFC02D94 /* Mailbox 12 Time Stamp Value Register */
1025#define CAN_MB12_ID0 0xFFC02D98 /* Mailbox 12 Identifier Low Register */
1026#define CAN_MB12_ID1 0xFFC02D9C /* Mailbox 12 Identifier High Register */
1027
1028#define CAN_MB13_DATA0 0xFFC02DA0 /* Mailbox 13 Data Word 0 [15:0] Register */
1029#define CAN_MB13_DATA1 0xFFC02DA4 /* Mailbox 13 Data Word 1 [31:16] Register */
1030#define CAN_MB13_DATA2 0xFFC02DA8 /* Mailbox 13 Data Word 2 [47:32] Register */
1031#define CAN_MB13_DATA3 0xFFC02DAC /* Mailbox 13 Data Word 3 [63:48] Register */
1032#define CAN_MB13_LENGTH 0xFFC02DB0 /* Mailbox 13 Data Length Code Register */
1033#define CAN_MB13_TIMESTAMP 0xFFC02DB4 /* Mailbox 13 Time Stamp Value Register */
1034#define CAN_MB13_ID0 0xFFC02DB8 /* Mailbox 13 Identifier Low Register */
1035#define CAN_MB13_ID1 0xFFC02DBC /* Mailbox 13 Identifier High Register */
1036
1037#define CAN_MB14_DATA0 0xFFC02DC0 /* Mailbox 14 Data Word 0 [15:0] Register */
1038#define CAN_MB14_DATA1 0xFFC02DC4 /* Mailbox 14 Data Word 1 [31:16] Register */
1039#define CAN_MB14_DATA2 0xFFC02DC8 /* Mailbox 14 Data Word 2 [47:32] Register */
1040#define CAN_MB14_DATA3 0xFFC02DCC /* Mailbox 14 Data Word 3 [63:48] Register */
1041#define CAN_MB14_LENGTH 0xFFC02DD0 /* Mailbox 14 Data Length Code Register */
1042#define CAN_MB14_TIMESTAMP 0xFFC02DD4 /* Mailbox 14 Time Stamp Value Register */
1043#define CAN_MB14_ID0 0xFFC02DD8 /* Mailbox 14 Identifier Low Register */
1044#define CAN_MB14_ID1 0xFFC02DDC /* Mailbox 14 Identifier High Register */
1045
1046#define CAN_MB15_DATA0 0xFFC02DE0 /* Mailbox 15 Data Word 0 [15:0] Register */
1047#define CAN_MB15_DATA1 0xFFC02DE4 /* Mailbox 15 Data Word 1 [31:16] Register */
1048#define CAN_MB15_DATA2 0xFFC02DE8 /* Mailbox 15 Data Word 2 [47:32] Register */
1049#define CAN_MB15_DATA3 0xFFC02DEC /* Mailbox 15 Data Word 3 [63:48] Register */
1050#define CAN_MB15_LENGTH 0xFFC02DF0 /* Mailbox 15 Data Length Code Register */
1051#define CAN_MB15_TIMESTAMP 0xFFC02DF4 /* Mailbox 15 Time Stamp Value Register */
1052#define CAN_MB15_ID0 0xFFC02DF8 /* Mailbox 15 Identifier Low Register */
1053#define CAN_MB15_ID1 0xFFC02DFC /* Mailbox 15 Identifier High Register */
1054
1055#define CAN_MB16_DATA0 0xFFC02E00 /* Mailbox 16 Data Word 0 [15:0] Register */
1056#define CAN_MB16_DATA1 0xFFC02E04 /* Mailbox 16 Data Word 1 [31:16] Register */
1057#define CAN_MB16_DATA2 0xFFC02E08 /* Mailbox 16 Data Word 2 [47:32] Register */
1058#define CAN_MB16_DATA3 0xFFC02E0C /* Mailbox 16 Data Word 3 [63:48] Register */
1059#define CAN_MB16_LENGTH 0xFFC02E10 /* Mailbox 16 Data Length Code Register */
1060#define CAN_MB16_TIMESTAMP 0xFFC02E14 /* Mailbox 16 Time Stamp Value Register */
1061#define CAN_MB16_ID0 0xFFC02E18 /* Mailbox 16 Identifier Low Register */
1062#define CAN_MB16_ID1 0xFFC02E1C /* Mailbox 16 Identifier High Register */
1063
1064#define CAN_MB17_DATA0 0xFFC02E20 /* Mailbox 17 Data Word 0 [15:0] Register */
1065#define CAN_MB17_DATA1 0xFFC02E24 /* Mailbox 17 Data Word 1 [31:16] Register */
1066#define CAN_MB17_DATA2 0xFFC02E28 /* Mailbox 17 Data Word 2 [47:32] Register */
1067#define CAN_MB17_DATA3 0xFFC02E2C /* Mailbox 17 Data Word 3 [63:48] Register */
1068#define CAN_MB17_LENGTH 0xFFC02E30 /* Mailbox 17 Data Length Code Register */
1069#define CAN_MB17_TIMESTAMP 0xFFC02E34 /* Mailbox 17 Time Stamp Value Register */
1070#define CAN_MB17_ID0 0xFFC02E38 /* Mailbox 17 Identifier Low Register */
1071#define CAN_MB17_ID1 0xFFC02E3C /* Mailbox 17 Identifier High Register */
1072
1073#define CAN_MB18_DATA0 0xFFC02E40 /* Mailbox 18 Data Word 0 [15:0] Register */
1074#define CAN_MB18_DATA1 0xFFC02E44 /* Mailbox 18 Data Word 1 [31:16] Register */
1075#define CAN_MB18_DATA2 0xFFC02E48 /* Mailbox 18 Data Word 2 [47:32] Register */
1076#define CAN_MB18_DATA3 0xFFC02E4C /* Mailbox 18 Data Word 3 [63:48] Register */
1077#define CAN_MB18_LENGTH 0xFFC02E50 /* Mailbox 18 Data Length Code Register */
1078#define CAN_MB18_TIMESTAMP 0xFFC02E54 /* Mailbox 18 Time Stamp Value Register */
1079#define CAN_MB18_ID0 0xFFC02E58 /* Mailbox 18 Identifier Low Register */
1080#define CAN_MB18_ID1 0xFFC02E5C /* Mailbox 18 Identifier High Register */
1081
1082#define CAN_MB19_DATA0 0xFFC02E60 /* Mailbox 19 Data Word 0 [15:0] Register */
1083#define CAN_MB19_DATA1 0xFFC02E64 /* Mailbox 19 Data Word 1 [31:16] Register */
1084#define CAN_MB19_DATA2 0xFFC02E68 /* Mailbox 19 Data Word 2 [47:32] Register */
1085#define CAN_MB19_DATA3 0xFFC02E6C /* Mailbox 19 Data Word 3 [63:48] Register */
1086#define CAN_MB19_LENGTH 0xFFC02E70 /* Mailbox 19 Data Length Code Register */
1087#define CAN_MB19_TIMESTAMP 0xFFC02E74 /* Mailbox 19 Time Stamp Value Register */
1088#define CAN_MB19_ID0 0xFFC02E78 /* Mailbox 19 Identifier Low Register */
1089#define CAN_MB19_ID1 0xFFC02E7C /* Mailbox 19 Identifier High Register */
1090
1091#define CAN_MB20_DATA0 0xFFC02E80 /* Mailbox 20 Data Word 0 [15:0] Register */
1092#define CAN_MB20_DATA1 0xFFC02E84 /* Mailbox 20 Data Word 1 [31:16] Register */
1093#define CAN_MB20_DATA2 0xFFC02E88 /* Mailbox 20 Data Word 2 [47:32] Register */
1094#define CAN_MB20_DATA3 0xFFC02E8C /* Mailbox 20 Data Word 3 [63:48] Register */
1095#define CAN_MB20_LENGTH 0xFFC02E90 /* Mailbox 20 Data Length Code Register */
1096#define CAN_MB20_TIMESTAMP 0xFFC02E94 /* Mailbox 20 Time Stamp Value Register */
1097#define CAN_MB20_ID0 0xFFC02E98 /* Mailbox 20 Identifier Low Register */
1098#define CAN_MB20_ID1 0xFFC02E9C /* Mailbox 20 Identifier High Register */
1099
1100#define CAN_MB21_DATA0 0xFFC02EA0 /* Mailbox 21 Data Word 0 [15:0] Register */
1101#define CAN_MB21_DATA1 0xFFC02EA4 /* Mailbox 21 Data Word 1 [31:16] Register */
1102#define CAN_MB21_DATA2 0xFFC02EA8 /* Mailbox 21 Data Word 2 [47:32] Register */
1103#define CAN_MB21_DATA3 0xFFC02EAC /* Mailbox 21 Data Word 3 [63:48] Register */
1104#define CAN_MB21_LENGTH 0xFFC02EB0 /* Mailbox 21 Data Length Code Register */
1105#define CAN_MB21_TIMESTAMP 0xFFC02EB4 /* Mailbox 21 Time Stamp Value Register */
1106#define CAN_MB21_ID0 0xFFC02EB8 /* Mailbox 21 Identifier Low Register */
1107#define CAN_MB21_ID1 0xFFC02EBC /* Mailbox 21 Identifier High Register */
1108
1109#define CAN_MB22_DATA0 0xFFC02EC0 /* Mailbox 22 Data Word 0 [15:0] Register */
1110#define CAN_MB22_DATA1 0xFFC02EC4 /* Mailbox 22 Data Word 1 [31:16] Register */
1111#define CAN_MB22_DATA2 0xFFC02EC8 /* Mailbox 22 Data Word 2 [47:32] Register */
1112#define CAN_MB22_DATA3 0xFFC02ECC /* Mailbox 22 Data Word 3 [63:48] Register */
1113#define CAN_MB22_LENGTH 0xFFC02ED0 /* Mailbox 22 Data Length Code Register */
1114#define CAN_MB22_TIMESTAMP 0xFFC02ED4 /* Mailbox 22 Time Stamp Value Register */
1115#define CAN_MB22_ID0 0xFFC02ED8 /* Mailbox 22 Identifier Low Register */
1116#define CAN_MB22_ID1 0xFFC02EDC /* Mailbox 22 Identifier High Register */
1117
1118#define CAN_MB23_DATA0 0xFFC02EE0 /* Mailbox 23 Data Word 0 [15:0] Register */
1119#define CAN_MB23_DATA1 0xFFC02EE4 /* Mailbox 23 Data Word 1 [31:16] Register */
1120#define CAN_MB23_DATA2 0xFFC02EE8 /* Mailbox 23 Data Word 2 [47:32] Register */
1121#define CAN_MB23_DATA3 0xFFC02EEC /* Mailbox 23 Data Word 3 [63:48] Register */
1122#define CAN_MB23_LENGTH 0xFFC02EF0 /* Mailbox 23 Data Length Code Register */
1123#define CAN_MB23_TIMESTAMP 0xFFC02EF4 /* Mailbox 23 Time Stamp Value Register */
1124#define CAN_MB23_ID0 0xFFC02EF8 /* Mailbox 23 Identifier Low Register */
1125#define CAN_MB23_ID1 0xFFC02EFC /* Mailbox 23 Identifier High Register */
1126
1127#define CAN_MB24_DATA0 0xFFC02F00 /* Mailbox 24 Data Word 0 [15:0] Register */
1128#define CAN_MB24_DATA1 0xFFC02F04 /* Mailbox 24 Data Word 1 [31:16] Register */
1129#define CAN_MB24_DATA2 0xFFC02F08 /* Mailbox 24 Data Word 2 [47:32] Register */
1130#define CAN_MB24_DATA3 0xFFC02F0C /* Mailbox 24 Data Word 3 [63:48] Register */
1131#define CAN_MB24_LENGTH 0xFFC02F10 /* Mailbox 24 Data Length Code Register */
1132#define CAN_MB24_TIMESTAMP 0xFFC02F14 /* Mailbox 24 Time Stamp Value Register */
1133#define CAN_MB24_ID0 0xFFC02F18 /* Mailbox 24 Identifier Low Register */
1134#define CAN_MB24_ID1 0xFFC02F1C /* Mailbox 24 Identifier High Register */
1135
1136#define CAN_MB25_DATA0 0xFFC02F20 /* Mailbox 25 Data Word 0 [15:0] Register */
1137#define CAN_MB25_DATA1 0xFFC02F24 /* Mailbox 25 Data Word 1 [31:16] Register */
1138#define CAN_MB25_DATA2 0xFFC02F28 /* Mailbox 25 Data Word 2 [47:32] Register */
1139#define CAN_MB25_DATA3 0xFFC02F2C /* Mailbox 25 Data Word 3 [63:48] Register */
1140#define CAN_MB25_LENGTH 0xFFC02F30 /* Mailbox 25 Data Length Code Register */
1141#define CAN_MB25_TIMESTAMP 0xFFC02F34 /* Mailbox 25 Time Stamp Value Register */
1142#define CAN_MB25_ID0 0xFFC02F38 /* Mailbox 25 Identifier Low Register */
1143#define CAN_MB25_ID1 0xFFC02F3C /* Mailbox 25 Identifier High Register */
1144
1145#define CAN_MB26_DATA0 0xFFC02F40 /* Mailbox 26 Data Word 0 [15:0] Register */
1146#define CAN_MB26_DATA1 0xFFC02F44 /* Mailbox 26 Data Word 1 [31:16] Register */
1147#define CAN_MB26_DATA2 0xFFC02F48 /* Mailbox 26 Data Word 2 [47:32] Register */
1148#define CAN_MB26_DATA3 0xFFC02F4C /* Mailbox 26 Data Word 3 [63:48] Register */
1149#define CAN_MB26_LENGTH 0xFFC02F50 /* Mailbox 26 Data Length Code Register */
1150#define CAN_MB26_TIMESTAMP 0xFFC02F54 /* Mailbox 26 Time Stamp Value Register */
1151#define CAN_MB26_ID0 0xFFC02F58 /* Mailbox 26 Identifier Low Register */
1152#define CAN_MB26_ID1 0xFFC02F5C /* Mailbox 26 Identifier High Register */
1153
1154#define CAN_MB27_DATA0 0xFFC02F60 /* Mailbox 27 Data Word 0 [15:0] Register */
1155#define CAN_MB27_DATA1 0xFFC02F64 /* Mailbox 27 Data Word 1 [31:16] Register */
1156#define CAN_MB27_DATA2 0xFFC02F68 /* Mailbox 27 Data Word 2 [47:32] Register */
1157#define CAN_MB27_DATA3 0xFFC02F6C /* Mailbox 27 Data Word 3 [63:48] Register */
1158#define CAN_MB27_LENGTH 0xFFC02F70 /* Mailbox 27 Data Length Code Register */
1159#define CAN_MB27_TIMESTAMP 0xFFC02F74 /* Mailbox 27 Time Stamp Value Register */
1160#define CAN_MB27_ID0 0xFFC02F78 /* Mailbox 27 Identifier Low Register */
1161#define CAN_MB27_ID1 0xFFC02F7C /* Mailbox 27 Identifier High Register */
1162
1163#define CAN_MB28_DATA0 0xFFC02F80 /* Mailbox 28 Data Word 0 [15:0] Register */
1164#define CAN_MB28_DATA1 0xFFC02F84 /* Mailbox 28 Data Word 1 [31:16] Register */
1165#define CAN_MB28_DATA2 0xFFC02F88 /* Mailbox 28 Data Word 2 [47:32] Register */
1166#define CAN_MB28_DATA3 0xFFC02F8C /* Mailbox 28 Data Word 3 [63:48] Register */
1167#define CAN_MB28_LENGTH 0xFFC02F90 /* Mailbox 28 Data Length Code Register */
1168#define CAN_MB28_TIMESTAMP 0xFFC02F94 /* Mailbox 28 Time Stamp Value Register */
1169#define CAN_MB28_ID0 0xFFC02F98 /* Mailbox 28 Identifier Low Register */
1170#define CAN_MB28_ID1 0xFFC02F9C /* Mailbox 28 Identifier High Register */
1171
1172#define CAN_MB29_DATA0 0xFFC02FA0 /* Mailbox 29 Data Word 0 [15:0] Register */
1173#define CAN_MB29_DATA1 0xFFC02FA4 /* Mailbox 29 Data Word 1 [31:16] Register */
1174#define CAN_MB29_DATA2 0xFFC02FA8 /* Mailbox 29 Data Word 2 [47:32] Register */
1175#define CAN_MB29_DATA3 0xFFC02FAC /* Mailbox 29 Data Word 3 [63:48] Register */
1176#define CAN_MB29_LENGTH 0xFFC02FB0 /* Mailbox 29 Data Length Code Register */
1177#define CAN_MB29_TIMESTAMP 0xFFC02FB4 /* Mailbox 29 Time Stamp Value Register */
1178#define CAN_MB29_ID0 0xFFC02FB8 /* Mailbox 29 Identifier Low Register */
1179#define CAN_MB29_ID1 0xFFC02FBC /* Mailbox 29 Identifier High Register */
1180
1181#define CAN_MB30_DATA0 0xFFC02FC0 /* Mailbox 30 Data Word 0 [15:0] Register */
1182#define CAN_MB30_DATA1 0xFFC02FC4 /* Mailbox 30 Data Word 1 [31:16] Register */
1183#define CAN_MB30_DATA2 0xFFC02FC8 /* Mailbox 30 Data Word 2 [47:32] Register */
1184#define CAN_MB30_DATA3 0xFFC02FCC /* Mailbox 30 Data Word 3 [63:48] Register */
1185#define CAN_MB30_LENGTH 0xFFC02FD0 /* Mailbox 30 Data Length Code Register */
1186#define CAN_MB30_TIMESTAMP 0xFFC02FD4 /* Mailbox 30 Time Stamp Value Register */
1187#define CAN_MB30_ID0 0xFFC02FD8 /* Mailbox 30 Identifier Low Register */
1188#define CAN_MB30_ID1 0xFFC02FDC /* Mailbox 30 Identifier High Register */
1189
1190#define CAN_MB31_DATA0 0xFFC02FE0 /* Mailbox 31 Data Word 0 [15:0] Register */
1191#define CAN_MB31_DATA1 0xFFC02FE4 /* Mailbox 31 Data Word 1 [31:16] Register */
1192#define CAN_MB31_DATA2 0xFFC02FE8 /* Mailbox 31 Data Word 2 [47:32] Register */
1193#define CAN_MB31_DATA3 0xFFC02FEC /* Mailbox 31 Data Word 3 [63:48] Register */
1194#define CAN_MB31_LENGTH 0xFFC02FF0 /* Mailbox 31 Data Length Code Register */
1195#define CAN_MB31_TIMESTAMP 0xFFC02FF4 /* Mailbox 31 Time Stamp Value Register */
1196#define CAN_MB31_ID0 0xFFC02FF8 /* Mailbox 31 Identifier Low Register */
1197#define CAN_MB31_ID1 0xFFC02FFC /* Mailbox 31 Identifier High Register */
1198
1199/* CAN Mailbox Area Macros */
1200#define CAN_MB_ID1(x) (CAN_MB00_ID1+((x)*0x20))
1201#define CAN_MB_ID0(x) (CAN_MB00_ID0+((x)*0x20))
1202#define CAN_MB_TIMESTAMP(x) (CAN_MB00_TIMESTAMP+((x)*0x20))
1203#define CAN_MB_LENGTH(x) (CAN_MB00_LENGTH+((x)*0x20))
1204#define CAN_MB_DATA3(x) (CAN_MB00_DATA3+((x)*0x20))
1205#define CAN_MB_DATA2(x) (CAN_MB00_DATA2+((x)*0x20))
1206#define CAN_MB_DATA1(x) (CAN_MB00_DATA1+((x)*0x20))
1207#define CAN_MB_DATA0(x) (CAN_MB00_DATA0+((x)*0x20))
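/* Usage sketch (editor's illustration): the CAN_MB_*() macros index the
 * 0x20-byte mailbox blocks generically. Assuming the DLC occupies the low
 * four bits of the LENGTH register, reading mailbox n's length and timestamp
 * might look like:
 *
 *   unsigned short dlc = *(volatile unsigned short *)CAN_MB_LENGTH(n) & 0xF;
 *   unsigned short ts  = *(volatile unsigned short *)CAN_MB_TIMESTAMP(n);
 */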
1208
1209
1210/******************************************************************************** */
1211/* System MMR Register Bits and Macros */
1212/******************************************************************************** */
1213
1214/* SWRST Mask */
1215#define SYSTEM_RESET 0x0007 /* Initiates A System Software Reset */
1216#define DOUBLE_FAULT 0x0008 /* Core Double Fault Causes Reset */
1217#define RESET_DOUBLE 0x2000 /* SW Reset Generated By Core Double-Fault */
1218#define RESET_WDOG 0x4000 /* SW Reset Generated By Watchdog Timer */
1219#define RESET_SOFTWARE 0x8000 /* SW Reset Occurred Since Last Read Of SWRST */
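/* Usage sketch (editor's illustration): SYSTEM_RESET is written to the SWRST
 * register (assumed to be defined earlier in this header) to request a
 * software reset, and the RESET_* bits report the cause on a later read:
 *
 *   *(volatile unsigned short *)SWRST = SYSTEM_RESET;
 *   if (*(volatile unsigned short *)SWRST & RESET_WDOG)
 *           ; // last reset was caused by the watchdog timer
 */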
1220
1221/* SYSCR Masks */
1222#define BMODE 0x0006 /* Boot Mode - Latched During HW Reset From Mode Pins */
1223#define NOBOOT 0x0010 /* Execute From L1 or ASYNC Bank 0 When BMODE = 0 */
1224
1225
1226/* ************* SYSTEM INTERRUPT CONTROLLER MASKS ***************** */
1227
1228/* Peripheral Masks For SIC0_ISR, SIC0_IWR, SIC0_IMASK */
1229#define PLL_WAKEUP_IRQ 0x00000001 /* PLL Wakeup Interrupt Request */
1230#define DMAC0_ERR_IRQ 0x00000002 /* DMA Controller 0 Error Interrupt Request */
1231#define PPI_ERR_IRQ 0x00000004 /* PPI Error Interrupt Request */
1232#define SPORT0_ERR_IRQ 0x00000008 /* SPORT0 Error Interrupt Request */
1233#define SPORT1_ERR_IRQ 0x00000010 /* SPORT1 Error Interrupt Request */
1234#define SPI0_ERR_IRQ 0x00000020 /* SPI0 Error Interrupt Request */
1235#define UART0_ERR_IRQ 0x00000040 /* UART0 Error Interrupt Request */
1236#define RTC_IRQ 0x00000080 /* Real-Time Clock Interrupt Request */
1237#define DMA0_IRQ 0x00000100 /* DMA Channel 0 (PPI) Interrupt Request */
1238#define DMA1_IRQ 0x00000200 /* DMA Channel 1 (SPORT0 RX) Interrupt Request */
1239#define DMA2_IRQ 0x00000400 /* DMA Channel 2 (SPORT0 TX) Interrupt Request */
1240#define DMA3_IRQ 0x00000800 /* DMA Channel 3 (SPORT1 RX) Interrupt Request */
1241#define DMA4_IRQ 0x00001000 /* DMA Channel 4 (SPORT1 TX) Interrupt Request */
1242#define DMA5_IRQ 0x00002000 /* DMA Channel 5 (SPI0) Interrupt Request */
1243#define DMA6_IRQ 0x00004000 /* DMA Channel 6 (UART0 RX) Interrupt Request */
1244#define DMA7_IRQ 0x00008000 /* DMA Channel 7 (UART0 TX) Interrupt Request */
1245#define TIMER0_IRQ 0x00010000 /* Timer 0 Interrupt Request */
1246#define TIMER1_IRQ 0x00020000 /* Timer 1 Interrupt Request */
1247#define TIMER2_IRQ 0x00040000 /* Timer 2 Interrupt Request */
1248#define PFA_IRQ 0x00080000 /* Programmable Flag Interrupt Request A */
1249#define PFB_IRQ 0x00100000 /* Programmable Flag Interrupt Request B */
1250#define MDMA0_0_IRQ 0x00200000 /* MemDMA0 Stream 0 Interrupt Request */
1251#define MDMA0_1_IRQ 0x00400000 /* MemDMA0 Stream 1 Interrupt Request */
1252#define WDOG_IRQ 0x00800000 /* Software Watchdog Timer Interrupt Request */
1253#define DMAC1_ERR_IRQ 0x01000000 /* DMA Controller 1 Error Interrupt Request */
1254#define SPORT2_ERR_IRQ 0x02000000 /* SPORT2 Error Interrupt Request */
1255#define SPORT3_ERR_IRQ 0x04000000 /* SPORT3 Error Interrupt Request */
1256#define MXVR_SD_IRQ 0x08000000 /* MXVR Synchronous Data Interrupt Request */
1257#define SPI1_ERR_IRQ 0x10000000 /* SPI1 Error Interrupt Request */
1258#define SPI2_ERR_IRQ 0x20000000 /* SPI2 Error Interrupt Request */
1259#define UART1_ERR_IRQ 0x40000000 /* UART1 Error Interrupt Request */
1260#define UART2_ERR_IRQ 0x80000000 /* UART2 Error Interrupt Request */
1261
1262/* the following are for backwards compatibility */
1263#define DMA0_ERR_IRQ DMAC0_ERR_IRQ
1264#define DMA1_ERR_IRQ DMAC1_ERR_IRQ
1265
1266
1267/* Peripheral Masks For SIC_ISR1, SIC_IWR1, SIC_IMASK1 */
1268#define CAN_ERR_IRQ 0x00000001 /* CAN Error Interrupt Request */
1269#define DMA8_IRQ 0x00000002 /* DMA Channel 8 (SPORT2 RX) Interrupt Request */
1270#define DMA9_IRQ 0x00000004 /* DMA Channel 9 (SPORT2 TX) Interrupt Request */
1271#define DMA10_IRQ 0x00000008 /* DMA Channel 10 (SPORT3 RX) Interrupt Request */
1272#define DMA11_IRQ 0x00000010 /* DMA Channel 11 (SPORT3 TX) Interrupt Request */
1273#define DMA12_IRQ 0x00000020 /* DMA Channel 12 Interrupt Request */
1274#define DMA13_IRQ 0x00000040 /* DMA Channel 13 Interrupt Request */
1275#define DMA14_IRQ 0x00000080 /* DMA Channel 14 (SPI1) Interrupt Request */
1276#define DMA15_IRQ 0x00000100 /* DMA Channel 15 (SPI2) Interrupt Request */
1277#define DMA16_IRQ 0x00000200 /* DMA Channel 16 (UART1 RX) Interrupt Request */
1278#define DMA17_IRQ 0x00000400 /* DMA Channel 17 (UART1 TX) Interrupt Request */
1279#define DMA18_IRQ 0x00000800 /* DMA Channel 18 (UART2 RX) Interrupt Request */
1280#define DMA19_IRQ 0x00001000 /* DMA Channel 19 (UART2 TX) Interrupt Request */
1281#define TWI0_IRQ 0x00002000 /* TWI0 Interrupt Request */
1282#define TWI1_IRQ 0x00004000 /* TWI1 Interrupt Request */
1283#define CAN_RX_IRQ 0x00008000 /* CAN Receive Interrupt Request */
1284#define CAN_TX_IRQ 0x00010000 /* CAN Transmit Interrupt Request */
1285#define MDMA1_0_IRQ 0x00020000 /* MemDMA1 Stream 0 Interrupt Request */
1286#define MDMA1_1_IRQ 0x00040000 /* MemDMA1 Stream 1 Interrupt Request */
1287#define MXVR_STAT_IRQ 0x00080000 /* MXVR Status Interrupt Request */
1288#define MXVR_CM_IRQ 0x00100000 /* MXVR Control Message Interrupt Request */
1289#define MXVR_AP_IRQ 0x00200000 /* MXVR Asynchronous Packet Interrupt */
1290
1291/* the following are for backwards compatibility */
1292#define MDMA0_IRQ MDMA1_0_IRQ
1293#define MDMA1_IRQ MDMA1_1_IRQ
1294
1295#ifdef _MISRA_RULES
1296#define _MF15 0xFu
1297#define _MF7 7u
1298#else
1299#define _MF15 0xF
1300#define _MF7 7
1301#endif /* _MISRA_RULES */
1302
1303/* SIC_IMASKx Masks */
1304#define SIC_UNMASK_ALL 0x00000000 /* Unmask all peripheral interrupts */
1305#define SIC_MASK_ALL 0xFFFFFFFF /* Mask all peripheral interrupts */
1306#ifdef _MISRA_RULES
1307#define SIC_MASK(x) (1 << ((x)&0x1Fu)) /* Mask Peripheral #x interrupt */
1308#define SIC_UNMASK(x) (0xFFFFFFFFu ^ (1 << ((x)&0x1Fu))) /* Unmask Peripheral #x interrupt */
1309#else
1310#define SIC_MASK(x) (1 << ((x)&0x1F)) /* Mask Peripheral #x interrupt */
1311#define SIC_UNMASK(x) (0xFFFFFFFF ^ (1 << ((x)&0x1F))) /* Unmask Peripheral #x interrupt */
1312#endif /* _MISRA_RULES */
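/* Usage sketch (editor's illustration): per the comments above, a set bit in
 * SIC0_IMASK masks that peripheral, so with RTC_IRQ at bit position 7 and
 * SIC0_IMASK assumed to be defined earlier in this header:
 *
 *   *(volatile unsigned long *)SIC0_IMASK |= SIC_MASK(7);   // mask RTC IRQ
 *   *(volatile unsigned long *)SIC0_IMASK &= SIC_UNMASK(7); // unmask RTC IRQ
 */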
1313
1314/* SIC_IWRx Masks */
1315#define IWR_DISABLE_ALL 0x00000000 /* Wakeup Disable all peripherals */
1316#define IWR_ENABLE_ALL 0xFFFFFFFF /* Wakeup Enable all peripherals */
1317#ifdef _MISRA_RULES
1318#define IWR_ENABLE(x) (1 << ((x)&0x1Fu)) /* Wakeup Enable Peripheral #x */
1319#define IWR_DISABLE(x) (0xFFFFFFFFu ^ (1 << ((x)&0x1Fu))) /* Wakeup Disable Peripheral #x */
1320#else
1321#define IWR_ENABLE(x) (1 << ((x)&0x1F)) /* Wakeup Enable Peripheral #x */
1322#define IWR_DISABLE(x) (0xFFFFFFFF ^ (1 << ((x)&0x1F))) /* Wakeup Disable Peripheral #x */
1323#endif /* _MISRA_RULES */
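/* Usage sketch (editor's illustration): the IWR macros compose wakeup masks
 * the same way, e.g. allowing only the PLL wakeup event (bit position 0) to
 * wake the core, with SIC0_IWR assumed to be defined earlier:
 *
 *   *(volatile unsigned long *)SIC0_IWR = IWR_DISABLE_ALL | IWR_ENABLE(0);
 */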
1324
1325/* ********* PARALLEL PERIPHERAL INTERFACE (PPI) MASKS **************** */
1326/* PPI_CONTROL Masks */
1327#define PORT_EN 0x0001 /* PPI Port Enable */
1328#define PORT_DIR 0x0002 /* PPI Port Direction */
1329#define XFR_TYPE 0x000C /* PPI Transfer Type */
1330#define PORT_CFG 0x0030 /* PPI Port Configuration */
1331#define FLD_SEL 0x0040 /* PPI Active Field Select */
1332#define PACK_EN 0x0080 /* PPI Packing Mode */
1333/* previous versions of defBF539.h erroneously included DMA32 (PPI 32-bit DMA Enable) */
1334#define SKIP_EN 0x0200 /* PPI Skip Element Enable */
1335#define SKIP_EO 0x0400 /* PPI Skip Even/Odd Elements */
1336#define DLENGTH 0x3800 /* PPI Data Length */
1337#define DLEN_8 0x0 /* PPI Data Length mask for DLEN=8 */
1338#define DLEN_10 0x0800 /* Data Length = 10 Bits */
1339#define DLEN_11 0x1000 /* Data Length = 11 Bits */
1340#define DLEN_12 0x1800 /* Data Length = 12 Bits */
1341#define DLEN_13 0x2000 /* Data Length = 13 Bits */
1342#define DLEN_14 0x2800 /* Data Length = 14 Bits */
1343#define DLEN_15 0x3000 /* Data Length = 15 Bits */
1344#define DLEN_16 0x3800 /* Data Length = 16 Bits */
1345#ifdef _MISRA_RULES
1346#define DLEN(x) ((((x)-9u) & 0x07u) << 11) /* PPI Data Length (only works for x=10-->x=16) */
1347#else
1348#define DLEN(x) ((((x)-9) & 0x07) << 11) /* PPI Data Length (only works for x=10-->x=16) */
1349#endif /* _MISRA_RULES */
1350#define POL 0xC000 /* PPI Signal Polarities */
1351#define POLC 0x4000 /* PPI Clock Polarity */
1352#define POLS 0x8000 /* PPI Frame Sync Polarity */
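/* Usage sketch (editor's illustration): a PPI_CONTROL value is the OR of the
 * masks above; DLEN(x) encodes lengths of 10 to 16 bits, so DLEN(12) equals
 * DLEN_12. With PPI_CONTROL assumed to be defined earlier in this header:
 *
 *   *(volatile unsigned short *)PPI_CONTROL = PORT_EN | PACK_EN | DLEN(12);
 */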
1353
1354
1355/* PPI_STATUS Masks */
1356#define FLD 0x0400 /* Field Indicator */
1357#define FT_ERR 0x0800 /* Frame Track Error */
1358#define OVR 0x1000 /* FIFO Overflow Error */
1359#define UNDR 0x2000 /* FIFO Underrun Error */
1360#define ERR_DET 0x4000 /* Error Detected Indicator */
1361#define ERR_NCOR 0x8000 /* Error Not Corrected Indicator */
1362
1363
1364/* ********** DMA CONTROLLER MASKS ********************** */
1365
1366/* DMAx_PERIPHERAL_MAP, MDMA_yy_PERIPHERAL_MAP Masks */
1367
1368#define CTYPE 0x0040 /* DMA Channel Type Indicator */
1369#define CTYPE_P 0x6 /* DMA Channel Type Indicator BIT POSITION */
1370#define PCAP8 0x0080 /* DMA 8-bit Operation Indicator */
1371#define PCAP16 0x0100 /* DMA 16-bit Operation Indicator */
1372#define PCAP32 0x0200 /* DMA 32-bit Operation Indicator */
1373#define PCAPWR 0x0400 /* DMA Write Operation Indicator */
1374#define PCAPRD 0x0800 /* DMA Read Operation Indicator */
1375#define PMAP 0xF000 /* DMA Peripheral Map Field */
1376
1377/* PMAP Encodings For DMA Controller 0 */
1378#define PMAP_PPI 0x0000 /* PMAP PPI Port DMA */
1379#define PMAP_SPORT0RX 0x1000 /* PMAP SPORT0 Receive DMA */
1380#define PMAP_SPORT0TX 0x2000 /* PMAP SPORT0 Transmit DMA */
1381#define PMAP_SPORT1RX 0x3000 /* PMAP SPORT1 Receive DMA */
1382#define PMAP_SPORT1TX 0x4000 /* PMAP SPORT1 Transmit DMA */
1383#define PMAP_SPI0 0x5000 /* PMAP SPI0 DMA */
1384#define PMAP_UART0RX 0x6000 /* PMAP UART0 Receive DMA */
1385#define PMAP_UART0TX 0x7000 /* PMAP UART0 Transmit DMA */
1386
1387/* PMAP Encodings For DMA Controller 1 */
1388#define PMAP_SPORT2RX 0x0000 /* PMAP SPORT2 Receive DMA */
1389#define PMAP_SPORT2TX 0x1000 /* PMAP SPORT2 Transmit DMA */
1390#define PMAP_SPORT3RX 0x2000 /* PMAP SPORT3 Receive DMA */
1391#define PMAP_SPORT3TX 0x3000 /* PMAP SPORT3 Transmit DMA */
1392#define PMAP_SPI1 0x6000 /* PMAP SPI1 DMA */
1393#define PMAP_SPI2 0x7000 /* PMAP SPI2 DMA */
1394#define PMAP_UART1RX 0x8000 /* PMAP UART1 Receive DMA */
1395#define PMAP_UART1TX 0x9000 /* PMAP UART1 Transmit DMA */
1396#define PMAP_UART2RX 0xA000 /* PMAP UART2 Receive DMA */
1397#define PMAP_UART2TX 0xB000 /* PMAP UART2 Transmit DMA */
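/* Usage sketch (editor's illustration): a driver can confirm a channel's
 * mapping by masking its PERIPHERAL_MAP register with PMAP, e.g. for DMA
 * channel 6 (UART0 RX), with DMA6_PERIPHERAL_MAP assumed defined earlier:
 *
 *   if ((*(volatile unsigned short *)DMA6_PERIPHERAL_MAP & PMAP) == PMAP_UART0RX)
 *           ; // channel 6 is routed to UART0 receive
 */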
1398
1399
1400/* ************* GENERAL PURPOSE TIMER MASKS ******************** */
1401/* PWM Timer bit definitions */
1402/* TIMER_ENABLE Register */
1403#define TIMEN0 0x0001 /* Enable Timer 0 */
1404#define TIMEN1 0x0002 /* Enable Timer 1 */
1405#define TIMEN2 0x0004 /* Enable Timer 2 */
1406
1407#define TIMEN0_P 0x00
1408#define TIMEN1_P 0x01
1409#define TIMEN2_P 0x02
1410
1411/* TIMER_DISABLE Register */
1412#define TIMDIS0 0x0001 /* Disable Timer 0 */
1413#define TIMDIS1 0x0002 /* Disable Timer 1 */
1414#define TIMDIS2 0x0004 /* Disable Timer 2 */
1415
1416#define TIMDIS0_P 0x00
1417#define TIMDIS1_P 0x01
1418#define TIMDIS2_P 0x02
1419
1420/* TIMER_STATUS Register */
1421#define TIMIL0 0x0001 /* Timer 0 Interrupt */
1422#define TIMIL1 0x0002 /* Timer 1 Interrupt */
1423#define TIMIL2 0x0004 /* Timer 2 Interrupt */
1424#define TOVF_ERR0 0x0010 /* Timer 0 Counter Overflow */
1425#define TOVF_ERR1 0x0020 /* Timer 1 Counter Overflow */
1426#define TOVF_ERR2 0x0040 /* Timer 2 Counter Overflow */
1427#define TRUN0 0x1000 /* Timer 0 Slave Enable Status */
1428#define TRUN1 0x2000 /* Timer 1 Slave Enable Status */
1429#define TRUN2 0x4000 /* Timer 2 Slave Enable Status */
1430
1431#define TIMIL0_P 0x00
1432#define TIMIL1_P 0x01
1433#define TIMIL2_P 0x02
1434#define TOVF_ERR0_P 0x04
1435#define TOVF_ERR1_P 0x05
1436#define TOVF_ERR2_P 0x06
1437#define TRUN0_P 0x0C
1438#define TRUN1_P 0x0D
1439#define TRUN2_P 0x0E
1440
1441/* Alternate Deprecated Macros Provided For Backwards Code Compatibility */
1442#define TOVL_ERR0 TOVF_ERR0
1443#define TOVL_ERR1 TOVF_ERR1
1444#define TOVL_ERR2 TOVF_ERR2
1445#define TOVL_ERR0_P TOVF_ERR0_P
1446#define TOVL_ERR1_P TOVF_ERR1_P
1447#define TOVL_ERR2_P TOVF_ERR2_P
1448
1449/* TIMERx_CONFIG Registers */
1450#define PWM_OUT 0x0001
1451#define WDTH_CAP 0x0002
1452#define EXT_CLK 0x0003
1453#define PULSE_HI 0x0004
1454#define PERIOD_CNT 0x0008
1455#define IRQ_ENA 0x0010
1456#define TIN_SEL 0x0020
1457#define OUT_DIS 0x0040
1458#define CLK_SEL 0x0080
1459#define TOGGLE_HI 0x0100
1460#define EMU_RUN 0x0200
1461#ifdef _MISRA_RULES
1462#define ERR_TYP(x) (((x) & 0x03u) << 14)
1463#else
1464#define ERR_TYP(x) (((x) & 0x03) << 14)
1465#endif /* _MISRA_RULES */
1466
1467#define TMODE_P0 0x00
1468#define TMODE_P1 0x01
1469#define PULSE_HI_P 0x02
1470#define PERIOD_CNT_P 0x03
1471#define IRQ_ENA_P 0x04
1472#define TIN_SEL_P 0x05
1473#define OUT_DIS_P 0x06
1474#define CLK_SEL_P 0x07
1475#define TOGGLE_HI_P 0x08
1476#define EMU_RUN_P 0x09
1477#define ERR_TYP_P0 0x0E
1478#define ERR_TYP_P1 0x0F
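/* Usage sketch (editor's illustration): a PWM timer is configured by OR-ing
 * TIMERx_CONFIG masks and then enabled through TIMER_ENABLE; TIMER0_CONFIG
 * and TIMER_ENABLE are assumed to be defined earlier in this header:
 *
 *   *(volatile unsigned short *)TIMER0_CONFIG = PWM_OUT | PERIOD_CNT | IRQ_ENA;
 *   *(volatile unsigned short *)TIMER_ENABLE = TIMEN0;
 */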
1479
1480/* ********************* ASYNCHRONOUS MEMORY CONTROLLER MASKS ************* */
1481/* EBIU_AMGCTL Masks */
1482#define AMCKEN 0x0001 /* Enable CLKOUT */
1483#define AMBEN_NONE 0x0000 /* All Banks Disabled */
1484#define AMBEN_B0 0x0002 /* Enable Asynchronous Memory Bank 0 only */
1485#define AMBEN_B0_B1 0x0004 /* Enable Asynchronous Memory Banks 0 & 1 only */
1486#define AMBEN_B0_B1_B2 0x0006 /* Enable Asynchronous Memory Banks 0, 1, and 2 */
1487#define AMBEN_ALL 0x0008 /* Enable Asynchronous Memory Banks (all) 0, 1, 2, and 3 */
1488#define CDPRIO 0x0100 /* DMA has priority over core for external accesses */
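/* Usage sketch (editor's illustration): enabling CLKOUT plus all four async
 * banks, with EBIU_AMGCTL assumed to be defined earlier in this header:
 *
 *   *(volatile unsigned short *)EBIU_AMGCTL = AMBEN_ALL | AMCKEN;
 */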
1489
1490/* EBIU_AMGCTL Bit Positions */
1491#define AMCKEN_P 0x0000 /* Enable CLKOUT */
1492#define AMBEN_P0 0x0001 /* Asynchronous Memory Enable, 000 - banks 0-3 disabled, 001 - Bank 0 enabled */
1493#define AMBEN_P1 0x0002 /* Asynchronous Memory Enable, 010 - banks 0&1 enabled, 011 - banks 0, 1, and 2 enabled */
1494#define AMBEN_P2 0x0003 /* Asynchronous Memory Enable, 1xx - All banks (bank 0, 1, 2, and 3) enabled */
1495
1496/* EBIU_AMBCTL0 Masks */
1497#define B0RDYEN 0x00000001 /* Bank 0 RDY Enable, 0=disable, 1=enable */
1498#define B0RDYPOL 0x00000002 /* Bank 0 RDY Active high, 0=active low, 1=active high */
1499#define B0TT_1 0x00000004 /* Bank 0 Transition Time from Read to Write = 1 cycle */
1500#define B0TT_2 0x00000008 /* Bank 0 Transition Time from Read to Write = 2 cycles */
1501#define B0TT_3 0x0000000C /* Bank 0 Transition Time from Read to Write = 3 cycles */
1502#define B0TT_4 0x00000000 /* Bank 0 Transition Time from Read to Write = 4 cycles */
1503#define B0ST_1 0x00000010 /* Bank 0 Setup Time from AOE asserted to Read/Write asserted=1 cycle */
1504#define B0ST_2 0x00000020 /* Bank 0 Setup Time from AOE asserted to Read/Write asserted=2 cycles */
1505#define B0ST_3 0x00000030 /* Bank 0 Setup Time from AOE asserted to Read/Write asserted=3 cycles */
1506#define B0ST_4 0x00000000 /* Bank 0 Setup Time from AOE asserted to Read/Write asserted=4 cycles */
1507#define B0HT_1 0x00000040 /* Bank 0 Hold Time from Read/Write deasserted to AOE deasserted = 1 cycle */
1508#define B0HT_2 0x00000080 /* Bank 0 Hold Time from Read/Write deasserted to AOE deasserted = 2 cycles */
1509#define B0HT_3 0x000000C0 /* Bank 0 Hold Time from Read/Write deasserted to AOE deasserted = 3 cycles */
1510#define B0HT_0 0x00000000 /* Bank 0 Hold Time from Read/Write deasserted to AOE deasserted = 0 cycles */
1511#define B0RAT_1 0x00000100 /* Bank 0 Read Access Time = 1 cycle */
1512#define B0RAT_2 0x00000200 /* Bank 0 Read Access Time = 2 cycles */
1513#define B0RAT_3 0x00000300 /* Bank 0 Read Access Time = 3 cycles */
1514#define B0RAT_4 0x00000400 /* Bank 0 Read Access Time = 4 cycles */
1515#define B0RAT_5 0x00000500 /* Bank 0 Read Access Time = 5 cycles */
1516#define B0RAT_6 0x00000600 /* Bank 0 Read Access Time = 6 cycles */
1517#define B0RAT_7 0x00000700 /* Bank 0 Read Access Time = 7 cycles */
1518#define B0RAT_8 0x00000800 /* Bank 0 Read Access Time = 8 cycles */
1519#define B0RAT_9 0x00000900 /* Bank 0 Read Access Time = 9 cycles */
1520#define B0RAT_10 0x00000A00 /* Bank 0 Read Access Time = 10 cycles */
1521#define B0RAT_11 0x00000B00 /* Bank 0 Read Access Time = 11 cycles */
1522#define B0RAT_12 0x00000C00 /* Bank 0 Read Access Time = 12 cycles */
1523#define B0RAT_13 0x00000D00 /* Bank 0 Read Access Time = 13 cycles */
1524#define B0RAT_14 0x00000E00 /* Bank 0 Read Access Time = 14 cycles */
1525#define B0RAT_15 0x00000F00 /* Bank 0 Read Access Time = 15 cycles */
1526#define B0WAT_1 0x00001000 /* Bank 0 Write Access Time = 1 cycle */
1527#define B0WAT_2 0x00002000 /* Bank 0 Write Access Time = 2 cycles */
1528#define B0WAT_3 0x00003000 /* Bank 0 Write Access Time = 3 cycles */
1529#define B0WAT_4 0x00004000 /* Bank 0 Write Access Time = 4 cycles */
1530#define B0WAT_5 0x00005000 /* Bank 0 Write Access Time = 5 cycles */
1531#define B0WAT_6 0x00006000 /* Bank 0 Write Access Time = 6 cycles */
1532#define B0WAT_7 0x00007000 /* Bank 0 Write Access Time = 7 cycles */
1533#define B0WAT_8 0x00008000 /* Bank 0 Write Access Time = 8 cycles */
1534#define B0WAT_9 0x00009000 /* Bank 0 Write Access Time = 9 cycles */
1535#define B0WAT_10 0x0000A000 /* Bank 0 Write Access Time = 10 cycles */
1536#define B0WAT_11 0x0000B000 /* Bank 0 Write Access Time = 11 cycles */
1537#define B0WAT_12 0x0000C000 /* Bank 0 Write Access Time = 12 cycles */
1538#define B0WAT_13 0x0000D000 /* Bank 0 Write Access Time = 13 cycles */
1539#define B0WAT_14 0x0000E000 /* Bank 0 Write Access Time = 14 cycles */
1540#define B0WAT_15 0x0000F000 /* Bank 0 Write Access Time = 15 cycles */
1541#define B1RDYEN 0x00010000 /* Bank 1 RDY enable, 0=disable, 1=enable */
1542#define B1RDYPOL 0x00020000 /* Bank 1 RDY Active high, 0=active low, 1=active high */
1543#define B1TT_1 0x00040000 /* Bank 1 Transition Time from Read to Write = 1 cycle */
1544#define B1TT_2 0x00080000 /* Bank 1 Transition Time from Read to Write = 2 cycles */
1545#define B1TT_3 0x000C0000 /* Bank 1 Transition Time from Read to Write = 3 cycles */
1546#define B1TT_4 0x00000000 /* Bank 1 Transition Time from Read to Write = 4 cycles */
1547#define B1ST_1 0x00100000 /* Bank 1 Setup Time from AOE asserted to Read or Write asserted = 1 cycle */
1548#define B1ST_2 0x00200000 /* Bank 1 Setup Time from AOE asserted to Read or Write asserted = 2 cycles */
1549#define B1ST_3 0x00300000 /* Bank 1 Setup Time from AOE asserted to Read or Write asserted = 3 cycles */
1550#define B1ST_4 0x00000000 /* Bank 1 Setup Time from AOE asserted to Read or Write asserted = 4 cycles */
1551#define B1HT_1 0x00400000 /* Bank 1 Hold Time from Read or Write deasserted to AOE deasserted = 1 cycle */
1552#define B1HT_2 0x00800000 /* Bank 1 Hold Time from Read or Write deasserted to AOE deasserted = 2 cycles */
1553#define B1HT_3 0x00C00000 /* Bank 1 Hold Time from Read or Write deasserted to AOE deasserted = 3 cycles */
1554#define B1HT_0 0x00000000 /* Bank 1 Hold Time from Read or Write deasserted to AOE deasserted = 0 cycles */
1555#define B1RAT_1 0x01000000 /* Bank 1 Read Access Time = 1 cycle */
1556#define B1RAT_2 0x02000000 /* Bank 1 Read Access Time = 2 cycles */
1557#define B1RAT_3 0x03000000 /* Bank 1 Read Access Time = 3 cycles */
1558#define B1RAT_4 0x04000000 /* Bank 1 Read Access Time = 4 cycles */
1559#define B1RAT_5 0x05000000 /* Bank 1 Read Access Time = 5 cycles */
1560#define B1RAT_6 0x06000000 /* Bank 1 Read Access Time = 6 cycles */
1561#define B1RAT_7 0x07000000 /* Bank 1 Read Access Time = 7 cycles */
1562#define B1RAT_8 0x08000000 /* Bank 1 Read Access Time = 8 cycles */
1563#define B1RAT_9 0x09000000 /* Bank 1 Read Access Time = 9 cycles */
1564#define B1RAT_10 0x0A000000 /* Bank 1 Read Access Time = 10 cycles */
1565#define B1RAT_11 0x0B000000 /* Bank 1 Read Access Time = 11 cycles */
1566#define B1RAT_12 0x0C000000 /* Bank 1 Read Access Time = 12 cycles */
1567#define B1RAT_13 0x0D000000 /* Bank 1 Read Access Time = 13 cycles */
1568#define B1RAT_14 0x0E000000 /* Bank 1 Read Access Time = 14 cycles */
1569#define B1RAT_15 0x0F000000 /* Bank 1 Read Access Time = 15 cycles */
1570#define B1WAT_1 0x10000000 /* Bank 1 Write Access Time = 1 cycle */
1571#define B1WAT_2 0x20000000 /* Bank 1 Write Access Time = 2 cycles */
1572#define B1WAT_3 0x30000000 /* Bank 1 Write Access Time = 3 cycles */
1573#define B1WAT_4 0x40000000 /* Bank 1 Write Access Time = 4 cycles */
1574#define B1WAT_5 0x50000000 /* Bank 1 Write Access Time = 5 cycles */
1575#define B1WAT_6 0x60000000 /* Bank 1 Write Access Time = 6 cycles */
1576#define B1WAT_7 0x70000000 /* Bank 1 Write Access Time = 7 cycles */
1577#define B1WAT_8 0x80000000 /* Bank 1 Write Access Time = 8 cycles */
1578#define B1WAT_9 0x90000000 /* Bank 1 Write Access Time = 9 cycles */
1579#define B1WAT_10 0xA0000000 /* Bank 1 Write Access Time = 10 cycles */
1580#define B1WAT_11 0xB0000000 /* Bank 1 Write Access Time = 11 cycles */
1581#define B1WAT_12 0xC0000000 /* Bank 1 Write Access Time = 12 cycles */
1582#define B1WAT_13 0xD0000000 /* Bank 1 Write Access Time = 13 cycles */
1583#define B1WAT_14 0xE0000000 /* Bank 1 Write Access Time = 14 cycles */
1584#define B1WAT_15 0xF0000000 /* Bank 1 Write Access Time = 15 cycles */
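/* Usage sketch (editor's illustration): bank 0 and bank 1 timings share this
 * 32-bit register, so bank 0 might be programmed by OR-ing one choice from
 * each field, with EBIU_AMBCTL0 assumed to be defined earlier:
 *
 *   *(volatile unsigned long *)EBIU_AMBCTL0 =
 *           B0RDYEN | B0TT_4 | B0ST_3 | B0HT_2 | B0RAT_11 | B0WAT_7;
 */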
1585
1586/* EBIU_AMBCTL1 Masks */
1587#define B2RDYEN 0x00000001 /* Bank 2 RDY Enable, 0=disable, 1=enable */
1588#define B2RDYPOL 0x00000002 /* Bank 2 RDY Active high, 0=active low, 1=active high */
1589#define B2TT_1 0x00000004 /* Bank 2 Transition Time from Read to Write = 1 cycle */
1590#define B2TT_2 0x00000008 /* Bank 2 Transition Time from Read to Write = 2 cycles */
1591#define B2TT_3 0x0000000C /* Bank 2 Transition Time from Read to Write = 3 cycles */
1592#define B2TT_4 0x00000000 /* Bank 2 Transition Time from Read to Write = 4 cycles */
1593#define B2ST_1 0x00000010 /* Bank 2 Setup Time from AOE asserted to Read or Write asserted = 1 cycle */
1594#define B2ST_2 0x00000020 /* Bank 2 Setup Time from AOE asserted to Read or Write asserted = 2 cycles */
1595#define B2ST_3 0x00000030 /* Bank 2 Setup Time from AOE asserted to Read or Write asserted = 3 cycles */
1596#define B2ST_4 0x00000000 /* Bank 2 Setup Time from AOE asserted to Read or Write asserted = 4 cycles */
1597#define B2HT_1 0x00000040 /* Bank 2 Hold Time from Read or Write deasserted to AOE deasserted = 1 cycle */
1598#define B2HT_2 0x00000080 /* Bank 2 Hold Time from Read or Write deasserted to AOE deasserted = 2 cycles */
1599#define B2HT_3 0x000000C0 /* Bank 2 Hold Time from Read or Write deasserted to AOE deasserted = 3 cycles */
1600#define B2HT_0 0x00000000 /* Bank 2 Hold Time from Read or Write deasserted to AOE deasserted = 0 cycles */
1601#define B2RAT_1 0x00000100 /* Bank 2 Read Access Time = 1 cycle */
1602#define B2RAT_2 0x00000200 /* Bank 2 Read Access Time = 2 cycles */
1603#define B2RAT_3 0x00000300 /* Bank 2 Read Access Time = 3 cycles */
1604#define B2RAT_4 0x00000400 /* Bank 2 Read Access Time = 4 cycles */
1605#define B2RAT_5 0x00000500 /* Bank 2 Read Access Time = 5 cycles */
1606#define B2RAT_6 0x00000600 /* Bank 2 Read Access Time = 6 cycles */
1607#define B2RAT_7 0x00000700 /* Bank 2 Read Access Time = 7 cycles */
1608#define B2RAT_8 0x00000800 /* Bank 2 Read Access Time = 8 cycles */
1609#define B2RAT_9 0x00000900 /* Bank 2 Read Access Time = 9 cycles */
1610#define B2RAT_10 0x00000A00 /* Bank 2 Read Access Time = 10 cycles */
1611#define B2RAT_11 0x00000B00 /* Bank 2 Read Access Time = 11 cycles */
1612#define B2RAT_12 0x00000C00 /* Bank 2 Read Access Time = 12 cycles */
1613#define B2RAT_13 0x00000D00 /* Bank 2 Read Access Time = 13 cycles */
1614#define B2RAT_14 0x00000E00 /* Bank 2 Read Access Time = 14 cycles */
1615#define B2RAT_15 0x00000F00 /* Bank 2 Read Access Time = 15 cycles */
1616#define B2WAT_1 0x00001000 /* Bank 2 Write Access Time = 1 cycle */
1617#define B2WAT_2 0x00002000 /* Bank 2 Write Access Time = 2 cycles */
1618#define B2WAT_3 0x00003000 /* Bank 2 Write Access Time = 3 cycles */
1619#define B2WAT_4 0x00004000 /* Bank 2 Write Access Time = 4 cycles */
1620#define B2WAT_5 0x00005000 /* Bank 2 Write Access Time = 5 cycles */
1621#define B2WAT_6 0x00006000 /* Bank 2 Write Access Time = 6 cycles */
1622#define B2WAT_7 0x00007000 /* Bank 2 Write Access Time = 7 cycles */
1623#define B2WAT_8 0x00008000 /* Bank 2 Write Access Time = 8 cycles */
1624#define B2WAT_9 0x00009000 /* Bank 2 Write Access Time = 9 cycles */
1625#define B2WAT_10 0x0000A000 /* Bank 2 Write Access Time = 10 cycles */
1626#define B2WAT_11 0x0000B000 /* Bank 2 Write Access Time = 11 cycles */
1627#define B2WAT_12 0x0000C000 /* Bank 2 Write Access Time = 12 cycles */
1628#define B2WAT_13 0x0000D000 /* Bank 2 Write Access Time = 13 cycles */
1629#define B2WAT_14 0x0000E000 /* Bank 2 Write Access Time = 14 cycles */
1630#define B2WAT_15 0x0000F000 /* Bank 2 Write Access Time = 15 cycles */
1631#define B3RDYEN 0x00010000 /* Bank 3 RDY enable, 0=disable, 1=enable */
1632#define B3RDYPOL 0x00020000 /* Bank 3 RDY Active high, 0=active low, 1=active high */
1633#define B3TT_1 0x00040000 /* Bank 3 Transition Time from Read to Write = 1 cycle */
1634#define B3TT_2 0x00080000 /* Bank 3 Transition Time from Read to Write = 2 cycles */
1635#define B3TT_3 0x000C0000 /* Bank 3 Transition Time from Read to Write = 3 cycles */
1636#define B3TT_4 0x00000000 /* Bank 3 Transition Time from Read to Write = 4 cycles */
1637#define B3ST_1 0x00100000 /* Bank 3 Setup Time from AOE asserted to Read or Write asserted = 1 cycle */
1638#define B3ST_2 0x00200000 /* Bank 3 Setup Time from AOE asserted to Read or Write asserted = 2 cycles */
1639#define B3ST_3 0x00300000 /* Bank 3 Setup Time from AOE asserted to Read or Write asserted = 3 cycles */
1640#define B3ST_4 0x00000000 /* Bank 3 Setup Time from AOE asserted to Read or Write asserted = 4 cycles */
1641#define B3HT_1 0x00400000 /* Bank 3 Hold Time from Read or Write deasserted to AOE deasserted = 1 cycle */
1642#define B3HT_2 0x00800000 /* Bank 3 Hold Time from Read or Write deasserted to AOE deasserted = 2 cycles */
1643#define B3HT_3 0x00C00000 /* Bank 3 Hold Time from Read or Write deasserted to AOE deasserted = 3 cycles */
1644#define B3HT_0 0x00000000 /* Bank 3 Hold Time from Read or Write deasserted to AOE deasserted = 0 cycles */
1645#define B3RAT_1 0x01000000 /* Bank 3 Read Access Time = 1 cycle */
1646#define B3RAT_2 0x02000000 /* Bank 3 Read Access Time = 2 cycles */
1647#define B3RAT_3 0x03000000 /* Bank 3 Read Access Time = 3 cycles */
1648#define B3RAT_4 0x04000000 /* Bank 3 Read Access Time = 4 cycles */
1649#define B3RAT_5 0x05000000 /* Bank 3 Read Access Time = 5 cycles */
1650#define B3RAT_6 0x06000000 /* Bank 3 Read Access Time = 6 cycles */
1651#define B3RAT_7 0x07000000 /* Bank 3 Read Access Time = 7 cycles */
1652#define B3RAT_8 0x08000000 /* Bank 3 Read Access Time = 8 cycles */
1653#define B3RAT_9 0x09000000 /* Bank 3 Read Access Time = 9 cycles */
1654#define B3RAT_10 0x0A000000 /* Bank 3 Read Access Time = 10 cycles */
1655#define B3RAT_11 0x0B000000 /* Bank 3 Read Access Time = 11 cycles */
1656#define B3RAT_12 0x0C000000 /* Bank 3 Read Access Time = 12 cycles */
1657#define B3RAT_13 0x0D000000 /* Bank 3 Read Access Time = 13 cycles */
1658#define B3RAT_14 0x0E000000 /* Bank 3 Read Access Time = 14 cycles */
1659#define B3RAT_15 0x0F000000 /* Bank 3 Read Access Time = 15 cycles */
1660#define B3WAT_1 0x10000000 /* Bank 3 Write Access Time = 1 cycle */
1661#define B3WAT_2 0x20000000 /* Bank 3 Write Access Time = 2 cycles */
1662#define B3WAT_3 0x30000000 /* Bank 3 Write Access Time = 3 cycles */
1663#define B3WAT_4 0x40000000 /* Bank 3 Write Access Time = 4 cycles */
1664#define B3WAT_5 0x50000000 /* Bank 3 Write Access Time = 5 cycles */
1665#define B3WAT_6 0x60000000 /* Bank 3 Write Access Time = 6 cycles */
1666#define B3WAT_7 0x70000000 /* Bank 3 Write Access Time = 7 cycles */
1667#define B3WAT_8 0x80000000 /* Bank 3 Write Access Time = 8 cycles */
1668#define B3WAT_9 0x90000000 /* Bank 3 Write Access Time = 9 cycles */
1669#define B3WAT_10 0xA0000000 /* Bank 3 Write Access Time = 10 cycles */
1670#define B3WAT_11 0xB0000000 /* Bank 3 Write Access Time = 11 cycles */
1671#define B3WAT_12 0xC0000000 /* Bank 3 Write Access Time = 12 cycles */
1672#define B3WAT_13 0xD0000000 /* Bank 3 Write Access Time = 13 cycles */
1673#define B3WAT_14 0xE0000000 /* Bank 3 Write Access Time = 14 cycles */
1674#define B3WAT_15 0xF0000000 /* Bank 3 Write Access Time = 15 cycles */
1675
1676/* ********************** SDRAM CONTROLLER MASKS *************************** */
1677/* EBIU_SDGCTL Masks */
1678#define SCTLE 0x00000001 /* Enable SCLK[0], /SRAS, /SCAS, /SWE, SDQM[3:0] */
1679#define CL_2 0x00000008 /* SDRAM CAS latency = 2 cycles */
1680#define CL_3 0x0000000C /* SDRAM CAS latency = 3 cycles */
1681#define PFE 0x00000010 /* Enable SDRAM prefetch */
1682#define PFP 0x00000020 /* Prefetch has priority over AMC requests */
1683#define PASR_ALL 0x00000000 /* All 4 SDRAM Banks Refreshed In Self-Refresh */
1684#define PASR_B0_B1 0x00000010 /* SDRAM Banks 0 and 1 Are Refreshed In Self-Refresh */
1685#define PASR_B0 0x00000020 /* Only SDRAM Bank 0 Is Refreshed In Self-Refresh */
1686#define TRAS_1 0x00000040 /* SDRAM tRAS = 1 cycle */
1687#define TRAS_2 0x00000080 /* SDRAM tRAS = 2 cycles */
1688#define TRAS_3 0x000000C0 /* SDRAM tRAS = 3 cycles */
1689#define TRAS_4 0x00000100 /* SDRAM tRAS = 4 cycles */
1690#define TRAS_5 0x00000140 /* SDRAM tRAS = 5 cycles */
1691#define TRAS_6 0x00000180 /* SDRAM tRAS = 6 cycles */
1692#define TRAS_7 0x000001C0 /* SDRAM tRAS = 7 cycles */
1693#define TRAS_8 0x00000200 /* SDRAM tRAS = 8 cycles */
1694#define TRAS_9 0x00000240 /* SDRAM tRAS = 9 cycles */
1695#define TRAS_10 0x00000280 /* SDRAM tRAS = 10 cycles */
1696#define TRAS_11 0x000002C0 /* SDRAM tRAS = 11 cycles */
1697#define TRAS_12 0x00000300 /* SDRAM tRAS = 12 cycles */
1698#define TRAS_13 0x00000340 /* SDRAM tRAS = 13 cycles */
1699#define TRAS_14 0x00000380 /* SDRAM tRAS = 14 cycles */
1700#define TRAS_15 0x000003C0 /* SDRAM tRAS = 15 cycles */
1701#define TRP_1 0x00000800 /* SDRAM tRP = 1 cycle */
1702#define TRP_2 0x00001000 /* SDRAM tRP = 2 cycles */
1703#define TRP_3 0x00001800 /* SDRAM tRP = 3 cycles */
1704#define TRP_4 0x00002000 /* SDRAM tRP = 4 cycles */
1705#define TRP_5 0x00002800 /* SDRAM tRP = 5 cycles */
1706#define TRP_6 0x00003000 /* SDRAM tRP = 6 cycles */
1707#define TRP_7 0x00003800 /* SDRAM tRP = 7 cycles */
1708#define TRCD_1 0x00008000 /* SDRAM tRCD = 1 cycle */
1709#define TRCD_2 0x00010000 /* SDRAM tRCD = 2 cycles */
1710#define TRCD_3 0x00018000 /* SDRAM tRCD = 3 cycles */
1711#define TRCD_4 0x00020000 /* SDRAM tRCD = 4 cycles */
1712#define TRCD_5 0x00028000 /* SDRAM tRCD = 5 cycles */
1713#define TRCD_6 0x00030000 /* SDRAM tRCD = 6 cycles */
1714#define TRCD_7 0x00038000 /* SDRAM tRCD = 7 cycles */
1715#define TWR_1 0x00080000 /* SDRAM tWR = 1 cycle */
1716#define TWR_2 0x00100000 /* SDRAM tWR = 2 cycles */
1717#define TWR_3 0x00180000 /* SDRAM tWR = 3 cycles */
1718#define PUPSD 0x00200000 /* Power-up start delay */
1719#define PSM 0x00400000 /* SDRAM power-up sequence = Precharge, mode register set, 8 CBR refresh cycles */
1720#define PSS 0x00800000 /* Enable SDRAM power-up sequence on next SDRAM access */
1721#define SRFS 0x01000000 /* Start SDRAM self-refresh mode */
1722#define EBUFE 0x02000000 /* Enable external buffering timing */
1723#define FBBRW 0x04000000 /* Fast back-to-back read/write enable */
1724#define EMREN 0x10000000 /* Extended mode register enable */
1725#define TCSR 0x20000000 /* Temperature-compensated self-refresh value (85 deg C) */
1726#define CDDBG 0x40000000 /* Tristate SDRAM controls during bus grant */
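/* Usage sketch (editor's illustration): an SDGCTL value is likewise the OR of
 * one choice per timing field, e.g. CAS latency 3 with tRAS=6, tRP=3, tRCD=3
 * and tWR=2 cycles, with EBIU_SDGCTL assumed to be defined earlier:
 *
 *   *(volatile unsigned long *)EBIU_SDGCTL =
 *           SCTLE | CL_3 | TRAS_6 | TRP_3 | TRCD_3 | TWR_2;
 */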
1727
1728/* EBIU_SDBCTL Masks */
1729#define EBE 0x00000001 /* Enable SDRAM external bank */
1730#define EBSZ_16 0x00000000 /* SDRAM external bank size = 16MB */
1731#define EBSZ_32 0x00000002 /* SDRAM external bank size = 32MB */
1732#define EBSZ_64 0x00000004 /* SDRAM external bank size = 64MB */
1733#define EBSZ_128 0x00000006 /* SDRAM external bank size = 128MB */
1734#define EBSZ_256 0x00000008 /* SDRAM external bank size = 256MB */
1735#define EBSZ_512 0x0000000A /* SDRAM external bank size = 512MB */
1736#define EBCAW_8 0x00000000 /* SDRAM external bank column address width = 8 bits */
1737#define EBCAW_9 0x00000010 /* SDRAM external bank column address width = 9 bits */
1738#define EBCAW_10 0x00000020 /* SDRAM external bank column address width = 10 bits */
1739#define EBCAW_11 0x00000030 /* SDRAM external bank column address width = 11 bits */
1740
1741/* EBIU_SDSTAT Masks */
1742#define SDCI 0x00000001 /* SDRAM controller is idle */
1743#define SDSRA 0x00000002 /* SDRAM self-refresh is active */
1744#define SDPUA 0x00000004 /* SDRAM power up active */
1745#define SDRS 0x00000008 /* SDRAM is in reset state */
1746#define SDEASE 0x00000010 /* SDRAM EAB sticky error status - W1C */
1747#define BGSTAT 0x00000020 /* Bus granted */
1748
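The SDGCTL, SDBCTL and SDSTAT masks above are designed to be OR-ed into single register writes. Below is a minimal bring-up sketch, assuming one 64MB bank with 10-bit columns and purely illustrative timing values (real values come from the SDRAM datasheet, and a full init also programs the EBIU_SDRRC refresh rate). The EBIU_* register addresses are defined further down in this listing, and the register widths are assumed to match other BF53x parts:

#include <stdint.h>

/* Simple MMIO accessors shared by the sketches in this listing. */
#define MMIO16(a) (*(volatile uint16_t *)(uintptr_t)(a))
#define MMIO32(a) (*(volatile uint32_t *)(uintptr_t)(a))

static void sdram_init_sketch(void)
{
	/* One external bank: enabled, 64MB, 10-bit column addresses. */
	MMIO16(EBIU_SDBCTL) = EBE | EBSZ_64 | EBCAW_10;

	/* CAS latency 3, tRAS=6, tRP=3, tRCD=3, tWR=2 (illustrative only);
	 * PSS arms the power-up sequence on the next SDRAM access. */
	MMIO32(EBIU_SDGCTL) = CL_3 | TRAS_6 | TRP_3 | TRCD_3 | TWR_2 | PSS;

	/* Block until the power-up sequence completes. */
	while (MMIO16(EBIU_SDSTAT) & SDPUA)
		;
}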
1749
1750/* ******************** TWO-WIRE INTERFACE (TWIx) MASKS ***********************/
1751/* TWIx_CLKDIV Macros (Use: *pTWIx_CLKDIV = CLKLOW(x)|CLKHI(y); ) */
1752#ifdef _MISRA_RULES
1753#define CLKLOW(x) ((x) & 0xFFu) /* Periods Clock Is Held Low */
1754#define CLKHI(y) (((y)&0xFFu)<<0x8) /* Periods Before New Clock Low */
1755#else
1756#define CLKLOW(x) ((x) & 0xFF) /* Periods Clock Is Held Low */
1757#define CLKHI(y) (((y)&0xFF)<<0x8) /* Periods Before New Clock Low */
1758#endif /* _MISRA_RULES */
1759
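The usage note above spells out the intended pattern: OR CLKLOW() and CLKHI() into one TWIx_CLKDIV write. A sketch for an illustrative 100kHz SCL, given the 10MHz internal time reference set up via PRESCALE (10MHz / 100kHz = 100 reference periods per bit, split evenly here; real drivers shave off a few periods for clock stretching). MMIO16() is the helper from the SDRAM sketch earlier; TWI0_CLKDIV is defined further down in this listing:

static void twi0_set_100khz_sketch(void)
{
	/* 50 low + 50 high reference periods = 100kHz SCL from 10MHz. */
	MMIO16(TWI0_CLKDIV) = CLKLOW(50) | CLKHI(50);
}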
1760/* TWIx_PRESCALE Masks */
1761#define PRESCALE 0x007F /* SCLKs Per Internal Time Reference (10MHz) */
1762#define TWI_ENA 0x0080 /* TWI Enable */
1763#define SCCB 0x0200 /* SCCB Compatibility Enable */
1764
1765/* TWIx_SLAVE_CTRL Masks */
1766#define SEN 0x0001 /* Slave Enable */
1767#define SADD_LEN 0x0002 /* Slave Address Length */
1768#define STDVAL 0x0004 /* Slave Transmit Data Valid */
1769#define NAK 0x0008 /* NAK/ACK* Generated At Conclusion Of Transfer */
1770#define GEN 0x0010 /* General Call Address Matching Enabled */
1771
1772/* TWIx_SLAVE_STAT Masks */
1773#define SDIR 0x0001 /* Slave Transfer Direction (Transmit/Receive*) */
1774#define GCALL 0x0002 /* General Call Indicator */
1775
1776/* TWIx_MASTER_CTRL Masks */
1777#define MEN 0x0001 /* Master Mode Enable */
1778#define MADD_LEN 0x0002 /* Master Address Length */
1779#define MDIR 0x0004 /* Master Transmit Direction (RX/TX*) */
1780#define FAST 0x0008 /* Use Fast Mode Timing Specs */
1781#define STOP 0x0010 /* Issue Stop Condition */
1782#define RSTART 0x0020 /* Repeat Start or Stop* At End Of Transfer */
1783#define DCNT 0x3FC0 /* Data Bytes To Transfer */
1784#define SDAOVR 0x4000 /* Serial Data Override */
1785#define SCLOVR 0x8000 /* Serial Clock Override */
1786
1787/* TWIx_MASTER_STAT Masks */
1788#define MPROG 0x0001 /* Master Transfer In Progress */
1789#define LOSTARB 0x0002 /* Lost Arbitration Indicator (Xfer Aborted) */
1790#define ANAK 0x0004 /* Address Not Acknowledged */
1791#define DNAK 0x0008 /* Data Not Acknowledged */
1792#define BUFRDERR 0x0010 /* Buffer Read Error */
1793#define BUFWRERR 0x0020 /* Buffer Write Error */
1794#define SDASEN 0x0040 /* Serial Data Sense */
1795#define SCLSEN 0x0080 /* Serial Clock Sense */
1796#define BUSBUSY 0x0100 /* Bus Busy Indicator */
1797
1798/* TWIx_INT_SRC and TWIx_INT_ENABLE Masks */
1799#define SINIT 0x0001 /* Slave Transfer Initiated */
1800#define SCOMP 0x0002 /* Slave Transfer Complete */
1801#define SERR 0x0004 /* Slave Transfer Error */
1802#define SOVF 0x0008 /* Slave Overflow */
1803#define MCOMP 0x0010 /* Master Transfer Complete */
1804#define MERR 0x0020 /* Master Transfer Error */
1805#define XMTSERV 0x0040 /* Transmit FIFO Service */
1806#define RCVSERV 0x0080 /* Receive FIFO Service */
1807
1808/* TWIx_FIFO_CTL Masks */
1809#define XMTFLUSH 0x0001 /* Transmit Buffer Flush */
1810#define RCVFLUSH 0x0002 /* Receive Buffer Flush */
1811#define XMTINTLEN 0x0004 /* Transmit Buffer Interrupt Length */
1812#define RCVINTLEN 0x0008 /* Receive Buffer Interrupt Length */
1813
1814/* TWIx_FIFO_STAT Masks */
1815#define XMTSTAT 0x0003 /* Transmit FIFO Status */
1816#define XMT_EMPTY 0x0000 /* Transmit FIFO Empty */
1817#define XMT_HALF 0x0001 /* Transmit FIFO Has 1 Byte To Write */
1818#define XMT_FULL 0x0003 /* Transmit FIFO Full (2 Bytes To Write) */
1819
1820#define RCVSTAT 0x000C /* Receive FIFO Status */
1821#define RCV_EMPTY 0x0000 /* Receive FIFO Empty */
1822#define RCV_HALF 0x0004 /* Receive FIFO Has 1 Byte To Read */
1823#define RCV_FULL 0x000C /* Receive FIFO Full (2 Bytes To Read) */
1824
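Put together, a master write uses the masks above in the obvious sequence: preload the transmit FIFO, program the byte count into DCNT (the field starts at bit 6) and set MEN. A hedged single-byte sketch against the TWI0 instance defined further down — it assumes write-1-to-clear semantics for TWI0_INT_STAT and omits the XMTSERV/FIFO servicing that multi-byte transfers need:

static int twi0_write_one_byte_sketch(uint16_t addr7, uint8_t data)
{
	MMIO16(TWI0_MASTER_ADDR) = addr7;          /* 7-bit slave address */
	MMIO16(TWI0_XMT_DATA8) = data;             /* preload transmit FIFO */

	/* One data byte in DCNT (field starts at bit 6); MDIR clear
	 * means master transmit; MEN starts the transfer. */
	MMIO16(TWI0_MASTER_CTL) = ((1 << 6) & DCNT) | MEN;

	while (!(MMIO16(TWI0_INT_STAT) & MCOMP))   /* wait for completion */
		;
	MMIO16(TWI0_INT_STAT) = MCOMP;             /* assumed W1C */

	return (MMIO16(TWI0_MASTER_STAT) & (ANAK | DNAK)) ? -1 : 0;
}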
1825#endif
diff --git a/arch/blackfin/mach-bf538/include/mach/defBF539.h b/arch/blackfin/mach-bf538/include/mach/defBF539.h
index 7a8ac5f44204..8100bcd01a0d 100644
--- a/arch/blackfin/mach-bf538/include/mach/defBF539.h
+++ b/arch/blackfin/mach-bf538/include/mach/defBF539.h
@@ -1,859 +1,13 @@
(old)                                                          (new)
1/*                                                            1/*
2 * Copyright 2008-2009 Analog Devices Inc.                    2 * Copyright 2008-2010 Analog Devices Inc.
3 *                                                            3 *
4 * Licensed under the ADI BSD license or the GPL-2 (or later)  4 * Licensed under the ADI BSD license or the GPL-2 (or later)
5 */                                                           5 */
6                                                              6
7/* SYSTEM & MM REGISTER BIT & ADDRESS DEFINITIONS FOR ADSP-BF538/9 */
8
9#ifndef _DEF_BF539_H                                          7#ifndef _DEF_BF539_H
10#define _DEF_BF539_H                                         8#define _DEF_BF539_H
11                                                             9
12/* include all Core registers and bit definitions */         10#include "defBF538.h"
13#include <asm/def_LPBlackfin.h>
14
15
16/*********************************************************************************** */
17/* System MMR Register Map */
18/*********************************************************************************** */
19/* Clock/Regulator Control (0xFFC00000 - 0xFFC000FF) */
20#define PLL_CTL 0xFFC00000 /* PLL Control register (16-bit) */
21#define PLL_DIV 0xFFC00004 /* PLL Divide Register (16-bit) */
22#define VR_CTL 0xFFC00008 /* Voltage Regulator Control Register (16-bit) */
23#define PLL_STAT 0xFFC0000C /* PLL Status register (16-bit) */
24#define PLL_LOCKCNT 0xFFC00010 /* PLL Lock Count register (16-bit) */
25#define CHIPID 0xFFC00014 /* Chip ID Register */
26
27/* CHIPID Masks */
28#define CHIPID_VERSION 0xF0000000
29#define CHIPID_FAMILY 0x0FFFF000
30#define CHIPID_MANUFACTURE 0x00000FFE
31
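Decoding CHIPID is a mask-and-shift per field; each shift below is simply the trailing-zero count of the corresponding mask (28, 12 and 1). A small sketch using the MMIO32() helper from the SDRAM example earlier in this listing:

static void chipid_decode_sketch(void)
{
	uint32_t id = MMIO32(CHIPID);
	uint32_t version = (id & CHIPID_VERSION) >> 28;    /* silicon revision */
	uint32_t family  = (id & CHIPID_FAMILY) >> 12;     /* part family */
	uint32_t mfr     = (id & CHIPID_MANUFACTURE) >> 1; /* manufacturer ID */
	(void)version; (void)family; (void)mfr;
}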
32/* System Interrupt Controller (0xFFC00100 - 0xFFC001FF) */
33#define SWRST 0xFFC00100 /* Software Reset Register (16-bit) */
34#define SYSCR 0xFFC00104 /* System Configuration register */
35#define SIC_RVECT 0xFFC00108 /* Interrupt Reset Vector Address Register */
36#define SIC_IMASK0 0xFFC0010C /* Interrupt Mask Register 0 */
37#define SIC_IAR0 0xFFC00110 /* Interrupt Assignment Register 0 */
38#define SIC_IAR1 0xFFC00114 /* Interrupt Assignment Register 1 */
39#define SIC_IAR2 0xFFC00118 /* Interrupt Assignment Register 2 */
40#define SIC_IAR3 0xFFC0011C /* Interrupt Assignment Register 3 */
41#define SIC_ISR0 0xFFC00120 /* Interrupt Status Register 0 */
42#define SIC_IWR0 0xFFC00124 /* Interrupt Wakeup Register 0 */
43#define SIC_IMASK1 0xFFC00128 /* Interrupt Mask Register 1 */
44#define SIC_ISR1 0xFFC0012C /* Interrupt Status Register 1 */
45#define SIC_IWR1 0xFFC00130 /* Interrupt Wakeup Register 1 */
46#define SIC_IAR4 0xFFC00134 /* Interrupt Assignment Register 4 */
47#define SIC_IAR5 0xFFC00138 /* Interrupt Assignment Register 5 */
48#define SIC_IAR6 0xFFC0013C /* Interrupt Assignment Register 6 */
49
50
51/* Watchdog Timer (0xFFC00200 - 0xFFC002FF) */
52#define WDOG_CTL 0xFFC00200 /* Watchdog Control Register */
53#define WDOG_CNT 0xFFC00204 /* Watchdog Count Register */
54#define WDOG_STAT 0xFFC00208 /* Watchdog Status Register */
55
56
57/* Real Time Clock (0xFFC00300 - 0xFFC003FF) */
58#define RTC_STAT 0xFFC00300 /* RTC Status Register */
59#define RTC_ICTL 0xFFC00304 /* RTC Interrupt Control Register */
60#define RTC_ISTAT 0xFFC00308 /* RTC Interrupt Status Register */
61#define RTC_SWCNT 0xFFC0030C /* RTC Stopwatch Count Register */
62#define RTC_ALARM 0xFFC00310 /* RTC Alarm Time Register */
63#define RTC_FAST 0xFFC00314 /* RTC Prescaler Enable Register */
64#define RTC_PREN 0xFFC00314 /* RTC Prescaler Enable Register (alternate macro) */
65
66
67/* UART0 Controller (0xFFC00400 - 0xFFC004FF) */
68#define UART0_THR 0xFFC00400 /* Transmit Holding register */
69#define UART0_RBR 0xFFC00400 /* Receive Buffer register */
70#define UART0_DLL 0xFFC00400 /* Divisor Latch (Low-Byte) */
71#define UART0_IER 0xFFC00404 /* Interrupt Enable Register */
72#define UART0_DLH 0xFFC00404 /* Divisor Latch (High-Byte) */
73#define UART0_IIR 0xFFC00408 /* Interrupt Identification Register */
74#define UART0_LCR 0xFFC0040C /* Line Control Register */
75#define UART0_MCR 0xFFC00410 /* Modem Control Register */
76#define UART0_LSR 0xFFC00414 /* Line Status Register */
77#define UART0_SCR 0xFFC0041C /* SCR Scratch Register */
78#define UART0_GCTL 0xFFC00424 /* Global Control Register */
79
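As the shared addresses above show, UART0_DLL/UART0_DLH overlay UART0_THR/UART0_RBR/UART0_IER; by 16550 convention the divisor latches are only reachable while the DLAB bit is set in the line control register. A sketch, assuming DLAB is LCR bit 7 (that bit is not defined in this header):

static void uart0_set_divisor_sketch(uint16_t divisor)
{
	MMIO16(UART0_LCR) |= 0x80;                 /* assumed DLAB bit, as on a 16550 */
	MMIO16(UART0_DLL) = divisor & 0xFF;
	MMIO16(UART0_DLH) = (divisor >> 8) & 0xFF;
	MMIO16(UART0_LCR) &= ~0x80;                /* re-expose THR/RBR/IER */
}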
80
81/* SPI0 Controller (0xFFC00500 - 0xFFC005FF) */
82
83#define SPI0_CTL 0xFFC00500 /* SPI0 Control Register */
84#define SPI0_FLG 0xFFC00504 /* SPI0 Flag register */
85#define SPI0_STAT 0xFFC00508 /* SPI0 Status register */
86#define SPI0_TDBR 0xFFC0050C /* SPI0 Transmit Data Buffer Register */
87#define SPI0_RDBR 0xFFC00510 /* SPI0 Receive Data Buffer Register */
88#define SPI0_BAUD 0xFFC00514 /* SPI0 Baud rate Register */
89#define SPI0_SHADOW 0xFFC00518 /* SPI0_RDBR Shadow Register */
90#define SPI0_REGBASE SPI0_CTL
91
92
93/* TIMER 0, 1, 2 Registers (0xFFC00600 - 0xFFC006FF) */
94#define TIMER0_CONFIG 0xFFC00600 /* Timer 0 Configuration Register */
95#define TIMER0_COUNTER 0xFFC00604 /* Timer 0 Counter Register */
96#define TIMER0_PERIOD 0xFFC00608 /* Timer 0 Period Register */
97#define TIMER0_WIDTH 0xFFC0060C /* Timer 0 Width Register */
98
99#define TIMER1_CONFIG 0xFFC00610 /* Timer 1 Configuration Register */
100#define TIMER1_COUNTER 0xFFC00614 /* Timer 1 Counter Register */
101#define TIMER1_PERIOD 0xFFC00618 /* Timer 1 Period Register */
102#define TIMER1_WIDTH 0xFFC0061C /* Timer 1 Width Register */
103
104#define TIMER2_CONFIG 0xFFC00620 /* Timer 2 Configuration Register */
105#define TIMER2_COUNTER 0xFFC00624 /* Timer 2 Counter Register */
106#define TIMER2_PERIOD 0xFFC00628 /* Timer 2 Period Register */
107#define TIMER2_WIDTH 0xFFC0062C /* Timer 2 Width Register */
108
109#define TIMER_ENABLE 0xFFC00640 /* Timer Enable Register */
110#define TIMER_DISABLE 0xFFC00644 /* Timer Disable Register */
111#define TIMER_STATUS 0xFFC00648 /* Timer Status Register */
112
113
114/* Programmable Flags (0xFFC00700 - 0xFFC007FF) */
115#define FIO_FLAG_D 0xFFC00700 /* Flag Mask to directly specify state of pins */
116#define FIO_FLAG_C 0xFFC00704 /* Peripheral Interrupt Flag Register (clear) */
117#define FIO_FLAG_S 0xFFC00708 /* Peripheral Interrupt Flag Register (set) */
118#define FIO_FLAG_T 0xFFC0070C /* Flag Mask to directly toggle state of pins */
119#define FIO_MASKA_D 0xFFC00710 /* Flag Mask Interrupt A Register (set directly) */
120#define FIO_MASKA_C 0xFFC00714 /* Flag Mask Interrupt A Register (clear) */
121#define FIO_MASKA_S 0xFFC00718 /* Flag Mask Interrupt A Register (set) */
122#define FIO_MASKA_T 0xFFC0071C /* Flag Mask Interrupt A Register (toggle) */
123#define FIO_MASKB_D 0xFFC00720 /* Flag Mask Interrupt B Register (set directly) */
124#define FIO_MASKB_C 0xFFC00724 /* Flag Mask Interrupt B Register (clear) */
125#define FIO_MASKB_S 0xFFC00728 /* Flag Mask Interrupt B Register (set) */
126#define FIO_MASKB_T 0xFFC0072C /* Flag Mask Interrupt B Register (toggle) */
127#define FIO_DIR 0xFFC00730 /* Peripheral Flag Direction Register */
128#define FIO_POLAR 0xFFC00734 /* Flag Source Polarity Register */
129#define FIO_EDGE 0xFFC00738 /* Flag Source Sensitivity Register */
130#define FIO_BOTH 0xFFC0073C /* Flag Set on BOTH Edges Register */
131#define FIO_INEN 0xFFC00740 /* Flag Input Enable Register */
132
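The _S/_C/_T register variants exist so individual pins can be set, cleared or toggled with a plain write instead of a read-modify-write on FIO_FLAG_D. A small sketch pulsing one output pin (bit 5 is an arbitrary illustrative choice):

static void fio_pulse_pin5_sketch(void)
{
	const uint16_t pin = 1 << 5;        /* arbitrary illustrative pin */

	MMIO16(FIO_DIR) |= pin;             /* make it an output (RMW is fine here) */
	MMIO16(FIO_FLAG_S) = pin;           /* set: plain write, no RMW */
	MMIO16(FIO_FLAG_C) = pin;           /* clear: plain write, no RMW */
}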
133
134/* SPORT0 Controller (0xFFC00800 - 0xFFC008FF) */
135#define SPORT0_TCR1 0xFFC00800 /* SPORT0 Transmit Configuration 1 Register */
136#define SPORT0_TCR2 0xFFC00804 /* SPORT0 Transmit Configuration 2 Register */
137#define SPORT0_TCLKDIV 0xFFC00808 /* SPORT0 Transmit Clock Divider */
138#define SPORT0_TFSDIV 0xFFC0080C /* SPORT0 Transmit Frame Sync Divider */
139#define SPORT0_TX 0xFFC00810 /* SPORT0 TX Data Register */
140#define SPORT0_RX 0xFFC00818 /* SPORT0 RX Data Register */
141#define SPORT0_RCR1 0xFFC00820 /* SPORT0 Receive Configuration 1 Register */
142#define SPORT0_RCR2 0xFFC00824 /* SPORT0 Receive Configuration 2 Register */
143#define SPORT0_RCLKDIV 0xFFC00828 /* SPORT0 Receive Clock Divider */
144#define SPORT0_RFSDIV 0xFFC0082C /* SPORT0 Receive Frame Sync Divider */
145#define SPORT0_STAT 0xFFC00830 /* SPORT0 Status Register */
146#define SPORT0_CHNL 0xFFC00834 /* SPORT0 Current Channel Register */
147#define SPORT0_MCMC1 0xFFC00838 /* SPORT0 Multi-Channel Configuration Register 1 */
148#define SPORT0_MCMC2 0xFFC0083C /* SPORT0 Multi-Channel Configuration Register 2 */
149#define SPORT0_MTCS0 0xFFC00840 /* SPORT0 Multi-Channel Transmit Select Register 0 */
150#define SPORT0_MTCS1 0xFFC00844 /* SPORT0 Multi-Channel Transmit Select Register 1 */
151#define SPORT0_MTCS2 0xFFC00848 /* SPORT0 Multi-Channel Transmit Select Register 2 */
152#define SPORT0_MTCS3 0xFFC0084C /* SPORT0 Multi-Channel Transmit Select Register 3 */
153#define SPORT0_MRCS0 0xFFC00850 /* SPORT0 Multi-Channel Receive Select Register 0 */
154#define SPORT0_MRCS1 0xFFC00854 /* SPORT0 Multi-Channel Receive Select Register 1 */
155#define SPORT0_MRCS2 0xFFC00858 /* SPORT0 Multi-Channel Receive Select Register 2 */
156#define SPORT0_MRCS3 0xFFC0085C /* SPORT0 Multi-Channel Receive Select Register 3 */
157
158
159/* SPORT1 Controller (0xFFC00900 - 0xFFC009FF) */
160#define SPORT1_TCR1 0xFFC00900 /* SPORT1 Transmit Configuration 1 Register */
161#define SPORT1_TCR2 0xFFC00904 /* SPORT1 Transmit Configuration 2 Register */
162#define SPORT1_TCLKDIV 0xFFC00908 /* SPORT1 Transmit Clock Divider */
163#define SPORT1_TFSDIV 0xFFC0090C /* SPORT1 Transmit Frame Sync Divider */
164#define SPORT1_TX 0xFFC00910 /* SPORT1 TX Data Register */
165#define SPORT1_RX 0xFFC00918 /* SPORT1 RX Data Register */
166#define SPORT1_RCR1 0xFFC00920 /* SPORT1 Receive Configuration 1 Register */
167#define SPORT1_RCR2 0xFFC00924 /* SPORT1 Receive Configuration 2 Register */
168#define SPORT1_RCLKDIV 0xFFC00928 /* SPORT1 Receive Clock Divider */
169#define SPORT1_RFSDIV 0xFFC0092C /* SPORT1 Receive Frame Sync Divider */
170#define SPORT1_STAT 0xFFC00930 /* SPORT1 Status Register */
171#define SPORT1_CHNL 0xFFC00934 /* SPORT1 Current Channel Register */
172#define SPORT1_MCMC1 0xFFC00938 /* SPORT1 Multi-Channel Configuration Register 1 */
173#define SPORT1_MCMC2 0xFFC0093C /* SPORT1 Multi-Channel Configuration Register 2 */
174#define SPORT1_MTCS0 0xFFC00940 /* SPORT1 Multi-Channel Transmit Select Register 0 */
175#define SPORT1_MTCS1 0xFFC00944 /* SPORT1 Multi-Channel Transmit Select Register 1 */
176#define SPORT1_MTCS2 0xFFC00948 /* SPORT1 Multi-Channel Transmit Select Register 2 */
177#define SPORT1_MTCS3 0xFFC0094C /* SPORT1 Multi-Channel Transmit Select Register 3 */
178#define SPORT1_MRCS0 0xFFC00950 /* SPORT1 Multi-Channel Receive Select Register 0 */
179#define SPORT1_MRCS1 0xFFC00954 /* SPORT1 Multi-Channel Receive Select Register 1 */
180#define SPORT1_MRCS2 0xFFC00958 /* SPORT1 Multi-Channel Receive Select Register 2 */
181#define SPORT1_MRCS3 0xFFC0095C /* SPORT1 Multi-Channel Receive Select Register 3 */
182
183
184/* External Bus Interface Unit (0xFFC00A00 - 0xFFC00AFF) */
185/* Asynchronous Memory Controller */
186#define EBIU_AMGCTL 0xFFC00A00 /* Asynchronous Memory Global Control Register */
187#define EBIU_AMBCTL0 0xFFC00A04 /* Asynchronous Memory Bank Control Register 0 */
188#define EBIU_AMBCTL1 0xFFC00A08 /* Asynchronous Memory Bank Control Register 1 */
189
190/* SDRAM Controller */
191#define EBIU_SDGCTL 0xFFC00A10 /* SDRAM Global Control Register */
192#define EBIU_SDBCTL 0xFFC00A14 /* SDRAM Bank Control Register */
193#define EBIU_SDRRC 0xFFC00A18 /* SDRAM Refresh Rate Control Register */
194#define EBIU_SDSTAT 0xFFC00A1C /* SDRAM Status Register */
195
196
197
198/* DMA Controller 0 Traffic Control Registers (0xFFC00B00 - 0xFFC00BFF) */
199
200#define DMAC0_TC_PER 0xFFC00B0C /* DMA Controller 0 Traffic Control Periods Register */
201#define DMAC0_TC_CNT 0xFFC00B10 /* DMA Controller 0 Traffic Control Current Counts Register */
202
203/* Deprecated alternate register names (below), kept for backwards code compatibility */
204#define DMA0_TCPER DMAC0_TC_PER
205#define DMA0_TCCNT DMAC0_TC_CNT
206
207
208/* DMA Controller 0 (0xFFC00C00 - 0xFFC00FFF) */
209
210#define DMA0_NEXT_DESC_PTR 0xFFC00C00 /* DMA Channel 0 Next Descriptor Pointer Register */
211#define DMA0_START_ADDR 0xFFC00C04 /* DMA Channel 0 Start Address Register */
212#define DMA0_CONFIG 0xFFC00C08 /* DMA Channel 0 Configuration Register */
213#define DMA0_X_COUNT 0xFFC00C10 /* DMA Channel 0 X Count Register */
214#define DMA0_X_MODIFY 0xFFC00C14 /* DMA Channel 0 X Modify Register */
215#define DMA0_Y_COUNT 0xFFC00C18 /* DMA Channel 0 Y Count Register */
216#define DMA0_Y_MODIFY 0xFFC00C1C /* DMA Channel 0 Y Modify Register */
217#define DMA0_CURR_DESC_PTR 0xFFC00C20 /* DMA Channel 0 Current Descriptor Pointer Register */
218#define DMA0_CURR_ADDR 0xFFC00C24 /* DMA Channel 0 Current Address Register */
219#define DMA0_IRQ_STATUS 0xFFC00C28 /* DMA Channel 0 Interrupt/Status Register */
220#define DMA0_PERIPHERAL_MAP 0xFFC00C2C /* DMA Channel 0 Peripheral Map Register */
221#define DMA0_CURR_X_COUNT 0xFFC00C30 /* DMA Channel 0 Current X Count Register */
222#define DMA0_CURR_Y_COUNT 0xFFC00C38 /* DMA Channel 0 Current Y Count Register */
223
224#define DMA1_NEXT_DESC_PTR 0xFFC00C40 /* DMA Channel 1 Next Descriptor Pointer Register */
225#define DMA1_START_ADDR 0xFFC00C44 /* DMA Channel 1 Start Address Register */
226#define DMA1_CONFIG 0xFFC00C48 /* DMA Channel 1 Configuration Register */
227#define DMA1_X_COUNT 0xFFC00C50 /* DMA Channel 1 X Count Register */
228#define DMA1_X_MODIFY 0xFFC00C54 /* DMA Channel 1 X Modify Register */
229#define DMA1_Y_COUNT 0xFFC00C58 /* DMA Channel 1 Y Count Register */
230#define DMA1_Y_MODIFY 0xFFC00C5C /* DMA Channel 1 Y Modify Register */
231#define DMA1_CURR_DESC_PTR 0xFFC00C60 /* DMA Channel 1 Current Descriptor Pointer Register */
232#define DMA1_CURR_ADDR 0xFFC00C64 /* DMA Channel 1 Current Address Register */
233#define DMA1_IRQ_STATUS 0xFFC00C68 /* DMA Channel 1 Interrupt/Status Register */
234#define DMA1_PERIPHERAL_MAP 0xFFC00C6C /* DMA Channel 1 Peripheral Map Register */
235#define DMA1_CURR_X_COUNT 0xFFC00C70 /* DMA Channel 1 Current X Count Register */
236#define DMA1_CURR_Y_COUNT 0xFFC00C78 /* DMA Channel 1 Current Y Count Register */
237
238#define DMA2_NEXT_DESC_PTR 0xFFC00C80 /* DMA Channel 2 Next Descriptor Pointer Register */
239#define DMA2_START_ADDR 0xFFC00C84 /* DMA Channel 2 Start Address Register */
240#define DMA2_CONFIG 0xFFC00C88 /* DMA Channel 2 Configuration Register */
241#define DMA2_X_COUNT 0xFFC00C90 /* DMA Channel 2 X Count Register */
242#define DMA2_X_MODIFY 0xFFC00C94 /* DMA Channel 2 X Modify Register */
243#define DMA2_Y_COUNT 0xFFC00C98 /* DMA Channel 2 Y Count Register */
244#define DMA2_Y_MODIFY 0xFFC00C9C /* DMA Channel 2 Y Modify Register */
245#define DMA2_CURR_DESC_PTR 0xFFC00CA0 /* DMA Channel 2 Current Descriptor Pointer Register */
246#define DMA2_CURR_ADDR 0xFFC00CA4 /* DMA Channel 2 Current Address Register */
247#define DMA2_IRQ_STATUS 0xFFC00CA8 /* DMA Channel 2 Interrupt/Status Register */
248#define DMA2_PERIPHERAL_MAP 0xFFC00CAC /* DMA Channel 2 Peripheral Map Register */
249#define DMA2_CURR_X_COUNT 0xFFC00CB0 /* DMA Channel 2 Current X Count Register */
250#define DMA2_CURR_Y_COUNT 0xFFC00CB8 /* DMA Channel 2 Current Y Count Register */
251
252#define DMA3_NEXT_DESC_PTR 0xFFC00CC0 /* DMA Channel 3 Next Descriptor Pointer Register */
253#define DMA3_START_ADDR 0xFFC00CC4 /* DMA Channel 3 Start Address Register */
254#define DMA3_CONFIG 0xFFC00CC8 /* DMA Channel 3 Configuration Register */
255#define DMA3_X_COUNT 0xFFC00CD0 /* DMA Channel 3 X Count Register */
256#define DMA3_X_MODIFY 0xFFC00CD4 /* DMA Channel 3 X Modify Register */
257#define DMA3_Y_COUNT 0xFFC00CD8 /* DMA Channel 3 Y Count Register */
258#define DMA3_Y_MODIFY 0xFFC00CDC /* DMA Channel 3 Y Modify Register */
259#define DMA3_CURR_DESC_PTR 0xFFC00CE0 /* DMA Channel 3 Current Descriptor Pointer Register */
260#define DMA3_CURR_ADDR 0xFFC00CE4 /* DMA Channel 3 Current Address Register */
261#define DMA3_IRQ_STATUS 0xFFC00CE8 /* DMA Channel 3 Interrupt/Status Register */
262#define DMA3_PERIPHERAL_MAP 0xFFC00CEC /* DMA Channel 3 Peripheral Map Register */
263#define DMA3_CURR_X_COUNT 0xFFC00CF0 /* DMA Channel 3 Current X Count Register */
264#define DMA3_CURR_Y_COUNT 0xFFC00CF8 /* DMA Channel 3 Current Y Count Register */
265
266#define DMA4_NEXT_DESC_PTR 0xFFC00D00 /* DMA Channel 4 Next Descriptor Pointer Register */
267#define DMA4_START_ADDR 0xFFC00D04 /* DMA Channel 4 Start Address Register */
268#define DMA4_CONFIG 0xFFC00D08 /* DMA Channel 4 Configuration Register */
269#define DMA4_X_COUNT 0xFFC00D10 /* DMA Channel 4 X Count Register */
270#define DMA4_X_MODIFY 0xFFC00D14 /* DMA Channel 4 X Modify Register */
271#define DMA4_Y_COUNT 0xFFC00D18 /* DMA Channel 4 Y Count Register */
272#define DMA4_Y_MODIFY 0xFFC00D1C /* DMA Channel 4 Y Modify Register */
273#define DMA4_CURR_DESC_PTR 0xFFC00D20 /* DMA Channel 4 Current Descriptor Pointer Register */
274#define DMA4_CURR_ADDR 0xFFC00D24 /* DMA Channel 4 Current Address Register */
275#define DMA4_IRQ_STATUS 0xFFC00D28 /* DMA Channel 4 Interrupt/Status Register */
276#define DMA4_PERIPHERAL_MAP 0xFFC00D2C /* DMA Channel 4 Peripheral Map Register */
277#define DMA4_CURR_X_COUNT 0xFFC00D30 /* DMA Channel 4 Current X Count Register */
278#define DMA4_CURR_Y_COUNT 0xFFC00D38 /* DMA Channel 4 Current Y Count Register */
279
280#define DMA5_NEXT_DESC_PTR 0xFFC00D40 /* DMA Channel 5 Next Descriptor Pointer Register */
281#define DMA5_START_ADDR 0xFFC00D44 /* DMA Channel 5 Start Address Register */
282#define DMA5_CONFIG 0xFFC00D48 /* DMA Channel 5 Configuration Register */
283#define DMA5_X_COUNT 0xFFC00D50 /* DMA Channel 5 X Count Register */
284#define DMA5_X_MODIFY 0xFFC00D54 /* DMA Channel 5 X Modify Register */
285#define DMA5_Y_COUNT 0xFFC00D58 /* DMA Channel 5 Y Count Register */
286#define DMA5_Y_MODIFY 0xFFC00D5C /* DMA Channel 5 Y Modify Register */
287#define DMA5_CURR_DESC_PTR 0xFFC00D60 /* DMA Channel 5 Current Descriptor Pointer Register */
288#define DMA5_CURR_ADDR 0xFFC00D64 /* DMA Channel 5 Current Address Register */
289#define DMA5_IRQ_STATUS 0xFFC00D68 /* DMA Channel 5 Interrupt/Status Register */
290#define DMA5_PERIPHERAL_MAP 0xFFC00D6C /* DMA Channel 5 Peripheral Map Register */
291#define DMA5_CURR_X_COUNT 0xFFC00D70 /* DMA Channel 5 Current X Count Register */
292#define DMA5_CURR_Y_COUNT 0xFFC00D78 /* DMA Channel 5 Current Y Count Register */
293
294#define DMA6_NEXT_DESC_PTR 0xFFC00D80 /* DMA Channel 6 Next Descriptor Pointer Register */
295#define DMA6_START_ADDR 0xFFC00D84 /* DMA Channel 6 Start Address Register */
296#define DMA6_CONFIG 0xFFC00D88 /* DMA Channel 6 Configuration Register */
297#define DMA6_X_COUNT 0xFFC00D90 /* DMA Channel 6 X Count Register */
298#define DMA6_X_MODIFY 0xFFC00D94 /* DMA Channel 6 X Modify Register */
299#define DMA6_Y_COUNT 0xFFC00D98 /* DMA Channel 6 Y Count Register */
300#define DMA6_Y_MODIFY 0xFFC00D9C /* DMA Channel 6 Y Modify Register */
301#define DMA6_CURR_DESC_PTR 0xFFC00DA0 /* DMA Channel 6 Current Descriptor Pointer Register */
302#define DMA6_CURR_ADDR 0xFFC00DA4 /* DMA Channel 6 Current Address Register */
303#define DMA6_IRQ_STATUS 0xFFC00DA8 /* DMA Channel 6 Interrupt/Status Register */
304#define DMA6_PERIPHERAL_MAP 0xFFC00DAC /* DMA Channel 6 Peripheral Map Register */
305#define DMA6_CURR_X_COUNT 0xFFC00DB0 /* DMA Channel 6 Current X Count Register */
306#define DMA6_CURR_Y_COUNT 0xFFC00DB8 /* DMA Channel 6 Current Y Count Register */
307
308#define DMA7_NEXT_DESC_PTR 0xFFC00DC0 /* DMA Channel 7 Next Descriptor Pointer Register */
309#define DMA7_START_ADDR 0xFFC00DC4 /* DMA Channel 7 Start Address Register */
310#define DMA7_CONFIG 0xFFC00DC8 /* DMA Channel 7 Configuration Register */
311#define DMA7_X_COUNT 0xFFC00DD0 /* DMA Channel 7 X Count Register */
312#define DMA7_X_MODIFY 0xFFC00DD4 /* DMA Channel 7 X Modify Register */
313#define DMA7_Y_COUNT 0xFFC00DD8 /* DMA Channel 7 Y Count Register */
314#define DMA7_Y_MODIFY 0xFFC00DDC /* DMA Channel 7 Y Modify Register */
315#define DMA7_CURR_DESC_PTR 0xFFC00DE0 /* DMA Channel 7 Current Descriptor Pointer Register */
316#define DMA7_CURR_ADDR 0xFFC00DE4 /* DMA Channel 7 Current Address Register */
317#define DMA7_IRQ_STATUS 0xFFC00DE8 /* DMA Channel 7 Interrupt/Status Register */
318#define DMA7_PERIPHERAL_MAP 0xFFC00DEC /* DMA Channel 7 Peripheral Map Register */
319#define DMA7_CURR_X_COUNT 0xFFC00DF0 /* DMA Channel 7 Current X Count Register */
320#define DMA7_CURR_Y_COUNT 0xFFC00DF8 /* DMA Channel 7 Current Y Count Register */
321
322#define MDMA0_D0_NEXT_DESC_PTR 0xFFC00E00 /* MemDMA0 Stream 0 Destination Next Descriptor Pointer Register */
323#define MDMA0_D0_START_ADDR 0xFFC00E04 /* MemDMA0 Stream 0 Destination Start Address Register */
324#define MDMA0_D0_CONFIG 0xFFC00E08 /* MemDMA0 Stream 0 Destination Configuration Register */
325#define MDMA0_D0_X_COUNT 0xFFC00E10 /* MemDMA0 Stream 0 Destination X Count Register */
326#define MDMA0_D0_X_MODIFY 0xFFC00E14 /* MemDMA0 Stream 0 Destination X Modify Register */
327#define MDMA0_D0_Y_COUNT 0xFFC00E18 /* MemDMA0 Stream 0 Destination Y Count Register */
328#define MDMA0_D0_Y_MODIFY 0xFFC00E1C /* MemDMA0 Stream 0 Destination Y Modify Register */
329#define MDMA0_D0_CURR_DESC_PTR 0xFFC00E20 /* MemDMA0 Stream 0 Destination Current Descriptor Pointer Register */
330#define MDMA0_D0_CURR_ADDR 0xFFC00E24 /* MemDMA0 Stream 0 Destination Current Address Register */
331#define MDMA0_D0_IRQ_STATUS 0xFFC00E28 /* MemDMA0 Stream 0 Destination Interrupt/Status Register */
332#define MDMA0_D0_PERIPHERAL_MAP 0xFFC00E2C /* MemDMA0 Stream 0 Destination Peripheral Map Register */
333#define MDMA0_D0_CURR_X_COUNT 0xFFC00E30 /* MemDMA0 Stream 0 Destination Current X Count Register */
334#define MDMA0_D0_CURR_Y_COUNT 0xFFC00E38 /* MemDMA0 Stream 0 Destination Current Y Count Register */
335
336#define MDMA0_S0_NEXT_DESC_PTR 0xFFC00E40 /* MemDMA0 Stream 0 Source Next Descriptor Pointer Register */
337#define MDMA0_S0_START_ADDR 0xFFC00E44 /* MemDMA0 Stream 0 Source Start Address Register */
338#define MDMA0_S0_CONFIG 0xFFC00E48 /* MemDMA0 Stream 0 Source Configuration Register */
339#define MDMA0_S0_X_COUNT 0xFFC00E50 /* MemDMA0 Stream 0 Source X Count Register */
340#define MDMA0_S0_X_MODIFY 0xFFC00E54 /* MemDMA0 Stream 0 Source X Modify Register */
341#define MDMA0_S0_Y_COUNT 0xFFC00E58 /* MemDMA0 Stream 0 Source Y Count Register */
342#define MDMA0_S0_Y_MODIFY 0xFFC00E5C /* MemDMA0 Stream 0 Source Y Modify Register */
343#define MDMA0_S0_CURR_DESC_PTR 0xFFC00E60 /* MemDMA0 Stream 0 Source Current Descriptor Pointer Register */
344#define MDMA0_S0_CURR_ADDR 0xFFC00E64 /* MemDMA0 Stream 0 Source Current Address Register */
345#define MDMA0_S0_IRQ_STATUS 0xFFC00E68 /* MemDMA0 Stream 0 Source Interrupt/Status Register */
346#define MDMA0_S0_PERIPHERAL_MAP 0xFFC00E6C /* MemDMA0 Stream 0 Source Peripheral Map Register */
347#define MDMA0_S0_CURR_X_COUNT 0xFFC00E70 /* MemDMA0 Stream 0 Source Current X Count Register */
348#define MDMA0_S0_CURR_Y_COUNT 0xFFC00E78 /* MemDMA0 Stream 0 Source Current Y Count Register */
349
350#define MDMA0_D1_NEXT_DESC_PTR 0xFFC00E80 /* MemDMA0 Stream 1 Destination Next Descriptor Pointer Register */
351#define MDMA0_D1_START_ADDR 0xFFC00E84 /* MemDMA0 Stream 1 Destination Start Address Register */
352#define MDMA0_D1_CONFIG 0xFFC00E88 /* MemDMA0 Stream 1 Destination Configuration Register */
353#define MDMA0_D1_X_COUNT 0xFFC00E90 /* MemDMA0 Stream 1 Destination X Count Register */
354#define MDMA0_D1_X_MODIFY 0xFFC00E94 /* MemDMA0 Stream 1 Destination X Modify Register */
355#define MDMA0_D1_Y_COUNT 0xFFC00E98 /* MemDMA0 Stream 1 Destination Y Count Register */
356#define MDMA0_D1_Y_MODIFY 0xFFC00E9C /* MemDMA0 Stream 1 Destination Y Modify Register */
357#define MDMA0_D1_CURR_DESC_PTR 0xFFC00EA0 /* MemDMA0 Stream 1 Destination Current Descriptor Pointer Register */
358#define MDMA0_D1_CURR_ADDR 0xFFC00EA4 /* MemDMA0 Stream 1 Destination Current Address Register */
359#define MDMA0_D1_IRQ_STATUS 0xFFC00EA8 /* MemDMA0 Stream 1 Destination Interrupt/Status Register */
360#define MDMA0_D1_PERIPHERAL_MAP 0xFFC00EAC /* MemDMA0 Stream 1 Destination Peripheral Map Register */
361#define MDMA0_D1_CURR_X_COUNT 0xFFC00EB0 /* MemDMA0 Stream 1 Destination Current X Count Register */
362#define MDMA0_D1_CURR_Y_COUNT 0xFFC00EB8 /* MemDMA0 Stream 1 Destination Current Y Count Register */
363
364#define MDMA0_S1_NEXT_DESC_PTR 0xFFC00EC0 /* MemDMA0 Stream 1 Source Next Descriptor Pointer Register */
365#define MDMA0_S1_START_ADDR 0xFFC00EC4 /* MemDMA0 Stream 1 Source Start Address Register */
366#define MDMA0_S1_CONFIG 0xFFC00EC8 /* MemDMA0 Stream 1 Source Configuration Register */
367#define MDMA0_S1_X_COUNT 0xFFC00ED0 /* MemDMA0 Stream 1 Source X Count Register */
368#define MDMA0_S1_X_MODIFY 0xFFC00ED4 /* MemDMA0 Stream 1 Source X Modify Register */
369#define MDMA0_S1_Y_COUNT 0xFFC00ED8 /* MemDMA0 Stream 1 Source Y Count Register */
370#define MDMA0_S1_Y_MODIFY 0xFFC00EDC /* MemDMA0 Stream 1 Source Y Modify Register */
371#define MDMA0_S1_CURR_DESC_PTR 0xFFC00EE0 /* MemDMA0 Stream 1 Source Current Descriptor Pointer Register */
372#define MDMA0_S1_CURR_ADDR 0xFFC00EE4 /* MemDMA0 Stream 1 Source Current Address Register */
373#define MDMA0_S1_IRQ_STATUS 0xFFC00EE8 /* MemDMA0 Stream 1 Source Interrupt/Status Register */
374#define MDMA0_S1_PERIPHERAL_MAP 0xFFC00EEC /* MemDMA0 Stream 1 Source Peripheral Map Register */
375#define MDMA0_S1_CURR_X_COUNT 0xFFC00EF0 /* MemDMA0 Stream 1 Source Current X Count Register */
376#define MDMA0_S1_CURR_Y_COUNT 0xFFC00EF8 /* MemDMA0 Stream 1 Source Current Y Count Register */
377
378#define MDMA_D0_NEXT_DESC_PTR MDMA0_D0_NEXT_DESC_PTR
379#define MDMA_D0_START_ADDR MDMA0_D0_START_ADDR
380#define MDMA_D0_CONFIG MDMA0_D0_CONFIG
381#define MDMA_D0_X_COUNT MDMA0_D0_X_COUNT
382#define MDMA_D0_X_MODIFY MDMA0_D0_X_MODIFY
383#define MDMA_D0_Y_COUNT MDMA0_D0_Y_COUNT
384#define MDMA_D0_Y_MODIFY MDMA0_D0_Y_MODIFY
385#define MDMA_D0_CURR_DESC_PTR MDMA0_D0_CURR_DESC_PTR
386#define MDMA_D0_CURR_ADDR MDMA0_D0_CURR_ADDR
387#define MDMA_D0_IRQ_STATUS MDMA0_D0_IRQ_STATUS
388#define MDMA_D0_PERIPHERAL_MAP MDMA0_D0_PERIPHERAL_MAP
389#define MDMA_D0_CURR_X_COUNT MDMA0_D0_CURR_X_COUNT
390#define MDMA_D0_CURR_Y_COUNT MDMA0_D0_CURR_Y_COUNT
391
392#define MDMA_S0_NEXT_DESC_PTR MDMA0_S0_NEXT_DESC_PTR
393#define MDMA_S0_START_ADDR MDMA0_S0_START_ADDR
394#define MDMA_S0_CONFIG MDMA0_S0_CONFIG
395#define MDMA_S0_X_COUNT MDMA0_S0_X_COUNT
396#define MDMA_S0_X_MODIFY MDMA0_S0_X_MODIFY
397#define MDMA_S0_Y_COUNT MDMA0_S0_Y_COUNT
398#define MDMA_S0_Y_MODIFY MDMA0_S0_Y_MODIFY
399#define MDMA_S0_CURR_DESC_PTR MDMA0_S0_CURR_DESC_PTR
400#define MDMA_S0_CURR_ADDR MDMA0_S0_CURR_ADDR
401#define MDMA_S0_IRQ_STATUS MDMA0_S0_IRQ_STATUS
402#define MDMA_S0_PERIPHERAL_MAP MDMA0_S0_PERIPHERAL_MAP
403#define MDMA_S0_CURR_X_COUNT MDMA0_S0_CURR_X_COUNT
404#define MDMA_S0_CURR_Y_COUNT MDMA0_S0_CURR_Y_COUNT
405
406#define MDMA_D1_NEXT_DESC_PTR MDMA0_D1_NEXT_DESC_PTR
407#define MDMA_D1_START_ADDR MDMA0_D1_START_ADDR
408#define MDMA_D1_CONFIG MDMA0_D1_CONFIG
409#define MDMA_D1_X_COUNT MDMA0_D1_X_COUNT
410#define MDMA_D1_X_MODIFY MDMA0_D1_X_MODIFY
411#define MDMA_D1_Y_COUNT MDMA0_D1_Y_COUNT
412#define MDMA_D1_Y_MODIFY MDMA0_D1_Y_MODIFY
413#define MDMA_D1_CURR_DESC_PTR MDMA0_D1_CURR_DESC_PTR
414#define MDMA_D1_CURR_ADDR MDMA0_D1_CURR_ADDR
415#define MDMA_D1_IRQ_STATUS MDMA0_D1_IRQ_STATUS
416#define MDMA_D1_PERIPHERAL_MAP MDMA0_D1_PERIPHERAL_MAP
417#define MDMA_D1_CURR_X_COUNT MDMA0_D1_CURR_X_COUNT
418#define MDMA_D1_CURR_Y_COUNT MDMA0_D1_CURR_Y_COUNT
419
420#define MDMA_S1_NEXT_DESC_PTR MDMA0_S1_NEXT_DESC_PTR
421#define MDMA_S1_START_ADDR MDMA0_S1_START_ADDR
422#define MDMA_S1_CONFIG MDMA0_S1_CONFIG
423#define MDMA_S1_X_COUNT MDMA0_S1_X_COUNT
424#define MDMA_S1_X_MODIFY MDMA0_S1_X_MODIFY
425#define MDMA_S1_Y_COUNT MDMA0_S1_Y_COUNT
426#define MDMA_S1_Y_MODIFY MDMA0_S1_Y_MODIFY
427#define MDMA_S1_CURR_DESC_PTR MDMA0_S1_CURR_DESC_PTR
428#define MDMA_S1_CURR_ADDR MDMA0_S1_CURR_ADDR
429#define MDMA_S1_IRQ_STATUS MDMA0_S1_IRQ_STATUS
430#define MDMA_S1_PERIPHERAL_MAP MDMA0_S1_PERIPHERAL_MAP
431#define MDMA_S1_CURR_X_COUNT MDMA0_S1_CURR_X_COUNT
432#define MDMA_S1_CURR_Y_COUNT MDMA0_S1_CURR_Y_COUNT
433
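A memory-to-memory move programs the source and destination register sets above symmetrically: same element count, byte-wise modify, then a CONFIG write on each side. The DMAEN/WNR configuration bits and the DMA_DONE status bit used below are assumptions taken from the common Blackfin DMA definitions, not from this header, so treat this strictly as a sketch:

/* Assumed generic Blackfin DMA_CONFIG bits (not defined in this header). */
#define DMAEN 0x0001   /* channel enable */
#define WNR   0x0002   /* direction: memory write (destination side) */

static void mdma0_memcpy_sketch(void *dst, const void *src, uint16_t nbytes)
{
	MMIO32(MDMA0_S0_START_ADDR) = (uint32_t)(uintptr_t)src;
	MMIO16(MDMA0_S0_X_COUNT) = nbytes;      /* 8-bit elements */
	MMIO16(MDMA0_S0_X_MODIFY) = 1;          /* advance one byte each */

	MMIO32(MDMA0_D0_START_ADDR) = (uint32_t)(uintptr_t)dst;
	MMIO16(MDMA0_D0_X_COUNT) = nbytes;
	MMIO16(MDMA0_D0_X_MODIFY) = 1;

	/* Enable the write side first (a conservative sketch choice),
	 * then the read side. */
	MMIO16(MDMA0_D0_CONFIG) = DMAEN | WNR;
	MMIO16(MDMA0_S0_CONFIG) = DMAEN;

	/* DMA_DONE is assumed to be bit 0 of IRQ_STATUS. */
	while (!(MMIO16(MDMA0_D0_IRQ_STATUS) & 0x0001))
		;
}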
434
435/* Parallel Peripheral Interface (PPI) (0xFFC01000 - 0xFFC010FF) */
436#define PPI_CONTROL 0xFFC01000 /* PPI Control Register */
437#define PPI_STATUS 0xFFC01004 /* PPI Status Register */
438#define PPI_COUNT 0xFFC01008 /* PPI Transfer Count Register */
439#define PPI_DELAY 0xFFC0100C /* PPI Delay Count Register */
440#define PPI_FRAME 0xFFC01010 /* PPI Frame Length Register */
441
442
443/* Two-Wire Interface 0 (0xFFC01400 - 0xFFC014FF) */
444#define TWI0_CLKDIV 0xFFC01400 /* Serial Clock Divider Register */
445#define TWI0_CONTROL 0xFFC01404 /* TWI0 Master Internal Time Reference Register */
446#define TWI0_SLAVE_CTL 0xFFC01408 /* Slave Mode Control Register */
447#define TWI0_SLAVE_STAT 0xFFC0140C /* Slave Mode Status Register */
448#define TWI0_SLAVE_ADDR 0xFFC01410 /* Slave Mode Address Register */
449#define TWI0_MASTER_CTL 0xFFC01414 /* Master Mode Control Register */
450#define TWI0_MASTER_STAT 0xFFC01418 /* Master Mode Status Register */
451#define TWI0_MASTER_ADDR 0xFFC0141C /* Master Mode Address Register */
452#define TWI0_INT_STAT 0xFFC01420 /* TWI0 Master Interrupt Register */
453#define TWI0_INT_MASK 0xFFC01424 /* TWI0 Master Interrupt Mask Register */
454#define TWI0_FIFO_CTL 0xFFC01428 /* FIFO Control Register */
455#define TWI0_FIFO_STAT 0xFFC0142C /* FIFO Status Register */
456#define TWI0_XMT_DATA8 0xFFC01480 /* FIFO Transmit Data Single Byte Register */
457#define TWI0_XMT_DATA16 0xFFC01484 /* FIFO Transmit Data Double Byte Register */
458#define TWI0_RCV_DATA8 0xFFC01488 /* FIFO Receive Data Single Byte Register */
459#define TWI0_RCV_DATA16 0xFFC0148C /* FIFO Receive Data Double Byte Register */
460
461#define TWI0_REGBASE TWI0_CLKDIV
462
463/* the following are for backwards compatibility */
464#define TWI0_PRESCALE TWI0_CONTROL
465#define TWI0_INT_SRC TWI0_INT_STAT
466#define TWI0_INT_ENABLE TWI0_INT_MASK
467
468
469/* General-Purpose Ports (0xFFC01500 - 0xFFC015FF) */
470
471/* GPIO Port C Register Names */
472#define PORTCIO_FER 0xFFC01500 /* GPIO Pin Port C Configuration Register */
473#define PORTCIO 0xFFC01510 /* GPIO Pin Port C Data Register */
474#define PORTCIO_CLEAR 0xFFC01520 /* Clear GPIO Pin Port C Register */
475#define PORTCIO_SET 0xFFC01530 /* Set GPIO Pin Port C Register */
476#define PORTCIO_TOGGLE 0xFFC01540 /* Toggle GPIO Pin Port C Register */
477#define PORTCIO_DIR 0xFFC01550 /* GPIO Pin Port C Direction Register */
478#define PORTCIO_INEN 0xFFC01560 /* GPIO Pin Port C Input Enable Register */
479
480/* GPIO Port D Register Names */
481#define PORTDIO_FER 0xFFC01504 /* GPIO Pin Port D Configuration Register */
482#define PORTDIO 0xFFC01514 /* GPIO Pin Port D Data Register */
483#define PORTDIO_CLEAR 0xFFC01524 /* Clear GPIO Pin Port D Register */
484#define PORTDIO_SET 0xFFC01534 /* Set GPIO Pin Port D Register */
485#define PORTDIO_TOGGLE 0xFFC01544 /* Toggle GPIO Pin Port D Register */
486#define PORTDIO_DIR 0xFFC01554 /* GPIO Pin Port D Direction Register */
487#define PORTDIO_INEN 0xFFC01564 /* GPIO Pin Port D Input Enable Register */
488
489/* GPIO Port E Register Names */
490#define PORTEIO_FER 0xFFC01508 /* GPIO Pin Port E Configuration Register */
491#define PORTEIO 0xFFC01518 /* GPIO Pin Port E Data Register */
492#define PORTEIO_CLEAR 0xFFC01528 /* Clear GPIO Pin Port E Register */
493#define PORTEIO_SET 0xFFC01538 /* Set GPIO Pin Port E Register */
494#define PORTEIO_TOGGLE 0xFFC01548 /* Toggle GPIO Pin Port E Register */
495#define PORTEIO_DIR 0xFFC01558 /* GPIO Pin Port E Direction Register */
496#define PORTEIO_INEN 0xFFC01568 /* GPIO Pin Port E Input Enable Register */
497
498/* DMA Controller 1 Traffic Control Registers (0xFFC01B00 - 0xFFC01BFF) */
499
500#define DMAC1_TC_PER 0xFFC01B0C /* DMA Controller 1 Traffic Control Periods Register */
501#define DMAC1_TC_CNT 0xFFC01B10 /* DMA Controller 1 Traffic Control Current Counts Register */
502
503/* Deprecated alternate register names (below), kept for backwards code compatibility */
504#define DMA1_TCPER DMAC1_TC_PER
505#define DMA1_TCCNT DMAC1_TC_CNT
506
507
508/* DMA Controller 1 (0xFFC01C00 - 0xFFC01FFF) */
509#define DMA8_NEXT_DESC_PTR 0xFFC01C00 /* DMA Channel 8 Next Descriptor Pointer Register */
510#define DMA8_START_ADDR 0xFFC01C04 /* DMA Channel 8 Start Address Register */
511#define DMA8_CONFIG 0xFFC01C08 /* DMA Channel 8 Configuration Register */
512#define DMA8_X_COUNT 0xFFC01C10 /* DMA Channel 8 X Count Register */
513#define DMA8_X_MODIFY 0xFFC01C14 /* DMA Channel 8 X Modify Register */
514#define DMA8_Y_COUNT 0xFFC01C18 /* DMA Channel 8 Y Count Register */
515#define DMA8_Y_MODIFY 0xFFC01C1C /* DMA Channel 8 Y Modify Register */
516#define DMA8_CURR_DESC_PTR 0xFFC01C20 /* DMA Channel 8 Current Descriptor Pointer Register */
517#define DMA8_CURR_ADDR 0xFFC01C24 /* DMA Channel 8 Current Address Register */
518#define DMA8_IRQ_STATUS 0xFFC01C28 /* DMA Channel 8 Interrupt/Status Register */
519#define DMA8_PERIPHERAL_MAP 0xFFC01C2C /* DMA Channel 8 Peripheral Map Register */
520#define DMA8_CURR_X_COUNT 0xFFC01C30 /* DMA Channel 8 Current X Count Register */
521#define DMA8_CURR_Y_COUNT 0xFFC01C38 /* DMA Channel 8 Current Y Count Register */
522
523#define DMA9_NEXT_DESC_PTR 0xFFC01C40 /* DMA Channel 9 Next Descriptor Pointer Register */
524#define DMA9_START_ADDR 0xFFC01C44 /* DMA Channel 9 Start Address Register */
525#define DMA9_CONFIG 0xFFC01C48 /* DMA Channel 9 Configuration Register */
526#define DMA9_X_COUNT 0xFFC01C50 /* DMA Channel 9 X Count Register */
527#define DMA9_X_MODIFY 0xFFC01C54 /* DMA Channel 9 X Modify Register */
528#define DMA9_Y_COUNT 0xFFC01C58 /* DMA Channel 9 Y Count Register */
529#define DMA9_Y_MODIFY 0xFFC01C5C /* DMA Channel 9 Y Modify Register */
530#define DMA9_CURR_DESC_PTR 0xFFC01C60 /* DMA Channel 9 Current Descriptor Pointer Register */
531#define DMA9_CURR_ADDR 0xFFC01C64 /* DMA Channel 9 Current Address Register */
532#define DMA9_IRQ_STATUS 0xFFC01C68 /* DMA Channel 9 Interrupt/Status Register */
533#define DMA9_PERIPHERAL_MAP 0xFFC01C6C /* DMA Channel 9 Peripheral Map Register */
534#define DMA9_CURR_X_COUNT 0xFFC01C70 /* DMA Channel 9 Current X Count Register */
535#define DMA9_CURR_Y_COUNT 0xFFC01C78 /* DMA Channel 9 Current Y Count Register */
536
537#define DMA10_NEXT_DESC_PTR 0xFFC01C80 /* DMA Channel 10 Next Descriptor Pointer Register */
538#define DMA10_START_ADDR 0xFFC01C84 /* DMA Channel 10 Start Address Register */
539#define DMA10_CONFIG 0xFFC01C88 /* DMA Channel 10 Configuration Register */
540#define DMA10_X_COUNT 0xFFC01C90 /* DMA Channel 10 X Count Register */
541#define DMA10_X_MODIFY 0xFFC01C94 /* DMA Channel 10 X Modify Register */
542#define DMA10_Y_COUNT 0xFFC01C98 /* DMA Channel 10 Y Count Register */
543#define DMA10_Y_MODIFY 0xFFC01C9C /* DMA Channel 10 Y Modify Register */
544#define DMA10_CURR_DESC_PTR 0xFFC01CA0 /* DMA Channel 10 Current Descriptor Pointer Register */
545#define DMA10_CURR_ADDR 0xFFC01CA4 /* DMA Channel 10 Current Address Register */
546#define DMA10_IRQ_STATUS 0xFFC01CA8 /* DMA Channel 10 Interrupt/Status Register */
547#define DMA10_PERIPHERAL_MAP 0xFFC01CAC /* DMA Channel 10 Peripheral Map Register */
548#define DMA10_CURR_X_COUNT 0xFFC01CB0 /* DMA Channel 10 Current X Count Register */
549#define DMA10_CURR_Y_COUNT 0xFFC01CB8 /* DMA Channel 10 Current Y Count Register */
550
551#define DMA11_NEXT_DESC_PTR 0xFFC01CC0 /* DMA Channel 11 Next Descriptor Pointer Register */
552#define DMA11_START_ADDR 0xFFC01CC4 /* DMA Channel 11 Start Address Register */
553#define DMA11_CONFIG 0xFFC01CC8 /* DMA Channel 11 Configuration Register */
554#define DMA11_X_COUNT 0xFFC01CD0 /* DMA Channel 11 X Count Register */
555#define DMA11_X_MODIFY 0xFFC01CD4 /* DMA Channel 11 X Modify Register */
556#define DMA11_Y_COUNT 0xFFC01CD8 /* DMA Channel 11 Y Count Register */
557#define DMA11_Y_MODIFY 0xFFC01CDC /* DMA Channel 11 Y Modify Register */
558#define DMA11_CURR_DESC_PTR 0xFFC01CE0 /* DMA Channel 11 Current Descriptor Pointer Register */
559#define DMA11_CURR_ADDR 0xFFC01CE4 /* DMA Channel 11 Current Address Register */
560#define DMA11_IRQ_STATUS 0xFFC01CE8 /* DMA Channel 11 Interrupt/Status Register */
561#define DMA11_PERIPHERAL_MAP 0xFFC01CEC /* DMA Channel 11 Peripheral Map Register */
562#define DMA11_CURR_X_COUNT 0xFFC01CF0 /* DMA Channel 11 Current X Count Register */
563#define DMA11_CURR_Y_COUNT 0xFFC01CF8 /* DMA Channel 11 Current Y Count Register */
564
565#define DMA12_NEXT_DESC_PTR 0xFFC01D00 /* DMA Channel 12 Next Descriptor Pointer Register */
566#define DMA12_START_ADDR 0xFFC01D04 /* DMA Channel 12 Start Address Register */
567#define DMA12_CONFIG 0xFFC01D08 /* DMA Channel 12 Configuration Register */
568#define DMA12_X_COUNT 0xFFC01D10 /* DMA Channel 12 X Count Register */
569#define DMA12_X_MODIFY 0xFFC01D14 /* DMA Channel 12 X Modify Register */
570#define DMA12_Y_COUNT 0xFFC01D18 /* DMA Channel 12 Y Count Register */
571#define DMA12_Y_MODIFY 0xFFC01D1C /* DMA Channel 12 Y Modify Register */
572#define DMA12_CURR_DESC_PTR 0xFFC01D20 /* DMA Channel 12 Current Descriptor Pointer Register */
573#define DMA12_CURR_ADDR 0xFFC01D24 /* DMA Channel 12 Current Address Register */
574#define DMA12_IRQ_STATUS 0xFFC01D28 /* DMA Channel 12 Interrupt/Status Register */
575#define DMA12_PERIPHERAL_MAP 0xFFC01D2C /* DMA Channel 12 Peripheral Map Register */
576#define DMA12_CURR_X_COUNT 0xFFC01D30 /* DMA Channel 12 Current X Count Register */
577#define DMA12_CURR_Y_COUNT 0xFFC01D38 /* DMA Channel 12 Current Y Count Register */
578
579#define DMA13_NEXT_DESC_PTR 0xFFC01D40 /* DMA Channel 13 Next Descriptor Pointer Register */
580#define DMA13_START_ADDR 0xFFC01D44 /* DMA Channel 13 Start Address Register */
581#define DMA13_CONFIG 0xFFC01D48 /* DMA Channel 13 Configuration Register */
582#define DMA13_X_COUNT 0xFFC01D50 /* DMA Channel 13 X Count Register */
583#define DMA13_X_MODIFY 0xFFC01D54 /* DMA Channel 13 X Modify Register */
584#define DMA13_Y_COUNT 0xFFC01D58 /* DMA Channel 13 Y Count Register */
585#define DMA13_Y_MODIFY 0xFFC01D5C /* DMA Channel 13 Y Modify Register */
586#define DMA13_CURR_DESC_PTR 0xFFC01D60 /* DMA Channel 13 Current Descriptor Pointer Register */
587#define DMA13_CURR_ADDR 0xFFC01D64 /* DMA Channel 13 Current Address Register */
588#define DMA13_IRQ_STATUS 0xFFC01D68 /* DMA Channel 13 Interrupt/Status Register */
589#define DMA13_PERIPHERAL_MAP 0xFFC01D6C /* DMA Channel 13 Peripheral Map Register */
590#define DMA13_CURR_X_COUNT 0xFFC01D70 /* DMA Channel 13 Current X Count Register */
591#define DMA13_CURR_Y_COUNT 0xFFC01D78 /* DMA Channel 13 Current Y Count Register */
592
593#define DMA14_NEXT_DESC_PTR 0xFFC01D80 /* DMA Channel 14 Next Descriptor Pointer Register */
594#define DMA14_START_ADDR 0xFFC01D84 /* DMA Channel 14 Start Address Register */
595#define DMA14_CONFIG 0xFFC01D88 /* DMA Channel 14 Configuration Register */
596#define DMA14_X_COUNT 0xFFC01D90 /* DMA Channel 14 X Count Register */
597#define DMA14_X_MODIFY 0xFFC01D94 /* DMA Channel 14 X Modify Register */
598#define DMA14_Y_COUNT 0xFFC01D98 /* DMA Channel 14 Y Count Register */
599#define DMA14_Y_MODIFY 0xFFC01D9C /* DMA Channel 14 Y Modify Register */
600#define DMA14_CURR_DESC_PTR 0xFFC01DA0 /* DMA Channel 14 Current Descriptor Pointer Register */
601#define DMA14_CURR_ADDR 0xFFC01DA4 /* DMA Channel 14 Current Address Register */
602#define DMA14_IRQ_STATUS 0xFFC01DA8 /* DMA Channel 14 Interrupt/Status Register */
603#define DMA14_PERIPHERAL_MAP 0xFFC01DAC /* DMA Channel 14 Peripheral Map Register */
604#define DMA14_CURR_X_COUNT 0xFFC01DB0 /* DMA Channel 14 Current X Count Register */
605#define DMA14_CURR_Y_COUNT 0xFFC01DB8 /* DMA Channel 14 Current Y Count Register */
606
607#define DMA15_NEXT_DESC_PTR 0xFFC01DC0 /* DMA Channel 15 Next Descriptor Pointer Register */
608#define DMA15_START_ADDR 0xFFC01DC4 /* DMA Channel 15 Start Address Register */
609#define DMA15_CONFIG 0xFFC01DC8 /* DMA Channel 15 Configuration Register */
610#define DMA15_X_COUNT 0xFFC01DD0 /* DMA Channel 15 X Count Register */
611#define DMA15_X_MODIFY 0xFFC01DD4 /* DMA Channel 15 X Modify Register */
612#define DMA15_Y_COUNT 0xFFC01DD8 /* DMA Channel 15 Y Count Register */
613#define DMA15_Y_MODIFY 0xFFC01DDC /* DMA Channel 15 Y Modify Register */
614#define DMA15_CURR_DESC_PTR 0xFFC01DE0 /* DMA Channel 15 Current Descriptor Pointer Register */
615#define DMA15_CURR_ADDR 0xFFC01DE4 /* DMA Channel 15 Current Address Register */
616#define DMA15_IRQ_STATUS 0xFFC01DE8 /* DMA Channel 15 Interrupt/Status Register */
617#define DMA15_PERIPHERAL_MAP 0xFFC01DEC /* DMA Channel 15 Peripheral Map Register */
618#define DMA15_CURR_X_COUNT 0xFFC01DF0 /* DMA Channel 15 Current X Count Register */
619#define DMA15_CURR_Y_COUNT 0xFFC01DF8 /* DMA Channel 15 Current Y Count Register */
620
621#define DMA16_NEXT_DESC_PTR 0xFFC01E00 /* DMA Channel 16 Next Descriptor Pointer Register */
622#define DMA16_START_ADDR 0xFFC01E04 /* DMA Channel 16 Start Address Register */
623#define DMA16_CONFIG 0xFFC01E08 /* DMA Channel 16 Configuration Register */
624#define DMA16_X_COUNT 0xFFC01E10 /* DMA Channel 16 X Count Register */
625#define DMA16_X_MODIFY 0xFFC01E14 /* DMA Channel 16 X Modify Register */
626#define DMA16_Y_COUNT 0xFFC01E18 /* DMA Channel 16 Y Count Register */
627#define DMA16_Y_MODIFY 0xFFC01E1C /* DMA Channel 16 Y Modify Register */
628#define DMA16_CURR_DESC_PTR 0xFFC01E20 /* DMA Channel 16 Current Descriptor Pointer Register */
629#define DMA16_CURR_ADDR 0xFFC01E24 /* DMA Channel 16 Current Address Register */
630#define DMA16_IRQ_STATUS 0xFFC01E28 /* DMA Channel 16 Interrupt/Status Register */
631#define DMA16_PERIPHERAL_MAP 0xFFC01E2C /* DMA Channel 16 Peripheral Map Register */
632#define DMA16_CURR_X_COUNT 0xFFC01E30 /* DMA Channel 16 Current X Count Register */
633#define DMA16_CURR_Y_COUNT 0xFFC01E38 /* DMA Channel 16 Current Y Count Register */
634
635#define DMA17_NEXT_DESC_PTR 0xFFC01E40 /* DMA Channel 17 Next Descriptor Pointer Register */
636#define DMA17_START_ADDR 0xFFC01E44 /* DMA Channel 17 Start Address Register */
637#define DMA17_CONFIG 0xFFC01E48 /* DMA Channel 17 Configuration Register */
638#define DMA17_X_COUNT 0xFFC01E50 /* DMA Channel 17 X Count Register */
639#define DMA17_X_MODIFY 0xFFC01E54 /* DMA Channel 17 X Modify Register */
640#define DMA17_Y_COUNT 0xFFC01E58 /* DMA Channel 17 Y Count Register */
641#define DMA17_Y_MODIFY 0xFFC01E5C /* DMA Channel 17 Y Modify Register */
642#define DMA17_CURR_DESC_PTR 0xFFC01E60 /* DMA Channel 17 Current Descriptor Pointer Register */
643#define DMA17_CURR_ADDR 0xFFC01E64 /* DMA Channel 17 Current Address Register */
644#define DMA17_IRQ_STATUS 0xFFC01E68 /* DMA Channel 17 Interrupt/Status Register */
645#define DMA17_PERIPHERAL_MAP 0xFFC01E6C /* DMA Channel 17 Peripheral Map Register */
646#define DMA17_CURR_X_COUNT 0xFFC01E70 /* DMA Channel 17 Current X Count Register */
647#define DMA17_CURR_Y_COUNT 0xFFC01E78 /* DMA Channel 17 Current Y Count Register */
648
649#define DMA18_NEXT_DESC_PTR 0xFFC01E80 /* DMA Channel 18 Next Descriptor Pointer Register */
650#define DMA18_START_ADDR 0xFFC01E84 /* DMA Channel 18 Start Address Register */
651#define DMA18_CONFIG 0xFFC01E88 /* DMA Channel 18 Configuration Register */
652#define DMA18_X_COUNT 0xFFC01E90 /* DMA Channel 18 X Count Register */
653#define DMA18_X_MODIFY 0xFFC01E94 /* DMA Channel 18 X Modify Register */
654#define DMA18_Y_COUNT 0xFFC01E98 /* DMA Channel 18 Y Count Register */
655#define DMA18_Y_MODIFY 0xFFC01E9C /* DMA Channel 18 Y Modify Register */
656#define DMA18_CURR_DESC_PTR 0xFFC01EA0 /* DMA Channel 18 Current Descriptor Pointer Register */
657#define DMA18_CURR_ADDR 0xFFC01EA4 /* DMA Channel 18 Current Address Register */
658#define DMA18_IRQ_STATUS 0xFFC01EA8 /* DMA Channel 18 Interrupt/Status Register */
659#define DMA18_PERIPHERAL_MAP 0xFFC01EAC /* DMA Channel 18 Peripheral Map Register */
660#define DMA18_CURR_X_COUNT 0xFFC01EB0 /* DMA Channel 18 Current X Count Register */
661#define DMA18_CURR_Y_COUNT 0xFFC01EB8 /* DMA Channel 18 Current Y Count Register */
662
663#define DMA19_NEXT_DESC_PTR 0xFFC01EC0 /* DMA Channel 19 Next Descriptor Pointer Register */
664#define DMA19_START_ADDR 0xFFC01EC4 /* DMA Channel 19 Start Address Register */
665#define DMA19_CONFIG 0xFFC01EC8 /* DMA Channel 19 Configuration Register */
666#define DMA19_X_COUNT 0xFFC01ED0 /* DMA Channel 19 X Count Register */
667#define DMA19_X_MODIFY 0xFFC01ED4 /* DMA Channel 19 X Modify Register */
668#define DMA19_Y_COUNT 0xFFC01ED8 /* DMA Channel 19 Y Count Register */
669#define DMA19_Y_MODIFY 0xFFC01EDC /* DMA Channel 19 Y Modify Register */
670#define DMA19_CURR_DESC_PTR 0xFFC01EE0 /* DMA Channel 19 Current Descriptor Pointer Register */
671#define DMA19_CURR_ADDR 0xFFC01EE4 /* DMA Channel 19 Current Address Register */
672#define DMA19_IRQ_STATUS 0xFFC01EE8 /* DMA Channel 19 Interrupt/Status Register */
673#define DMA19_PERIPHERAL_MAP 0xFFC01EEC /* DMA Channel 19 Peripheral Map Register */
674#define DMA19_CURR_X_COUNT 0xFFC01EF0 /* DMA Channel 19 Current X Count Register */
675#define DMA19_CURR_Y_COUNT 0xFFC01EF8 /* DMA Channel 19 Current Y Count Register */
676
677#define MDMA1_D0_NEXT_DESC_PTR 0xFFC01F00 /* MemDMA1 Stream 0 Destination Next Descriptor Pointer Register */
678#define MDMA1_D0_START_ADDR 0xFFC01F04 /* MemDMA1 Stream 0 Destination Start Address Register */
679#define MDMA1_D0_CONFIG 0xFFC01F08 /* MemDMA1 Stream 0 Destination Configuration Register */
680#define MDMA1_D0_X_COUNT 0xFFC01F10 /* MemDMA1 Stream 0 Destination X Count Register */
681#define MDMA1_D0_X_MODIFY 0xFFC01F14 /* MemDMA1 Stream 0 Destination X Modify Register */
682#define MDMA1_D0_Y_COUNT 0xFFC01F18 /* MemDMA1 Stream 0 Destination Y Count Register */
683#define MDMA1_D0_Y_MODIFY 0xFFC01F1C /* MemDMA1 Stream 0 Destination Y Modify Register */
684#define MDMA1_D0_CURR_DESC_PTR 0xFFC01F20 /* MemDMA1 Stream 0 Destination Current Descriptor Pointer Register */
685#define MDMA1_D0_CURR_ADDR 0xFFC01F24 /* MemDMA1 Stream 0 Destination Current Address Register */
686#define MDMA1_D0_IRQ_STATUS 0xFFC01F28 /* MemDMA1 Stream 0 Destination Interrupt/Status Register */
687#define MDMA1_D0_PERIPHERAL_MAP 0xFFC01F2C /* MemDMA1 Stream 0 Destination Peripheral Map Register */
688#define MDMA1_D0_CURR_X_COUNT 0xFFC01F30 /* MemDMA1 Stream 0 Destination Current X Count Register */
689#define MDMA1_D0_CURR_Y_COUNT 0xFFC01F38 /* MemDMA1 Stream 0 Destination Current Y Count Register */
690
691#define MDMA1_S0_NEXT_DESC_PTR 0xFFC01F40 /* MemDMA1 Stream 0 Source Next Descriptor Pointer Register */
692#define MDMA1_S0_START_ADDR 0xFFC01F44 /* MemDMA1 Stream 0 Source Start Address Register */
693#define MDMA1_S0_CONFIG 0xFFC01F48 /* MemDMA1 Stream 0 Source Configuration Register */
694#define MDMA1_S0_X_COUNT 0xFFC01F50 /* MemDMA1 Stream 0 Source X Count Register */
695#define MDMA1_S0_X_MODIFY 0xFFC01F54 /* MemDMA1 Stream 0 Source X Modify Register */
696#define MDMA1_S0_Y_COUNT 0xFFC01F58 /* MemDMA1 Stream 0 Source Y Count Register */
697#define MDMA1_S0_Y_MODIFY 0xFFC01F5C /* MemDMA1 Stream 0 Source Y Modify Register */
698#define MDMA1_S0_CURR_DESC_PTR 0xFFC01F60 /* MemDMA1 Stream 0 Source Current Descriptor Pointer Register */
699#define MDMA1_S0_CURR_ADDR 0xFFC01F64 /* MemDMA1 Stream 0 Source Current Address Register */
700#define MDMA1_S0_IRQ_STATUS 0xFFC01F68 /* MemDMA1 Stream 0 Source Interrupt/Status Register */
701#define MDMA1_S0_PERIPHERAL_MAP 0xFFC01F6C /* MemDMA1 Stream 0 Source Peripheral Map Register */
702#define MDMA1_S0_CURR_X_COUNT 0xFFC01F70 /* MemDMA1 Stream 0 Source Current X Count Register */
703#define MDMA1_S0_CURR_Y_COUNT 0xFFC01F78 /* MemDMA1 Stream 0 Source Current Y Count Register */
704
705#define MDMA1_D1_NEXT_DESC_PTR 0xFFC01F80 /* MemDMA1 Stream 1 Destination Next Descriptor Pointer Register */
706#define MDMA1_D1_START_ADDR 0xFFC01F84 /* MemDMA1 Stream 1 Destination Start Address Register */
707#define MDMA1_D1_CONFIG 0xFFC01F88 /* MemDMA1 Stream 1 Destination Configuration Register */
708#define MDMA1_D1_X_COUNT 0xFFC01F90 /* MemDMA1 Stream 1 Destination X Count Register */
709#define MDMA1_D1_X_MODIFY 0xFFC01F94 /* MemDMA1 Stream 1 Destination X Modify Register */
710#define MDMA1_D1_Y_COUNT 0xFFC01F98 /* MemDMA1 Stream 1 Destination Y Count Register */
711#define MDMA1_D1_Y_MODIFY 0xFFC01F9C /* MemDMA1 Stream 1 Destination Y Modify Register */
712#define MDMA1_D1_CURR_DESC_PTR 0xFFC01FA0 /* MemDMA1 Stream 1 Destination Current Descriptor Pointer Register */
713#define MDMA1_D1_CURR_ADDR 0xFFC01FA4 /* MemDMA1 Stream 1 Destination Current Address Register */
714#define MDMA1_D1_IRQ_STATUS 0xFFC01FA8 /* MemDMA1 Stream 1 Destination Interrupt/Status Register */
715#define MDMA1_D1_PERIPHERAL_MAP 0xFFC01FAC /* MemDMA1 Stream 1 Destination Peripheral Map Register */
716#define MDMA1_D1_CURR_X_COUNT 0xFFC01FB0 /* MemDMA1 Stream 1 Destination Current X Count Register */
717#define MDMA1_D1_CURR_Y_COUNT 0xFFC01FB8 /* MemDMA1 Stream 1 Destination Current Y Count Register */
718
719#define MDMA1_S1_NEXT_DESC_PTR 0xFFC01FC0 /* MemDMA1 Stream 1 Source Next Descriptor Pointer Register */
720#define MDMA1_S1_START_ADDR 0xFFC01FC4 /* MemDMA1 Stream 1 Source Start Address Register */
721#define MDMA1_S1_CONFIG 0xFFC01FC8 /* MemDMA1 Stream 1 Source Configuration Register */
722#define MDMA1_S1_X_COUNT 0xFFC01FD0 /* MemDMA1 Stream 1 Source X Count Register */
723#define MDMA1_S1_X_MODIFY 0xFFC01FD4 /* MemDMA1 Stream 1 Source X Modify Register */
724#define MDMA1_S1_Y_COUNT 0xFFC01FD8 /* MemDMA1 Stream 1 Source Y Count Register */
725#define MDMA1_S1_Y_MODIFY 0xFFC01FDC /* MemDMA1 Stream 1 Source Y Modify Register */
726#define MDMA1_S1_CURR_DESC_PTR 0xFFC01FE0 /* MemDMA1 Stream 1 Source Current Descriptor Pointer Register */
727#define MDMA1_S1_CURR_ADDR 0xFFC01FE4 /* MemDMA1 Stream 1 Source Current Address Register */
728#define MDMA1_S1_IRQ_STATUS 0xFFC01FE8 /* MemDMA1 Stream 1 Source Interrupt/Status Register */
729#define MDMA1_S1_PERIPHERAL_MAP 0xFFC01FEC /* MemDMA1 Stream 1 Source Peripheral Map Register */
730#define MDMA1_S1_CURR_X_COUNT 0xFFC01FF0 /* MemDMA1 Stream 1 Source Current X Count Register */
731#define MDMA1_S1_CURR_Y_COUNT 0xFFC01FF8 /* MemDMA1 Stream 1 Source Current Y Count Register */
732
733
734/* UART1 Controller (0xFFC02000 - 0xFFC020FF) */
735#define UART1_THR 0xFFC02000 /* Transmit Holding register */
736#define UART1_RBR 0xFFC02000 /* Receive Buffer register */
737#define UART1_DLL 0xFFC02000 /* Divisor Latch (Low-Byte) */
738#define UART1_IER 0xFFC02004 /* Interrupt Enable Register */
739#define UART1_DLH 0xFFC02004 /* Divisor Latch (High-Byte) */
740#define UART1_IIR 0xFFC02008 /* Interrupt Identification Register */
741#define UART1_LCR 0xFFC0200C /* Line Control Register */
742#define UART1_MCR 0xFFC02010 /* Modem Control Register */
743#define UART1_LSR 0xFFC02014 /* Line Status Register */
744#define UART1_SCR 0xFFC0201C /* SCR Scratch Register */
745#define UART1_GCTL 0xFFC02024 /* Global Control Register */
746
747
748/* UART2 Controller (0xFFC02100 - 0xFFC021FF) */
749#define UART2_THR 0xFFC02100 /* Transmit Holding register */
750#define UART2_RBR 0xFFC02100 /* Receive Buffer register */
751#define UART2_DLL 0xFFC02100 /* Divisor Latch (Low-Byte) */
752#define UART2_IER 0xFFC02104 /* Interrupt Enable Register */
753#define UART2_DLH 0xFFC02104 /* Divisor Latch (High-Byte) */
754#define UART2_IIR 0xFFC02108 /* Interrupt Identification Register */
755#define UART2_LCR 0xFFC0210C /* Line Control Register */
756#define UART2_MCR 0xFFC02110 /* Modem Control Register */
757#define UART2_LSR 0xFFC02114 /* Line Status Register */
758#define UART2_SCR 0xFFC0211C /* SCR Scratch Register */
759#define UART2_GCTL 0xFFC02124 /* Global Control Register */
760
761
762/* Two-Wire Interface 1 (0xFFC02200 - 0xFFC022FF) */
763#define TWI1_CLKDIV 0xFFC02200 /* Serial Clock Divider Register */
764#define TWI1_CONTROL 0xFFC02204 /* TWI1 Master Internal Time Reference Register */
765#define TWI1_SLAVE_CTL 0xFFC02208 /* Slave Mode Control Register */
766#define TWI1_SLAVE_STAT 0xFFC0220C /* Slave Mode Status Register */
767#define TWI1_SLAVE_ADDR 0xFFC02210 /* Slave Mode Address Register */
768#define TWI1_MASTER_CTL 0xFFC02214 /* Master Mode Control Register */
769#define TWI1_MASTER_STAT 0xFFC02218 /* Master Mode Status Register */
770#define TWI1_MASTER_ADDR 0xFFC0221C /* Master Mode Address Register */
771#define TWI1_INT_STAT 0xFFC02220 /* TWI1 Master Interrupt Register */
772#define TWI1_INT_MASK 0xFFC02224 /* TWI1 Master Interrupt Mask Register */
773#define TWI1_FIFO_CTL 0xFFC02228 /* FIFO Control Register */
774#define TWI1_FIFO_STAT 0xFFC0222C /* FIFO Status Register */
775#define TWI1_XMT_DATA8 0xFFC02280 /* FIFO Transmit Data Single Byte Register */
776#define TWI1_XMT_DATA16 0xFFC02284 /* FIFO Transmit Data Double Byte Register */
777#define TWI1_RCV_DATA8 0xFFC02288 /* FIFO Receive Data Single Byte Register */
778#define TWI1_RCV_DATA16 0xFFC0228C /* FIFO Receive Data Double Byte Register */
779#define TWI1_REGBASE TWI1_CLKDIV
780
781
782/* the following are for backwards compatibility */
783#define TWI1_PRESCALE TWI1_CONTROL
784#define TWI1_INT_SRC TWI1_INT_STAT
785#define TWI1_INT_ENABLE TWI1_INT_MASK
786
787
788/* SPI1 Controller (0xFFC02300 - 0xFFC023FF) */
789#define SPI1_CTL 0xFFC02300 /* SPI1 Control Register */
790#define SPI1_FLG 0xFFC02304 /* SPI1 Flag register */
791#define SPI1_STAT 0xFFC02308 /* SPI1 Status register */
792#define SPI1_TDBR 0xFFC0230C /* SPI1 Transmit Data Buffer Register */
793#define SPI1_RDBR 0xFFC02310 /* SPI1 Receive Data Buffer Register */
794#define SPI1_BAUD 0xFFC02314 /* SPI1 Baud rate Register */
795#define SPI1_SHADOW 0xFFC02318 /* SPI1_RDBR Shadow Register */
796#define SPI1_REGBASE SPI1_CTL
797
798/* SPI2 Controller (0xFFC02400 - 0xFFC024FF) */
799#define SPI2_CTL 0xFFC02400 /* SPI2 Control Register */
800#define SPI2_FLG 0xFFC02404 /* SPI2 Flag register */
801#define SPI2_STAT 0xFFC02408 /* SPI2 Status register */
802#define SPI2_TDBR 0xFFC0240C /* SPI2 Transmit Data Buffer Register */
803#define SPI2_RDBR 0xFFC02410 /* SPI2 Receive Data Buffer Register */
804#define SPI2_BAUD 0xFFC02414 /* SPI2 Baud rate Register */
805#define SPI2_SHADOW 0xFFC02418 /* SPI2_RDBR Shadow Register */
806#define SPI2_REGBASE SPI2_CTL
807
808/* SPORT2 Controller (0xFFC02500 - 0xFFC025FF) */
809#define SPORT2_TCR1 0xFFC02500 /* SPORT2 Transmit Configuration 1 Register */
810#define SPORT2_TCR2 0xFFC02504 /* SPORT2 Transmit Configuration 2 Register */
811#define SPORT2_TCLKDIV 0xFFC02508 /* SPORT2 Transmit Clock Divider */
812#define SPORT2_TFSDIV 0xFFC0250C /* SPORT2 Transmit Frame Sync Divider */
813#define SPORT2_TX 0xFFC02510 /* SPORT2 TX Data Register */
814#define SPORT2_RX 0xFFC02518 /* SPORT2 RX Data Register */
815#define SPORT2_RCR1 0xFFC02520 /* SPORT2 Receive Configuration 1 Register */
816#define SPORT2_RCR2 0xFFC02524 /* SPORT2 Receive Configuration 2 Register */
817#define SPORT2_RCLKDIV 0xFFC02528 /* SPORT2 Receive Clock Divider */
818#define SPORT2_RFSDIV 0xFFC0252C /* SPORT2 Receive Frame Sync Divider */
819#define SPORT2_STAT 0xFFC02530 /* SPORT2 Status Register */
820#define SPORT2_CHNL 0xFFC02534 /* SPORT2 Current Channel Register */
821#define SPORT2_MCMC1 0xFFC02538 /* SPORT2 Multi-Channel Configuration Register 1 */
822#define SPORT2_MCMC2 0xFFC0253C /* SPORT2 Multi-Channel Configuration Register 2 */
823#define SPORT2_MTCS0 0xFFC02540 /* SPORT2 Multi-Channel Transmit Select Register 0 */
824#define SPORT2_MTCS1 0xFFC02544 /* SPORT2 Multi-Channel Transmit Select Register 1 */
825#define SPORT2_MTCS2 0xFFC02548 /* SPORT2 Multi-Channel Transmit Select Register 2 */
826#define SPORT2_MTCS3 0xFFC0254C /* SPORT2 Multi-Channel Transmit Select Register 3 */
827#define SPORT2_MRCS0 0xFFC02550 /* SPORT2 Multi-Channel Receive Select Register 0 */
828#define SPORT2_MRCS1 0xFFC02554 /* SPORT2 Multi-Channel Receive Select Register 1 */
829#define SPORT2_MRCS2 0xFFC02558 /* SPORT2 Multi-Channel Receive Select Register 2 */
830#define SPORT2_MRCS3 0xFFC0255C /* SPORT2 Multi-Channel Receive Select Register 3 */
831
832
833/* SPORT3 Controller (0xFFC02600 - 0xFFC026FF) */
834#define SPORT3_TCR1 0xFFC02600 /* SPORT3 Transmit Configuration 1 Register */
835#define SPORT3_TCR2 0xFFC02604 /* SPORT3 Transmit Configuration 2 Register */
836#define SPORT3_TCLKDIV 0xFFC02608 /* SPORT3 Transmit Clock Divider */
837#define SPORT3_TFSDIV 0xFFC0260C /* SPORT3 Transmit Frame Sync Divider */
838#define SPORT3_TX 0xFFC02610 /* SPORT3 TX Data Register */
839#define SPORT3_RX 0xFFC02618 /* SPORT3 RX Data Register */
840#define SPORT3_RCR1 0xFFC02620 /* SPORT3 Receive Configuration 1 Register */
841#define SPORT3_RCR2 0xFFC02624 /* SPORT3 Receive Configuration 2 Register */
842#define SPORT3_RCLKDIV 0xFFC02628 /* SPORT3 Receive Clock Divider */
843#define SPORT3_RFSDIV 0xFFC0262C /* SPORT3 Receive Frame Sync Divider */
844#define SPORT3_STAT 0xFFC02630 /* SPORT3 Status Register */
845#define SPORT3_CHNL 0xFFC02634 /* SPORT3 Current Channel Register */
846#define SPORT3_MCMC1 0xFFC02638 /* SPORT3 Multi-Channel Configuration Register 1 */
847#define SPORT3_MCMC2 0xFFC0263C /* SPORT3 Multi-Channel Configuration Register 2 */
848#define SPORT3_MTCS0 0xFFC02640 /* SPORT3 Multi-Channel Transmit Select Register 0 */
849#define SPORT3_MTCS1 0xFFC02644 /* SPORT3 Multi-Channel Transmit Select Register 1 */
850#define SPORT3_MTCS2 0xFFC02648 /* SPORT3 Multi-Channel Transmit Select Register 2 */
851#define SPORT3_MTCS3 0xFFC0264C /* SPORT3 Multi-Channel Transmit Select Register 3 */
852#define SPORT3_MRCS0 0xFFC02650 /* SPORT3 Multi-Channel Receive Select Register 0 */
853#define SPORT3_MRCS1 0xFFC02654 /* SPORT3 Multi-Channel Receive Select Register 1 */
854#define SPORT3_MRCS2 0xFFC02658 /* SPORT3 Multi-Channel Receive Select Register 2 */
855#define SPORT3_MRCS3 0xFFC0265C /* SPORT3 Multi-Channel Receive Select Register 3 */
856
857 11
858/* Media Transceiver (MXVR) (0xFFC02700 - 0xFFC028FF) */ 12/* Media Transceiver (MXVR) (0xFFC02700 - 0xFFC028FF) */
859 13
@@ -995,1249 +149,4 @@
995#define MXVR_BLOCK_CNT 0xFFC028C0 /* MXVR Block Counter */ 149#define MXVR_BLOCK_CNT 0xFFC028C0 /* MXVR Block Counter */
996#define MXVR_PLL_CTL_2 0xFFC028C4 /* MXVR Phase Lock Loop Control Register 2 */ 150#define MXVR_PLL_CTL_2 0xFFC028C4 /* MXVR Phase Lock Loop Control Register 2 */
997 151
998
999/* CAN Controller (0xFFC02A00 - 0xFFC02FFF) */
1000/* For Mailboxes 0-15 */
1001#define CAN_MC1 0xFFC02A00 /* Mailbox config reg 1 */
1002#define CAN_MD1 0xFFC02A04 /* Mailbox direction reg 1 */
1003#define CAN_TRS1 0xFFC02A08 /* Transmit Request Set reg 1 */
1004#define CAN_TRR1 0xFFC02A0C /* Transmit Request Reset reg 1 */
1005#define CAN_TA1 0xFFC02A10 /* Transmit Acknowledge reg 1 */
1006#define CAN_AA1 0xFFC02A14 /* Transmit Abort Acknowledge reg 1 */
1007#define CAN_RMP1 0xFFC02A18 /* Receive Message Pending reg 1 */
1008#define CAN_RML1 0xFFC02A1C /* Receive Message Lost reg 1 */
1009#define CAN_MBTIF1 0xFFC02A20 /* Mailbox Transmit Interrupt Flag reg 1 */
1010#define CAN_MBRIF1 0xFFC02A24 /* Mailbox Receive Interrupt Flag reg 1 */
1011#define CAN_MBIM1 0xFFC02A28 /* Mailbox Interrupt Mask reg 1 */
1012#define CAN_RFH1 0xFFC02A2C /* Remote Frame Handling reg 1 */
1013#define CAN_OPSS1 0xFFC02A30 /* Overwrite Protection Single Shot Xmission reg 1 */
1014
1015/* For Mailboxes 16-31 */
1016#define CAN_MC2 0xFFC02A40 /* Mailbox config reg 2 */
1017#define CAN_MD2 0xFFC02A44 /* Mailbox direction reg 2 */
1018#define CAN_TRS2 0xFFC02A48 /* Transmit Request Set reg 2 */
1019#define CAN_TRR2 0xFFC02A4C /* Transmit Request Reset reg 2 */
1020#define CAN_TA2 0xFFC02A50 /* Transmit Acknowledge reg 2 */
1021#define CAN_AA2 0xFFC02A54 /* Transmit Abort Acknowledge reg 2 */
1022#define CAN_RMP2 0xFFC02A58 /* Receive Message Pending reg 2 */
1023#define CAN_RML2 0xFFC02A5C /* Receive Message Lost reg 2 */
1024#define CAN_MBTIF2 0xFFC02A60 /* Mailbox Transmit Interrupt Flag reg 2 */
1025#define CAN_MBRIF2 0xFFC02A64 /* Mailbox Receive Interrupt Flag reg 2 */
1026#define CAN_MBIM2 0xFFC02A68 /* Mailbox Interrupt Mask reg 2 */
1027#define CAN_RFH2 0xFFC02A6C /* Remote Frame Handling reg 2 */
1028#define CAN_OPSS2 0xFFC02A70 /* Overwrite Protection Single Shot Xmission reg 2 */
1029
1030#define CAN_CLOCK 0xFFC02A80 /* Bit Timing Configuration register 0 */
1031#define CAN_TIMING 0xFFC02A84 /* Bit Timing Configuration register 1 */
1032
1033#define CAN_DEBUG 0xFFC02A88 /* Debug Register */
1034/* the following is for backwards compatibility */
1035#define CAN_CNF CAN_DEBUG
1036
1037#define CAN_STATUS 0xFFC02A8C /* Global Status Register */
1038#define CAN_CEC 0xFFC02A90 /* Error Counter Register */
1039#define CAN_GIS 0xFFC02A94 /* Global Interrupt Status Register */
1040#define CAN_GIM 0xFFC02A98 /* Global Interrupt Mask Register */
1041#define CAN_GIF 0xFFC02A9C /* Global Interrupt Flag Register */
1042#define CAN_CONTROL 0xFFC02AA0 /* Master Control Register */
1043#define CAN_INTR 0xFFC02AA4 /* Interrupt Pending Register */
1044#define CAN_MBTD 0xFFC02AAC /* Mailbox Temporary Disable Feature */
1045#define CAN_EWR 0xFFC02AB0 /* Programmable Warning Level */
1046#define CAN_ESR 0xFFC02AB4 /* Error Status Register */
1047#define CAN_UCCNT 0xFFC02AC4 /* Universal Counter */
1048#define CAN_UCRC 0xFFC02AC8 /* Universal Counter Reload/Capture Register */
1049#define CAN_UCCNF 0xFFC02ACC /* Universal Counter Configuration Register */
1050
1051/* Mailbox Acceptance Masks */
1052#define CAN_AM00L 0xFFC02B00 /* Mailbox 0 Low Acceptance Mask */
1053#define CAN_AM00H 0xFFC02B04 /* Mailbox 0 High Acceptance Mask */
1054#define CAN_AM01L 0xFFC02B08 /* Mailbox 1 Low Acceptance Mask */
1055#define CAN_AM01H 0xFFC02B0C /* Mailbox 1 High Acceptance Mask */
1056#define CAN_AM02L 0xFFC02B10 /* Mailbox 2 Low Acceptance Mask */
1057#define CAN_AM02H 0xFFC02B14 /* Mailbox 2 High Acceptance Mask */
1058#define CAN_AM03L 0xFFC02B18 /* Mailbox 3 Low Acceptance Mask */
1059#define CAN_AM03H 0xFFC02B1C /* Mailbox 3 High Acceptance Mask */
1060#define CAN_AM04L 0xFFC02B20 /* Mailbox 4 Low Acceptance Mask */
1061#define CAN_AM04H 0xFFC02B24 /* Mailbox 4 High Acceptance Mask */
1062#define CAN_AM05L 0xFFC02B28 /* Mailbox 5 Low Acceptance Mask */
1063#define CAN_AM05H 0xFFC02B2C /* Mailbox 5 High Acceptance Mask */
1064#define CAN_AM06L 0xFFC02B30 /* Mailbox 6 Low Acceptance Mask */
1065#define CAN_AM06H 0xFFC02B34 /* Mailbox 6 High Acceptance Mask */
1066#define CAN_AM07L 0xFFC02B38 /* Mailbox 7 Low Acceptance Mask */
1067#define CAN_AM07H 0xFFC02B3C /* Mailbox 7 High Acceptance Mask */
1068#define CAN_AM08L 0xFFC02B40 /* Mailbox 8 Low Acceptance Mask */
1069#define CAN_AM08H 0xFFC02B44 /* Mailbox 8 High Acceptance Mask */
1070#define CAN_AM09L 0xFFC02B48 /* Mailbox 9 Low Acceptance Mask */
1071#define CAN_AM09H 0xFFC02B4C /* Mailbox 9 High Acceptance Mask */
1072#define CAN_AM10L 0xFFC02B50 /* Mailbox 10 Low Acceptance Mask */
1073#define CAN_AM10H 0xFFC02B54 /* Mailbox 10 High Acceptance Mask */
1074#define CAN_AM11L 0xFFC02B58 /* Mailbox 11 Low Acceptance Mask */
1075#define CAN_AM11H 0xFFC02B5C /* Mailbox 11 High Acceptance Mask */
1076#define CAN_AM12L 0xFFC02B60 /* Mailbox 12 Low Acceptance Mask */
1077#define CAN_AM12H 0xFFC02B64 /* Mailbox 12 High Acceptance Mask */
1078#define CAN_AM13L 0xFFC02B68 /* Mailbox 13 Low Acceptance Mask */
1079#define CAN_AM13H 0xFFC02B6C /* Mailbox 13 High Acceptance Mask */
1080#define CAN_AM14L 0xFFC02B70 /* Mailbox 14 Low Acceptance Mask */
1081#define CAN_AM14H 0xFFC02B74 /* Mailbox 14 High Acceptance Mask */
1082#define CAN_AM15L 0xFFC02B78 /* Mailbox 15 Low Acceptance Mask */
1083#define CAN_AM15H 0xFFC02B7C /* Mailbox 15 High Acceptance Mask */
1084
1085#define CAN_AM16L 0xFFC02B80 /* Mailbox 16 Low Acceptance Mask */
1086#define CAN_AM16H 0xFFC02B84 /* Mailbox 16 High Acceptance Mask */
1087#define CAN_AM17L 0xFFC02B88 /* Mailbox 17 Low Acceptance Mask */
1088#define CAN_AM17H 0xFFC02B8C /* Mailbox 17 High Acceptance Mask */
1089#define CAN_AM18L 0xFFC02B90 /* Mailbox 18 Low Acceptance Mask */
1090#define CAN_AM18H 0xFFC02B94 /* Mailbox 18 High Acceptance Mask */
1091#define CAN_AM19L 0xFFC02B98 /* Mailbox 19 Low Acceptance Mask */
1092#define CAN_AM19H 0xFFC02B9C /* Mailbox 19 High Acceptance Mask */
1093#define CAN_AM20L 0xFFC02BA0 /* Mailbox 20 Low Acceptance Mask */
1094#define CAN_AM20H 0xFFC02BA4 /* Mailbox 20 High Acceptance Mask */
1095#define CAN_AM21L 0xFFC02BA8 /* Mailbox 21 Low Acceptance Mask */
1096#define CAN_AM21H 0xFFC02BAC /* Mailbox 21 High Acceptance Mask */
1097#define CAN_AM22L 0xFFC02BB0 /* Mailbox 22 Low Acceptance Mask */
1098#define CAN_AM22H 0xFFC02BB4 /* Mailbox 22 High Acceptance Mask */
1099#define CAN_AM23L 0xFFC02BB8 /* Mailbox 23 Low Acceptance Mask */
1100#define CAN_AM23H 0xFFC02BBC /* Mailbox 23 High Acceptance Mask */
1101#define CAN_AM24L 0xFFC02BC0 /* Mailbox 24 Low Acceptance Mask */
1102#define CAN_AM24H 0xFFC02BC4 /* Mailbox 24 High Acceptance Mask */
1103#define CAN_AM25L 0xFFC02BC8 /* Mailbox 25 Low Acceptance Mask */
1104#define CAN_AM25H 0xFFC02BCC /* Mailbox 25 High Acceptance Mask */
1105#define CAN_AM26L 0xFFC02BD0 /* Mailbox 26 Low Acceptance Mask */
1106#define CAN_AM26H 0xFFC02BD4 /* Mailbox 26 High Acceptance Mask */
1107#define CAN_AM27L 0xFFC02BD8 /* Mailbox 27 Low Acceptance Mask */
1108#define CAN_AM27H 0xFFC02BDC /* Mailbox 27 High Acceptance Mask */
1109#define CAN_AM28L 0xFFC02BE0 /* Mailbox 28 Low Acceptance Mask */
1110#define CAN_AM28H 0xFFC02BE4 /* Mailbox 28 High Acceptance Mask */
1111#define CAN_AM29L 0xFFC02BE8 /* Mailbox 29 Low Acceptance Mask */
1112#define CAN_AM29H 0xFFC02BEC /* Mailbox 29 High Acceptance Mask */
1113#define CAN_AM30L 0xFFC02BF0 /* Mailbox 30 Low Acceptance Mask */
1114#define CAN_AM30H 0xFFC02BF4 /* Mailbox 30 High Acceptance Mask */
1115#define CAN_AM31L 0xFFC02BF8 /* Mailbox 31 Low Acceptance Mask */
1116#define CAN_AM31H 0xFFC02BFC /* Mailbox 31 High Acceptance Mask */
1117
1118/* CAN Acceptance Mask Macros */
1119#define CAN_AM_L(x) (CAN_AM00L+((x)*0x8))
1120#define CAN_AM_H(x) (CAN_AM00H+((x)*0x8))
1121
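/*
 * Usage sketch (illustrative, not part of the original header): the two
 * macros above index a mailbox's acceptance-mask pair by number; each
 * L/H pair is 8 bytes apart, so CAN_AM_L(5) expands to CAN_AM05L and
 * CAN_AM_H(5) to CAN_AM05H. bfin_write16() is the usual Blackfin MMR
 * accessor defined elsewhere; mask_lo/mask_hi are caller-supplied.
 */
bfin_write16(CAN_AM_L(5), mask_lo);	/* == bfin_write16(CAN_AM05L, ...) */
bfin_write16(CAN_AM_H(5), mask_hi);	/* == bfin_write16(CAN_AM05H, ...) */
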
1122/* Mailbox Registers */
1123#define CAN_MB00_DATA0 0xFFC02C00 /* Mailbox 0 Data Word 0 [15:0] Register */
1124#define CAN_MB00_DATA1 0xFFC02C04 /* Mailbox 0 Data Word 1 [31:16] Register */
1125#define CAN_MB00_DATA2 0xFFC02C08 /* Mailbox 0 Data Word 2 [47:32] Register */
1126#define CAN_MB00_DATA3 0xFFC02C0C /* Mailbox 0 Data Word 3 [63:48] Register */
1127#define CAN_MB00_LENGTH 0xFFC02C10 /* Mailbox 0 Data Length Code Register */
1128#define CAN_MB00_TIMESTAMP 0xFFC02C14 /* Mailbox 0 Time Stamp Value Register */
1129#define CAN_MB00_ID0 0xFFC02C18 /* Mailbox 0 Identifier Low Register */
1130#define CAN_MB00_ID1 0xFFC02C1C /* Mailbox 0 Identifier High Register */
1131
1132#define CAN_MB01_DATA0 0xFFC02C20 /* Mailbox 1 Data Word 0 [15:0] Register */
1133#define CAN_MB01_DATA1 0xFFC02C24 /* Mailbox 1 Data Word 1 [31:16] Register */
1134#define CAN_MB01_DATA2 0xFFC02C28 /* Mailbox 1 Data Word 2 [47:32] Register */
1135#define CAN_MB01_DATA3 0xFFC02C2C /* Mailbox 1 Data Word 3 [63:48] Register */
1136#define CAN_MB01_LENGTH 0xFFC02C30 /* Mailbox 1 Data Length Code Register */
1137#define CAN_MB01_TIMESTAMP 0xFFC02C34 /* Mailbox 1 Time Stamp Value Register */
1138#define CAN_MB01_ID0 0xFFC02C38 /* Mailbox 1 Identifier Low Register */
1139#define CAN_MB01_ID1 0xFFC02C3C /* Mailbox 1 Identifier High Register */
1140
1141#define CAN_MB02_DATA0 0xFFC02C40 /* Mailbox 2 Data Word 0 [15:0] Register */
1142#define CAN_MB02_DATA1 0xFFC02C44 /* Mailbox 2 Data Word 1 [31:16] Register */
1143#define CAN_MB02_DATA2 0xFFC02C48 /* Mailbox 2 Data Word 2 [47:32] Register */
1144#define CAN_MB02_DATA3 0xFFC02C4C /* Mailbox 2 Data Word 3 [63:48] Register */
1145#define CAN_MB02_LENGTH 0xFFC02C50 /* Mailbox 2 Data Length Code Register */
1146#define CAN_MB02_TIMESTAMP 0xFFC02C54 /* Mailbox 2 Time Stamp Value Register */
1147#define CAN_MB02_ID0 0xFFC02C58 /* Mailbox 2 Identifier Low Register */
1148#define CAN_MB02_ID1 0xFFC02C5C /* Mailbox 2 Identifier High Register */
1149
1150#define CAN_MB03_DATA0 0xFFC02C60 /* Mailbox 3 Data Word 0 [15:0] Register */
1151#define CAN_MB03_DATA1 0xFFC02C64 /* Mailbox 3 Data Word 1 [31:16] Register */
1152#define CAN_MB03_DATA2 0xFFC02C68 /* Mailbox 3 Data Word 2 [47:32] Register */
1153#define CAN_MB03_DATA3 0xFFC02C6C /* Mailbox 3 Data Word 3 [63:48] Register */
1154#define CAN_MB03_LENGTH 0xFFC02C70 /* Mailbox 3 Data Length Code Register */
1155#define CAN_MB03_TIMESTAMP 0xFFC02C74 /* Mailbox 3 Time Stamp Value Register */
1156#define CAN_MB03_ID0 0xFFC02C78 /* Mailbox 3 Identifier Low Register */
1157#define CAN_MB03_ID1 0xFFC02C7C /* Mailbox 3 Identifier High Register */
1158
1159#define CAN_MB04_DATA0 0xFFC02C80 /* Mailbox 4 Data Word 0 [15:0] Register */
1160#define CAN_MB04_DATA1 0xFFC02C84 /* Mailbox 4 Data Word 1 [31:16] Register */
1161#define CAN_MB04_DATA2 0xFFC02C88 /* Mailbox 4 Data Word 2 [47:32] Register */
1162#define CAN_MB04_DATA3 0xFFC02C8C /* Mailbox 4 Data Word 3 [63:48] Register */
1163#define CAN_MB04_LENGTH 0xFFC02C90 /* Mailbox 4 Data Length Code Register */
1164#define CAN_MB04_TIMESTAMP 0xFFC02C94 /* Mailbox 4 Time Stamp Value Register */
1165#define CAN_MB04_ID0 0xFFC02C98 /* Mailbox 4 Identifier Low Register */
1166#define CAN_MB04_ID1 0xFFC02C9C /* Mailbox 4 Identifier High Register */
1167
1168#define CAN_MB05_DATA0 0xFFC02CA0 /* Mailbox 5 Data Word 0 [15:0] Register */
1169#define CAN_MB05_DATA1 0xFFC02CA4 /* Mailbox 5 Data Word 1 [31:16] Register */
1170#define CAN_MB05_DATA2 0xFFC02CA8 /* Mailbox 5 Data Word 2 [47:32] Register */
1171#define CAN_MB05_DATA3 0xFFC02CAC /* Mailbox 5 Data Word 3 [63:48] Register */
1172#define CAN_MB05_LENGTH 0xFFC02CB0 /* Mailbox 5 Data Length Code Register */
1173#define CAN_MB05_TIMESTAMP 0xFFC02CB4 /* Mailbox 5 Time Stamp Value Register */
1174#define CAN_MB05_ID0 0xFFC02CB8 /* Mailbox 5 Identifier Low Register */
1175#define CAN_MB05_ID1 0xFFC02CBC /* Mailbox 5 Identifier High Register */
1176
1177#define CAN_MB06_DATA0 0xFFC02CC0 /* Mailbox 6 Data Word 0 [15:0] Register */
1178#define CAN_MB06_DATA1 0xFFC02CC4 /* Mailbox 6 Data Word 1 [31:16] Register */
1179#define CAN_MB06_DATA2 0xFFC02CC8 /* Mailbox 6 Data Word 2 [47:32] Register */
1180#define CAN_MB06_DATA3 0xFFC02CCC /* Mailbox 6 Data Word 3 [63:48] Register */
1181#define CAN_MB06_LENGTH 0xFFC02CD0 /* Mailbox 6 Data Length Code Register */
1182#define CAN_MB06_TIMESTAMP 0xFFC02CD4 /* Mailbox 6 Time Stamp Value Register */
1183#define CAN_MB06_ID0 0xFFC02CD8 /* Mailbox 6 Identifier Low Register */
1184#define CAN_MB06_ID1 0xFFC02CDC /* Mailbox 6 Identifier High Register */
1185
1186#define CAN_MB07_DATA0 0xFFC02CE0 /* Mailbox 7 Data Word 0 [15:0] Register */
1187#define CAN_MB07_DATA1 0xFFC02CE4 /* Mailbox 7 Data Word 1 [31:16] Register */
1188#define CAN_MB07_DATA2 0xFFC02CE8 /* Mailbox 7 Data Word 2 [47:32] Register */
1189#define CAN_MB07_DATA3 0xFFC02CEC /* Mailbox 7 Data Word 3 [63:48] Register */
1190#define CAN_MB07_LENGTH 0xFFC02CF0 /* Mailbox 7 Data Length Code Register */
1191#define CAN_MB07_TIMESTAMP 0xFFC02CF4 /* Mailbox 7 Time Stamp Value Register */
1192#define CAN_MB07_ID0 0xFFC02CF8 /* Mailbox 7 Identifier Low Register */
1193#define CAN_MB07_ID1 0xFFC02CFC /* Mailbox 7 Identifier High Register */
1194
1195#define CAN_MB08_DATA0 0xFFC02D00 /* Mailbox 8 Data Word 0 [15:0] Register */
1196#define CAN_MB08_DATA1 0xFFC02D04 /* Mailbox 8 Data Word 1 [31:16] Register */
1197#define CAN_MB08_DATA2 0xFFC02D08 /* Mailbox 8 Data Word 2 [47:32] Register */
1198#define CAN_MB08_DATA3 0xFFC02D0C /* Mailbox 8 Data Word 3 [63:48] Register */
1199#define CAN_MB08_LENGTH 0xFFC02D10 /* Mailbox 8 Data Length Code Register */
1200#define CAN_MB08_TIMESTAMP 0xFFC02D14 /* Mailbox 8 Time Stamp Value Register */
1201#define CAN_MB08_ID0 0xFFC02D18 /* Mailbox 8 Identifier Low Register */
1202#define CAN_MB08_ID1 0xFFC02D1C /* Mailbox 8 Identifier High Register */
1203
1204#define CAN_MB09_DATA0 0xFFC02D20 /* Mailbox 9 Data Word 0 [15:0] Register */
1205#define CAN_MB09_DATA1 0xFFC02D24 /* Mailbox 9 Data Word 1 [31:16] Register */
1206#define CAN_MB09_DATA2 0xFFC02D28 /* Mailbox 9 Data Word 2 [47:32] Register */
1207#define CAN_MB09_DATA3 0xFFC02D2C /* Mailbox 9 Data Word 3 [63:48] Register */
1208#define CAN_MB09_LENGTH 0xFFC02D30 /* Mailbox 9 Data Length Code Register */
1209#define CAN_MB09_TIMESTAMP 0xFFC02D34 /* Mailbox 9 Time Stamp Value Register */
1210#define CAN_MB09_ID0 0xFFC02D38 /* Mailbox 9 Identifier Low Register */
1211#define CAN_MB09_ID1 0xFFC02D3C /* Mailbox 9 Identifier High Register */
1212
1213#define CAN_MB10_DATA0 0xFFC02D40 /* Mailbox 10 Data Word 0 [15:0] Register */
1214#define CAN_MB10_DATA1 0xFFC02D44 /* Mailbox 10 Data Word 1 [31:16] Register */
1215#define CAN_MB10_DATA2 0xFFC02D48 /* Mailbox 10 Data Word 2 [47:32] Register */
1216#define CAN_MB10_DATA3 0xFFC02D4C /* Mailbox 10 Data Word 3 [63:48] Register */
1217#define CAN_MB10_LENGTH 0xFFC02D50 /* Mailbox 10 Data Length Code Register */
1218#define CAN_MB10_TIMESTAMP 0xFFC02D54 /* Mailbox 10 Time Stamp Value Register */
1219#define CAN_MB10_ID0 0xFFC02D58 /* Mailbox 10 Identifier Low Register */
1220#define CAN_MB10_ID1 0xFFC02D5C /* Mailbox 10 Identifier High Register */
1221
1222#define CAN_MB11_DATA0 0xFFC02D60 /* Mailbox 11 Data Word 0 [15:0] Register */
1223#define CAN_MB11_DATA1 0xFFC02D64 /* Mailbox 11 Data Word 1 [31:16] Register */
1224#define CAN_MB11_DATA2 0xFFC02D68 /* Mailbox 11 Data Word 2 [47:32] Register */
1225#define CAN_MB11_DATA3 0xFFC02D6C /* Mailbox 11 Data Word 3 [63:48] Register */
1226#define CAN_MB11_LENGTH 0xFFC02D70 /* Mailbox 11 Data Length Code Register */
1227#define CAN_MB11_TIMESTAMP 0xFFC02D74 /* Mailbox 11 Time Stamp Value Register */
1228#define CAN_MB11_ID0 0xFFC02D78 /* Mailbox 11 Identifier Low Register */
1229#define CAN_MB11_ID1 0xFFC02D7C /* Mailbox 11 Identifier High Register */
1230
1231#define CAN_MB12_DATA0 0xFFC02D80 /* Mailbox 12 Data Word 0 [15:0] Register */
1232#define CAN_MB12_DATA1 0xFFC02D84 /* Mailbox 12 Data Word 1 [31:16] Register */
1233#define CAN_MB12_DATA2 0xFFC02D88 /* Mailbox 12 Data Word 2 [47:32] Register */
1234#define CAN_MB12_DATA3 0xFFC02D8C /* Mailbox 12 Data Word 3 [63:48] Register */
1235#define CAN_MB12_LENGTH 0xFFC02D90 /* Mailbox 12 Data Length Code Register */
1236#define CAN_MB12_TIMESTAMP 0xFFC02D94 /* Mailbox 12 Time Stamp Value Register */
1237#define CAN_MB12_ID0 0xFFC02D98 /* Mailbox 12 Identifier Low Register */
1238#define CAN_MB12_ID1 0xFFC02D9C /* Mailbox 12 Identifier High Register */
1239
1240#define CAN_MB13_DATA0 0xFFC02DA0 /* Mailbox 13 Data Word 0 [15:0] Register */
1241#define CAN_MB13_DATA1 0xFFC02DA4 /* Mailbox 13 Data Word 1 [31:16] Register */
1242#define CAN_MB13_DATA2 0xFFC02DA8 /* Mailbox 13 Data Word 2 [47:32] Register */
1243#define CAN_MB13_DATA3 0xFFC02DAC /* Mailbox 13 Data Word 3 [63:48] Register */
1244#define CAN_MB13_LENGTH 0xFFC02DB0 /* Mailbox 13 Data Length Code Register */
1245#define CAN_MB13_TIMESTAMP 0xFFC02DB4 /* Mailbox 13 Time Stamp Value Register */
1246#define CAN_MB13_ID0 0xFFC02DB8 /* Mailbox 13 Identifier Low Register */
1247#define CAN_MB13_ID1 0xFFC02DBC /* Mailbox 13 Identifier High Register */
1248
1249#define CAN_MB14_DATA0 0xFFC02DC0 /* Mailbox 14 Data Word 0 [15:0] Register */
1250#define CAN_MB14_DATA1 0xFFC02DC4 /* Mailbox 14 Data Word 1 [31:16] Register */
1251#define CAN_MB14_DATA2 0xFFC02DC8 /* Mailbox 14 Data Word 2 [47:32] Register */
1252#define CAN_MB14_DATA3 0xFFC02DCC /* Mailbox 14 Data Word 3 [63:48] Register */
1253#define CAN_MB14_LENGTH 0xFFC02DD0 /* Mailbox 14 Data Length Code Register */
1254#define CAN_MB14_TIMESTAMP 0xFFC02DD4 /* Mailbox 14 Time Stamp Value Register */
1255#define CAN_MB14_ID0 0xFFC02DD8 /* Mailbox 14 Identifier Low Register */
1256#define CAN_MB14_ID1 0xFFC02DDC /* Mailbox 14 Identifier High Register */
1257
1258#define CAN_MB15_DATA0 0xFFC02DE0 /* Mailbox 15 Data Word 0 [15:0] Register */
1259#define CAN_MB15_DATA1 0xFFC02DE4 /* Mailbox 15 Data Word 1 [31:16] Register */
1260#define CAN_MB15_DATA2 0xFFC02DE8 /* Mailbox 15 Data Word 2 [47:32] Register */
1261#define CAN_MB15_DATA3 0xFFC02DEC /* Mailbox 15 Data Word 3 [63:48] Register */
1262#define CAN_MB15_LENGTH 0xFFC02DF0 /* Mailbox 15 Data Length Code Register */
1263#define CAN_MB15_TIMESTAMP 0xFFC02DF4 /* Mailbox 15 Time Stamp Value Register */
1264#define CAN_MB15_ID0 0xFFC02DF8 /* Mailbox 15 Identifier Low Register */
1265#define CAN_MB15_ID1 0xFFC02DFC /* Mailbox 15 Identifier High Register */
1266
1267#define CAN_MB16_DATA0 0xFFC02E00 /* Mailbox 16 Data Word 0 [15:0] Register */
1268#define CAN_MB16_DATA1 0xFFC02E04 /* Mailbox 16 Data Word 1 [31:16] Register */
1269#define CAN_MB16_DATA2 0xFFC02E08 /* Mailbox 16 Data Word 2 [47:32] Register */
1270#define CAN_MB16_DATA3 0xFFC02E0C /* Mailbox 16 Data Word 3 [63:48] Register */
1271#define CAN_MB16_LENGTH 0xFFC02E10 /* Mailbox 16 Data Length Code Register */
1272#define CAN_MB16_TIMESTAMP 0xFFC02E14 /* Mailbox 16 Time Stamp Value Register */
1273#define CAN_MB16_ID0 0xFFC02E18 /* Mailbox 16 Identifier Low Register */
1274#define CAN_MB16_ID1 0xFFC02E1C /* Mailbox 16 Identifier High Register */
1275
1276#define CAN_MB17_DATA0 0xFFC02E20 /* Mailbox 17 Data Word 0 [15:0] Register */
1277#define CAN_MB17_DATA1 0xFFC02E24 /* Mailbox 17 Data Word 1 [31:16] Register */
1278#define CAN_MB17_DATA2 0xFFC02E28 /* Mailbox 17 Data Word 2 [47:32] Register */
1279#define CAN_MB17_DATA3 0xFFC02E2C /* Mailbox 17 Data Word 3 [63:48] Register */
1280#define CAN_MB17_LENGTH 0xFFC02E30 /* Mailbox 17 Data Length Code Register */
1281#define CAN_MB17_TIMESTAMP 0xFFC02E34 /* Mailbox 17 Time Stamp Value Register */
1282#define CAN_MB17_ID0 0xFFC02E38 /* Mailbox 17 Identifier Low Register */
1283#define CAN_MB17_ID1 0xFFC02E3C /* Mailbox 17 Identifier High Register */
1284
1285#define CAN_MB18_DATA0 0xFFC02E40 /* Mailbox 18 Data Word 0 [15:0] Register */
1286#define CAN_MB18_DATA1 0xFFC02E44 /* Mailbox 18 Data Word 1 [31:16] Register */
1287#define CAN_MB18_DATA2 0xFFC02E48 /* Mailbox 18 Data Word 2 [47:32] Register */
1288#define CAN_MB18_DATA3 0xFFC02E4C /* Mailbox 18 Data Word 3 [63:48] Register */
1289#define CAN_MB18_LENGTH 0xFFC02E50 /* Mailbox 18 Data Length Code Register */
1290#define CAN_MB18_TIMESTAMP 0xFFC02E54 /* Mailbox 18 Time Stamp Value Register */
1291#define CAN_MB18_ID0 0xFFC02E58 /* Mailbox 18 Identifier Low Register */
1292#define CAN_MB18_ID1 0xFFC02E5C /* Mailbox 18 Identifier High Register */
1293
1294#define CAN_MB19_DATA0 0xFFC02E60 /* Mailbox 19 Data Word 0 [15:0] Register */
1295#define CAN_MB19_DATA1 0xFFC02E64 /* Mailbox 19 Data Word 1 [31:16] Register */
1296#define CAN_MB19_DATA2 0xFFC02E68 /* Mailbox 19 Data Word 2 [47:32] Register */
1297#define CAN_MB19_DATA3 0xFFC02E6C /* Mailbox 19 Data Word 3 [63:48] Register */
1298#define CAN_MB19_LENGTH 0xFFC02E70 /* Mailbox 19 Data Length Code Register */
1299#define CAN_MB19_TIMESTAMP 0xFFC02E74 /* Mailbox 19 Time Stamp Value Register */
1300#define CAN_MB19_ID0 0xFFC02E78 /* Mailbox 19 Identifier Low Register */
1301#define CAN_MB19_ID1 0xFFC02E7C /* Mailbox 19 Identifier High Register */
1302
1303#define CAN_MB20_DATA0 0xFFC02E80 /* Mailbox 20 Data Word 0 [15:0] Register */
1304#define CAN_MB20_DATA1 0xFFC02E84 /* Mailbox 20 Data Word 1 [31:16] Register */
1305#define CAN_MB20_DATA2 0xFFC02E88 /* Mailbox 20 Data Word 2 [47:32] Register */
1306#define CAN_MB20_DATA3 0xFFC02E8C /* Mailbox 20 Data Word 3 [63:48] Register */
1307#define CAN_MB20_LENGTH 0xFFC02E90 /* Mailbox 20 Data Length Code Register */
1308#define CAN_MB20_TIMESTAMP 0xFFC02E94 /* Mailbox 20 Time Stamp Value Register */
1309#define CAN_MB20_ID0 0xFFC02E98 /* Mailbox 20 Identifier Low Register */
1310#define CAN_MB20_ID1 0xFFC02E9C /* Mailbox 20 Identifier High Register */
1311
1312#define CAN_MB21_DATA0 0xFFC02EA0 /* Mailbox 21 Data Word 0 [15:0] Register */
1313#define CAN_MB21_DATA1 0xFFC02EA4 /* Mailbox 21 Data Word 1 [31:16] Register */
1314#define CAN_MB21_DATA2 0xFFC02EA8 /* Mailbox 21 Data Word 2 [47:32] Register */
1315#define CAN_MB21_DATA3 0xFFC02EAC /* Mailbox 21 Data Word 3 [63:48] Register */
1316#define CAN_MB21_LENGTH 0xFFC02EB0 /* Mailbox 21 Data Length Code Register */
1317#define CAN_MB21_TIMESTAMP 0xFFC02EB4 /* Mailbox 21 Time Stamp Value Register */
1318#define CAN_MB21_ID0 0xFFC02EB8 /* Mailbox 21 Identifier Low Register */
1319#define CAN_MB21_ID1 0xFFC02EBC /* Mailbox 21 Identifier High Register */
1320
1321#define CAN_MB22_DATA0 0xFFC02EC0 /* Mailbox 22 Data Word 0 [15:0] Register */
1322#define CAN_MB22_DATA1 0xFFC02EC4 /* Mailbox 22 Data Word 1 [31:16] Register */
1323#define CAN_MB22_DATA2 0xFFC02EC8 /* Mailbox 22 Data Word 2 [47:32] Register */
1324#define CAN_MB22_DATA3 0xFFC02ECC /* Mailbox 22 Data Word 3 [63:48] Register */
1325#define CAN_MB22_LENGTH 0xFFC02ED0 /* Mailbox 22 Data Length Code Register */
1326#define CAN_MB22_TIMESTAMP 0xFFC02ED4 /* Mailbox 22 Time Stamp Value Register */
1327#define CAN_MB22_ID0 0xFFC02ED8 /* Mailbox 22 Identifier Low Register */
1328#define CAN_MB22_ID1 0xFFC02EDC /* Mailbox 22 Identifier High Register */
1329
1330#define CAN_MB23_DATA0 0xFFC02EE0 /* Mailbox 23 Data Word 0 [15:0] Register */
1331#define CAN_MB23_DATA1 0xFFC02EE4 /* Mailbox 23 Data Word 1 [31:16] Register */
1332#define CAN_MB23_DATA2 0xFFC02EE8 /* Mailbox 23 Data Word 2 [47:32] Register */
1333#define CAN_MB23_DATA3 0xFFC02EEC /* Mailbox 23 Data Word 3 [63:48] Register */
1334#define CAN_MB23_LENGTH 0xFFC02EF0 /* Mailbox 23 Data Length Code Register */
1335#define CAN_MB23_TIMESTAMP 0xFFC02EF4 /* Mailbox 23 Time Stamp Value Register */
1336#define CAN_MB23_ID0 0xFFC02EF8 /* Mailbox 23 Identifier Low Register */
1337#define CAN_MB23_ID1 0xFFC02EFC /* Mailbox 23 Identifier High Register */
1338
1339#define CAN_MB24_DATA0 0xFFC02F00 /* Mailbox 24 Data Word 0 [15:0] Register */
1340#define CAN_MB24_DATA1 0xFFC02F04 /* Mailbox 24 Data Word 1 [31:16] Register */
1341#define CAN_MB24_DATA2 0xFFC02F08 /* Mailbox 24 Data Word 2 [47:32] Register */
1342#define CAN_MB24_DATA3 0xFFC02F0C /* Mailbox 24 Data Word 3 [63:48] Register */
1343#define CAN_MB24_LENGTH 0xFFC02F10 /* Mailbox 24 Data Length Code Register */
1344#define CAN_MB24_TIMESTAMP 0xFFC02F14 /* Mailbox 24 Time Stamp Value Register */
1345#define CAN_MB24_ID0 0xFFC02F18 /* Mailbox 24 Identifier Low Register */
1346#define CAN_MB24_ID1 0xFFC02F1C /* Mailbox 24 Identifier High Register */
1347
1348#define CAN_MB25_DATA0 0xFFC02F20 /* Mailbox 25 Data Word 0 [15:0] Register */
1349#define CAN_MB25_DATA1 0xFFC02F24 /* Mailbox 25 Data Word 1 [31:16] Register */
1350#define CAN_MB25_DATA2 0xFFC02F28 /* Mailbox 25 Data Word 2 [47:32] Register */
1351#define CAN_MB25_DATA3 0xFFC02F2C /* Mailbox 25 Data Word 3 [63:48] Register */
1352#define CAN_MB25_LENGTH 0xFFC02F30 /* Mailbox 25 Data Length Code Register */
1353#define CAN_MB25_TIMESTAMP 0xFFC02F34 /* Mailbox 25 Time Stamp Value Register */
1354#define CAN_MB25_ID0 0xFFC02F38 /* Mailbox 25 Identifier Low Register */
1355#define CAN_MB25_ID1 0xFFC02F3C /* Mailbox 25 Identifier High Register */
1356
1357#define CAN_MB26_DATA0 0xFFC02F40 /* Mailbox 26 Data Word 0 [15:0] Register */
1358#define CAN_MB26_DATA1 0xFFC02F44 /* Mailbox 26 Data Word 1 [31:16] Register */
1359#define CAN_MB26_DATA2 0xFFC02F48 /* Mailbox 26 Data Word 2 [47:32] Register */
1360#define CAN_MB26_DATA3 0xFFC02F4C /* Mailbox 26 Data Word 3 [63:48] Register */
1361#define CAN_MB26_LENGTH 0xFFC02F50 /* Mailbox 26 Data Length Code Register */
1362#define CAN_MB26_TIMESTAMP 0xFFC02F54 /* Mailbox 26 Time Stamp Value Register */
1363#define CAN_MB26_ID0 0xFFC02F58 /* Mailbox 26 Identifier Low Register */
1364#define CAN_MB26_ID1 0xFFC02F5C /* Mailbox 26 Identifier High Register */
1365
1366#define CAN_MB27_DATA0 0xFFC02F60 /* Mailbox 27 Data Word 0 [15:0] Register */
1367#define CAN_MB27_DATA1 0xFFC02F64 /* Mailbox 27 Data Word 1 [31:16] Register */
1368#define CAN_MB27_DATA2 0xFFC02F68 /* Mailbox 27 Data Word 2 [47:32] Register */
1369#define CAN_MB27_DATA3 0xFFC02F6C /* Mailbox 27 Data Word 3 [63:48] Register */
1370#define CAN_MB27_LENGTH 0xFFC02F70 /* Mailbox 27 Data Length Code Register */
1371#define CAN_MB27_TIMESTAMP 0xFFC02F74 /* Mailbox 27 Time Stamp Value Register */
1372#define CAN_MB27_ID0 0xFFC02F78 /* Mailbox 27 Identifier Low Register */
1373#define CAN_MB27_ID1 0xFFC02F7C /* Mailbox 27 Identifier High Register */
1374
1375#define CAN_MB28_DATA0 0xFFC02F80 /* Mailbox 28 Data Word 0 [15:0] Register */
1376#define CAN_MB28_DATA1 0xFFC02F84 /* Mailbox 28 Data Word 1 [31:16] Register */
1377#define CAN_MB28_DATA2 0xFFC02F88 /* Mailbox 28 Data Word 2 [47:32] Register */
1378#define CAN_MB28_DATA3 0xFFC02F8C /* Mailbox 28 Data Word 3 [63:48] Register */
1379#define CAN_MB28_LENGTH 0xFFC02F90 /* Mailbox 28 Data Length Code Register */
1380#define CAN_MB28_TIMESTAMP 0xFFC02F94 /* Mailbox 28 Time Stamp Value Register */
1381#define CAN_MB28_ID0 0xFFC02F98 /* Mailbox 28 Identifier Low Register */
1382#define CAN_MB28_ID1 0xFFC02F9C /* Mailbox 28 Identifier High Register */
1383
1384#define CAN_MB29_DATA0 0xFFC02FA0 /* Mailbox 29 Data Word 0 [15:0] Register */
1385#define CAN_MB29_DATA1 0xFFC02FA4 /* Mailbox 29 Data Word 1 [31:16] Register */
1386#define CAN_MB29_DATA2 0xFFC02FA8 /* Mailbox 29 Data Word 2 [47:32] Register */
1387#define CAN_MB29_DATA3 0xFFC02FAC /* Mailbox 29 Data Word 3 [63:48] Register */
1388#define CAN_MB29_LENGTH 0xFFC02FB0 /* Mailbox 29 Data Length Code Register */
1389#define CAN_MB29_TIMESTAMP 0xFFC02FB4 /* Mailbox 29 Time Stamp Value Register */
1390#define CAN_MB29_ID0 0xFFC02FB8 /* Mailbox 29 Identifier Low Register */
1391#define CAN_MB29_ID1 0xFFC02FBC /* Mailbox 29 Identifier High Register */
1392
1393#define CAN_MB30_DATA0 0xFFC02FC0 /* Mailbox 30 Data Word 0 [15:0] Register */
1394#define CAN_MB30_DATA1 0xFFC02FC4 /* Mailbox 30 Data Word 1 [31:16] Register */
1395#define CAN_MB30_DATA2 0xFFC02FC8 /* Mailbox 30 Data Word 2 [47:32] Register */
1396#define CAN_MB30_DATA3 0xFFC02FCC /* Mailbox 30 Data Word 3 [63:48] Register */
1397#define CAN_MB30_LENGTH 0xFFC02FD0 /* Mailbox 30 Data Length Code Register */
1398#define CAN_MB30_TIMESTAMP 0xFFC02FD4 /* Mailbox 30 Time Stamp Value Register */
1399#define CAN_MB30_ID0 0xFFC02FD8 /* Mailbox 30 Identifier Low Register */
1400#define CAN_MB30_ID1 0xFFC02FDC /* Mailbox 30 Identifier High Register */
1401
1402#define CAN_MB31_DATA0 0xFFC02FE0 /* Mailbox 31 Data Word 0 [15:0] Register */
1403#define CAN_MB31_DATA1 0xFFC02FE4 /* Mailbox 31 Data Word 1 [31:16] Register */
1404#define CAN_MB31_DATA2 0xFFC02FE8 /* Mailbox 31 Data Word 2 [47:32] Register */
1405#define CAN_MB31_DATA3 0xFFC02FEC /* Mailbox 31 Data Word 3 [63:48] Register */
1406#define CAN_MB31_LENGTH 0xFFC02FF0 /* Mailbox 31 Data Length Code Register */
1407#define CAN_MB31_TIMESTAMP 0xFFC02FF4 /* Mailbox 31 Time Stamp Value Register */
1408#define CAN_MB31_ID0 0xFFC02FF8 /* Mailbox 31 Identifier Low Register */
1409#define CAN_MB31_ID1 0xFFC02FFC /* Mailbox 31 Identifier High Register */
1410
1411/* CAN Mailbox Area Macros */
1412#define CAN_MB_ID1(x) (CAN_MB00_ID1+((x)*0x20))
1413#define CAN_MB_ID0(x) (CAN_MB00_ID0+((x)*0x20))
1414#define CAN_MB_TIMESTAMP(x) (CAN_MB00_TIMESTAMP+((x)*0x20))
1415#define CAN_MB_LENGTH(x) (CAN_MB00_LENGTH+((x)*0x20))
1416#define CAN_MB_DATA3(x) (CAN_MB00_DATA3+((x)*0x20))
1417#define CAN_MB_DATA2(x) (CAN_MB00_DATA2+((x)*0x20))
1418#define CAN_MB_DATA1(x) (CAN_MB00_DATA1+((x)*0x20))
1419#define CAN_MB_DATA0(x) (CAN_MB00_DATA0+((x)*0x20))
1420
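/*
 * Usage sketch (illustrative, not part of the original header): read a
 * received frame out of mailbox `mb` with the area macros above; each
 * mailbox occupies 0x20 bytes, so the macros simply index from mailbox 0.
 * bfin_read16() is the usual Blackfin MMR accessor defined elsewhere.
 */
unsigned short dlc = bfin_read16(CAN_MB_LENGTH(mb)) & 0xF;	/* data length code */
unsigned short d0  = bfin_read16(CAN_MB_DATA0(mb));		/* data bytes [15:0] */
unsigned short id1 = bfin_read16(CAN_MB_ID1(mb));		/* identifier (high) */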
1421
1422/********************************************************************************* */
1423/* System MMR Register Bits and Macros */
1424/********************************************************************************* */
1425
1426/* SWRST Mask */
1427#define SYSTEM_RESET 0x0007 /* Initiates A System Software Reset */
1428#define DOUBLE_FAULT 0x0008 /* Core Double Fault Causes Reset */
1429#define RESET_DOUBLE 0x2000 /* SW Reset Generated By Core Double-Fault */
1430#define RESET_WDOG 0x4000 /* SW Reset Generated By Watchdog Timer */
1431#define RESET_SOFTWARE 0x8000 /* SW Reset Occurred Since Last Read Of SWRST */
1432
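/*
 * Usage sketch (illustrative, not part of the original header): decode
 * the cause of the last reset from SWRST. bfin_read_SWRST() is assumed
 * to be the generated MMR accessor; any volatile 16-bit read of the
 * SWRST register (defined earlier in this file) would do.
 */
unsigned short swrst = bfin_read_SWRST();
if (swrst & RESET_WDOG)
	/* last reset was raised by the watchdog timer */;
else if (swrst & RESET_DOUBLE)
	/* last reset was raised by a core double-fault */;
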
1433/* SYSCR Masks */
1434#define BMODE 0x0006 /* Boot Mode - Latched During HW Reset From Mode Pins */
1435#define NOBOOT 0x0010 /* Execute From L1 or ASYNC Bank 0 When BMODE = 0 */
1436
1437
1438/* ************* SYSTEM INTERRUPT CONTROLLER MASKS ***************** */
1439
1440/* Peripheral Masks For SIC0_ISR, SIC0_IWR, SIC0_IMASK */
1441#define PLL_WAKEUP_IRQ 0x00000001 /* PLL Wakeup Interrupt Request */
1442#define DMAC0_ERR_IRQ 0x00000002 /* DMA Controller 0 Error Interrupt Request */
1443#define PPI_ERR_IRQ 0x00000004 /* PPI Error Interrupt Request */
1444#define SPORT0_ERR_IRQ 0x00000008 /* SPORT0 Error Interrupt Request */
1445#define SPORT1_ERR_IRQ 0x00000010 /* SPORT1 Error Interrupt Request */
1446#define SPI0_ERR_IRQ 0x00000020 /* SPI0 Error Interrupt Request */
1447#define UART0_ERR_IRQ 0x00000040 /* UART0 Error Interrupt Request */
1448#define RTC_IRQ 0x00000080 /* Real-Time Clock Interrupt Request */
1449#define DMA0_IRQ 0x00000100 /* DMA Channel 0 (PPI) Interrupt Request */
1450#define DMA1_IRQ 0x00000200 /* DMA Channel 1 (SPORT0 RX) Interrupt Request */
1451#define DMA2_IRQ 0x00000400 /* DMA Channel 2 (SPORT0 TX) Interrupt Request */
1452#define DMA3_IRQ 0x00000800 /* DMA Channel 3 (SPORT1 RX) Interrupt Request */
1453#define DMA4_IRQ 0x00001000 /* DMA Channel 4 (SPORT1 TX) Interrupt Request */
1454#define DMA5_IRQ 0x00002000 /* DMA Channel 5 (SPI) Interrupt Request */
1455#define DMA6_IRQ 0x00004000 /* DMA Channel 6 (UART RX) Interrupt Request */
1456#define DMA7_IRQ 0x00008000 /* DMA Channel 7 (UART TX) Interrupt Request */
1457#define TIMER0_IRQ 0x00010000 /* Timer 0 Interrupt Request */
1458#define TIMER1_IRQ 0x00020000 /* Timer 1 Interrupt Request */
1459#define TIMER2_IRQ 0x00040000 /* Timer 2 Interrupt Request */
1460#define PFA_IRQ 0x00080000 /* Programmable Flag Interrupt Request A */
1461#define PFB_IRQ 0x00100000 /* Programmable Flag Interrupt Request B */
1462#define MDMA0_0_IRQ 0x00200000 /* MemDMA0 Stream 0 Interrupt Request */
1463#define MDMA0_1_IRQ 0x00400000 /* MemDMA0 Stream 1 Interrupt Request */
1464#define WDOG_IRQ 0x00800000 /* Software Watchdog Timer Interrupt Request */
1465#define DMAC1_ERR_IRQ 0x01000000 /* DMA Controller 1 Error Interrupt Request */
1466#define SPORT2_ERR_IRQ 0x02000000 /* SPORT2 Error Interrupt Request */
1467#define SPORT3_ERR_IRQ 0x04000000 /* SPORT3 Error Interrupt Request */
1468#define MXVR_SD_IRQ 0x08000000 /* MXVR Synchronous Data Interrupt Request */
1469#define SPI1_ERR_IRQ 0x10000000 /* SPI1 Error Interrupt Request */
1470#define SPI2_ERR_IRQ 0x20000000 /* SPI2 Error Interrupt Request */
1471#define UART1_ERR_IRQ 0x40000000 /* UART1 Error Interrupt Request */
1472#define UART2_ERR_IRQ 0x80000000 /* UART2 Error Interrupt Request */
1473
1474/* the following are for backwards compatibility */
1475#define DMA0_ERR_IRQ DMAC0_ERR_IRQ
1476#define DMA1_ERR_IRQ DMAC1_ERR_IRQ
1477
1478
1479/* Peripheral Masks For SIC_ISR1, SIC_IWR1, SIC_IMASK1 */
1480#define CAN_ERR_IRQ 0x00000001 /* CAN Error Interrupt Request */
1481#define DMA8_IRQ 0x00000002 /* DMA Channel 8 (SPORT2 RX) Interrupt Request */
1482#define DMA9_IRQ 0x00000004 /* DMA Channel 9 (SPORT2 TX) Interrupt Request */
1483#define DMA10_IRQ 0x00000008 /* DMA Channel 10 (SPORT3 RX) Interrupt Request */
1484#define DMA11_IRQ 0x00000010 /* DMA Channel 11 (SPORT3 TX) Interrupt Request */
1485#define DMA12_IRQ 0x00000020 /* DMA Channel 12 Interrupt Request */
1486#define DMA13_IRQ 0x00000040 /* DMA Channel 13 Interrupt Request */
1487#define DMA14_IRQ 0x00000080 /* DMA Channel 14 (SPI1) Interrupt Request */
1488#define DMA15_IRQ 0x00000100 /* DMA Channel 15 (SPI2) Interrupt Request */
1489#define DMA16_IRQ 0x00000200 /* DMA Channel 16 (UART1 RX) Interrupt Request */
1490#define DMA17_IRQ 0x00000400 /* DMA Channel 17 (UART1 TX) Interrupt Request */
1491#define DMA18_IRQ 0x00000800 /* DMA Channel 18 (UART2 RX) Interrupt Request */
1492#define DMA19_IRQ 0x00001000 /* DMA Channel 19 (UART2 TX) Interrupt Request */
1493#define TWI0_IRQ 0x00002000 /* TWI0 Interrupt Request */
1494#define TWI1_IRQ 0x00004000 /* TWI1 Interrupt Request */
1495#define CAN_RX_IRQ 0x00008000 /* CAN Receive Interrupt Request */
1496#define CAN_TX_IRQ 0x00010000 /* CAN Transmit Interrupt Request */
1497#define MDMA1_0_IRQ 0x00020000 /* MemDMA1 Stream 0 Interrupt Request */
1498#define MDMA1_1_IRQ 0x00040000 /* MemDMA1 Stream 1 Interrupt Request */
1499#define MXVR_STAT_IRQ 0x00080000 /* MXVR Status Interrupt Request */
1500#define MXVR_CM_IRQ 0x00100000 /* MXVR Control Message Interrupt Request */
1501#define MXVR_AP_IRQ 0x00200000 /* MXVR Asynchronous Packet Interrupt */
1502
1503/* the following are for backwards compatibility */
1504#define MDMA0_IRQ MDMA1_0_IRQ
1505#define MDMA1_IRQ MDMA1_1_IRQ
1506
1507#ifdef _MISRA_RULES
1508#define _MF15 0xFu
1509#define _MF7 7u
1510#else
1511#define _MF15 0xF
1512#define _MF7 7
1513#endif /* _MISRA_RULES */
1514
1515/* SIC_IMASKx Masks */
1516#define SIC_UNMASK_ALL 0x00000000 /* Unmask all peripheral interrupts */
1517#define SIC_MASK_ALL 0xFFFFFFFF /* Mask all peripheral interrupts */
1518#ifdef _MISRA_RULES
1519#define SIC_MASK(x) (1 << ((x)&0x1Fu)) /* Mask Peripheral #x interrupt */
1520#define SIC_UNMASK(x) (0xFFFFFFFFu ^ (1 << ((x)&0x1Fu))) /* Unmask Peripheral #x interrupt */
1521#else
1522#define SIC_MASK(x) (1 << ((x)&0x1F)) /* Mask Peripheral #x interrupt */
1523#define SIC_UNMASK(x) (0xFFFFFFFF ^ (1 << ((x)&0x1F))) /* Unmask Peripheral #x interrupt */
1524#endif /* _MISRA_RULES */
1525
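/*
 * Usage sketch (illustrative, not part of the original header): mask a
 * single peripheral interrupt and later unmask it. Per the comments
 * above, a set bit in SIC_IMASKx masks the corresponding peripheral.
 * bfin_read32()/bfin_write32() are the usual MMR accessors; the
 * SIC_IMASK0 register name is an assumption (defined earlier in this file).
 */
bfin_write32(SIC_IMASK0, bfin_read32(SIC_IMASK0) | SIC_MASK(5));	/* mask #5   */
bfin_write32(SIC_IMASK0, bfin_read32(SIC_IMASK0) & SIC_UNMASK(5));	/* unmask #5 */
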
1526/* SIC_IWRx Masks */
1527#define IWR_DISABLE_ALL 0x00000000 /* Wakeup Disable all peripherals */
1528#define IWR_ENABLE_ALL 0xFFFFFFFF /* Wakeup Enable all peripherals */
1529#ifdef _MISRA_RULES
1530#define IWR_ENABLE(x) (1 << ((x)&0x1Fu)) /* Wakeup Enable Peripheral #x */
1531#define IWR_DISABLE(x) (0xFFFFFFFFu ^ (1 << ((x)&0x1Fu))) /* Wakeup Disable Peripheral #x */
1532#else
1533#define IWR_ENABLE(x) (1 << ((x)&0x1F)) /* Wakeup Enable Peripheral #x */
1534#define IWR_DISABLE(x) (0xFFFFFFFF ^ (1 << ((x)&0x1F))) /* Wakeup Disable Peripheral #x */
1535#endif /* _MISRA_RULES */
1536
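/*
 * Usage sketch (illustrative, not part of the original header): allow
 * only the RTC (peripheral #7 on SIC0, per RTC_IRQ above) to wake the
 * core from a low-power state. The SIC_IWR0 register name is an
 * assumption (defined earlier in this file).
 */
bfin_write32(SIC_IWR0, IWR_DISABLE_ALL | IWR_ENABLE(7));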
1537
1538/* ***************************** UART CONTROLLER MASKS ********************** */
1539/* UARTx_LCR Register */
1540#ifdef _MISRA_RULES
1541#define WLS(x) (((x)-5u) & 0x03u) /* Word Length Select */
1542#else
1543#define WLS(x) (((x)-5) & 0x03) /* Word Length Select */
1544#endif /* _MISRA_RULES */
1545#define STB 0x04 /* Stop Bits */
1546#define PEN 0x08 /* Parity Enable */
1547#define EPS 0x10 /* Even Parity Select */
1548#define STP 0x20 /* Stick Parity */
1549#define SB 0x40 /* Set Break */
1550#define DLAB 0x80 /* Divisor Latch Access */
1551
1552#define DLAB_P 0x07
1553#define SB_P 0x06
1554#define STP_P 0x05
1555#define EPS_P 0x04
1556#define PEN_P 0x03
1557#define STB_P 0x02
1558#define WLS_P1 0x01
1559#define WLS_P0 0x00
1560
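/*
 * Usage sketch (illustrative, not part of the original header): program
 * UART1 for 8 data bits, no parity, 1 stop bit, after loading the baud
 * divisor through the DLAB-mapped latches. bfin_write16() is the usual
 * MMR accessor; `divisor` is a caller-supplied value.
 */
bfin_write16(UART1_LCR, DLAB);			/* expose DLL/DLH */
bfin_write16(UART1_DLL, divisor & 0xFF);
bfin_write16(UART1_DLH, divisor >> 8);
bfin_write16(UART1_LCR, WLS(8));		/* 8N1, DLAB cleared */
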
1561/* UARTx_MCR Register */
1562#define LOOP_ENA 0x10 /* Loopback Mode Enable */
1563#define LOOP_ENA_P 0x04
1564/* Deprecated UARTx_MCR Mask */
1565
1566/* UARTx_LSR Register */
1567#define DR 0x01 /* Data Ready */
1568#define OE 0x02 /* Overrun Error */
1569#define PE 0x04 /* Parity Error */
1570#define FE 0x08 /* Framing Error */
1571#define BI 0x10 /* Break Interrupt */
1572#define THRE 0x20 /* THR Empty */
1573#define TEMT 0x40 /* TSR and UART_THR Empty */
1574
1575#define TEMP_P 0x06
1576#define THRE_P 0x05
1577#define BI_P 0x04
1578#define FE_P 0x03
1579#define PE_P 0x02
1580#define OE_P 0x01
1581#define DR_P 0x00
1582
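/*
 * Usage sketch (illustrative, not part of the original header): polled
 * transmit of one byte on UART1 using the line-status bits above.
 */
while (!(bfin_read16(UART1_LSR) & THRE))
	;					/* wait for THR to drain */
bfin_write16(UART1_THR, c);			/* `c` supplied by caller */
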
1583/* UARTx_IER Register */
1584#define ERBFI 0x01 /* Enable Receive Buffer Full Interrupt */
1585#define ETBEI 0x02 /* Enable Transmit Buffer Empty Interrupt */
1586#define ELSI 0x04 /* Enable RX Status Interrupt */
1587
1588#define ELSI_P 0x02
1589#define ETBEI_P 0x01
1590#define ERBFI_P 0x00
1591
1592/* UARTx_IIR Register */
1593#define NINT 0x01
1594#define STATUS_P1 0x02
1595#define STATUS_P0 0x01
1596#define NINT_P 0x00
1597
1598/* UARTx_GCTL Register */
1599#define UCEN 0x01 /* Enable UARTx Clocks */
1600#define IREN 0x02 /* Enable IrDA Mode */
1601#define TPOLC 0x04 /* IrDA TX Polarity Change */
1602#define RPOLC 0x08 /* IrDA RX Polarity Change */
1603#define FPE 0x10 /* Force Parity Error On Transmit */
1604#define FFE 0x20 /* Force Framing Error On Transmit */
1605
1606#define FFE_P 0x05
1607#define FPE_P 0x04
1608#define RPOLC_P 0x03
1609#define TPOLC_P 0x02
1610#define IREN_P 0x01
1611#define UCEN_P 0x00
1612
1613
1614/* ********* PARALLEL PERIPHERAL INTERFACE (PPI) MASKS **************** */
1615/* PPI_CONTROL Masks */
1616#define PORT_EN 0x0001 /* PPI Port Enable */
1617#define PORT_DIR 0x0002 /* PPI Port Direction */
1618#define XFR_TYPE 0x000C /* PPI Transfer Type */
1619#define PORT_CFG 0x0030 /* PPI Port Configuration */
1620#define FLD_SEL 0x0040 /* PPI Active Field Select */
1621#define PACK_EN 0x0080 /* PPI Packing Mode */
1622/* previous versions of defBF539.h erroneously included DMA32 (PPI 32-bit DMA Enable) */
1623#define SKIP_EN 0x0200 /* PPI Skip Element Enable */
1624#define SKIP_EO 0x0400 /* PPI Skip Even/Odd Elements */
1625#define DLENGTH 0x3800 /* PPI Data Length */
1626#define DLEN_8 0x0 /* Data Length = 8 Bits */
1627#define DLEN_10 0x0800 /* Data Length = 10 Bits */
1628#define DLEN_11 0x1000 /* Data Length = 11 Bits */
1629#define DLEN_12 0x1800 /* Data Length = 12 Bits */
1630#define DLEN_13 0x2000 /* Data Length = 13 Bits */
1631#define DLEN_14 0x2800 /* Data Length = 14 Bits */
1632#define DLEN_15 0x3000 /* Data Length = 15 Bits */
1633#define DLEN_16 0x3800 /* Data Length = 16 Bits */
1634#ifdef _MISRA_RULES
1635#define DLEN(x) ((((x)-9u) & 0x07u) << 11) /* PPI Data Length (only works for x=10-->x=16) */
1636#else
1637#define DLEN(x) ((((x)-9) & 0x07) << 11) /* PPI Data Length (only works for x=10-->x=16) */
1638#endif /* _MISRA_RULES */
1639#define POL 0xC000 /* PPI Signal Polarities */
1640#define POLC 0x4000 /* PPI Clock Polarity */
1641#define POLS 0x8000 /* PPI Frame Sync Polarity */
1642
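/*
 * Usage sketch (illustrative, not part of the original header): enable
 * the PPI for 16-bit data with default polarities; DLEN(16) composes
 * the same value as DLEN_16. PPI_CONTROL is defined earlier in this file.
 */
bfin_write16(PPI_CONTROL, PORT_EN | DLEN(16));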
1643
1644/* PPI_STATUS Masks */
1645#define FLD 0x0400 /* Field Indicator */
1646#define FT_ERR 0x0800 /* Frame Track Error */
1647#define OVR 0x1000 /* FIFO Overflow Error */
1648#define UNDR 0x2000 /* FIFO Underrun Error */
1649#define ERR_DET 0x4000 /* Error Detected Indicator */
1650#define ERR_NCOR 0x8000 /* Error Not Corrected Indicator */
1651
1652
1653/* ********** DMA CONTROLLER MASKS ***********************/
1654
1655/* DMAx_PERIPHERAL_MAP, MDMA_yy_PERIPHERAL_MAP Masks */
1656
1657#define CTYPE 0x0040 /* DMA Channel Type Indicator */
1658#define CTYPE_P 0x6 /* DMA Channel Type Indicator BIT POSITION */
1659#define PCAP8 0x0080 /* DMA 8-bit Operation Indicator */
1660#define PCAP16 0x0100 /* DMA 16-bit Operation Indicator */
1661#define PCAP32 0x0200 /* DMA 32-bit Operation Indicator */
1662#define PCAPWR 0x0400 /* DMA Write Operation Indicator */
1663#define PCAPRD 0x0800 /* DMA Read Operation Indicator */
1664#define PMAP 0xF000 /* DMA Peripheral Map Field */
1665
1666/* PMAP Encodings For DMA Controller 0 */
1667#define PMAP_PPI 0x0000 /* PMAP PPI Port DMA */
1668#define PMAP_SPORT0RX 0x1000 /* PMAP SPORT0 Receive DMA */
1669#define PMAP_SPORT0TX 0x2000 /* PMAP SPORT0 Transmit DMA */
1670#define PMAP_SPORT1RX 0x3000 /* PMAP SPORT1 Receive DMA */
1671#define PMAP_SPORT1TX 0x4000 /* PMAP SPORT1 Transmit DMA */
1672#define PMAP_SPI0 0x5000 /* PMAP SPI0 DMA */
1673#define PMAP_UART0RX 0x6000 /* PMAP UART0 Receive DMA */
1674#define PMAP_UART0TX 0x7000 /* PMAP UART0 Transmit DMA */
1675
1676/* PMAP Encodings For DMA Controller 1 */
1677#define PMAP_SPORT2RX 0x0000 /* PMAP SPORT2 Receive DMA */
1678#define PMAP_SPORT2TX 0x1000 /* PMAP SPORT2 Transmit DMA */
1679#define PMAP_SPORT3RX 0x2000 /* PMAP SPORT3 Receive DMA */
1680#define PMAP_SPORT3TX 0x3000 /* PMAP SPORT3 Transmit DMA */
1681#define PMAP_SPI1 0x6000 /* PMAP SPI1 DMA */
1682#define PMAP_SPI2 0x7000 /* PMAP SPI2 DMA */
1683#define PMAP_UART1RX 0x8000 /* PMAP UART1 Receive DMA */
1684#define PMAP_UART1TX 0x9000 /* PMAP UART1 Transmit DMA */
1685#define PMAP_UART2RX 0xA000 /* PMAP UART2 Receive DMA */
1686#define PMAP_UART2TX 0xB000 /* PMAP UART2 Transmit DMA */
1687
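/*
 * Usage sketch (illustrative, not part of the original header): decode
 * which peripheral a DMA controller 0 channel currently services by
 * masking its PERIPHERAL_MAP value with PMAP. The DMA6_PERIPHERAL_MAP
 * register name is an assumption (defined earlier in this file).
 */
unsigned short pmap = bfin_read16(DMA6_PERIPHERAL_MAP);
if ((pmap & PMAP) == PMAP_UART0RX)
	/* channel 6 is mapped to UART0 receive */;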
1688
1689/* ************* GENERAL PURPOSE TIMER MASKS ******************** */
1690/* PWM Timer bit definitions */
1691/* TIMER_ENABLE Register */
1692#define TIMEN0 0x0001 /* Enable Timer 0 */
1693#define TIMEN1 0x0002 /* Enable Timer 1 */
1694#define TIMEN2 0x0004 /* Enable Timer 2 */
1695
1696#define TIMEN0_P 0x00
1697#define TIMEN1_P 0x01
1698#define TIMEN2_P 0x02
1699
1700/* TIMER_DISABLE Register */
1701#define TIMDIS0 0x0001 /* Disable Timer 0 */
1702#define TIMDIS1 0x0002 /* Disable Timer 1 */
1703#define TIMDIS2 0x0004 /* Disable Timer 2 */
1704
1705#define TIMDIS0_P 0x00
1706#define TIMDIS1_P 0x01
1707#define TIMDIS2_P 0x02
1708
1709/* TIMER_STATUS Register */
1710#define TIMIL0 0x0001 /* Timer 0 Interrupt */
1711#define TIMIL1 0x0002 /* Timer 1 Interrupt */
1712#define TIMIL2 0x0004 /* Timer 2 Interrupt */
1713#define TOVF_ERR0 0x0010 /* Timer 0 Counter Overflow */
1714#define TOVF_ERR1 0x0020 /* Timer 1 Counter Overflow */
1715#define TOVF_ERR2 0x0040 /* Timer 2 Counter Overflow */
1716#define TRUN0 0x1000 /* Timer 0 Slave Enable Status */
1717#define TRUN1 0x2000 /* Timer 1 Slave Enable Status */
1718#define TRUN2 0x4000 /* Timer 2 Slave Enable Status */
1719
1720#define TIMIL0_P 0x00
1721#define TIMIL1_P 0x01
1722#define TIMIL2_P 0x02
1723#define TOVF_ERR0_P 0x04
1724#define TOVF_ERR1_P 0x05
1725#define TOVF_ERR2_P 0x06
1726#define TRUN0_P 0x0C
1727#define TRUN1_P 0x0D
1728#define TRUN2_P 0x0E
1729
1730/* Alternate Deprecated Macros Provided For Backwards Code Compatibility */
1731#define TOVL_ERR0 TOVF_ERR0
1732#define TOVL_ERR1 TOVF_ERR1
1733#define TOVL_ERR2 TOVF_ERR2
1734#define TOVL_ERR0_P TOVF_ERR0_P
1735#define TOVL_ERR1_P TOVF_ERR1_P
1736#define TOVL_ERR2_P TOVF_ERR2_P
1737
1738/* TIMERx_CONFIG Registers */
1739#define PWM_OUT 0x0001
1740#define WDTH_CAP 0x0002
1741#define EXT_CLK 0x0003
1742#define PULSE_HI 0x0004
1743#define PERIOD_CNT 0x0008
1744#define IRQ_ENA 0x0010
1745#define TIN_SEL 0x0020
1746#define OUT_DIS 0x0040
1747#define CLK_SEL 0x0080
1748#define TOGGLE_HI 0x0100
1749#define EMU_RUN 0x0200
1750#ifdef _MISRA_RULES
1751#define ERR_TYP(x) (((x) & 0x03u) << 14)
1752#else
1753#define ERR_TYP(x) (((x) & 0x03) << 14)
1754#endif /* _MISRA_RULES */
1755
1756#define TMODE_P0 0x00
1757#define TMODE_P1 0x01
1758#define PULSE_HI_P 0x02
1759#define PERIOD_CNT_P 0x03
1760#define IRQ_ENA_P 0x04
1761#define TIN_SEL_P 0x05
1762#define OUT_DIS_P 0x06
1763#define CLK_SEL_P 0x07
1764#define TOGGLE_HI_P 0x08
1765#define EMU_RUN_P 0x09
1766#define ERR_TYP_P0 0x0E
1767#define ERR_TYP_P1 0x0F
1768
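/*
 * Usage sketch (illustrative, not part of the original header): run
 * timer 0 as an active-high PWM that interrupts at the end of each
 * period, then start it. TIMER0_CONFIG and TIMER_ENABLE are defined
 * earlier in this file.
 */
bfin_write16(TIMER0_CONFIG, PWM_OUT | PULSE_HI | PERIOD_CNT | IRQ_ENA);
bfin_write16(TIMER_ENABLE, TIMEN0);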
1769
1770/* ****************** GENERAL-PURPOSE I/O ********************* */
1771/* Flag I/O (FIO_) Masks */
1772#define PF0 0x0001
1773#define PF1 0x0002
1774#define PF2 0x0004
1775#define PF3 0x0008
1776#define PF4 0x0010
1777#define PF5 0x0020
1778#define PF6 0x0040
1779#define PF7 0x0080
1780#define PF8 0x0100
1781#define PF9 0x0200
1782#define PF10 0x0400
1783#define PF11 0x0800
1784#define PF12 0x1000
1785#define PF13 0x2000
1786#define PF14 0x4000
1787#define PF15 0x8000
1788
1789/* PORT F BIT POSITIONS */
1790#define PF0_P 0x0
1791#define PF1_P 0x1
1792#define PF2_P 0x2
1793#define PF3_P 0x3
1794#define PF4_P 0x4
1795#define PF5_P 0x5
1796#define PF6_P 0x6
1797#define PF7_P 0x7
1798#define PF8_P 0x8
1799#define PF9_P 0x9
1800#define PF10_P 0xA
1801#define PF11_P 0xB
1802#define PF12_P 0xC
1803#define PF13_P 0xD
1804#define PF14_P 0xE
1805#define PF15_P 0xF
1806
1807
1808/******************* GPIO MASKS *********************/
1809/* Port C Masks */
1810#define PC0 0x0001
1811#define PC1 0x0002
1812#define PC4 0x0010
1813#define PC5 0x0020
1814#define PC6 0x0040
1815#define PC7 0x0080
1816#define PC8 0x0100
1817#define PC9 0x0200
1818/* Port C Bit Positions */
1819#define PC0_P 0x0
1820#define PC1_P 0x1
1821#define PC4_P 0x4
1822#define PC5_P 0x5
1823#define PC6_P 0x6
1824#define PC7_P 0x7
1825#define PC8_P 0x8
1826#define PC9_P 0x9
1827
1828/* Port D */
1829#define PD0 0x0001
1830#define PD1 0x0002
1831#define PD2 0x0004
1832#define PD3 0x0008
1833#define PD4 0x0010
1834#define PD5 0x0020
1835#define PD6 0x0040
1836#define PD7 0x0080
1837#define PD8 0x0100
1838#define PD9 0x0200
1839#define PD10 0x0400
1840#define PD11 0x0800
1841#define PD12 0x1000
1842#define PD13 0x2000
1843#define PD14 0x4000
1844#define PD15 0x8000
1845/* Port D Bit Positions */
1846#define PD0_P 0x0
1847#define PD1_P 0x1
1848#define PD2_P 0x2
1849#define PD3_P 0x3
1850#define PD4_P 0x4
1851#define PD5_P 0x5
1852#define PD6_P 0x6
1853#define PD7_P 0x7
1854#define PD8_P 0x8
1855#define PD9_P 0x9
1856#define PD10_P 0xA
1857#define PD11_P 0xB
1858#define PD12_P 0xC
1859#define PD13_P 0xD
1860#define PD14_P 0xE
1861#define PD15_P 0xF
1862
1863/* Port E */
1864#define PE0 0x0001
1865#define PE1 0x0002
1866#define PE2 0x0004
1867#define PE3 0x0008
1868#define PE4 0x0010
1869#define PE5 0x0020
1870#define PE6 0x0040
1871#define PE7 0x0080
1872#define PE8 0x0100
1873#define PE9 0x0200
1874#define PE10 0x0400
1875#define PE11 0x0800
1876#define PE12 0x1000
1877#define PE13 0x2000
1878#define PE14 0x4000
1879#define PE15 0x8000
1880/* Port E Bit Positions */
1881#define PE0_P 0x0
1882#define PE1_P 0x1
1883#define PE2_P 0x2
1884#define PE3_P 0x3
1885#define PE4_P 0x4
1886#define PE5_P 0x5
1887#define PE6_P 0x6
1888#define PE7_P 0x7
1889#define PE8_P 0x8
1890#define PE9_P 0x9
1891#define PE10_P 0xA
1892#define PE11_P 0xB
1893#define PE12_P 0xC
1894#define PE13_P 0xD
1895#define PE14_P 0xE
1896#define PE15_P 0xF
1897
1898/* ********************* ASYNCHRONOUS MEMORY CONTROLLER MASKS ************* */
1899/* EBIU_AMGCTL Masks */
1900#define AMCKEN 0x0001 /* Enable CLKOUT */
1901#define AMBEN_NONE 0x0000 /* All Banks Disabled */
1902#define AMBEN_B0 0x0002 /* Enable Asynchronous Memory Bank 0 only */
1903#define AMBEN_B0_B1 0x0004 /* Enable Asynchronous Memory Banks 0 & 1 only */
1904#define AMBEN_B0_B1_B2 0x0006 /* Enable Asynchronous Memory Banks 0, 1, and 2 */
1905#define AMBEN_ALL 0x0008 /* Enable Asynchronous Memory Banks (all) 0, 1, 2, and 3 */
1906#define CDPRIO 0x0100 /* DMA has priority over core for external accesses */
1907
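/*
 * Usage sketch (illustrative, not part of the original header): enable
 * CLKOUT and all four asynchronous banks, giving DMA priority over the
 * core for external accesses. EBIU_AMGCTL is defined earlier in this file.
 */
bfin_write16(EBIU_AMGCTL, AMCKEN | AMBEN_ALL | CDPRIO);
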
1908/* EBIU_AMGCTL Bit Positions */
1909#define AMCKEN_P 0x0000 /* Enable CLKOUT */
1910#define AMBEN_P0 0x0001 /* Asynchronous Memory Enable, 000 - banks 0-3 disabled, 001 - Bank 0 enabled */
1911#define AMBEN_P1 0x0002 /* Asynchronous Memory Enable, 010 - banks 0&1 enabled, 011 - banks 0-3 enabled */
1912#define AMBEN_P2 0x0003 /* Asynchronous Memory Enable, 1xx - All banks (bank 0, 1, 2, and 3) enabled */
1913
1914/* EBIU_AMBCTL0 Masks */
1915#define B0RDYEN 0x00000001 /* Bank 0 RDY Enable, 0=disable, 1=enable */
1916#define B0RDYPOL 0x00000002 /* Bank 0 RDY Active high, 0=active low, 1=active high */
1917#define B0TT_1 0x00000004 /* Bank 0 Transition Time from Read to Write = 1 cycle */
1918#define B0TT_2 0x00000008 /* Bank 0 Transition Time from Read to Write = 2 cycles */
1919#define B0TT_3 0x0000000C /* Bank 0 Transition Time from Read to Write = 3 cycles */
1920#define B0TT_4 0x00000000 /* Bank 0 Transition Time from Read to Write = 4 cycles */
1921#define B0ST_1 0x00000010 /* Bank 0 Setup Time from AOE asserted to Read/Write asserted=1 cycle */
1922#define B0ST_2 0x00000020 /* Bank 0 Setup Time from AOE asserted to Read/Write asserted=2 cycles */
1923#define B0ST_3 0x00000030 /* Bank 0 Setup Time from AOE asserted to Read/Write asserted=3 cycles */
1924#define B0ST_4 0x00000000 /* Bank 0 Setup Time from AOE asserted to Read/Write asserted=4 cycles */
1925#define B0HT_1 0x00000040 /* Bank 0 Hold Time from Read/Write deasserted to AOE deasserted = 1 cycle */
1926#define B0HT_2 0x00000080 /* Bank 0 Hold Time from Read/Write deasserted to AOE deasserted = 2 cycles */
1927#define B0HT_3 0x000000C0 /* Bank 0 Hold Time from Read/Write deasserted to AOE deasserted = 3 cycles */
1928#define B0HT_0 0x00000000 /* Bank 0 Hold Time from Read/Write deasserted to AOE deasserted = 0 cycles */
1929#define B0RAT_1 0x00000100 /* Bank 0 Read Access Time = 1 cycle */
1930#define B0RAT_2 0x00000200 /* Bank 0 Read Access Time = 2 cycles */
1931#define B0RAT_3 0x00000300 /* Bank 0 Read Access Time = 3 cycles */
1932#define B0RAT_4 0x00000400 /* Bank 0 Read Access Time = 4 cycles */
1933#define B0RAT_5 0x00000500 /* Bank 0 Read Access Time = 5 cycles */
1934#define B0RAT_6 0x00000600 /* Bank 0 Read Access Time = 6 cycles */
1935#define B0RAT_7 0x00000700 /* Bank 0 Read Access Time = 7 cycles */
1936#define B0RAT_8 0x00000800 /* Bank 0 Read Access Time = 8 cycles */
1937#define B0RAT_9 0x00000900 /* Bank 0 Read Access Time = 9 cycles */
1938#define B0RAT_10 0x00000A00 /* Bank 0 Read Access Time = 10 cycles */
1939#define B0RAT_11 0x00000B00 /* Bank 0 Read Access Time = 11 cycles */
1940#define B0RAT_12 0x00000C00 /* Bank 0 Read Access Time = 12 cycles */
1941#define B0RAT_13 0x00000D00 /* Bank 0 Read Access Time = 13 cycles */
1942#define B0RAT_14 0x00000E00 /* Bank 0 Read Access Time = 14 cycles */
1943#define B0RAT_15 0x00000F00 /* Bank 0 Read Access Time = 15 cycles */
1944#define B0WAT_1 0x00001000 /* Bank 0 Write Access Time = 1 cycle */
1945#define B0WAT_2 0x00002000 /* Bank 0 Write Access Time = 2 cycles */
1946#define B0WAT_3 0x00003000 /* Bank 0 Write Access Time = 3 cycles */
1947#define B0WAT_4 0x00004000 /* Bank 0 Write Access Time = 4 cycles */
1948#define B0WAT_5 0x00005000 /* Bank 0 Write Access Time = 5 cycles */
1949#define B0WAT_6 0x00006000 /* Bank 0 Write Access Time = 6 cycles */
1950#define B0WAT_7 0x00007000 /* Bank 0 Write Access Time = 7 cycles */
1951#define B0WAT_8 0x00008000 /* Bank 0 Write Access Time = 8 cycles */
1952#define B0WAT_9 0x00009000 /* Bank 0 Write Access Time = 9 cycles */
1953#define B0WAT_10 0x0000A000 /* Bank 0 Write Access Time = 10 cycles */
1954#define B0WAT_11 0x0000B000 /* Bank 0 Write Access Time = 11 cycles */
1955#define B0WAT_12 0x0000C000 /* Bank 0 Write Access Time = 12 cycles */
1956#define B0WAT_13 0x0000D000 /* Bank 0 Write Access Time = 13 cycles */
1957#define B0WAT_14 0x0000E000 /* Bank 0 Write Access Time = 14 cycles */
1958#define B0WAT_15 0x0000F000 /* Bank 0 Write Access Time = 15 cycles */
1959#define B1RDYEN 0x00010000 /* Bank 1 RDY enable, 0=disable, 1=enable */
1960#define B1RDYPOL 0x00020000 /* Bank 1 RDY Active high, 0=active low, 1=active high */
1961#define B1TT_1 0x00040000 /* Bank 1 Transition Time from Read to Write = 1 cycle */
1962#define B1TT_2 0x00080000 /* Bank 1 Transition Time from Read to Write = 2 cycles */
1963#define B1TT_3 0x000C0000 /* Bank 1 Transition Time from Read to Write = 3 cycles */
1964#define B1TT_4 0x00000000 /* Bank 1 Transition Time from Read to Write = 4 cycles */
1965#define B1ST_1 0x00100000 /* Bank 1 Setup Time from AOE asserted to Read or Write asserted = 1 cycle */
1966#define B1ST_2 0x00200000 /* Bank 1 Setup Time from AOE asserted to Read or Write asserted = 2 cycles */
1967#define B1ST_3 0x00300000 /* Bank 1 Setup Time from AOE asserted to Read or Write asserted = 3 cycles */
1968#define B1ST_4 0x00000000 /* Bank 1 Setup Time from AOE asserted to Read or Write asserted = 4 cycles */
1969#define B1HT_1 0x00400000 /* Bank 1 Hold Time from Read or Write deasserted to AOE deasserted = 1 cycle */
1970#define B1HT_2 0x00800000 /* Bank 1 Hold Time from Read or Write deasserted to AOE deasserted = 2 cycles */
1971#define B1HT_3 0x00C00000 /* Bank 1 Hold Time from Read or Write deasserted to AOE deasserted = 3 cycles */
1972#define B1HT_0 0x00000000 /* Bank 1 Hold Time from Read or Write deasserted to AOE deasserted = 0 cycles */
1973#define B1RAT_1 0x01000000 /* Bank 1 Read Access Time = 1 cycle */
1974#define B1RAT_2 0x02000000 /* Bank 1 Read Access Time = 2 cycles */
1975#define B1RAT_3 0x03000000 /* Bank 1 Read Access Time = 3 cycles */
1976#define B1RAT_4 0x04000000 /* Bank 1 Read Access Time = 4 cycles */
1977#define B1RAT_5 0x05000000 /* Bank 1 Read Access Time = 5 cycles */
1978#define B1RAT_6 0x06000000 /* Bank 1 Read Access Time = 6 cycles */
1979#define B1RAT_7 0x07000000 /* Bank 1 Read Access Time = 7 cycles */
1980#define B1RAT_8 0x08000000 /* Bank 1 Read Access Time = 8 cycles */
1981#define B1RAT_9 0x09000000 /* Bank 1 Read Access Time = 9 cycles */
1982#define B1RAT_10 0x0A000000 /* Bank 1 Read Access Time = 10 cycles */
1983#define B1RAT_11 0x0B000000 /* Bank 1 Read Access Time = 11 cycles */
1984#define B1RAT_12 0x0C000000 /* Bank 1 Read Access Time = 12 cycles */
1985#define B1RAT_13 0x0D000000 /* Bank 1 Read Access Time = 13 cycles */
1986#define B1RAT_14 0x0E000000 /* Bank 1 Read Access Time = 14 cycles */
1987#define B1RAT_15 0x0F000000 /* Bank 1 Read Access Time = 15 cycles */
1988#define B1WAT_1 0x10000000 /* Bank 1 Write Access Time = 1 cycle */
1989#define B1WAT_2 0x20000000 /* Bank 1 Write Access Time = 2 cycles */
1990#define B1WAT_3 0x30000000 /* Bank 1 Write Access Time = 3 cycles */
1991#define B1WAT_4 0x40000000 /* Bank 1 Write Access Time = 4 cycles */
1992#define B1WAT_5 0x50000000 /* Bank 1 Write Access Time = 5 cycles */
1993#define B1WAT_6 0x60000000 /* Bank 1 Write Access Time = 6 cycles */
1994#define B1WAT_7 0x70000000 /* Bank 1 Write Access Time = 7 cycles */
1995#define B1WAT_8 0x80000000 /* Bank 1 Write Access Time = 8 cycles */
1996#define B1WAT_9 0x90000000 /* Bank 1 Write Access Time = 9 cycles */
1997#define B1WAT_10 0xA0000000 /* Bank 1 Write Access Time = 10 cycles */
1998#define B1WAT_11 0xB0000000 /* Bank 1 Write Access Time = 11 cycles */
1999#define B1WAT_12 0xC0000000 /* Bank 1 Write Access Time = 12 cycles */
2000#define B1WAT_13 0xD0000000 /* Bank 1 Write Access Time = 13 cycles */
2001#define B1WAT_14 0xE0000000 /* Bank 1 Write Access Time = 14 cycles */
2002#define B1WAT_15 0xF0000000 /* Bank 1 Write Access Time = 15 cycles */
2003
2004/* EBIU_AMBCTL1 Masks */
2005#define B2RDYEN 0x00000001 /* Bank 2 RDY Enable, 0=disable, 1=enable */
2006#define B2RDYPOL 0x00000002 /* Bank 2 RDY Active high, 0=active low, 1=active high */
2007#define B2TT_1 0x00000004 /* Bank 2 Transition Time from Read to Write = 1 cycle */
2008#define B2TT_2 0x00000008 /* Bank 2 Transition Time from Read to Write = 2 cycles */
2009#define B2TT_3 0x0000000C /* Bank 2 Transition Time from Read to Write = 3 cycles */
2010#define B2TT_4 0x00000000 /* Bank 2 Transition Time from Read to Write = 4 cycles */
2011#define B2ST_1 0x00000010 /* Bank 2 Setup Time from AOE asserted to Read or Write asserted = 1 cycle */
2012#define B2ST_2 0x00000020 /* Bank 2 Setup Time from AOE asserted to Read or Write asserted = 2 cycles */
2013#define B2ST_3 0x00000030 /* Bank 2 Setup Time from AOE asserted to Read or Write asserted = 3 cycles */
2014#define B2ST_4 0x00000000 /* Bank 2 Setup Time from AOE asserted to Read or Write asserted = 4 cycles */
2015#define B2HT_1 0x00000040 /* Bank 2 Hold Time from Read or Write deasserted to AOE deasserted = 1 cycle */
2016#define B2HT_2 0x00000080 /* Bank 2 Hold Time from Read or Write deasserted to AOE deasserted = 2 cycles */
2017#define B2HT_3 0x000000C0 /* Bank 2 Hold Time from Read or Write deasserted to AOE deasserted = 3 cycles */
2018#define B2HT_0 0x00000000 /* Bank 2 Hold Time from Read or Write deasserted to AOE deasserted = 0 cycles */
2019#define B2RAT_1 0x00000100 /* Bank 2 Read Access Time = 1 cycle */
2020#define B2RAT_2 0x00000200 /* Bank 2 Read Access Time = 2 cycles */
2021#define B2RAT_3 0x00000300 /* Bank 2 Read Access Time = 3 cycles */
2022#define B2RAT_4 0x00000400 /* Bank 2 Read Access Time = 4 cycles */
2023#define B2RAT_5 0x00000500 /* Bank 2 Read Access Time = 5 cycles */
2024#define B2RAT_6 0x00000600 /* Bank 2 Read Access Time = 6 cycles */
2025#define B2RAT_7 0x00000700 /* Bank 2 Read Access Time = 7 cycles */
2026#define B2RAT_8 0x00000800 /* Bank 2 Read Access Time = 8 cycles */
2027#define B2RAT_9 0x00000900 /* Bank 2 Read Access Time = 9 cycles */
2028#define B2RAT_10 0x00000A00 /* Bank 2 Read Access Time = 10 cycles */
2029#define B2RAT_11 0x00000B00 /* Bank 2 Read Access Time = 11 cycles */
2030#define B2RAT_12 0x00000C00 /* Bank 2 Read Access Time = 12 cycles */
2031#define B2RAT_13 0x00000D00 /* Bank 2 Read Access Time = 13 cycles */
2032#define B2RAT_14 0x00000E00 /* Bank 2 Read Access Time = 14 cycles */
2033#define B2RAT_15 0x00000F00 /* Bank 2 Read Access Time = 15 cycles */
2034#define B2WAT_1 0x00001000 /* Bank 2 Write Access Time = 1 cycle */
2035#define B2WAT_2 0x00002000 /* Bank 2 Write Access Time = 2 cycles */
2036#define B2WAT_3 0x00003000 /* Bank 2 Write Access Time = 3 cycles */
2037#define B2WAT_4 0x00004000 /* Bank 2 Write Access Time = 4 cycles */
2038#define B2WAT_5 0x00005000 /* Bank 2 Write Access Time = 5 cycles */
2039#define B2WAT_6 0x00006000 /* Bank 2 Write Access Time = 6 cycles */
2040#define B2WAT_7 0x00007000 /* Bank 2 Write Access Time = 7 cycles */
2041#define B2WAT_8 0x00008000 /* Bank 2 Write Access Time = 8 cycles */
2042#define B2WAT_9 0x00009000 /* Bank 2 Write Access Time = 9 cycles */
2043#define B2WAT_10 0x0000A000 /* Bank 2 Write Access Time = 10 cycles */
2044#define B2WAT_11 0x0000B000 /* Bank 2 Write Access Time = 11 cycles */
2045#define B2WAT_12 0x0000C000 /* Bank 2 Write Access Time = 12 cycles */
2046#define B2WAT_13 0x0000D000 /* Bank 2 Write Access Time = 13 cycles */
2047#define B2WAT_14 0x0000E000 /* Bank 2 Write Access Time = 14 cycles */
2048#define B2WAT_15 0x0000F000 /* Bank 2 Write Access Time = 15 cycles */
2049#define B3RDYEN 0x00010000 /* Bank 3 RDY enable, 0=disable, 1=enable */
2050#define B3RDYPOL 0x00020000 /* Bank 3 RDY Active high, 0=active low, 1=active high */
2051#define B3TT_1 0x00040000 /* Bank 3 Transition Time from Read to Write = 1 cycle */
2052#define B3TT_2 0x00080000 /* Bank 3 Transition Time from Read to Write = 2 cycles */
2053#define B3TT_3 0x000C0000 /* Bank 3 Transition Time from Read to Write = 3 cycles */
2054#define B3TT_4 0x00000000 /* Bank 3 Transition Time from Read to Write = 4 cycles */
2055#define B3ST_1 0x00100000 /* Bank 3 Setup Time from AOE asserted to Read or Write asserted = 1 cycle */
2056#define B3ST_2 0x00200000 /* Bank 3 Setup Time from AOE asserted to Read or Write asserted = 2 cycles */
2057#define B3ST_3 0x00300000 /* Bank 3 Setup Time from AOE asserted to Read or Write asserted = 3 cycles */
2058#define B3ST_4 0x00000000 /* Bank 3 Setup Time from AOE asserted to Read or Write asserted = 4 cycles */
2059#define B3HT_1 0x00400000 /* Bank 3 Hold Time from Read or Write deasserted to AOE deasserted = 1 cycle */
2060#define B3HT_2 0x00800000 /* Bank 3 Hold Time from Read or Write deasserted to AOE deasserted = 2 cycles */
2061#define B3HT_3 0x00C00000 /* Bank 3 Hold Time from Read or Write deasserted to AOE deasserted = 3 cycles */
2062#define B3HT_0 0x00000000 /* Bank 3 Hold Time from Read or Write deasserted to AOE deasserted = 0 cycles */
2063#define B3RAT_1 0x01000000 /* Bank 3 Read Access Time = 1 cycle */
2064#define B3RAT_2 0x02000000 /* Bank 3 Read Access Time = 2 cycles */
2065#define B3RAT_3 0x03000000 /* Bank 3 Read Access Time = 3 cycles */
2066#define B3RAT_4 0x04000000 /* Bank 3 Read Access Time = 4 cycles */
2067#define B3RAT_5 0x05000000 /* Bank 3 Read Access Time = 5 cycles */
2068#define B3RAT_6 0x06000000 /* Bank 3 Read Access Time = 6 cycles */
2069#define B3RAT_7 0x07000000 /* Bank 3 Read Access Time = 7 cycles */
2070#define B3RAT_8 0x08000000 /* Bank 3 Read Access Time = 8 cycles */
2071#define B3RAT_9 0x09000000 /* Bank 3 Read Access Time = 9 cycles */
2072#define B3RAT_10 0x0A000000 /* Bank 3 Read Access Time = 10 cycles */
2073#define B3RAT_11 0x0B000000 /* Bank 3 Read Access Time = 11 cycles */
2074#define B3RAT_12 0x0C000000 /* Bank 3 Read Access Time = 12 cycles */
2075#define B3RAT_13 0x0D000000 /* Bank 3 Read Access Time = 13 cycles */
2076#define B3RAT_14 0x0E000000 /* Bank 3 Read Access Time = 14 cycles */
2077#define B3RAT_15 0x0F000000 /* Bank 3 Read Access Time = 15 cycles */
2078#define B3WAT_1 0x10000000 /* Bank 3 Write Access Time = 1 cycle */
2079#define B3WAT_2 0x20000000 /* Bank 3 Write Access Time = 2 cycles */
2080#define B3WAT_3 0x30000000 /* Bank 3 Write Access Time = 3 cycles */
2081#define B3WAT_4 0x40000000 /* Bank 3 Write Access Time = 4 cycles */
2082#define B3WAT_5 0x50000000 /* Bank 3 Write Access Time = 5 cycles */
2083#define B3WAT_6 0x60000000 /* Bank 3 Write Access Time = 6 cycles */
2084#define B3WAT_7 0x70000000 /* Bank 3 Write Access Time = 7 cycles */
2085#define B3WAT_8 0x80000000 /* Bank 3 Write Access Time = 8 cycles */
2086#define B3WAT_9 0x90000000 /* Bank 3 Write Access Time = 9 cycles */
2087#define B3WAT_10 0xA0000000 /* Bank 3 Write Access Time = 10 cycles */
2088#define B3WAT_11 0xB0000000 /* Bank 3 Write Access Time = 11 cycles */
2089#define B3WAT_12 0xC0000000 /* Bank 3 Write Access Time = 12 cycles */
2090#define B3WAT_13 0xD0000000 /* Bank 3 Write Access Time = 13 cycles */
2091#define B3WAT_14 0xE0000000 /* Bank 3 Write Access Time = 14 cycles */
2092#define B3WAT_15 0xF0000000 /* Bank 3 Write Access Time = 15 cycles */
2093
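The per-bank timing masks above OR together into one 32-bit word: banks 0 and 1 program EBIU_AMBCTL0, banks 2 and 3 program EBIU_AMBCTL1. A hedged sketch, assuming the bfin_write_EBIU_AMBCTL0() accessor; the cycle counts are illustrative, not datasheet values:

	/* Sketch: bank 0 with RDY enabled, 4-cycle transition, 3-cycle setup,
	   2-cycle hold, 8-cycle read and write access times */
	bfin_write_EBIU_AMBCTL0(B0RDYEN | B0TT_4 | B0ST_3 | B0HT_2 | B0RAT_8 | B0WAT_8);
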
2094/* ********************** SDRAM CONTROLLER MASKS *************************** */
2095/* EBIU_SDGCTL Masks */
2096#define SCTLE 0x00000001 /* Enable SCLK[0], /SRAS, /SCAS, /SWE, SDQM[3:0] */
2097#define CL_2 0x00000008 /* SDRAM CAS latency = 2 cycles */
2098#define CL_3 0x0000000C /* SDRAM CAS latency = 3 cycles */
2099#define PFE 0x00000010 /* Enable SDRAM prefetch */
2100#define PFP 0x00000020 /* Prefetch has priority over AMC requests */
2101#define PASR_ALL 0x00000000 /* All 4 SDRAM Banks Refreshed In Self-Refresh */
2102#define PASR_B0_B1 0x00000010 /* SDRAM Banks 0 and 1 Are Refreshed In Self-Refresh */
2103#define PASR_B0 0x00000020 /* Only SDRAM Bank 0 Is Refreshed In Self-Refresh */
2104#define TRAS_1 0x00000040 /* SDRAM tRAS = 1 cycle */
2105#define TRAS_2 0x00000080 /* SDRAM tRAS = 2 cycles */
2106#define TRAS_3 0x000000C0 /* SDRAM tRAS = 3 cycles */
2107#define TRAS_4 0x00000100 /* SDRAM tRAS = 4 cycles */
2108#define TRAS_5 0x00000140 /* SDRAM tRAS = 5 cycles */
2109#define TRAS_6 0x00000180 /* SDRAM tRAS = 6 cycles */
2110#define TRAS_7 0x000001C0 /* SDRAM tRAS = 7 cycles */
2111#define TRAS_8 0x00000200 /* SDRAM tRAS = 8 cycles */
2112#define TRAS_9 0x00000240 /* SDRAM tRAS = 9 cycles */
2113#define TRAS_10 0x00000280 /* SDRAM tRAS = 10 cycles */
2114#define TRAS_11 0x000002C0 /* SDRAM tRAS = 11 cycles */
2115#define TRAS_12 0x00000300 /* SDRAM tRAS = 12 cycles */
2116#define TRAS_13 0x00000340 /* SDRAM tRAS = 13 cycles */
2117#define TRAS_14 0x00000380 /* SDRAM tRAS = 14 cycles */
2118#define TRAS_15 0x000003C0 /* SDRAM tRAS = 15 cycles */
2119#define TRP_1 0x00000800 /* SDRAM tRP = 1 cycle */
2120#define TRP_2 0x00001000 /* SDRAM tRP = 2 cycles */
2121#define TRP_3 0x00001800 /* SDRAM tRP = 3 cycles */
2122#define TRP_4 0x00002000 /* SDRAM tRP = 4 cycles */
2123#define TRP_5 0x00002800 /* SDRAM tRP = 5 cycles */
2124#define TRP_6 0x00003000 /* SDRAM tRP = 6 cycles */
2125#define TRP_7 0x00003800 /* SDRAM tRP = 7 cycles */
2126#define TRCD_1 0x00008000 /* SDRAM tRCD = 1 cycle */
2127#define TRCD_2 0x00010000 /* SDRAM tRCD = 2 cycles */
2128#define TRCD_3 0x00018000 /* SDRAM tRCD = 3 cycles */
2129#define TRCD_4 0x00020000 /* SDRAM tRCD = 4 cycles */
2130#define TRCD_5 0x00028000 /* SDRAM tRCD = 5 cycles */
2131#define TRCD_6 0x00030000 /* SDRAM tRCD = 6 cycles */
2132#define TRCD_7 0x00038000 /* SDRAM tRCD = 7 cycles */
2133#define TWR_1 0x00080000 /* SDRAM tWR = 1 cycle */
2134#define TWR_2 0x00100000 /* SDRAM tWR = 2 cycles */
2135#define TWR_3 0x00180000 /* SDRAM tWR = 3 cycles */
2136#define PUPSD 0x00200000 /* Power-up start delay */
2137#define PSM 0x00400000 /* SDRAM power-up sequence = Precharge, mode register set, 8 CBR refresh cycles */
2138#define PSS 0x00800000 /* enable SDRAM power-up sequence on next SDRAM access */
2139#define SRFS 0x01000000 /* Start SDRAM self-refresh mode */
2140#define EBUFE 0x02000000 /* Enable external buffering timing */
2141#define FBBRW 0x04000000 /* Fast back-to-back read write enable */
2142#define EMREN 0x10000000 /* Extended mode register enable */
2143#define TCSR 0x20000000 /* Temp compensated self refresh value 85 deg C */
2144#define CDDBG 0x40000000 /* Tristate SDRAM controls during bus grant */
2145
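The SDGCTL masks compose the same way. A hedged sketch, assuming the bfin_write_EBIU_SDGCTL() accessor; the SDRAM timings are illustrative placeholders, not values for any particular part:

	/* Sketch: CAS latency 2, tRAS=6, tRP=2, tRCD=2, tWR=2,
	   controller clocks enabled, power-up sequence on next access */
	bfin_write_EBIU_SDGCTL(SCTLE | CL_2 | TRAS_6 | TRP_2 | TRCD_2 | TWR_2 | PSS);
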
2146/* EBIU_SDBCTL Masks */
2147#define EBE 0x00000001 /* Enable SDRAM external bank */
2148#define EBSZ_16 0x00000000 /* SDRAM external bank size = 16MB */
2149#define EBSZ_32 0x00000002 /* SDRAM external bank size = 32MB */
2150#define EBSZ_64 0x00000004 /* SDRAM external bank size = 64MB */
2151#define EBSZ_128 0x00000006 /* SDRAM external bank size = 128MB */
2152#define EBSZ_256 0x00000008 /* SDRAM External Bank Size = 256MB */
2153#define EBSZ_512 0x0000000A /* SDRAM External Bank Size = 512MB */
2154#define EBCAW_8 0x00000000 /* SDRAM external bank column address width = 8 bits */
2155#define EBCAW_9 0x00000010 /* SDRAM external bank column address width = 9 bits */
2156#define EBCAW_10 0x00000020 /* SDRAM external bank column address width = 10 bits */
2157#define EBCAW_11 0x00000030 /* SDRAM external bank column address width = 11 bits */
2158
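A hedged sketch for a 64MB external bank with a 10-bit column address, assuming the bfin_write_EBIU_SDBCTL() accessor:

	/* Sketch: enable the external bank, 64MB, 10-bit column addressing */
	bfin_write_EBIU_SDBCTL(EBE | EBSZ_64 | EBCAW_10);
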
2159/* EBIU_SDSTAT Masks */
2160#define SDCI 0x00000001 /* SDRAM controller is idle */
2161#define SDSRA 0x00000002 /* SDRAM self-refresh is active */
2162#define SDPUA 0x00000004 /* SDRAM power up active */
2163#define SDRS 0x00000008 /* SDRAM is in reset state */
2164#define SDEASE 0x00000010 /* SDRAM EAB sticky error status - W1C */
2165#define BGSTAT 0x00000020 /* Bus granted */
2166
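EBIU_SDSTAT is status only; software typically polls SDCI before reprogramming the controller. A sketch, assuming the bfin_read_EBIU_SDSTAT() accessor:

	/* Sketch: wait until the SDRAM controller reports idle */
	while (!(bfin_read_EBIU_SDSTAT() & SDCI))
		continue;
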
2167
2168/* ******************** TWO-WIRE INTERFACE (TWIx) MASKS ***********************/
2169/* TWIx_CLKDIV Macros (Use: *pTWIx_CLKDIV = CLKLOW(x)|CLKHI(y); ) */
2170#ifdef _MISRA_RULES
2171#define CLKLOW(x) ((x) & 0xFFu) /* Periods Clock Is Held Low */
2172#define CLKHI(y) (((y)&0xFFu)<<0x8) /* Periods Before New Clock Low */
2173#else
2174#define CLKLOW(x) ((x) & 0xFF) /* Periods Clock Is Held Low */
2175#define CLKHI(y) (((y)&0xFF)<<0x8) /* Periods Before New Clock Low */
2176#endif /* _MISRA_RULES */
2177
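Per the usage note above, CLKLOW()/CLKHI() count periods of the 10MHz internal time reference, so their sum sets the SCL rate. A worked sketch for roughly 100kHz at a 50% duty cycle, using the pointer style shown in the macro comment:

	/* 10MHz / (50 + 50) = 100kHz SCL */
	*pTWIx_CLKDIV = CLKLOW(50) | CLKHI(50);
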
2178/* TWIx_PRESCALE Masks */
2179#define PRESCALE 0x007F /* SCLKs Per Internal Time Reference (10MHz) */
2180#define TWI_ENA 0x0080 /* TWI Enable */
2181#define SCCB 0x0200 /* SCCB Compatibility Enable */
2182
2183/* TWIx_SLAVE_CTRL Masks */
2184#define SEN 0x0001 /* Slave Enable */
2185#define SADD_LEN 0x0002 /* Slave Address Length */
2186#define STDVAL 0x0004 /* Slave Transmit Data Valid */
2187#define NAK 0x0008 /* NAK/ACK* Generated At Conclusion Of Transfer */
2188#define GEN 0x0010 /* General Call Address Matching Enabled */
2189
2190/* TWIx_SLAVE_STAT Masks */
2191#define SDIR 0x0001 /* Slave Transfer Direction (Transmit/Receive*) */
2192#define GCALL 0x0002 /* General Call Indicator */
2193
2194/* TWIx_MASTER_CTRL Masks */
2195#define MEN 0x0001 /* Master Mode Enable */
2196#define MADD_LEN 0x0002 /* Master Address Length */
2197#define MDIR 0x0004 /* Master Transmit Direction (RX/TX*) */
2198#define FAST 0x0008 /* Use Fast Mode Timing Specs */
2199#define STOP 0x0010 /* Issue Stop Condition */
2200#define RSTART 0x0020 /* Repeat Start or Stop* At End Of Transfer */
2201#define DCNT 0x3FC0 /* Data Bytes To Transfer */
2202#define SDAOVR 0x4000 /* Serial Data Override */
2203#define SCLOVR 0x8000 /* Serial Clock Override */
2204
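DCNT occupies bits 6-13 (mask 0x3FC0), so the byte count is shifted into place before the master is enabled. A sketch in the same pointer style; the shift amount is inferred from the mask:

	/* Sketch: start a 2-byte fast-mode master transmit (MDIR clear = transmit) */
	*pTWIx_MASTER_CTRL = ((2 << 6) & DCNT) | FAST | MEN;
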
2205/* TWIx_MASTER_STAT Masks */
2206#define MPROG 0x0001 /* Master Transfer In Progress */
2207#define LOSTARB 0x0002 /* Lost Arbitration Indicator (Xfer Aborted) */
2208#define ANAK 0x0004 /* Address Not Acknowledged */
2209#define DNAK 0x0008 /* Data Not Acknowledged */
2210#define BUFRDERR 0x0010 /* Buffer Read Error */
2211#define BUFWRERR 0x0020 /* Buffer Write Error */
2212#define SDASEN 0x0040 /* Serial Data Sense */
2213#define SCLSEN 0x0080 /* Serial Clock Sense */
2214#define BUSBUSY 0x0100 /* Bus Busy Indicator */
2215
2216/* TWIx_INT_SRC and TWIx_INT_ENABLE Masks */
2217#define SINIT 0x0001 /* Slave Transfer Initiated */
2218#define SCOMP 0x0002 /* Slave Transfer Complete */
2219#define SERR 0x0004 /* Slave Transfer Error */
2220#define SOVF 0x0008 /* Slave Overflow */
2221#define MCOMP 0x0010 /* Master Transfer Complete */
2222#define MERR 0x0020 /* Master Transfer Error */
2223#define XMTSERV 0x0040 /* Transmit FIFO Service */
2224#define RCVSERV 0x0080 /* Receive FIFO Service */
2225
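The same masks serve both registers: reading TWIx_INT_SRC reports the events, and writing a mask back clears them (write-1-to-clear behavior and the pointer style are assumptions here). A sketch:

	/* Sketch: acknowledge a completed master transfer */
	*pTWIx_INT_SRC = MCOMP;
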
2226/* TWIx_FIFO_CTL Masks */
2227#define XMTFLUSH 0x0001 /* Transmit Buffer Flush */
2228#define RCVFLUSH 0x0002 /* Receive Buffer Flush */
2229#define XMTINTLEN 0x0004 /* Transmit Buffer Interrupt Length */
2230#define RCVINTLEN 0x0008 /* Receive Buffer Interrupt Length */
2231
2232/* TWIx_FIFO_STAT Masks */
2233#define XMTSTAT 0x0003 /* Transmit FIFO Status */
2234#define XMT_EMPTY 0x0000 /* Transmit FIFO Empty */
2235#define XMT_HALF 0x0001 /* Transmit FIFO Has 1 Byte To Write */
2236#define XMT_FULL 0x0003 /* Transmit FIFO Full (2 Bytes To Write) */
2237
2238#define RCVSTAT 0x000C /* Receive FIFO Status */
2239#define RCV_EMPTY 0x0000 /* Receive FIFO Empty */
2240#define RCV_HALF 0x0004 /* Receive FIFO Has 1 Byte To Read */
2241#define RCV_FULL 0x000C /* Receive FIFO Full (2 Bytes To Read) */
2242
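A sketch for draining the 2-byte receive FIFO with the encodings above; the pTWIx_FIFO_STAT and pTWIx_RCV_DATA8 pointer names are assumptions, following the style of the earlier macros:

	unsigned char byte;
	/* Read while the receive FIFO reports pending data */
	while ((*pTWIx_FIFO_STAT & RCVSTAT) != RCV_EMPTY)
		byte = *pTWIx_RCV_DATA8;
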
2243#endif /* _DEF_BF539_H */ 152#endif /* _DEF_BF539_H */
diff --git a/arch/blackfin/mach-bf538/include/mach/gpio.h b/arch/blackfin/mach-bf538/include/mach/gpio.h
index bd9adb7183da..8a5beeece996 100644
--- a/arch/blackfin/mach-bf538/include/mach/gpio.h
+++ b/arch/blackfin/mach-bf538/include/mach/gpio.h
@@ -70,4 +70,9 @@
70#define PORT_D GPIO_PD0 70#define PORT_D GPIO_PD0
71#define PORT_E GPIO_PE0 71#define PORT_E GPIO_PE0
72 72
73#include <mach-common/ports-c.h>
74#include <mach-common/ports-d.h>
75#include <mach-common/ports-e.h>
76#include <mach-common/ports-f.h>
77
73#endif /* _MACH_GPIO_H_ */ 78#endif /* _MACH_GPIO_H_ */
diff --git a/arch/blackfin/mach-bf538/include/mach/pll.h b/arch/blackfin/mach-bf538/include/mach/pll.h
index b30bbcd412a7..94cca674d835 100644
--- a/arch/blackfin/mach-bf538/include/mach/pll.h
+++ b/arch/blackfin/mach-bf538/include/mach/pll.h
@@ -1,63 +1 @@
1/* #include <mach-common/pll.h>
2 * Copyright 2008-2009 Analog Devices Inc.
3 *
4 * Licensed under the GPL-2 or later.
5 */
6
7#ifndef _MACH_PLL_H
8#define _MACH_PLL_H
9
10#include <asm/blackfin.h>
11#include <asm/irqflags.h>
12
13/* Writing to PLL_CTL initiates a PLL relock sequence. */
14static __inline__ void bfin_write_PLL_CTL(unsigned int val)
15{
16 unsigned long flags, iwr0, iwr1;
17
18 if (val == bfin_read_PLL_CTL())
19 return;
20
21 flags = hard_local_irq_save();
22 /* Enable the PLL Wakeup bit in SIC IWR */
23 iwr0 = bfin_read32(SIC_IWR0);
24 iwr1 = bfin_read32(SIC_IWR1);
25 /* Only allow PLL Wakeup */
26 bfin_write32(SIC_IWR0, IWR_ENABLE(0));
27 bfin_write32(SIC_IWR1, 0);
28
29 bfin_write16(PLL_CTL, val);
30 SSYNC();
31 asm("IDLE;");
32
33 bfin_write32(SIC_IWR0, iwr0);
34 bfin_write32(SIC_IWR1, iwr1);
35 hard_local_irq_restore(flags);
36}
37
38/* Writing to VR_CTL initiates a PLL relock sequence. */
39static __inline__ void bfin_write_VR_CTL(unsigned int val)
40{
41 unsigned long flags, iwr0, iwr1;
42
43 if (val == bfin_read_VR_CTL())
44 return;
45
46 flags = hard_local_irq_save();
47 /* Enable the PLL Wakeup bit in SIC IWR */
48 iwr0 = bfin_read32(SIC_IWR0);
49 iwr1 = bfin_read32(SIC_IWR1);
50 /* Only allow PLL Wakeup */
51 bfin_write32(SIC_IWR0, IWR_ENABLE(0));
52 bfin_write32(SIC_IWR1, 0);
53
54 bfin_write16(VR_CTL, val);
55 SSYNC();
56 asm("IDLE;");
57
58 bfin_write32(SIC_IWR0, iwr0);
59 bfin_write32(SIC_IWR1, iwr1);
60 hard_local_irq_restore(flags);
61}
62
63#endif /* _MACH_PLL_H */
diff --git a/arch/blackfin/mach-bf548/boards/cm_bf548.c b/arch/blackfin/mach-bf548/boards/cm_bf548.c
index 4c2ee6789099..d11502ac5623 100644
--- a/arch/blackfin/mach-bf548/boards/cm_bf548.c
+++ b/arch/blackfin/mach-bf548/boards/cm_bf548.c
@@ -156,7 +156,7 @@ static struct resource bfin_uart0_resources[] = {
156 }, 156 },
157}; 157};
158 158
159unsigned short bfin_uart0_peripherals[] = { 159static unsigned short bfin_uart0_peripherals[] = {
160 P_UART0_TX, P_UART0_RX, 0 160 P_UART0_TX, P_UART0_RX, 0
161}; 161};
162 162
@@ -211,7 +211,7 @@ static struct resource bfin_uart1_resources[] = {
211#endif 211#endif
212}; 212};
213 213
214unsigned short bfin_uart1_peripherals[] = { 214static unsigned short bfin_uart1_peripherals[] = {
215 P_UART1_TX, P_UART1_RX, 215 P_UART1_TX, P_UART1_RX,
216#ifdef CONFIG_BFIN_UART1_CTSRTS 216#ifdef CONFIG_BFIN_UART1_CTSRTS
217 P_UART1_RTS, P_UART1_CTS, 217 P_UART1_RTS, P_UART1_CTS,
@@ -258,7 +258,7 @@ static struct resource bfin_uart2_resources[] = {
258 }, 258 },
259}; 259};
260 260
261unsigned short bfin_uart2_peripherals[] = { 261static unsigned short bfin_uart2_peripherals[] = {
262 P_UART2_TX, P_UART2_RX, 0 262 P_UART2_TX, P_UART2_RX, 0
263}; 263};
264 264
@@ -313,7 +313,7 @@ static struct resource bfin_uart3_resources[] = {
313#endif 313#endif
314}; 314};
315 315
316unsigned short bfin_uart3_peripherals[] = { 316static unsigned short bfin_uart3_peripherals[] = {
317 P_UART3_TX, P_UART3_RX, 317 P_UART3_TX, P_UART3_RX,
318#ifdef CONFIG_BFIN_UART3_CTSRTS 318#ifdef CONFIG_BFIN_UART3_CTSRTS
319 P_UART3_RTS, P_UART3_CTS, 319 P_UART3_RTS, P_UART3_CTS,
@@ -504,6 +504,7 @@ static struct musb_hdrc_config musb_config = {
504 * if it is the case. 504 * if it is the case.
505 */ 505 */
506 .gpio_vrsel_active = 1, 506 .gpio_vrsel_active = 1,
507 .clkin = 24, /* musb CLKIN in MHz */
507}; 508};
508 509
509static struct musb_hdrc_platform_data musb_plat = { 510static struct musb_hdrc_platform_data musb_plat = {
@@ -552,9 +553,9 @@ static struct resource bfin_sport0_uart_resources[] = {
552 }, 553 },
553}; 554};
554 555
555unsigned short bfin_sport0_peripherals[] = { 556static unsigned short bfin_sport0_peripherals[] = {
556 P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, 557 P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
557 P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0 558 P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
558}; 559};
559 560
560static struct platform_device bfin_sport0_uart_device = { 561static struct platform_device bfin_sport0_uart_device = {
@@ -586,9 +587,9 @@ static struct resource bfin_sport1_uart_resources[] = {
586 }, 587 },
587}; 588};
588 589
589unsigned short bfin_sport1_peripherals[] = { 590static unsigned short bfin_sport1_peripherals[] = {
590 P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, 591 P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
591 P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0 592 P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
592}; 593};
593 594
594static struct platform_device bfin_sport1_uart_device = { 595static struct platform_device bfin_sport1_uart_device = {
@@ -620,7 +621,7 @@ static struct resource bfin_sport2_uart_resources[] = {
620 }, 621 },
621}; 622};
622 623
623unsigned short bfin_sport2_peripherals[] = { 624static unsigned short bfin_sport2_peripherals[] = {
624 P_SPORT2_TFS, P_SPORT2_DTPRI, P_SPORT2_TSCLK, P_SPORT2_RFS, 625 P_SPORT2_TFS, P_SPORT2_DTPRI, P_SPORT2_TSCLK, P_SPORT2_RFS,
625 P_SPORT2_DRPRI, P_SPORT2_RSCLK, P_SPORT2_DRSEC, P_SPORT2_DTSEC, 0 626 P_SPORT2_DRPRI, P_SPORT2_RSCLK, P_SPORT2_DRSEC, P_SPORT2_DTSEC, 0
626}; 627};
@@ -654,7 +655,7 @@ static struct resource bfin_sport3_uart_resources[] = {
654 }, 655 },
655}; 656};
656 657
657unsigned short bfin_sport3_peripherals[] = { 658static unsigned short bfin_sport3_peripherals[] = {
658 P_SPORT3_TFS, P_SPORT3_DTPRI, P_SPORT3_TSCLK, P_SPORT3_RFS, 659 P_SPORT3_TFS, P_SPORT3_DTPRI, P_SPORT3_TSCLK, P_SPORT3_RFS,
659 P_SPORT3_DRPRI, P_SPORT3_RSCLK, P_SPORT3_DRSEC, P_SPORT3_DTSEC, 0 660 P_SPORT3_DRPRI, P_SPORT3_RSCLK, P_SPORT3_DRSEC, P_SPORT3_DTSEC, 0
660}; 661};
@@ -756,7 +757,7 @@ static struct platform_device bf54x_sdh_device = {
756#endif 757#endif
757 758
758#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE) 759#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE)
759unsigned short bfin_can_peripherals[] = { 760static unsigned short bfin_can_peripherals[] = {
760 P_CAN0_RX, P_CAN0_TX, 0 761 P_CAN0_RX, P_CAN0_TX, 0
761}; 762};
762 763
diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c
index 4f03fbc4c9be..ce5a2bb147dc 100644
--- a/arch/blackfin/mach-bf548/boards/ezkit.c
+++ b/arch/blackfin/mach-bf548/boards/ezkit.c
@@ -261,7 +261,7 @@ static struct resource bfin_uart0_resources[] = {
261 }, 261 },
262}; 262};
263 263
264unsigned short bfin_uart0_peripherals[] = { 264static unsigned short bfin_uart0_peripherals[] = {
265 P_UART0_TX, P_UART0_RX, 0 265 P_UART0_TX, P_UART0_RX, 0
266}; 266};
267 267
@@ -316,7 +316,7 @@ static struct resource bfin_uart1_resources[] = {
316#endif 316#endif
317}; 317};
318 318
319unsigned short bfin_uart1_peripherals[] = { 319static unsigned short bfin_uart1_peripherals[] = {
320 P_UART1_TX, P_UART1_RX, 320 P_UART1_TX, P_UART1_RX,
321#ifdef CONFIG_BFIN_UART1_CTSRTS 321#ifdef CONFIG_BFIN_UART1_CTSRTS
322 P_UART1_RTS, P_UART1_CTS, 322 P_UART1_RTS, P_UART1_CTS,
@@ -363,7 +363,7 @@ static struct resource bfin_uart2_resources[] = {
363 }, 363 },
364}; 364};
365 365
366unsigned short bfin_uart2_peripherals[] = { 366static unsigned short bfin_uart2_peripherals[] = {
367 P_UART2_TX, P_UART2_RX, 0 367 P_UART2_TX, P_UART2_RX, 0
368}; 368};
369 369
@@ -418,7 +418,7 @@ static struct resource bfin_uart3_resources[] = {
418#endif 418#endif
419}; 419};
420 420
421unsigned short bfin_uart3_peripherals[] = { 421static unsigned short bfin_uart3_peripherals[] = {
422 P_UART3_TX, P_UART3_RX, 422 P_UART3_TX, P_UART3_RX,
423#ifdef CONFIG_BFIN_UART3_CTSRTS 423#ifdef CONFIG_BFIN_UART3_CTSRTS
424 P_UART3_RTS, P_UART3_CTS, 424 P_UART3_RTS, P_UART3_CTS,
@@ -609,6 +609,7 @@ static struct musb_hdrc_config musb_config = {
609 * if it is the case. 609 * if it is the case.
610 */ 610 */
611 .gpio_vrsel_active = 1, 611 .gpio_vrsel_active = 1,
612 .clkin = 24, /* musb CLKIN in MHz */
612}; 613};
613 614
614static struct musb_hdrc_platform_data musb_plat = { 615static struct musb_hdrc_platform_data musb_plat = {
@@ -657,9 +658,9 @@ static struct resource bfin_sport0_uart_resources[] = {
657 }, 658 },
658}; 659};
659 660
660unsigned short bfin_sport0_peripherals[] = { 661static unsigned short bfin_sport0_peripherals[] = {
661 P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, 662 P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
662 P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0 663 P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
663}; 664};
664 665
665static struct platform_device bfin_sport0_uart_device = { 666static struct platform_device bfin_sport0_uart_device = {
@@ -691,9 +692,9 @@ static struct resource bfin_sport1_uart_resources[] = {
691 }, 692 },
692}; 693};
693 694
694unsigned short bfin_sport1_peripherals[] = { 695static unsigned short bfin_sport1_peripherals[] = {
695 P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, 696 P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
696 P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0 697 P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
697}; 698};
698 699
699static struct platform_device bfin_sport1_uart_device = { 700static struct platform_device bfin_sport1_uart_device = {
@@ -725,7 +726,7 @@ static struct resource bfin_sport2_uart_resources[] = {
725 }, 726 },
726}; 727};
727 728
728unsigned short bfin_sport2_peripherals[] = { 729static unsigned short bfin_sport2_peripherals[] = {
729 P_SPORT2_TFS, P_SPORT2_DTPRI, P_SPORT2_TSCLK, P_SPORT2_RFS, 730 P_SPORT2_TFS, P_SPORT2_DTPRI, P_SPORT2_TSCLK, P_SPORT2_RFS,
730 P_SPORT2_DRPRI, P_SPORT2_RSCLK, P_SPORT2_DRSEC, P_SPORT2_DTSEC, 0 731 P_SPORT2_DRPRI, P_SPORT2_RSCLK, P_SPORT2_DRSEC, P_SPORT2_DTSEC, 0
731}; 732};
@@ -759,7 +760,7 @@ static struct resource bfin_sport3_uart_resources[] = {
759 }, 760 },
760}; 761};
761 762
762unsigned short bfin_sport3_peripherals[] = { 763static unsigned short bfin_sport3_peripherals[] = {
763 P_SPORT3_TFS, P_SPORT3_DTPRI, P_SPORT3_TSCLK, P_SPORT3_RFS, 764 P_SPORT3_TFS, P_SPORT3_DTPRI, P_SPORT3_TSCLK, P_SPORT3_RFS,
764 P_SPORT3_DRPRI, P_SPORT3_RSCLK, P_SPORT3_DRSEC, P_SPORT3_DTSEC, 0 765 P_SPORT3_DRPRI, P_SPORT3_RSCLK, P_SPORT3_DRSEC, P_SPORT3_DTSEC, 0
765}; 766};
@@ -777,7 +778,7 @@ static struct platform_device bfin_sport3_uart_device = {
777#endif 778#endif
778 779
779#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE) 780#if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE)
780unsigned short bfin_can_peripherals[] = { 781static unsigned short bfin_can_peripherals[] = {
781 P_CAN0_RX, P_CAN0_TX, 0 782 P_CAN0_RX, P_CAN0_TX, 0
782}; 783};
783 784
diff --git a/arch/blackfin/mach-bf548/dma.c b/arch/blackfin/mach-bf548/dma.c
index 888b9cc0b822..69ead33cbf91 100644
--- a/arch/blackfin/mach-bf548/dma.c
+++ b/arch/blackfin/mach-bf548/dma.c
@@ -11,7 +11,7 @@
11#include <asm/blackfin.h> 11#include <asm/blackfin.h>
12#include <asm/dma.h> 12#include <asm/dma.h>
13 13
14struct dma_register *dma_io_base_addr[MAX_DMA_CHANNELS] = { 14struct dma_register * const dma_io_base_addr[MAX_DMA_CHANNELS] = {
15 (struct dma_register *) DMA0_NEXT_DESC_PTR, 15 (struct dma_register *) DMA0_NEXT_DESC_PTR,
16 (struct dma_register *) DMA1_NEXT_DESC_PTR, 16 (struct dma_register *) DMA1_NEXT_DESC_PTR,
17 (struct dma_register *) DMA2_NEXT_DESC_PTR, 17 (struct dma_register *) DMA2_NEXT_DESC_PTR,
diff --git a/arch/blackfin/mach-bf548/include/mach/bfin_serial.h b/arch/blackfin/mach-bf548/include/mach/bfin_serial.h
new file mode 100644
index 000000000000..a77109f99720
--- /dev/null
+++ b/arch/blackfin/mach-bf548/include/mach/bfin_serial.h
@@ -0,0 +1,16 @@
1/*
2 * mach/bfin_serial.h - Blackfin UART/Serial definitions
3 *
4 * Copyright 2006-2010 Analog Devices Inc.
5 *
6 * Licensed under the GPL-2 or later.
7 */
8
9#ifndef __BFIN_MACH_SERIAL_H__
10#define __BFIN_MACH_SERIAL_H__
11
12#define BFIN_UART_NR_PORTS 4
13
14#define BFIN_UART_BF54X_STYLE
15
16#endif
diff --git a/arch/blackfin/mach-bf548/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf548/include/mach/bfin_serial_5xx.h
index dd44aa75fe72..0d94edaaaa2e 100644
--- a/arch/blackfin/mach-bf548/include/mach/bfin_serial_5xx.h
+++ b/arch/blackfin/mach-bf548/include/mach/bfin_serial_5xx.h
@@ -4,72 +4,14 @@
4 * Licensed under the GPL-2 or later. 4 * Licensed under the GPL-2 or later.
5 */ 5 */
6 6
7#include <linux/serial.h>
8#include <asm/dma.h> 7#include <asm/dma.h>
9#include <asm/portmux.h> 8#include <asm/portmux.h>
10 9
11#define UART_GET_CHAR(uart) bfin_read16(((uart)->port.membase + OFFSET_RBR))
12#define UART_GET_DLL(uart) bfin_read16(((uart)->port.membase + OFFSET_DLL))
13#define UART_GET_DLH(uart) bfin_read16(((uart)->port.membase + OFFSET_DLH))
14#define UART_GET_IER(uart) bfin_read16(((uart)->port.membase + OFFSET_IER_SET))
15#define UART_GET_LCR(uart) bfin_read16(((uart)->port.membase + OFFSET_LCR))
16#define UART_GET_LSR(uart) bfin_read16(((uart)->port.membase + OFFSET_LSR))
17#define UART_GET_GCTL(uart) bfin_read16(((uart)->port.membase + OFFSET_GCTL))
18#define UART_GET_MSR(uart) bfin_read16(((uart)->port.membase + OFFSET_MSR))
19#define UART_GET_MCR(uart) bfin_read16(((uart)->port.membase + OFFSET_MCR))
20
21#define UART_PUT_CHAR(uart,v) bfin_write16(((uart)->port.membase + OFFSET_THR),v)
22#define UART_PUT_DLL(uart,v) bfin_write16(((uart)->port.membase + OFFSET_DLL),v)
23#define UART_SET_IER(uart,v) bfin_write16(((uart)->port.membase + OFFSET_IER_SET),v)
24#define UART_CLEAR_IER(uart,v) bfin_write16(((uart)->port.membase + OFFSET_IER_CLEAR),v)
25#define UART_PUT_DLH(uart,v) bfin_write16(((uart)->port.membase + OFFSET_DLH),v)
26#define UART_PUT_LSR(uart,v) bfin_write16(((uart)->port.membase + OFFSET_LSR),v)
27#define UART_PUT_LCR(uart,v) bfin_write16(((uart)->port.membase + OFFSET_LCR),v)
28#define UART_CLEAR_LSR(uart) bfin_write16(((uart)->port.membase + OFFSET_LSR), -1)
29#define UART_PUT_GCTL(uart,v) bfin_write16(((uart)->port.membase + OFFSET_GCTL),v)
30#define UART_PUT_MCR(uart,v) bfin_write16(((uart)->port.membase + OFFSET_MCR),v)
31#define UART_CLEAR_SCTS(uart) bfin_write16(((uart)->port.membase + OFFSET_MSR),SCTS)
32
33#define UART_SET_DLAB(uart) /* MMRs not muxed on BF54x */
34#define UART_CLEAR_DLAB(uart) /* MMRs not muxed on BF54x */
35
36#define UART_GET_CTS(x) (UART_GET_MSR(x) & CTS)
37#define UART_DISABLE_RTS(x) UART_PUT_MCR(x, UART_GET_MCR(x) & ~(ARTS|MRTS))
38#define UART_ENABLE_RTS(x) UART_PUT_MCR(x, UART_GET_MCR(x) | MRTS | ARTS)
39#define UART_ENABLE_INTS(x, v) UART_SET_IER(x, v)
40#define UART_DISABLE_INTS(x) UART_CLEAR_IER(x, 0xF)
41
42#if defined(CONFIG_BFIN_UART0_CTSRTS) || defined(CONFIG_BFIN_UART1_CTSRTS) || \ 10#if defined(CONFIG_BFIN_UART0_CTSRTS) || defined(CONFIG_BFIN_UART1_CTSRTS) || \
43 defined(CONFIG_BFIN_UART2_CTSRTS) || defined(CONFIG_BFIN_UART3_CTSRTS) 11 defined(CONFIG_BFIN_UART2_CTSRTS) || defined(CONFIG_BFIN_UART3_CTSRTS)
44# define CONFIG_SERIAL_BFIN_HARD_CTSRTS 12# define CONFIG_SERIAL_BFIN_HARD_CTSRTS
45#endif 13#endif
46 14
47#define BFIN_UART_TX_FIFO_SIZE 2
48
49/*
50 * The pin configuration differs from the schematic
51 */
52struct bfin_serial_port {
53 struct uart_port port;
54 unsigned int old_status;
55 int status_irq;
56#ifdef CONFIG_SERIAL_BFIN_DMA
57 int tx_done;
58 int tx_count;
59 struct circ_buf rx_dma_buf;
60 struct timer_list rx_dma_timer;
61 int rx_dma_nrows;
62 unsigned int tx_dma_channel;
63 unsigned int rx_dma_channel;
64 struct work_struct tx_dma_workqueue;
65#endif
66#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
67 int scts;
68 int cts_pin;
69 int rts_pin;
70#endif
71};
72
73struct bfin_serial_res { 15struct bfin_serial_res {
74 unsigned long uart_base_addr; 16 unsigned long uart_base_addr;
75 int uart_irq; 17 int uart_irq;
@@ -148,3 +90,5 @@ struct bfin_serial_res bfin_serial_resource[] = {
148}; 90};
149 91
150#define DRIVER_NAME "bfin-uart" 92#define DRIVER_NAME "bfin-uart"
93
94#include <asm/bfin_serial.h>
diff --git a/arch/blackfin/mach-bf548/include/mach/blackfin.h b/arch/blackfin/mach-bf548/include/mach/blackfin.h
index 5684030ccc21..72da721a77f5 100644
--- a/arch/blackfin/mach-bf548/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf548/include/mach/blackfin.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2007-2009 Analog Devices Inc. 2 * Copyright 2007-2010 Analog Devices Inc.
3 * 3 *
4 * Licensed under the GPL-2 or later. 4 * Licensed under the GPL-2 or later.
5 */ 5 */
@@ -10,58 +10,40 @@
10#include "bf548.h" 10#include "bf548.h"
11#include "anomaly.h" 11#include "anomaly.h"
12 12
13#include <asm/def_LPBlackfin.h>
13#ifdef CONFIG_BF542 14#ifdef CONFIG_BF542
14#include "defBF542.h" 15# include "defBF542.h"
15#endif 16#endif
16
17#ifdef CONFIG_BF544 17#ifdef CONFIG_BF544
18#include "defBF544.h" 18# include "defBF544.h"
19#endif 19#endif
20
21#ifdef CONFIG_BF547 20#ifdef CONFIG_BF547
22#include "defBF547.h" 21# include "defBF547.h"
23#endif 22#endif
24
25#ifdef CONFIG_BF548 23#ifdef CONFIG_BF548
26#include "defBF548.h" 24# include "defBF548.h"
27#endif 25#endif
28
29#ifdef CONFIG_BF549 26#ifdef CONFIG_BF549
30#include "defBF549.h" 27# include "defBF549.h"
31#endif 28#endif
32 29
33#if !defined(__ASSEMBLY__) 30#ifndef __ASSEMBLY__
34#ifdef CONFIG_BF542 31# include <asm/cdef_LPBlackfin.h>
35#include "cdefBF542.h" 32# ifdef CONFIG_BF542
33# include "cdefBF542.h"
34# endif
35# ifdef CONFIG_BF544
36# include "cdefBF544.h"
37# endif
38# ifdef CONFIG_BF547
39# include "cdefBF547.h"
40# endif
41# ifdef CONFIG_BF548
42# include "cdefBF548.h"
43# endif
44# ifdef CONFIG_BF549
45# include "cdefBF549.h"
46# endif
36#endif 47#endif
37#ifdef CONFIG_BF544
38#include "cdefBF544.h"
39#endif
40#ifdef CONFIG_BF547
41#include "cdefBF547.h"
42#endif
43#ifdef CONFIG_BF548
44#include "cdefBF548.h"
45#endif
46#ifdef CONFIG_BF549
47#include "cdefBF549.h"
48#endif
49
50#endif
51
52#define BFIN_UART_NR_PORTS 4
53
54#define OFFSET_DLL 0x00 /* Divisor Latch (Low-Byte) */
55#define OFFSET_DLH 0x04 /* Divisor Latch (High-Byte) */
56#define OFFSET_GCTL 0x08 /* Global Control Register */
57#define OFFSET_LCR 0x0C /* Line Control Register */
58#define OFFSET_MCR 0x10 /* Modem Control Register */
59#define OFFSET_LSR 0x14 /* Line Status Register */
60#define OFFSET_MSR 0x18 /* Modem Status Register */
61#define OFFSET_SCR 0x1C /* SCR Scratch Register */
62#define OFFSET_IER_SET 0x20 /* Set Interrupt Enable Register */
63#define OFFSET_IER_CLEAR 0x24 /* Clear Interrupt Enable Register */
64#define OFFSET_THR 0x28 /* Transmit Holding register */
65#define OFFSET_RBR 0x2C /* Receive Buffer register */
66 48
67#endif 49#endif
diff --git a/arch/blackfin/mach-bf548/include/mach/cdefBF542.h b/arch/blackfin/mach-bf548/include/mach/cdefBF542.h
index 42f4a9469549..d09c19cd1b7b 100644
--- a/arch/blackfin/mach-bf548/include/mach/cdefBF542.h
+++ b/arch/blackfin/mach-bf548/include/mach/cdefBF542.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2007-2008 Analog Devices Inc. 2 * Copyright 2007-2010 Analog Devices Inc.
3 * 3 *
4 * Licensed under the GPL-2 or later. 4 * Licensed under the GPL-2 or later.
5 */ 5 */
@@ -7,14 +7,6 @@
7#ifndef _CDEF_BF542_H 7#ifndef _CDEF_BF542_H
8#define _CDEF_BF542_H 8#define _CDEF_BF542_H
9 9
10/* include all Core registers and bit definitions */
11#include "defBF542.h"
12
13/* include core specific register pointer definitions */
14#include <asm/cdef_LPBlackfin.h>
15
16/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF542 */
17
18/* include cdefBF54x_base.h for the set of #defines that are common to all ADSP-BF54x processors */ 10/* include cdefBF54x_base.h for the set of #defines that are common to all ADSP-BF54x processors */
19#include "cdefBF54x_base.h" 11#include "cdefBF54x_base.h"
20 12
diff --git a/arch/blackfin/mach-bf548/include/mach/cdefBF544.h b/arch/blackfin/mach-bf548/include/mach/cdefBF544.h
index 2207799575ff..33ec8102ceda 100644
--- a/arch/blackfin/mach-bf548/include/mach/cdefBF544.h
+++ b/arch/blackfin/mach-bf548/include/mach/cdefBF544.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2007-2008 Analog Devices Inc. 2 * Copyright 2007-2010 Analog Devices Inc.
3 * 3 *
4 * Licensed under the GPL-2 or later. 4 * Licensed under the GPL-2 or later.
5 */ 5 */
@@ -7,14 +7,6 @@
7#ifndef _CDEF_BF544_H 7#ifndef _CDEF_BF544_H
8#define _CDEF_BF544_H 8#define _CDEF_BF544_H
9 9
10/* include all Core registers and bit definitions */
11#include "defBF544.h"
12
13/* include core specific register pointer definitions */
14#include <asm/cdef_LPBlackfin.h>
15
16/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF544 */
17
18/* include cdefBF54x_base.h for the set of #defines that are common to all ADSP-BF54x processors */ 10/* include cdefBF54x_base.h for the set of #defines that are common to all ADSP-BF54x processors */
19#include "cdefBF54x_base.h" 11#include "cdefBF54x_base.h"
20 12
diff --git a/arch/blackfin/mach-bf548/include/mach/cdefBF547.h b/arch/blackfin/mach-bf548/include/mach/cdefBF547.h
index bc650e6ea482..bcb9726dea54 100644
--- a/arch/blackfin/mach-bf548/include/mach/cdefBF547.h
+++ b/arch/blackfin/mach-bf548/include/mach/cdefBF547.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2008 Analog Devices Inc. 2 * Copyright 2008-2010 Analog Devices Inc.
3 * 3 *
4 * Licensed under the GPL-2 or later. 4 * Licensed under the GPL-2 or later.
5 */ 5 */
@@ -7,14 +7,6 @@
7#ifndef _CDEF_BF547_H 7#ifndef _CDEF_BF547_H
8#define _CDEF_BF547_H 8#define _CDEF_BF547_H
9 9
10/* include all Core registers and bit definitions */
11#include "defBF547.h"
12
13/* include core specific register pointer definitions */
14#include <asm/cdef_LPBlackfin.h>
15
16/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF547 */
17
18/* include cdefBF54x_base.h for the set of #defines that are common to all ADSP-BF54x processors */ 10/* include cdefBF54x_base.h for the set of #defines that are common to all ADSP-BF54x processors */
19#include "cdefBF54x_base.h" 11#include "cdefBF54x_base.h"
20 12
diff --git a/arch/blackfin/mach-bf548/include/mach/cdefBF548.h b/arch/blackfin/mach-bf548/include/mach/cdefBF548.h
index 3523e08f7968..bae67a65633e 100644
--- a/arch/blackfin/mach-bf548/include/mach/cdefBF548.h
+++ b/arch/blackfin/mach-bf548/include/mach/cdefBF548.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2007-2008 Analog Devices Inc. 2 * Copyright 2007-2010 Analog Devices Inc.
3 * 3 *
4 * Licensed under the GPL-2 or later. 4 * Licensed under the GPL-2 or later.
5 */ 5 */
@@ -7,14 +7,6 @@
7#ifndef _CDEF_BF548_H 7#ifndef _CDEF_BF548_H
8#define _CDEF_BF548_H 8#define _CDEF_BF548_H
9 9
10/* include all Core registers and bit definitions */
11#include "defBF548.h"
12
13/* include core specific register pointer definitions */
14#include <asm/cdef_LPBlackfin.h>
15
16/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF548 */
17
18/* include cdefBF54x_base.h for the set of #defines that are common to all ADSP-BF54x processors */ 10/* include cdefBF54x_base.h for the set of #defines that are common to all ADSP-BF54x processors */
19#include "cdefBF54x_base.h" 11#include "cdefBF54x_base.h"
20 12
diff --git a/arch/blackfin/mach-bf548/include/mach/cdefBF549.h b/arch/blackfin/mach-bf548/include/mach/cdefBF549.h
index 80201ed41f80..002136ad5a44 100644
--- a/arch/blackfin/mach-bf548/include/mach/cdefBF549.h
+++ b/arch/blackfin/mach-bf548/include/mach/cdefBF549.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2007-2008 Analog Devices Inc. 2 * Copyright 2007-2010 Analog Devices Inc.
3 * 3 *
4 * Licensed under the GPL-2 or later. 4 * Licensed under the GPL-2 or later.
5 */ 5 */
@@ -7,14 +7,6 @@
7#ifndef _CDEF_BF549_H 7#ifndef _CDEF_BF549_H
8#define _CDEF_BF549_H 8#define _CDEF_BF549_H
9 9
10/* include all Core registers and bit definitions */
11#include "defBF549.h"
12
13/* include core specific register pointer definitions */
14#include <asm/cdef_LPBlackfin.h>
15
16/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF549 */
17
18/* include cdefBF54x_base.h for the set of #defines that are common to all ADSP-BF54x processors */ 10/* include cdefBF54x_base.h for the set of #defines that are common to all ADSP-BF54x processors */
19#include "cdefBF54x_base.h" 11#include "cdefBF54x_base.h"
20 12
diff --git a/arch/blackfin/mach-bf548/include/mach/cdefBF54x_base.h b/arch/blackfin/mach-bf548/include/mach/cdefBF54x_base.h
index deaf5d6542d5..50c89c8052f3 100644
--- a/arch/blackfin/mach-bf548/include/mach/cdefBF54x_base.h
+++ b/arch/blackfin/mach-bf548/include/mach/cdefBF54x_base.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2007-2008 Analog Devices Inc. 2 * Copyright 2007-2010 Analog Devices Inc.
3 * 3 *
4 * Licensed under the GPL-2 or later. 4 * Licensed under the GPL-2 or later.
5 */ 5 */
@@ -7,10 +7,6 @@
7#ifndef _CDEF_BF54X_H 7#ifndef _CDEF_BF54X_H
8#define _CDEF_BF54X_H 8#define _CDEF_BF54X_H
9 9
10#include <asm/blackfin.h>
11
12#include "defBF54x_base.h"
13
14/* ************************************************************** */ 10/* ************************************************************** */
15/* SYSTEM & MMR ADDRESS DEFINITIONS COMMON TO ALL ADSP-BF54x */ 11/* SYSTEM & MMR ADDRESS DEFINITIONS COMMON TO ALL ADSP-BF54x */
16/* ************************************************************** */ 12/* ************************************************************** */
@@ -2633,22 +2629,5 @@
2633 2629
2634/* Handshake MDMA is not defined in the shared file because it is not available on the ADSP-BF542 processor */ 2630/* Handshake MDMA is not defined in the shared file because it is not available on the ADSP-BF542 processor */
2635 2631
2636/* legacy definitions */
2637#define bfin_read_EBIU_AMCBCTL0 bfin_read_EBIU_AMBCTL0
2638#define bfin_write_EBIU_AMCBCTL0 bfin_write_EBIU_AMBCTL0
2639#define bfin_read_EBIU_AMCBCTL1 bfin_read_EBIU_AMBCTL1
2640#define bfin_write_EBIU_AMCBCTL1 bfin_write_EBIU_AMBCTL1
2641#define bfin_read_PINT0_IRQ bfin_read_PINT0_REQUEST
2642#define bfin_write_PINT0_IRQ bfin_write_PINT0_REQUEST
2643#define bfin_read_PINT1_IRQ bfin_read_PINT1_REQUEST
2644#define bfin_write_PINT1_IRQ bfin_write_PINT1_REQUEST
2645#define bfin_read_PINT2_IRQ bfin_read_PINT2_REQUEST
2646#define bfin_write_PINT2_IRQ bfin_write_PINT2_REQUEST
2647#define bfin_read_PINT3_IRQ bfin_read_PINT3_REQUEST
2648#define bfin_write_PINT3_IRQ bfin_write_PINT3_REQUEST
2649
2650/* These need to be last due to the cdef/linux inter-dependencies */
2651#include <asm/irq.h>
2652
2653#endif /* _CDEF_BF54X_H */ 2632#endif /* _CDEF_BF54X_H */
2654 2633
diff --git a/arch/blackfin/mach-bf548/include/mach/defBF542.h b/arch/blackfin/mach-bf548/include/mach/defBF542.h
index abf5f750dd8b..629bf216e2b5 100644
--- a/arch/blackfin/mach-bf548/include/mach/defBF542.h
+++ b/arch/blackfin/mach-bf548/include/mach/defBF542.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2007-2008 Analog Devices Inc. 2 * Copyright 2007-2010 Analog Devices Inc.
3 * 3 *
4 * Licensed under the ADI BSD license or the GPL-2 (or later) 4 * Licensed under the ADI BSD license or the GPL-2 (or later)
5 */ 5 */
@@ -7,11 +7,6 @@
7#ifndef _DEF_BF542_H 7#ifndef _DEF_BF542_H
8#define _DEF_BF542_H 8#define _DEF_BF542_H
9 9
10/* Include all Core registers and bit definitions */
11#include <asm/def_LPBlackfin.h>
12
13/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF542 */
14
15/* Include defBF54x_base.h for the set of #defines that are common to all ADSP-BF54x processors */ 10/* Include defBF54x_base.h for the set of #defines that are common to all ADSP-BF54x processors */
16#include "defBF54x_base.h" 11#include "defBF54x_base.h"
17 12
diff --git a/arch/blackfin/mach-bf548/include/mach/defBF544.h b/arch/blackfin/mach-bf548/include/mach/defBF544.h
index e2771094de02..642468c1bcb1 100644
--- a/arch/blackfin/mach-bf548/include/mach/defBF544.h
+++ b/arch/blackfin/mach-bf548/include/mach/defBF544.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2007-2008 Analog Devices Inc. 2 * Copyright 2007-2010 Analog Devices Inc.
3 * 3 *
4 * Licensed under the ADI BSD license or the GPL-2 (or later) 4 * Licensed under the ADI BSD license or the GPL-2 (or later)
5 */ 5 */
@@ -7,11 +7,6 @@
7#ifndef _DEF_BF544_H 7#ifndef _DEF_BF544_H
8#define _DEF_BF544_H 8#define _DEF_BF544_H
9 9
10/* Include all Core registers and bit definitions */
11#include <asm/def_LPBlackfin.h>
12
13/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF544 */
14
15/* Include defBF54x_base.h for the set of #defines that are common to all ADSP-BF54x processors */ 10/* Include defBF54x_base.h for the set of #defines that are common to all ADSP-BF54x processors */
16#include "defBF54x_base.h" 11#include "defBF54x_base.h"
17 12
diff --git a/arch/blackfin/mach-bf548/include/mach/defBF547.h b/arch/blackfin/mach-bf548/include/mach/defBF547.h
index be21ba5b3aa8..2f3337cd311e 100644
--- a/arch/blackfin/mach-bf548/include/mach/defBF547.h
+++ b/arch/blackfin/mach-bf548/include/mach/defBF547.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2008 Analog Devices Inc. 2 * Copyright 2008-2010 Analog Devices Inc.
3 * 3 *
4 * Licensed under the ADI BSD license or the GPL-2 (or later) 4 * Licensed under the ADI BSD license or the GPL-2 (or later)
5 */ 5 */
@@ -7,11 +7,6 @@
7#ifndef _DEF_BF547_H 7#ifndef _DEF_BF547_H
8#define _DEF_BF547_H 8#define _DEF_BF547_H
9 9
10/* Include all Core registers and bit definitions */
11#include <asm/def_LPBlackfin.h>
12
13/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF547 */
14
15/* Include defBF54x_base.h for the set of #defines that are common to all ADSP-BF54x processors */ 10/* Include defBF54x_base.h for the set of #defines that are common to all ADSP-BF54x processors */
16#include "defBF54x_base.h" 11#include "defBF54x_base.h"
17 12
diff --git a/arch/blackfin/mach-bf548/include/mach/defBF548.h b/arch/blackfin/mach-bf548/include/mach/defBF548.h
index 3fb33b040ab7..3c7f1b69349e 100644
--- a/arch/blackfin/mach-bf548/include/mach/defBF548.h
+++ b/arch/blackfin/mach-bf548/include/mach/defBF548.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2007-2008 Analog Devices Inc. 2 * Copyright 2007-2010 Analog Devices Inc.
3 * 3 *
4 * Licensed under the ADI BSD license or the GPL-2 (or later) 4 * Licensed under the ADI BSD license or the GPL-2 (or later)
5 */ 5 */
@@ -7,11 +7,6 @@
7#ifndef _DEF_BF548_H 7#ifndef _DEF_BF548_H
8#define _DEF_BF548_H 8#define _DEF_BF548_H
9 9
10/* Include all Core registers and bit definitions */
11#include <asm/def_LPBlackfin.h>
12
13/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF548 */
14
15/* Include defBF54x_base.h for the set of #defines that are common to all ADSP-BF54x processors */ 10/* Include defBF54x_base.h for the set of #defines that are common to all ADSP-BF54x processors */
16#include "defBF54x_base.h" 11#include "defBF54x_base.h"
17 12
diff --git a/arch/blackfin/mach-bf548/include/mach/defBF549.h b/arch/blackfin/mach-bf548/include/mach/defBF549.h
index 5a04e6d4017e..9a45cb6b30da 100644
--- a/arch/blackfin/mach-bf548/include/mach/defBF549.h
+++ b/arch/blackfin/mach-bf548/include/mach/defBF549.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2007-2008 Analog Devices Inc. 2 * Copyright 2007-2010 Analog Devices Inc.
3 * 3 *
4 * Licensed under the ADI BSD license or the GPL-2 (or later) 4 * Licensed under the ADI BSD license or the GPL-2 (or later)
5 */ 5 */
@@ -7,11 +7,6 @@
7#ifndef _DEF_BF549_H 7#ifndef _DEF_BF549_H
8#define _DEF_BF549_H 8#define _DEF_BF549_H
9 9
10/* Include all Core registers and bit definitions */
11#include <asm/def_LPBlackfin.h>
12
13/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF549 */
14
15/* Include defBF54x_base.h for the set of #defines that are common to all ADSP-BF54x processors */ 10/* Include defBF54x_base.h for the set of #defines that are common to all ADSP-BF54x processors */
16#include "defBF54x_base.h" 11#include "defBF54x_base.h"
17 12
diff --git a/arch/blackfin/mach-bf548/include/mach/defBF54x_base.h b/arch/blackfin/mach-bf548/include/mach/defBF54x_base.h
index 78f91103f175..0867c2bedb43 100644
--- a/arch/blackfin/mach-bf548/include/mach/defBF54x_base.h
+++ b/arch/blackfin/mach-bf548/include/mach/defBF54x_base.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2007-2008 Analog Devices Inc. 2 * Copyright 2007-2010 Analog Devices Inc.
3 * 3 *
4 * Licensed under the ADI BSD license or the GPL-2 (or later) 4 * Licensed under the ADI BSD license or the GPL-2 (or later)
5 */ 5 */
@@ -1615,14 +1615,14 @@
1615#define CTYPE 0x40 /* DMA Channel Type */ 1615#define CTYPE 0x40 /* DMA Channel Type */
1616#define PMAP 0xf000 /* Peripheral Mapped To This Channel */ 1616#define PMAP 0xf000 /* Peripheral Mapped To This Channel */
1617 1617
1618/* Bit masks for DMACx_TCPER */ 1618/* Bit masks for DMACx_TC_PER */
1619 1619
1620#define DCB_TRAFFIC_PERIOD 0xf /* DCB Traffic Control Period */ 1620#define DCB_TRAFFIC_PERIOD 0xf /* DCB Traffic Control Period */
1621#define DEB_TRAFFIC_PERIOD 0xf0 /* DEB Traffic Control Period */ 1621#define DEB_TRAFFIC_PERIOD 0xf0 /* DEB Traffic Control Period */
1622#define DAB_TRAFFIC_PERIOD 0x700 /* DAB Traffic Control Period */ 1622#define DAB_TRAFFIC_PERIOD 0x700 /* DAB Traffic Control Period */
1623#define MDMA_ROUND_ROBIN_PERIOD 0xf800 /* MDMA Round Robin Period */ 1623#define MDMA_ROUND_ROBIN_PERIOD 0xf800 /* MDMA Round Robin Period */
1624 1624
1625/* Bit masks for DMACx_TCCNT */ 1625/* Bit masks for DMACx_TC_CNT */
1626 1626
1627#define DCB_TRAFFIC_COUNT 0xf /* DCB Traffic Control Count */ 1627#define DCB_TRAFFIC_COUNT 0xf /* DCB Traffic Control Count */
1628#define DEB_TRAFFIC_COUNT 0xf0 /* DEB Traffic Control Count */ 1628#define DEB_TRAFFIC_COUNT 0xf0 /* DEB Traffic Control Count */
@@ -2172,68 +2172,6 @@
2172 2172
2173#define RCVDATA16 0xffff /* Receive FIFO 16-Bit Data */ 2173#define RCVDATA16 0xffff /* Receive FIFO 16-Bit Data */
2174 2174
2175/* Bit masks for UARTx_LCR */
2176
2177#if 0
2178/* conflicts with legacy one in last section */
2179#define WLS 0x3 /* Word Length Select */
2180#endif
2181#define STB 0x4 /* Stop Bits */
2182#define PEN 0x8 /* Parity Enable */
2183#define EPS 0x10 /* Even Parity Select */
2184#define STP 0x20 /* Sticky Parity */
2185#define SB 0x40 /* Set Break */
2186
2187/* Bit masks for UARTx_MCR */
2188
2189#define XOFF 0x1 /* Transmitter Off */
2190#define MRTS 0x2 /* Manual Request To Send */
2191#define RFIT 0x4 /* Receive FIFO IRQ Threshold */
2192#define RFRT 0x8 /* Receive FIFO RTS Threshold */
2193#define LOOP_ENA 0x10 /* Loopback Mode Enable */
2194#define FCPOL 0x20 /* Flow Control Pin Polarity */
2195#define ARTS 0x40 /* Automatic Request To Send */
2196#define ACTS 0x80 /* Automatic Clear To Send */
2197
2198/* Bit masks for UARTx_LSR */
2199
2200#define DR 0x1 /* Data Ready */
2201#define OE 0x2 /* Overrun Error */
2202#define PE 0x4 /* Parity Error */
2203#define FE 0x8 /* Framing Error */
2204#define BI 0x10 /* Break Interrupt */
2205#define THRE 0x20 /* THR Empty */
2206#define TEMT 0x40 /* Transmitter Empty */
2207#define TFI 0x80 /* Transmission Finished Indicator */
2208
2209/* Bit masks for UARTx_MSR */
2210
2211#define SCTS 0x1 /* Sticky CTS */
2212#define CTS 0x10 /* Clear To Send */
2213#define RFCS 0x20 /* Receive FIFO Count Status */
2214
2215/* Bit masks for UARTx_IER_SET & UARTx_IER_CLEAR */
2216
2217#define ERBFI 0x1 /* Enable Receive Buffer Full Interrupt */
2218#define ETBEI 0x2 /* Enable Transmit Buffer Empty Interrupt */
2219#define ELSI 0x4 /* Enable Receive Status Interrupt */
2220#define EDSSI 0x8 /* Enable Modem Status Interrupt */
2221#define EDTPTI 0x10 /* Enable DMA Transmit PIRQ Interrupt */
2222#define ETFI 0x20 /* Enable Transmission Finished Interrupt */
2223#define ERFCI 0x40 /* Enable Receive FIFO Count Interrupt */
2224
2225/* Bit masks for UARTx_GCTL */
2226
2227#define UCEN 0x1 /* UART Enable */
2228#define IREN 0x2 /* IrDA Mode Enable */
2229#define TPOLC 0x4 /* IrDA TX Polarity Change */
2230#define RPOLC 0x8 /* IrDA RX Polarity Change */
2231#define FPE 0x10 /* Force Parity Error */
2232#define FFE 0x20 /* Force Framing Error */
2233#define EDBO 0x40 /* Enable Divide-by-One */
2234#define EGLSI 0x80 /* Enable Global LS Interrupt */
2235
2236
2237/* ******************************************* */ 2175/* ******************************************* */
2238/* MULTI BIT MACRO ENUMERATIONS */ 2176/* MULTI BIT MACRO ENUMERATIONS */
2239/* ******************************************* */ 2177/* ******************************************* */
@@ -2251,13 +2189,6 @@
2251#define WDTH_CAP 0x0002 2189#define WDTH_CAP 0x0002
2252#define EXT_CLK 0x0003 2190#define EXT_CLK 0x0003
2253 2191
2254/* UARTx_LCR bit field options */
2255
2256#define WLS_5 0x0000 /* 5 data bits */
2257#define WLS_6 0x0001 /* 6 data bits */
2258#define WLS_7 0x0002 /* 7 data bits */
2259#define WLS_8 0x0003 /* 8 data bits */
2260
2261/* PINTx Register Bit Definitions */ 2192/* PINTx Register Bit Definitions */
2262 2193
2263#define PIQ0 0x00000001 2194#define PIQ0 0x00000001
@@ -2300,240 +2231,6 @@
2300#define PIQ30 0x40000000 2231#define PIQ30 0x40000000
2301#define PIQ31 0x80000000 2232#define PIQ31 0x80000000
2302 2233
2303/* PORT A Bit Definitions for the registers
2304PORTA, PORTA_SET, PORTA_CLEAR,
2305PORTA_DIR_SET, PORTA_DIR_CLEAR, PORTA_INEN,
2306and PORTA_FER
2307*/
2308
2309#define PA0 0x0001
2310#define PA1 0x0002
2311#define PA2 0x0004
2312#define PA3 0x0008
2313#define PA4 0x0010
2314#define PA5 0x0020
2315#define PA6 0x0040
2316#define PA7 0x0080
2317#define PA8 0x0100
2318#define PA9 0x0200
2319#define PA10 0x0400
2320#define PA11 0x0800
2321#define PA12 0x1000
2322#define PA13 0x2000
2323#define PA14 0x4000
2324#define PA15 0x8000
2325
2326/* PORT B Bit Definitions for the registers
2327PORTB, PORTB_SET, PORTB_CLEAR,
2328PORTB_DIR_SET, PORTB_DIR_CLEAR, PORTB_INEN,
2329and PORTB_FER
2330*/
2331
2332#define PB0 0x0001
2333#define PB1 0x0002
2334#define PB2 0x0004
2335#define PB3 0x0008
2336#define PB4 0x0010
2337#define PB5 0x0020
2338#define PB6 0x0040
2339#define PB7 0x0080
2340#define PB8 0x0100
2341#define PB9 0x0200
2342#define PB10 0x0400
2343#define PB11 0x0800
2344#define PB12 0x1000
2345#define PB13 0x2000
2346#define PB14 0x4000
2347
2348
2349/* PORT C Bit Definitions for the registers
2350PORTC, PORTC_SET, PORTC_CLEAR,
2351PORTC_DIR_SET, PORTC_DIR_CLEAR, PORTC_INEN,
2352and PORTC_FER
2353*/
2354
2355
2356#define PC0 0x0001
2357#define PC1 0x0002
2358#define PC2 0x0004
2359#define PC3 0x0008
2360#define PC4 0x0010
2361#define PC5 0x0020
2362#define PC6 0x0040
2363#define PC7 0x0080
2364#define PC8 0x0100
2365#define PC9 0x0200
2366#define PC10 0x0400
2367#define PC11 0x0800
2368#define PC12 0x1000
2369#define PC13 0x2000
2370
2371
2372/* PORT D Bit Definitions for the registers
2373PORTD, PORTD_SET, PORTD_CLEAR,
2374PORTD_DIR_SET, PORTD_DIR_CLEAR, PORTD_INEN,
2375and PORTD_FER
2376*/
2377
2378#define PD0 0x0001
2379#define PD1 0x0002
2380#define PD2 0x0004
2381#define PD3 0x0008
2382#define PD4 0x0010
2383#define PD5 0x0020
2384#define PD6 0x0040
2385#define PD7 0x0080
2386#define PD8 0x0100
2387#define PD9 0x0200
2388#define PD10 0x0400
2389#define PD11 0x0800
2390#define PD12 0x1000
2391#define PD13 0x2000
2392#define PD14 0x4000
2393#define PD15 0x8000
2394
2395/* PORT E Bit Definitions for the registers
2396PORTE, PORTE_SET, PORTE_CLEAR,
2397PORTE_DIR_SET, PORTE_DIR_CLEAR, PORTE_INEN,
2398and PORTE_FER
2399*/
2400
2401
2402#define PE0 0x0001
2403#define PE1 0x0002
2404#define PE2 0x0004
2405#define PE3 0x0008
2406#define PE4 0x0010
2407#define PE5 0x0020
2408#define PE6 0x0040
2409#define PE7 0x0080
2410#define PE8 0x0100
2411#define PE9 0x0200
2412#define PE10 0x0400
2413#define PE11 0x0800
2414#define PE12 0x1000
2415#define PE13 0x2000
2416#define PE14 0x4000
2417#define PE15 0x8000
2418
2419/* PORT F Bit Definitions for the registers
2420PORTF, PORTF_SET, PORTF_CLEAR,
2421PORTF_DIR_SET, PORTF_DIR_CLEAR, PORTF_INEN,
2422and PORTF_FER
2423*/
2424
2425
2426#define PF0 0x0001
2427#define PF1 0x0002
2428#define PF2 0x0004
2429#define PF3 0x0008
2430#define PF4 0x0010
2431#define PF5 0x0020
2432#define PF6 0x0040
2433#define PF7 0x0080
2434#define PF8 0x0100
2435#define PF9 0x0200
2436#define PF10 0x0400
2437#define PF11 0x0800
2438#define PF12 0x1000
2439#define PF13 0x2000
2440#define PF14 0x4000
2441#define PF15 0x8000
2442
2443/* PORT G Bit Definitions for the registers
2444PORTG, PORTG_SET, PORTG_CLEAR,
2445PORTG_DIR_SET, PORTG_DIR_CLEAR, PORTG_INEN,
2446and PORTG_FER
2447*/
2448
2449
2450#define PG0 0x0001
2451#define PG1 0x0002
2452#define PG2 0x0004
2453#define PG3 0x0008
2454#define PG4 0x0010
2455#define PG5 0x0020
2456#define PG6 0x0040
2457#define PG7 0x0080
2458#define PG8 0x0100
2459#define PG9 0x0200
2460#define PG10 0x0400
2461#define PG11 0x0800
2462#define PG12 0x1000
2463#define PG13 0x2000
2464#define PG14 0x4000
2465#define PG15 0x8000
2466
2467/* PORT H Bit Definitions for the registers
2468PORTH, PORTH_SET, PORTH_CLEAR,
2469PORTH_DIR_SET, PORTH_DIR_CLEAR, PORTH_INEN,
2470and PORTH_FER
2471*/
2472
2473
2474#define PH0 0x0001
2475#define PH1 0x0002
2476#define PH2 0x0004
2477#define PH3 0x0008
2478#define PH4 0x0010
2479#define PH5 0x0020
2480#define PH6 0x0040
2481#define PH7 0x0080
2482#define PH8 0x0100
2483#define PH9 0x0200
2484#define PH10 0x0400
2485#define PH11 0x0800
2486#define PH12 0x1000
2487#define PH13 0x2000
2488
2489
2490/* PORT I Bit Definitions for the registers
2491PORTI, PORTI_SET, PORTI_CLEAR,
2492PORTI_DIR_SET, PORTI_DIR_CLEAR, PORTI_INEN,
2493and PORTI_FER
2494*/
2495
2496
2497#define PI0 0x0001
2498#define PI1 0x0002
2499#define PI2 0x0004
2500#define PI3 0x0008
2501#define PI4 0x0010
2502#define PI5 0x0020
2503#define PI6 0x0040
2504#define PI7 0x0080
2505#define PI8 0x0100
2506#define PI9 0x0200
2507#define PI10 0x0400
2508#define PI11 0x0800
2509#define PI12 0x1000
2510#define PI13 0x2000
2511#define PI14 0x4000
2512#define PI15 0x8000
2513
2514/* PORT J Bit Definitions for the registers
2515PORTJ, PORTJ_SET, PORTJ_CLEAR,
2516PORTJ_DIR_SET, PORTJ_DIR_CLEAR, PORTJ_INEN,
2517and PORTJ_FER
2518*/
2519
2520
2521#define PJ0 0x0001
2522#define PJ1 0x0002
2523#define PJ2 0x0004
2524#define PJ3 0x0008
2525#define PJ4 0x0010
2526#define PJ5 0x0020
2527#define PJ6 0x0040
2528#define PJ7 0x0080
2529#define PJ8 0x0100
2530#define PJ9 0x0200
2531#define PJ10 0x0400
2532#define PJ11 0x0800
2533#define PJ12 0x1000
2534#define PJ13 0x2000
2535
2536
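(These per-pin masks pair with the _SET/_CLEAR style registers named in the comments above. A minimal sketch, assuming the usual write-one-to-set / write-one-to-clear semantics of those MMRs; do_something() is a hypothetical handler:

	bfin_write16(PORTA_DIR_SET, PA5);	/* drive PA5 as an output */
	bfin_write16(PORTA_SET, PA5);		/* raise PA5 */
	bfin_write16(PORTA_CLEAR, PA5);		/* lower PA5 */
	if (bfin_read16(PORTA) & PA6)		/* sample PA6, input enable via PORTA_INEN assumed */
		do_something();

In-tree code should go through the gpiolib API rather than poking these registers directly; the definitions now live in the shared mach-common/ports-*.h headers.)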
2537/* Port Muxing Bit Fields for PORTx_MUX Registers */ 2234/* Port Muxing Bit Fields for PORTx_MUX Registers */
2538 2235
2539#define MUX0 0x00000003 2236#define MUX0 0x00000003
@@ -2703,16 +2400,4 @@ PORTJ_FER registers
2703#define B3MAP_PIH 0x06000000 /* Map Port I High to Byte 3 */ 2400#define B3MAP_PIH 0x06000000 /* Map Port I High to Byte 3 */
2704#define B3MAP_PJH 0x07000000 /* Map Port J High to Byte 3 */ 2401#define B3MAP_PJH 0x07000000 /* Map Port J High to Byte 3 */
2705 2402
2706
2707/* for legacy compatibility */
2708
2709#define WLS(x) (((x)-5) & 0x03) /* Word Length Select */
2710#define W1LMAX_MAX W1LMAX_MIN
2711#define EBIU_AMCBCTL0 EBIU_AMBCTL0
2712#define EBIU_AMCBCTL1 EBIU_AMBCTL1
2713#define PINT0_IRQ PINT0_REQUEST
2714#define PINT1_IRQ PINT1_REQUEST
2715#define PINT2_IRQ PINT2_REQUEST
2716#define PINT3_IRQ PINT3_REQUEST
2717
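(For reference, the legacy WLS(x) macro maps a data-bit count straight onto the two-bit LCR field that the deleted WLS_5..WLS_8 options covered: WLS(5) == 0x0, WLS(6) == 0x1, WLS(7) == 0x2, WLS(8) == 0x3, since ((x)-5) & 0x03 simply rebases the count at 5. A hedged usage sketch against the UART accessors elsewhere in this series:

	UART_PUT_LCR(uart, WLS(8) | STB);	/* 8 data bits, extra stop bit; flags chosen for illustration */
)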
2718#endif /* _DEF_BF54X_H */ 2403#endif /* _DEF_BF54X_H */
diff --git a/arch/blackfin/mach-bf548/include/mach/gpio.h b/arch/blackfin/mach-bf548/include/mach/gpio.h
index 28037e331964..7db433514e3f 100644
--- a/arch/blackfin/mach-bf548/include/mach/gpio.h
+++ b/arch/blackfin/mach-bf548/include/mach/gpio.h
@@ -200,4 +200,15 @@ struct gpio_port_s {
200 200
201#endif 201#endif
202 202
203#include <mach-common/ports-a.h>
204#include <mach-common/ports-b.h>
205#include <mach-common/ports-c.h>
206#include <mach-common/ports-d.h>
207#include <mach-common/ports-e.h>
208#include <mach-common/ports-f.h>
209#include <mach-common/ports-g.h>
210#include <mach-common/ports-h.h>
211#include <mach-common/ports-i.h>
212#include <mach-common/ports-j.h>
213
203#endif /* _MACH_GPIO_H_ */ 214#endif /* _MACH_GPIO_H_ */
diff --git a/arch/blackfin/mach-bf548/include/mach/irq.h b/arch/blackfin/mach-bf548/include/mach/irq.h
index 1f99b51a3d56..99fd1b2c53d8 100644
--- a/arch/blackfin/mach-bf548/include/mach/irq.h
+++ b/arch/blackfin/mach-bf548/include/mach/irq.h
@@ -474,4 +474,26 @@ Events (highest priority) EMU 0
474#define IRQ_PINT2_POS 24 474#define IRQ_PINT2_POS 24
475#define IRQ_PINT3_POS 28 475#define IRQ_PINT3_POS 28
476 476
477#ifndef __ASSEMBLY__
478#include <linux/types.h>
479
480/*
481 * bfin PINT register layout
482 */
483struct bfin_pint_regs {
484 u32 mask_set;
485 u32 mask_clear;
486 u32 irq;
487 u32 assign;
488 u32 edge_set;
489 u32 edge_clear;
490 u32 invert_set;
491 u32 invert_clear;
492 u32 pinstate;
493 u32 latch;
494 u32 __pad0[2];
495};
496
497#endif
498
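(The struct mirrors the PINT MMR block, so a base pointer gives typed access to every register. A minimal sketch, assuming PINT0_MASK_SET marks the start of the PINT0 block and the usual write-one semantics of the _set/_clear pairs; the bit and assign values are illustrative:

	volatile struct bfin_pint_regs *pint =
		(volatile struct bfin_pint_regs *)PINT0_MASK_SET;	/* assumed base MMR */
	u32 pending;

	pint->assign = 0;		/* byte-to-port routing, value illustrative */
	pint->edge_set = 1 << 3;	/* make request bit 3 edge sensitive */
	pint->mask_set = 1 << 3;	/* unmask interrupt bit 3 */
	pending = pint->irq;		/* read pending requests */

The field order must stay in step with the hardware map, which is why the two pad words are spelled out.)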
477#endif /* _BF548_IRQ_H_ */ 499#endif /* _BF548_IRQ_H_ */
diff --git a/arch/blackfin/mach-bf548/include/mach/pll.h b/arch/blackfin/mach-bf548/include/mach/pll.h
index 7865a090d333..94cca674d835 100644
--- a/arch/blackfin/mach-bf548/include/mach/pll.h
+++ b/arch/blackfin/mach-bf548/include/mach/pll.h
@@ -1,69 +1 @@
1/* #include <mach-common/pll.h>
2 * Copyright 2007-2008 Analog Devices Inc.
3 *
4 * Licensed under the GPL-2 or later.
5 */
6
7#ifndef _MACH_PLL_H
8#define _MACH_PLL_H
9
10#include <asm/blackfin.h>
11#include <asm/irqflags.h>
12
13/* Writing to PLL_CTL initiates a PLL relock sequence. */
14static __inline__ void bfin_write_PLL_CTL(unsigned int val)
15{
16 unsigned long flags, iwr0, iwr1, iwr2;
17
18 if (val == bfin_read_PLL_CTL())
19 return;
20
21 flags = hard_local_irq_save();
22 /* Enable the PLL Wakeup bit in SIC IWR */
23 iwr0 = bfin_read32(SIC_IWR0);
24 iwr1 = bfin_read32(SIC_IWR1);
25 iwr2 = bfin_read32(SIC_IWR2);
26	/* Only allow PLL Wakeup */
27 bfin_write32(SIC_IWR0, IWR_ENABLE(0));
28 bfin_write32(SIC_IWR1, 0);
29 bfin_write32(SIC_IWR2, 0);
30
31 bfin_write16(PLL_CTL, val);
32 SSYNC();
33 asm("IDLE;");
34
35 bfin_write32(SIC_IWR0, iwr0);
36 bfin_write32(SIC_IWR1, iwr1);
37 bfin_write32(SIC_IWR2, iwr2);
38 hard_local_irq_restore(flags);
39}
40
41/* Writing to VR_CTL initiates a PLL relock sequence. */
42static __inline__ void bfin_write_VR_CTL(unsigned int val)
43{
44 unsigned long flags, iwr0, iwr1, iwr2;
45
46 if (val == bfin_read_VR_CTL())
47 return;
48
49 flags = hard_local_irq_save();
50 /* Enable the PLL Wakeup bit in SIC IWR */
51 iwr0 = bfin_read32(SIC_IWR0);
52 iwr1 = bfin_read32(SIC_IWR1);
53 iwr2 = bfin_read32(SIC_IWR2);
54	/* Only allow PLL Wakeup */
55 bfin_write32(SIC_IWR0, IWR_ENABLE(0));
56 bfin_write32(SIC_IWR1, 0);
57 bfin_write32(SIC_IWR2, 0);
58
59 bfin_write16(VR_CTL, val);
60 SSYNC();
61 asm("IDLE;");
62
63 bfin_write32(SIC_IWR0, iwr0);
64 bfin_write32(SIC_IWR1, iwr1);
65 bfin_write32(SIC_IWR2, iwr2);
66 hard_local_irq_restore(flags);
67}
68
69#endif /* _MACH_PLL_H */
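(These deleted helpers, now shared via mach-common/pll.h, wrap every PLL_CTL/VR_CTL write in the save-IWR / IDLE / restore dance so the relock wakes only on the PLL event. A hedged caller sketch; the MSEL mask/shift values and new_msel variable are assumptions for illustration:

	unsigned int ctl = bfin_read_PLL_CTL();
	ctl = (ctl & ~0x7E00) | (new_msel << 9);	/* assumed MSEL field at bits 9..14 */
	bfin_write_PLL_CTL(ctl);			/* relock sequence happens inside the helper */
)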
diff --git a/arch/blackfin/mach-bf561/atomic.S b/arch/blackfin/mach-bf561/atomic.S
index f99f174b129f..52d6f73fcced 100644
--- a/arch/blackfin/mach-bf561/atomic.S
+++ b/arch/blackfin/mach-bf561/atomic.S
@@ -49,6 +49,7 @@ ENTRY(_get_core_lock)
49 jump .Lretry_corelock 49 jump .Lretry_corelock
50.Ldone_corelock: 50.Ldone_corelock:
51 p0 = r1; 51 p0 = r1;
52	/* flush the core's internal write buffer before invalidating the dcache */
52 CSYNC(r2); 53 CSYNC(r2);
53 flushinv[p0]; 54 flushinv[p0];
54 SSYNC(r2); 55 SSYNC(r2);
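(The same ordering rule applies from C: drain the core's store buffer before invalidating a line another core will re-read, or the pending store can be lost. A hedged C-level equivalent of the assembly pattern above, assuming the kernel's CSYNC/SSYNC macros and blackfin_dcache_invalidate_range helper:

	bfin_write32(addr, val);				/* store can linger in the core write buffer */
	CSYNC();						/* drain the write buffer ... */
	blackfin_dcache_invalidate_range(addr, addr + 4);	/* ... before dropping the cache line */
	SSYNC();
)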
@@ -685,6 +686,8 @@ ENTRY(___raw_atomic_test_asm)
685 r1 = -L1_CACHE_BYTES; 686 r1 = -L1_CACHE_BYTES;
686 r1 = r0 & r1; 687 r1 = r0 & r1;
687 p0 = r1; 688 p0 = r1;
689	/* flush the core's internal write buffer before invalidating the dcache */
690 CSYNC(r2);
688 flushinv[p0]; 691 flushinv[p0];
689 SSYNC(r2); 692 SSYNC(r2);
690 r0 = [p1]; 693 r0 = [p1];
@@ -907,6 +910,8 @@ ENTRY(___raw_uncached_fetch_asm)
907 r1 = -L1_CACHE_BYTES; 910 r1 = -L1_CACHE_BYTES;
908 r1 = r0 & r1; 911 r1 = r0 & r1;
909 p0 = r1; 912 p0 = r1;
913	/* flush the core's internal write buffer before invalidating the dcache */
914 CSYNC(r2);
910 flushinv[p0]; 915 flushinv[p0];
911 SSYNC(r2); 916 SSYNC(r2);
912 r0 = [p1]; 917 r0 = [p1];
diff --git a/arch/blackfin/mach-bf561/boards/acvilon.c b/arch/blackfin/mach-bf561/boards/acvilon.c
index 0b1c20f14fe0..3926cd909b66 100644
--- a/arch/blackfin/mach-bf561/boards/acvilon.c
+++ b/arch/blackfin/mach-bf561/boards/acvilon.c
@@ -224,7 +224,7 @@ static struct resource bfin_uart0_resources[] = {
224 }, 224 },
225}; 225};
226 226
227unsigned short bfin_uart0_peripherals[] = { 227static unsigned short bfin_uart0_peripherals[] = {
228 P_UART0_TX, P_UART0_RX, 0 228 P_UART0_TX, P_UART0_RX, 0
229}; 229};
230 230
diff --git a/arch/blackfin/mach-bf561/boards/cm_bf561.c b/arch/blackfin/mach-bf561/boards/cm_bf561.c
index 087b6b05cc73..3b67929d4c0a 100644
--- a/arch/blackfin/mach-bf561/boards/cm_bf561.c
+++ b/arch/blackfin/mach-bf561/boards/cm_bf561.c
@@ -334,7 +334,7 @@ static struct resource bfin_uart0_resources[] = {
334 }, 334 },
335}; 335};
336 336
337unsigned short bfin_uart0_peripherals[] = { 337static unsigned short bfin_uart0_peripherals[] = {
338 P_UART0_TX, P_UART0_RX, 0 338 P_UART0_TX, P_UART0_RX, 0
339}; 339};
340 340
diff --git a/arch/blackfin/mach-bf561/boards/ezkit.c b/arch/blackfin/mach-bf561/boards/ezkit.c
index ab7a487975fd..f667e7704197 100644
--- a/arch/blackfin/mach-bf561/boards/ezkit.c
+++ b/arch/blackfin/mach-bf561/boards/ezkit.c
@@ -190,7 +190,7 @@ static struct resource bfin_uart0_resources[] = {
190 }, 190 },
191}; 191};
192 192
193unsigned short bfin_uart0_peripherals[] = { 193static unsigned short bfin_uart0_peripherals[] = {
194 P_UART0_TX, P_UART0_RX, 0 194 P_UART0_TX, P_UART0_RX, 0
195}; 195};
196 196
diff --git a/arch/blackfin/mach-bf561/boards/tepla.c b/arch/blackfin/mach-bf561/boards/tepla.c
index d3017e53686b..bb056e60f6ed 100644
--- a/arch/blackfin/mach-bf561/boards/tepla.c
+++ b/arch/blackfin/mach-bf561/boards/tepla.c
@@ -72,7 +72,7 @@ static struct resource bfin_uart0_resources[] = {
72 }, 72 },
73}; 73};
74 74
75unsigned short bfin_uart0_peripherals[] = { 75static unsigned short bfin_uart0_peripherals[] = {
76 P_UART0_TX, P_UART0_RX, 0 76 P_UART0_TX, P_UART0_RX, 0
77}; 77};
78 78
diff --git a/arch/blackfin/mach-bf561/dma.c b/arch/blackfin/mach-bf561/dma.c
index c938c3c7355d..8ffdd6b4a242 100644
--- a/arch/blackfin/mach-bf561/dma.c
+++ b/arch/blackfin/mach-bf561/dma.c
@@ -11,7 +11,7 @@
11#include <asm/blackfin.h> 11#include <asm/blackfin.h>
12#include <asm/dma.h> 12#include <asm/dma.h>
13 13
14struct dma_register *dma_io_base_addr[MAX_DMA_CHANNELS] = { 14struct dma_register * const dma_io_base_addr[MAX_DMA_CHANNELS] = {
15 (struct dma_register *) DMA1_0_NEXT_DESC_PTR, 15 (struct dma_register *) DMA1_0_NEXT_DESC_PTR,
16 (struct dma_register *) DMA1_1_NEXT_DESC_PTR, 16 (struct dma_register *) DMA1_1_NEXT_DESC_PTR,
17 (struct dma_register *) DMA1_2_NEXT_DESC_PTR, 17 (struct dma_register *) DMA1_2_NEXT_DESC_PTR,
@@ -36,14 +36,14 @@ struct dma_register *dma_io_base_addr[MAX_DMA_CHANNELS] = {
36 (struct dma_register *) DMA2_9_NEXT_DESC_PTR, 36 (struct dma_register *) DMA2_9_NEXT_DESC_PTR,
37 (struct dma_register *) DMA2_10_NEXT_DESC_PTR, 37 (struct dma_register *) DMA2_10_NEXT_DESC_PTR,
38 (struct dma_register *) DMA2_11_NEXT_DESC_PTR, 38 (struct dma_register *) DMA2_11_NEXT_DESC_PTR,
39 (struct dma_register *) MDMA1_D0_NEXT_DESC_PTR, 39 (struct dma_register *) MDMA_D0_NEXT_DESC_PTR,
40 (struct dma_register *) MDMA1_S0_NEXT_DESC_PTR, 40 (struct dma_register *) MDMA_S0_NEXT_DESC_PTR,
41 (struct dma_register *) MDMA1_D1_NEXT_DESC_PTR, 41 (struct dma_register *) MDMA_D1_NEXT_DESC_PTR,
42 (struct dma_register *) MDMA1_S1_NEXT_DESC_PTR, 42 (struct dma_register *) MDMA_S1_NEXT_DESC_PTR,
43 (struct dma_register *) MDMA2_D0_NEXT_DESC_PTR, 43 (struct dma_register *) MDMA_D2_NEXT_DESC_PTR,
44 (struct dma_register *) MDMA2_S0_NEXT_DESC_PTR, 44 (struct dma_register *) MDMA_S2_NEXT_DESC_PTR,
45 (struct dma_register *) MDMA2_D1_NEXT_DESC_PTR, 45 (struct dma_register *) MDMA_D3_NEXT_DESC_PTR,
46 (struct dma_register *) MDMA2_S1_NEXT_DESC_PTR, 46 (struct dma_register *) MDMA_S3_NEXT_DESC_PTR,
47 (struct dma_register *) IMDMA_D0_NEXT_DESC_PTR, 47 (struct dma_register *) IMDMA_D0_NEXT_DESC_PTR,
48 (struct dma_register *) IMDMA_S0_NEXT_DESC_PTR, 48 (struct dma_register *) IMDMA_S0_NEXT_DESC_PTR,
49 (struct dma_register *) IMDMA_D1_NEXT_DESC_PTR, 49 (struct dma_register *) IMDMA_D1_NEXT_DESC_PTR,
diff --git a/arch/blackfin/mach-bf561/hotplug.c b/arch/blackfin/mach-bf561/hotplug.c
index c95169b612dc..4cd3b28cd046 100644
--- a/arch/blackfin/mach-bf561/hotplug.c
+++ b/arch/blackfin/mach-bf561/hotplug.c
@@ -6,7 +6,9 @@
6 */ 6 */
7 7
8#include <asm/blackfin.h> 8#include <asm/blackfin.h>
9#include <asm/irq.h>
9#include <asm/smp.h> 10#include <asm/smp.h>
11
10#define SIC_SYSIRQ(irq) (irq - (IRQ_CORETMR + 1)) 12#define SIC_SYSIRQ(irq) (irq - (IRQ_CORETMR + 1))
11 13
12int hotplug_coreb; 14int hotplug_coreb;
diff --git a/arch/blackfin/mach-bf561/include/mach/anomaly.h b/arch/blackfin/mach-bf561/include/mach/anomaly.h
index 4c108c99cb6e..6a3499b02097 100644
--- a/arch/blackfin/mach-bf561/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf561/include/mach/anomaly.h
@@ -181,7 +181,11 @@
181/* Incorrect Timer Pulse Width in Single-Shot PWM_OUT Mode with External Clock */ 181/* Incorrect Timer Pulse Width in Single-Shot PWM_OUT Mode with External Clock */
182#define ANOMALY_05000254 (__SILICON_REVISION__ > 3) 182#define ANOMALY_05000254 (__SILICON_REVISION__ > 3)
183/* Interrupt/Exception During Short Hardware Loop May Cause Bad Instruction Fetches */ 183/* Interrupt/Exception During Short Hardware Loop May Cause Bad Instruction Fetches */
184#define ANOMALY_05000257 (__SILICON_REVISION__ < 5) 184/* Temporary workaround for kgdb bug 6333 in the SMP kernel. Core B appears to hang in an exception
185 * when anomaly 05000257 is not handled on BF561 v0.5. This workaround may change
186 * once the behavior and the root cause are confirmed with the hardware team.
187 */
188#define ANOMALY_05000257 (__SILICON_REVISION__ < 5 || (__SILICON_REVISION__ == 5 && CONFIG_SMP))
185/* Instruction Cache Is Corrupted When Bits 9 and 12 of the ICPLB Data Registers Differ */ 189/* Instruction Cache Is Corrupted When Bits 9 and 12 of the ICPLB Data Registers Differ */
186#define ANOMALY_05000258 (__SILICON_REVISION__ < 5) 190#define ANOMALY_05000258 (__SILICON_REVISION__ < 5)
187/* ICPLB_STATUS MMR Register May Be Corrupted */ 191/* ICPLB_STATUS MMR Register May Be Corrupted */
diff --git a/arch/blackfin/mach-bf561/include/mach/bfin_serial.h b/arch/blackfin/mach-bf561/include/mach/bfin_serial.h
new file mode 100644
index 000000000000..08072c86d5dc
--- /dev/null
+++ b/arch/blackfin/mach-bf561/include/mach/bfin_serial.h
@@ -0,0 +1,14 @@
1/*
2 * mach/bfin_serial.h - Blackfin UART/Serial definitions
3 *
4 * Copyright 2006-2010 Analog Devices Inc.
5 *
6 * Licensed under the GPL-2 or later.
7 */
8
9#ifndef __BFIN_MACH_SERIAL_H__
10#define __BFIN_MACH_SERIAL_H__
11
12#define BFIN_UART_NR_PORTS 1
13
14#endif
diff --git a/arch/blackfin/mach-bf561/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf561/include/mach/bfin_serial_5xx.h
index e33e158bc16d..3a6947456cf1 100644
--- a/arch/blackfin/mach-bf561/include/mach/bfin_serial_5xx.h
+++ b/arch/blackfin/mach-bf561/include/mach/bfin_serial_5xx.h
@@ -4,36 +4,9 @@
4 * Licensed under the GPL-2 or later. 4 * Licensed under the GPL-2 or later.
5 */ 5 */
6 6
7#include <linux/serial.h>
8#include <asm/dma.h> 7#include <asm/dma.h>
9#include <asm/portmux.h> 8#include <asm/portmux.h>
10 9
11#define UART_GET_CHAR(uart) bfin_read16(((uart)->port.membase + OFFSET_RBR))
12#define UART_GET_DLL(uart) bfin_read16(((uart)->port.membase + OFFSET_DLL))
13#define UART_GET_IER(uart) bfin_read16(((uart)->port.membase + OFFSET_IER))
14#define UART_GET_DLH(uart) bfin_read16(((uart)->port.membase + OFFSET_DLH))
15#define UART_GET_IIR(uart) bfin_read16(((uart)->port.membase + OFFSET_IIR))
16#define UART_GET_LCR(uart) bfin_read16(((uart)->port.membase + OFFSET_LCR))
17#define UART_GET_GCTL(uart) bfin_read16(((uart)->port.membase + OFFSET_GCTL))
18
19#define UART_PUT_CHAR(uart,v) bfin_write16(((uart)->port.membase + OFFSET_THR),v)
20#define UART_PUT_DLL(uart,v) bfin_write16(((uart)->port.membase + OFFSET_DLL),v)
21#define UART_PUT_IER(uart,v) bfin_write16(((uart)->port.membase + OFFSET_IER),v)
22#define UART_SET_IER(uart,v) UART_PUT_IER(uart, UART_GET_IER(uart) | (v))
23#define UART_CLEAR_IER(uart,v) UART_PUT_IER(uart, UART_GET_IER(uart) & ~(v))
24#define UART_PUT_DLH(uart,v) bfin_write16(((uart)->port.membase + OFFSET_DLH),v)
25#define UART_PUT_LCR(uart,v) bfin_write16(((uart)->port.membase + OFFSET_LCR),v)
26#define UART_PUT_GCTL(uart,v) bfin_write16(((uart)->port.membase + OFFSET_GCTL),v)
27
28#define UART_SET_DLAB(uart) do { UART_PUT_LCR(uart, UART_GET_LCR(uart) | DLAB); SSYNC(); } while (0)
29#define UART_CLEAR_DLAB(uart) do { UART_PUT_LCR(uart, UART_GET_LCR(uart) & ~DLAB); SSYNC(); } while (0)
30
31#define UART_GET_CTS(x) gpio_get_value(x->cts_pin)
32#define UART_DISABLE_RTS(x) gpio_set_value(x->rts_pin, 1)
33#define UART_ENABLE_RTS(x) gpio_set_value(x->rts_pin, 0)
34#define UART_ENABLE_INTS(x, v) UART_PUT_IER(x, v)
35#define UART_DISABLE_INTS(x) UART_PUT_IER(x, 0)
36
37#ifdef CONFIG_BFIN_UART0_CTSRTS 10#ifdef CONFIG_BFIN_UART0_CTSRTS
38# define CONFIG_SERIAL_BFIN_CTSRTS 11# define CONFIG_SERIAL_BFIN_CTSRTS
39# ifndef CONFIG_UART0_CTS_PIN 12# ifndef CONFIG_UART0_CTS_PIN
@@ -44,51 +17,6 @@
44# endif 17# endif
45#endif 18#endif
46 19
47#define BFIN_UART_TX_FIFO_SIZE 2
48
49struct bfin_serial_port {
50 struct uart_port port;
51 unsigned int old_status;
52 int status_irq;
53 unsigned int lsr;
54#ifdef CONFIG_SERIAL_BFIN_DMA
55 int tx_done;
56 int tx_count;
57 struct circ_buf rx_dma_buf;
58 struct timer_list rx_dma_timer;
59 int rx_dma_nrows;
60 unsigned int tx_dma_channel;
61 unsigned int rx_dma_channel;
62 struct work_struct tx_dma_workqueue;
63#else
64# if ANOMALY_05000363
65 unsigned int anomaly_threshold;
66# endif
67#endif
68#ifdef CONFIG_SERIAL_BFIN_CTSRTS
69 struct timer_list cts_timer;
70 int cts_pin;
71 int rts_pin;
72#endif
73};
74
75/* The hardware clears the LSR bits upon read, so we need to cache
76 * some of the more fun bits in software so they don't get lost
77 * when checking the LSR in other code paths (TX).
78 */
79static inline unsigned int UART_GET_LSR(struct bfin_serial_port *uart)
80{
81 unsigned int lsr = bfin_read16(uart->port.membase + OFFSET_LSR);
82 uart->lsr |= (lsr & (BI|FE|PE|OE));
83 return lsr | uart->lsr;
84}
85
86static inline void UART_CLEAR_LSR(struct bfin_serial_port *uart)
87{
88 uart->lsr = 0;
89 bfin_write16(uart->port.membase + OFFSET_LSR, -1);
90}
91
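(A short sketch of why the software cache matters: a TX-path check can still observe an RX error bit that a prior hardware LSR read already cleared. The bookkeeping here is illustrative, using the standard uart_port icount counters:

	unsigned int lsr = UART_GET_LSR(uart);	/* merges live LSR with cached error bits */
	if (lsr & OE)
		uart->port.icount.overrun++;	/* account for the overrun exactly once */
	UART_CLEAR_LSR(uart);			/* drop the cached bits once accounted for */
)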
92struct bfin_serial_res { 20struct bfin_serial_res {
93 unsigned long uart_base_addr; 21 unsigned long uart_base_addr;
94 int uart_irq; 22 int uart_irq;
@@ -120,3 +48,5 @@ struct bfin_serial_res bfin_serial_resource[] = {
120}; 48};
121 49
122#define DRIVER_NAME "bfin-uart" 50#define DRIVER_NAME "bfin-uart"
51
52#include <asm/bfin_serial.h>
diff --git a/arch/blackfin/mach-bf561/include/mach/blackfin.h b/arch/blackfin/mach-bf561/include/mach/blackfin.h
index 6c7dc58c018c..dc470534c085 100644
--- a/arch/blackfin/mach-bf561/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf561/include/mach/blackfin.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2005-2009 Analog Devices Inc. 2 * Copyright 2005-2010 Analog Devices Inc.
3 * 3 *
4 * Licensed under the GPL-2 or later. 4 * Licensed under the GPL-2 or later.
5 */ 5 */
@@ -10,11 +10,14 @@
10#define BF561_FAMILY 10#define BF561_FAMILY
11 11
12#include "bf561.h" 12#include "bf561.h"
13#include "defBF561.h"
14#include "anomaly.h" 13#include "anomaly.h"
15 14
16#if !defined(__ASSEMBLY__) 15#include <asm/def_LPBlackfin.h>
17#include "cdefBF561.h" 16#include "defBF561.h"
17
18#ifndef __ASSEMBLY__
19# include <asm/cdef_LPBlackfin.h>
20# include "cdefBF561.h"
18#endif 21#endif
19 22
20#define bfin_read_FIO_FLAG_D() bfin_read_FIO0_FLAG_D() 23#define bfin_read_FIO_FLAG_D() bfin_read_FIO0_FLAG_D()
@@ -35,19 +38,4 @@
35#define bfin_read_SICB_ISR(x) bfin_read32(__SIC_MUX(SICB_ISR0, x)) 38#define bfin_read_SICB_ISR(x) bfin_read32(__SIC_MUX(SICB_ISR0, x))
36#define bfin_write_SICB_ISR(x, val) bfin_write32(__SIC_MUX(SICB_ISR0, x), val) 39#define bfin_write_SICB_ISR(x, val) bfin_write32(__SIC_MUX(SICB_ISR0, x), val)
37 40
38#define BFIN_UART_NR_PORTS 1
39
40#define OFFSET_THR 0x00 /* Transmit Holding register */
41#define OFFSET_RBR 0x00 /* Receive Buffer register */
42#define OFFSET_DLL 0x00 /* Divisor Latch (Low-Byte) */
43#define OFFSET_IER 0x04 /* Interrupt Enable Register */
44#define OFFSET_DLH 0x04 /* Divisor Latch (High-Byte) */
45#define OFFSET_IIR 0x08 /* Interrupt Identification Register */
46#define OFFSET_LCR 0x0C /* Line Control Register */
47#define OFFSET_MCR 0x10 /* Modem Control Register */
48#define OFFSET_LSR 0x14 /* Line Status Register */
49#define OFFSET_MSR 0x18 /* Modem Status Register */
50#define OFFSET_SCR 0x1C /* SCR Scratch Register */
51#define OFFSET_GCTL 0x24 /* Global Control Register */
52
53#endif /* _MACH_BLACKFIN_H_ */ 41#endif /* _MACH_BLACKFIN_H_ */
diff --git a/arch/blackfin/mach-bf561/include/mach/cdefBF561.h b/arch/blackfin/mach-bf561/include/mach/cdefBF561.h
index 2bab99152495..753331597207 100644
--- a/arch/blackfin/mach-bf561/include/mach/cdefBF561.h
+++ b/arch/blackfin/mach-bf561/include/mach/cdefBF561.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2005-2009 Analog Devices Inc. 2 * Copyright 2005-2010 Analog Devices Inc.
3 * 3 *
4 * Licensed under the GPL-2 or later. 4 * Licensed under the GPL-2 or later.
5 */ 5 */
@@ -7,14 +7,6 @@
7#ifndef _CDEF_BF561_H 7#ifndef _CDEF_BF561_H
8#define _CDEF_BF561_H 8#define _CDEF_BF561_H
9 9
10#include <asm/blackfin.h>
11
12/* include all Core registers and bit definitions */
13#include "defBF561.h"
14
15/*include core specific register pointer definitions*/
16#include <asm/cdef_LPBlackfin.h>
17
18/*********************************************************************************** */ 10/*********************************************************************************** */
19/* System MMR Register Map */ 11/* System MMR Register Map */
20/*********************************************************************************** */ 12/*********************************************************************************** */
@@ -523,14 +515,14 @@
523#define bfin_read_PPI1_FRAME() bfin_read16(PPI1_FRAME) 515#define bfin_read_PPI1_FRAME() bfin_read16(PPI1_FRAME)
524#define bfin_write_PPI1_FRAME(val) bfin_write16(PPI1_FRAME,val) 516#define bfin_write_PPI1_FRAME(val) bfin_write16(PPI1_FRAME,val)
525/*DMA traffic control registers */ 517/*DMA traffic control registers */
526#define bfin_read_DMA1_TC_PER() bfin_read16(DMA1_TC_PER) 518#define bfin_read_DMAC0_TC_PER() bfin_read16(DMAC0_TC_PER)
527#define bfin_write_DMA1_TC_PER(val) bfin_write16(DMA1_TC_PER,val) 519#define bfin_write_DMAC0_TC_PER(val) bfin_write16(DMAC0_TC_PER,val)
528#define bfin_read_DMA1_TC_CNT() bfin_read16(DMA1_TC_CNT) 520#define bfin_read_DMAC0_TC_CNT() bfin_read16(DMAC0_TC_CNT)
529#define bfin_write_DMA1_TC_CNT(val) bfin_write16(DMA1_TC_CNT,val) 521#define bfin_write_DMAC0_TC_CNT(val) bfin_write16(DMAC0_TC_CNT,val)
530#define bfin_read_DMA2_TC_PER() bfin_read16(DMA2_TC_PER) 522#define bfin_read_DMAC1_TC_PER() bfin_read16(DMAC1_TC_PER)
531#define bfin_write_DMA2_TC_PER(val) bfin_write16(DMA2_TC_PER,val) 523#define bfin_write_DMAC1_TC_PER(val) bfin_write16(DMAC1_TC_PER,val)
532#define bfin_read_DMA2_TC_CNT() bfin_read16(DMA2_TC_CNT) 524#define bfin_read_DMAC1_TC_CNT() bfin_read16(DMAC1_TC_CNT)
533#define bfin_write_DMA2_TC_CNT(val) bfin_write16(DMA2_TC_CNT,val) 525#define bfin_write_DMAC1_TC_CNT(val) bfin_write16(DMAC1_TC_CNT,val)
534/* DMA1 Controller registers (0xFFC0 1C00-0xFFC0 1FFF) */ 526/* DMA1 Controller registers (0xFFC0 1C00-0xFFC0 1FFF) */
535#define bfin_read_DMA1_0_CONFIG() bfin_read16(DMA1_0_CONFIG) 527#define bfin_read_DMA1_0_CONFIG() bfin_read16(DMA1_0_CONFIG)
536#define bfin_write_DMA1_0_CONFIG(val) bfin_write16(DMA1_0_CONFIG,val) 528#define bfin_write_DMA1_0_CONFIG(val) bfin_write16(DMA1_0_CONFIG,val)
@@ -845,110 +837,110 @@
845#define bfin_read_DMA1_11_PERIPHERAL_MAP() bfin_read16(DMA1_11_PERIPHERAL_MAP) 837#define bfin_read_DMA1_11_PERIPHERAL_MAP() bfin_read16(DMA1_11_PERIPHERAL_MAP)
846#define bfin_write_DMA1_11_PERIPHERAL_MAP(val) bfin_write16(DMA1_11_PERIPHERAL_MAP,val) 838#define bfin_write_DMA1_11_PERIPHERAL_MAP(val) bfin_write16(DMA1_11_PERIPHERAL_MAP,val)
847/* Memory DMA1 Controller registers (0xFFC0 1E80-0xFFC0 1FFF) */ 839/* Memory DMA1 Controller registers (0xFFC0 1E80-0xFFC0 1FFF) */
848#define bfin_read_MDMA1_D0_CONFIG() bfin_read16(MDMA1_D0_CONFIG) 840#define bfin_read_MDMA_D2_CONFIG() bfin_read16(MDMA_D2_CONFIG)
849#define bfin_write_MDMA1_D0_CONFIG(val) bfin_write16(MDMA1_D0_CONFIG,val) 841#define bfin_write_MDMA_D2_CONFIG(val) bfin_write16(MDMA_D2_CONFIG,val)
850#define bfin_read_MDMA1_D0_NEXT_DESC_PTR() bfin_read32(MDMA1_D0_NEXT_DESC_PTR) 842#define bfin_read_MDMA_D2_NEXT_DESC_PTR() bfin_read32(MDMA_D2_NEXT_DESC_PTR)
851#define bfin_write_MDMA1_D0_NEXT_DESC_PTR(val) bfin_write32(MDMA1_D0_NEXT_DESC_PTR,val) 843#define bfin_write_MDMA_D2_NEXT_DESC_PTR(val) bfin_write32(MDMA_D2_NEXT_DESC_PTR,val)
852#define bfin_read_MDMA1_D0_START_ADDR() bfin_read32(MDMA1_D0_START_ADDR) 844#define bfin_read_MDMA_D2_START_ADDR() bfin_read32(MDMA_D2_START_ADDR)
853#define bfin_write_MDMA1_D0_START_ADDR(val) bfin_write32(MDMA1_D0_START_ADDR,val) 845#define bfin_write_MDMA_D2_START_ADDR(val) bfin_write32(MDMA_D2_START_ADDR,val)
854#define bfin_read_MDMA1_D0_X_COUNT() bfin_read16(MDMA1_D0_X_COUNT) 846#define bfin_read_MDMA_D2_X_COUNT() bfin_read16(MDMA_D2_X_COUNT)
855#define bfin_write_MDMA1_D0_X_COUNT(val) bfin_write16(MDMA1_D0_X_COUNT,val) 847#define bfin_write_MDMA_D2_X_COUNT(val) bfin_write16(MDMA_D2_X_COUNT,val)
856#define bfin_read_MDMA1_D0_Y_COUNT() bfin_read16(MDMA1_D0_Y_COUNT) 848#define bfin_read_MDMA_D2_Y_COUNT() bfin_read16(MDMA_D2_Y_COUNT)
857#define bfin_write_MDMA1_D0_Y_COUNT(val) bfin_write16(MDMA1_D0_Y_COUNT,val) 849#define bfin_write_MDMA_D2_Y_COUNT(val) bfin_write16(MDMA_D2_Y_COUNT,val)
858#define bfin_read_MDMA1_D0_X_MODIFY() bfin_read16(MDMA1_D0_X_MODIFY) 850#define bfin_read_MDMA_D2_X_MODIFY() bfin_read16(MDMA_D2_X_MODIFY)
859#define bfin_write_MDMA1_D0_X_MODIFY(val) bfin_write16(MDMA1_D0_X_MODIFY,val) 851#define bfin_write_MDMA_D2_X_MODIFY(val) bfin_write16(MDMA_D2_X_MODIFY,val)
860#define bfin_read_MDMA1_D0_Y_MODIFY() bfin_read16(MDMA1_D0_Y_MODIFY) 852#define bfin_read_MDMA_D2_Y_MODIFY() bfin_read16(MDMA_D2_Y_MODIFY)
861#define bfin_write_MDMA1_D0_Y_MODIFY(val) bfin_write16(MDMA1_D0_Y_MODIFY,val) 853#define bfin_write_MDMA_D2_Y_MODIFY(val) bfin_write16(MDMA_D2_Y_MODIFY,val)
862#define bfin_read_MDMA1_D0_CURR_DESC_PTR() bfin_read32(MDMA1_D0_CURR_DESC_PTR) 854#define bfin_read_MDMA_D2_CURR_DESC_PTR() bfin_read32(MDMA_D2_CURR_DESC_PTR)
863#define bfin_write_MDMA1_D0_CURR_DESC_PTR(val) bfin_write32(MDMA1_D0_CURR_DESC_PTR,val) 855#define bfin_write_MDMA_D2_CURR_DESC_PTR(val) bfin_write32(MDMA_D2_CURR_DESC_PTR,val)
864#define bfin_read_MDMA1_D0_CURR_ADDR() bfin_read32(MDMA1_D0_CURR_ADDR) 856#define bfin_read_MDMA_D2_CURR_ADDR() bfin_read32(MDMA_D2_CURR_ADDR)
865#define bfin_write_MDMA1_D0_CURR_ADDR(val) bfin_write32(MDMA1_D0_CURR_ADDR,val) 857#define bfin_write_MDMA_D2_CURR_ADDR(val) bfin_write32(MDMA_D2_CURR_ADDR,val)
866#define bfin_read_MDMA1_D0_CURR_X_COUNT() bfin_read16(MDMA1_D0_CURR_X_COUNT) 858#define bfin_read_MDMA_D2_CURR_X_COUNT() bfin_read16(MDMA_D2_CURR_X_COUNT)
867#define bfin_write_MDMA1_D0_CURR_X_COUNT(val) bfin_write16(MDMA1_D0_CURR_X_COUNT,val) 859#define bfin_write_MDMA_D2_CURR_X_COUNT(val) bfin_write16(MDMA_D2_CURR_X_COUNT,val)
868#define bfin_read_MDMA1_D0_CURR_Y_COUNT() bfin_read16(MDMA1_D0_CURR_Y_COUNT) 860#define bfin_read_MDMA_D2_CURR_Y_COUNT() bfin_read16(MDMA_D2_CURR_Y_COUNT)
869#define bfin_write_MDMA1_D0_CURR_Y_COUNT(val) bfin_write16(MDMA1_D0_CURR_Y_COUNT,val) 861#define bfin_write_MDMA_D2_CURR_Y_COUNT(val) bfin_write16(MDMA_D2_CURR_Y_COUNT,val)
870#define bfin_read_MDMA1_D0_IRQ_STATUS() bfin_read16(MDMA1_D0_IRQ_STATUS) 862#define bfin_read_MDMA_D2_IRQ_STATUS() bfin_read16(MDMA_D2_IRQ_STATUS)
871#define bfin_write_MDMA1_D0_IRQ_STATUS(val) bfin_write16(MDMA1_D0_IRQ_STATUS,val) 863#define bfin_write_MDMA_D2_IRQ_STATUS(val) bfin_write16(MDMA_D2_IRQ_STATUS,val)
872#define bfin_read_MDMA1_D0_PERIPHERAL_MAP() bfin_read16(MDMA1_D0_PERIPHERAL_MAP) 864#define bfin_read_MDMA_D2_PERIPHERAL_MAP() bfin_read16(MDMA_D2_PERIPHERAL_MAP)
873#define bfin_write_MDMA1_D0_PERIPHERAL_MAP(val) bfin_write16(MDMA1_D0_PERIPHERAL_MAP,val) 865#define bfin_write_MDMA_D2_PERIPHERAL_MAP(val) bfin_write16(MDMA_D2_PERIPHERAL_MAP,val)
874#define bfin_read_MDMA1_S0_CONFIG() bfin_read16(MDMA1_S0_CONFIG) 866#define bfin_read_MDMA_S2_CONFIG() bfin_read16(MDMA_S2_CONFIG)
875#define bfin_write_MDMA1_S0_CONFIG(val) bfin_write16(MDMA1_S0_CONFIG,val) 867#define bfin_write_MDMA_S2_CONFIG(val) bfin_write16(MDMA_S2_CONFIG,val)
876#define bfin_read_MDMA1_S0_NEXT_DESC_PTR() bfin_read32(MDMA1_S0_NEXT_DESC_PTR) 868#define bfin_read_MDMA_S2_NEXT_DESC_PTR() bfin_read32(MDMA_S2_NEXT_DESC_PTR)
877#define bfin_write_MDMA1_S0_NEXT_DESC_PTR(val) bfin_write32(MDMA1_S0_NEXT_DESC_PTR,val) 869#define bfin_write_MDMA_S2_NEXT_DESC_PTR(val) bfin_write32(MDMA_S2_NEXT_DESC_PTR,val)
878#define bfin_read_MDMA1_S0_START_ADDR() bfin_read32(MDMA1_S0_START_ADDR) 870#define bfin_read_MDMA_S2_START_ADDR() bfin_read32(MDMA_S2_START_ADDR)
879#define bfin_write_MDMA1_S0_START_ADDR(val) bfin_write32(MDMA1_S0_START_ADDR,val) 871#define bfin_write_MDMA_S2_START_ADDR(val) bfin_write32(MDMA_S2_START_ADDR,val)
880#define bfin_read_MDMA1_S0_X_COUNT() bfin_read16(MDMA1_S0_X_COUNT) 872#define bfin_read_MDMA_S2_X_COUNT() bfin_read16(MDMA_S2_X_COUNT)
881#define bfin_write_MDMA1_S0_X_COUNT(val) bfin_write16(MDMA1_S0_X_COUNT,val) 873#define bfin_write_MDMA_S2_X_COUNT(val) bfin_write16(MDMA_S2_X_COUNT,val)
882#define bfin_read_MDMA1_S0_Y_COUNT() bfin_read16(MDMA1_S0_Y_COUNT) 874#define bfin_read_MDMA_S2_Y_COUNT() bfin_read16(MDMA_S2_Y_COUNT)
883#define bfin_write_MDMA1_S0_Y_COUNT(val) bfin_write16(MDMA1_S0_Y_COUNT,val) 875#define bfin_write_MDMA_S2_Y_COUNT(val) bfin_write16(MDMA_S2_Y_COUNT,val)
884#define bfin_read_MDMA1_S0_X_MODIFY() bfin_read16(MDMA1_S0_X_MODIFY) 876#define bfin_read_MDMA_S2_X_MODIFY() bfin_read16(MDMA_S2_X_MODIFY)
885#define bfin_write_MDMA1_S0_X_MODIFY(val) bfin_write16(MDMA1_S0_X_MODIFY,val) 877#define bfin_write_MDMA_S2_X_MODIFY(val) bfin_write16(MDMA_S2_X_MODIFY,val)
886#define bfin_read_MDMA1_S0_Y_MODIFY() bfin_read16(MDMA1_S0_Y_MODIFY) 878#define bfin_read_MDMA_S2_Y_MODIFY() bfin_read16(MDMA_S2_Y_MODIFY)
887#define bfin_write_MDMA1_S0_Y_MODIFY(val) bfin_write16(MDMA1_S0_Y_MODIFY,val) 879#define bfin_write_MDMA_S2_Y_MODIFY(val) bfin_write16(MDMA_S2_Y_MODIFY,val)
888#define bfin_read_MDMA1_S0_CURR_DESC_PTR() bfin_read32(MDMA1_S0_CURR_DESC_PTR) 880#define bfin_read_MDMA_S2_CURR_DESC_PTR() bfin_read32(MDMA_S2_CURR_DESC_PTR)
889#define bfin_write_MDMA1_S0_CURR_DESC_PTR(val) bfin_write32(MDMA1_S0_CURR_DESC_PTR,val) 881#define bfin_write_MDMA_S2_CURR_DESC_PTR(val) bfin_write32(MDMA_S2_CURR_DESC_PTR,val)
890#define bfin_read_MDMA1_S0_CURR_ADDR() bfin_read32(MDMA1_S0_CURR_ADDR) 882#define bfin_read_MDMA_S2_CURR_ADDR() bfin_read32(MDMA_S2_CURR_ADDR)
891#define bfin_write_MDMA1_S0_CURR_ADDR(val) bfin_write32(MDMA1_S0_CURR_ADDR,val) 883#define bfin_write_MDMA_S2_CURR_ADDR(val) bfin_write32(MDMA_S2_CURR_ADDR,val)
892#define bfin_read_MDMA1_S0_CURR_X_COUNT() bfin_read16(MDMA1_S0_CURR_X_COUNT) 884#define bfin_read_MDMA_S2_CURR_X_COUNT() bfin_read16(MDMA_S2_CURR_X_COUNT)
893#define bfin_write_MDMA1_S0_CURR_X_COUNT(val) bfin_write16(MDMA1_S0_CURR_X_COUNT,val) 885#define bfin_write_MDMA_S2_CURR_X_COUNT(val) bfin_write16(MDMA_S2_CURR_X_COUNT,val)
894#define bfin_read_MDMA1_S0_CURR_Y_COUNT() bfin_read16(MDMA1_S0_CURR_Y_COUNT) 886#define bfin_read_MDMA_S2_CURR_Y_COUNT() bfin_read16(MDMA_S2_CURR_Y_COUNT)
895#define bfin_write_MDMA1_S0_CURR_Y_COUNT(val) bfin_write16(MDMA1_S0_CURR_Y_COUNT,val) 887#define bfin_write_MDMA_S2_CURR_Y_COUNT(val) bfin_write16(MDMA_S2_CURR_Y_COUNT,val)
896#define bfin_read_MDMA1_S0_IRQ_STATUS() bfin_read16(MDMA1_S0_IRQ_STATUS) 888#define bfin_read_MDMA_S2_IRQ_STATUS() bfin_read16(MDMA_S2_IRQ_STATUS)
897#define bfin_write_MDMA1_S0_IRQ_STATUS(val) bfin_write16(MDMA1_S0_IRQ_STATUS,val) 889#define bfin_write_MDMA_S2_IRQ_STATUS(val) bfin_write16(MDMA_S2_IRQ_STATUS,val)
898#define bfin_read_MDMA1_S0_PERIPHERAL_MAP() bfin_read16(MDMA1_S0_PERIPHERAL_MAP) 890#define bfin_read_MDMA_S2_PERIPHERAL_MAP() bfin_read16(MDMA_S2_PERIPHERAL_MAP)
899#define bfin_write_MDMA1_S0_PERIPHERAL_MAP(val) bfin_write16(MDMA1_S0_PERIPHERAL_MAP,val) 891#define bfin_write_MDMA_S2_PERIPHERAL_MAP(val) bfin_write16(MDMA_S2_PERIPHERAL_MAP,val)
900#define bfin_read_MDMA1_D1_CONFIG() bfin_read16(MDMA1_D1_CONFIG) 892#define bfin_read_MDMA_D3_CONFIG() bfin_read16(MDMA_D3_CONFIG)
901#define bfin_write_MDMA1_D1_CONFIG(val) bfin_write16(MDMA1_D1_CONFIG,val) 893#define bfin_write_MDMA_D3_CONFIG(val) bfin_write16(MDMA_D3_CONFIG,val)
902#define bfin_read_MDMA1_D1_NEXT_DESC_PTR() bfin_read32(MDMA1_D1_NEXT_DESC_PTR) 894#define bfin_read_MDMA_D3_NEXT_DESC_PTR() bfin_read32(MDMA_D3_NEXT_DESC_PTR)
903#define bfin_write_MDMA1_D1_NEXT_DESC_PTR(val) bfin_write32(MDMA1_D1_NEXT_DESC_PTR,val) 895#define bfin_write_MDMA_D3_NEXT_DESC_PTR(val) bfin_write32(MDMA_D3_NEXT_DESC_PTR,val)
904#define bfin_read_MDMA1_D1_START_ADDR() bfin_read32(MDMA1_D1_START_ADDR) 896#define bfin_read_MDMA_D3_START_ADDR() bfin_read32(MDMA_D3_START_ADDR)
905#define bfin_write_MDMA1_D1_START_ADDR(val) bfin_write32(MDMA1_D1_START_ADDR,val) 897#define bfin_write_MDMA_D3_START_ADDR(val) bfin_write32(MDMA_D3_START_ADDR,val)
906#define bfin_read_MDMA1_D1_X_COUNT() bfin_read16(MDMA1_D1_X_COUNT) 898#define bfin_read_MDMA_D3_X_COUNT() bfin_read16(MDMA_D3_X_COUNT)
907#define bfin_write_MDMA1_D1_X_COUNT(val) bfin_write16(MDMA1_D1_X_COUNT,val) 899#define bfin_write_MDMA_D3_X_COUNT(val) bfin_write16(MDMA_D3_X_COUNT,val)
908#define bfin_read_MDMA1_D1_Y_COUNT() bfin_read16(MDMA1_D1_Y_COUNT) 900#define bfin_read_MDMA_D3_Y_COUNT() bfin_read16(MDMA_D3_Y_COUNT)
909#define bfin_write_MDMA1_D1_Y_COUNT(val) bfin_write16(MDMA1_D1_Y_COUNT,val) 901#define bfin_write_MDMA_D3_Y_COUNT(val) bfin_write16(MDMA_D3_Y_COUNT,val)
910#define bfin_read_MDMA1_D1_X_MODIFY() bfin_read16(MDMA1_D1_X_MODIFY) 902#define bfin_read_MDMA_D3_X_MODIFY() bfin_read16(MDMA_D3_X_MODIFY)
911#define bfin_write_MDMA1_D1_X_MODIFY(val) bfin_write16(MDMA1_D1_X_MODIFY,val) 903#define bfin_write_MDMA_D3_X_MODIFY(val) bfin_write16(MDMA_D3_X_MODIFY,val)
912#define bfin_read_MDMA1_D1_Y_MODIFY() bfin_read16(MDMA1_D1_Y_MODIFY) 904#define bfin_read_MDMA_D3_Y_MODIFY() bfin_read16(MDMA_D3_Y_MODIFY)
913#define bfin_write_MDMA1_D1_Y_MODIFY(val) bfin_write16(MDMA1_D1_Y_MODIFY,val) 905#define bfin_write_MDMA_D3_Y_MODIFY(val) bfin_write16(MDMA_D3_Y_MODIFY,val)
914#define bfin_read_MDMA1_D1_CURR_DESC_PTR() bfin_read32(MDMA1_D1_CURR_DESC_PTR) 906#define bfin_read_MDMA_D3_CURR_DESC_PTR() bfin_read32(MDMA_D3_CURR_DESC_PTR)
915#define bfin_write_MDMA1_D1_CURR_DESC_PTR(val) bfin_write32(MDMA1_D1_CURR_DESC_PTR,val) 907#define bfin_write_MDMA_D3_CURR_DESC_PTR(val) bfin_write32(MDMA_D3_CURR_DESC_PTR,val)
916#define bfin_read_MDMA1_D1_CURR_ADDR() bfin_read32(MDMA1_D1_CURR_ADDR) 908#define bfin_read_MDMA_D3_CURR_ADDR() bfin_read32(MDMA_D3_CURR_ADDR)
917#define bfin_write_MDMA1_D1_CURR_ADDR(val) bfin_write32(MDMA1_D1_CURR_ADDR,val) 909#define bfin_write_MDMA_D3_CURR_ADDR(val) bfin_write32(MDMA_D3_CURR_ADDR,val)
918#define bfin_read_MDMA1_D1_CURR_X_COUNT() bfin_read16(MDMA1_D1_CURR_X_COUNT) 910#define bfin_read_MDMA_D3_CURR_X_COUNT() bfin_read16(MDMA_D3_CURR_X_COUNT)
919#define bfin_write_MDMA1_D1_CURR_X_COUNT(val) bfin_write16(MDMA1_D1_CURR_X_COUNT,val) 911#define bfin_write_MDMA_D3_CURR_X_COUNT(val) bfin_write16(MDMA_D3_CURR_X_COUNT,val)
920#define bfin_read_MDMA1_D1_CURR_Y_COUNT() bfin_read16(MDMA1_D1_CURR_Y_COUNT) 912#define bfin_read_MDMA_D3_CURR_Y_COUNT() bfin_read16(MDMA_D3_CURR_Y_COUNT)
921#define bfin_write_MDMA1_D1_CURR_Y_COUNT(val) bfin_write16(MDMA1_D1_CURR_Y_COUNT,val) 913#define bfin_write_MDMA_D3_CURR_Y_COUNT(val) bfin_write16(MDMA_D3_CURR_Y_COUNT,val)
922#define bfin_read_MDMA1_D1_IRQ_STATUS() bfin_read16(MDMA1_D1_IRQ_STATUS) 914#define bfin_read_MDMA_D3_IRQ_STATUS() bfin_read16(MDMA_D3_IRQ_STATUS)
923#define bfin_write_MDMA1_D1_IRQ_STATUS(val) bfin_write16(MDMA1_D1_IRQ_STATUS,val) 915#define bfin_write_MDMA_D3_IRQ_STATUS(val) bfin_write16(MDMA_D3_IRQ_STATUS,val)
924#define bfin_read_MDMA1_D1_PERIPHERAL_MAP() bfin_read16(MDMA1_D1_PERIPHERAL_MAP) 916#define bfin_read_MDMA_D3_PERIPHERAL_MAP() bfin_read16(MDMA_D3_PERIPHERAL_MAP)
925#define bfin_write_MDMA1_D1_PERIPHERAL_MAP(val) bfin_write16(MDMA1_D1_PERIPHERAL_MAP,val) 917#define bfin_write_MDMA_D3_PERIPHERAL_MAP(val) bfin_write16(MDMA_D3_PERIPHERAL_MAP,val)
926#define bfin_read_MDMA1_S1_CONFIG() bfin_read16(MDMA1_S1_CONFIG) 918#define bfin_read_MDMA_S3_CONFIG() bfin_read16(MDMA_S3_CONFIG)
927#define bfin_write_MDMA1_S1_CONFIG(val) bfin_write16(MDMA1_S1_CONFIG,val) 919#define bfin_write_MDMA_S3_CONFIG(val) bfin_write16(MDMA_S3_CONFIG,val)
928#define bfin_read_MDMA1_S1_NEXT_DESC_PTR() bfin_read32(MDMA1_S1_NEXT_DESC_PTR) 920#define bfin_read_MDMA_S3_NEXT_DESC_PTR() bfin_read32(MDMA_S3_NEXT_DESC_PTR)
929#define bfin_write_MDMA1_S1_NEXT_DESC_PTR(val) bfin_write32(MDMA1_S1_NEXT_DESC_PTR,val) 921#define bfin_write_MDMA_S3_NEXT_DESC_PTR(val) bfin_write32(MDMA_S3_NEXT_DESC_PTR,val)
930#define bfin_read_MDMA1_S1_START_ADDR() bfin_read32(MDMA1_S1_START_ADDR) 922#define bfin_read_MDMA_S3_START_ADDR() bfin_read32(MDMA_S3_START_ADDR)
931#define bfin_write_MDMA1_S1_START_ADDR(val) bfin_write32(MDMA1_S1_START_ADDR,val) 923#define bfin_write_MDMA_S3_START_ADDR(val) bfin_write32(MDMA_S3_START_ADDR,val)
932#define bfin_read_MDMA1_S1_X_COUNT() bfin_read16(MDMA1_S1_X_COUNT) 924#define bfin_read_MDMA_S3_X_COUNT() bfin_read16(MDMA_S3_X_COUNT)
933#define bfin_write_MDMA1_S1_X_COUNT(val) bfin_write16(MDMA1_S1_X_COUNT,val) 925#define bfin_write_MDMA_S3_X_COUNT(val) bfin_write16(MDMA_S3_X_COUNT,val)
934#define bfin_read_MDMA1_S1_Y_COUNT() bfin_read16(MDMA1_S1_Y_COUNT) 926#define bfin_read_MDMA_S3_Y_COUNT() bfin_read16(MDMA_S3_Y_COUNT)
935#define bfin_write_MDMA1_S1_Y_COUNT(val) bfin_write16(MDMA1_S1_Y_COUNT,val) 927#define bfin_write_MDMA_S3_Y_COUNT(val) bfin_write16(MDMA_S3_Y_COUNT,val)
936#define bfin_read_MDMA1_S1_X_MODIFY() bfin_read16(MDMA1_S1_X_MODIFY) 928#define bfin_read_MDMA_S3_X_MODIFY() bfin_read16(MDMA_S3_X_MODIFY)
937#define bfin_write_MDMA1_S1_X_MODIFY(val) bfin_write16(MDMA1_S1_X_MODIFY,val) 929#define bfin_write_MDMA_S3_X_MODIFY(val) bfin_write16(MDMA_S3_X_MODIFY,val)
938#define bfin_read_MDMA1_S1_Y_MODIFY() bfin_read16(MDMA1_S1_Y_MODIFY) 930#define bfin_read_MDMA_S3_Y_MODIFY() bfin_read16(MDMA_S3_Y_MODIFY)
939#define bfin_write_MDMA1_S1_Y_MODIFY(val) bfin_write16(MDMA1_S1_Y_MODIFY,val) 931#define bfin_write_MDMA_S3_Y_MODIFY(val) bfin_write16(MDMA_S3_Y_MODIFY,val)
940#define bfin_read_MDMA1_S1_CURR_DESC_PTR() bfin_read32(MDMA1_S1_CURR_DESC_PTR) 932#define bfin_read_MDMA_S3_CURR_DESC_PTR() bfin_read32(MDMA_S3_CURR_DESC_PTR)
941#define bfin_write_MDMA1_S1_CURR_DESC_PTR(val) bfin_write32(MDMA1_S1_CURR_DESC_PTR,val) 933#define bfin_write_MDMA_S3_CURR_DESC_PTR(val) bfin_write32(MDMA_S3_CURR_DESC_PTR,val)
942#define bfin_read_MDMA1_S1_CURR_ADDR() bfin_read32(MDMA1_S1_CURR_ADDR) 934#define bfin_read_MDMA_S3_CURR_ADDR() bfin_read32(MDMA_S3_CURR_ADDR)
943#define bfin_write_MDMA1_S1_CURR_ADDR(val) bfin_write32(MDMA1_S1_CURR_ADDR,val) 935#define bfin_write_MDMA_S3_CURR_ADDR(val) bfin_write32(MDMA_S3_CURR_ADDR,val)
944#define bfin_read_MDMA1_S1_CURR_X_COUNT() bfin_read16(MDMA1_S1_CURR_X_COUNT) 936#define bfin_read_MDMA_S3_CURR_X_COUNT() bfin_read16(MDMA_S3_CURR_X_COUNT)
945#define bfin_write_MDMA1_S1_CURR_X_COUNT(val) bfin_write16(MDMA1_S1_CURR_X_COUNT,val) 937#define bfin_write_MDMA_S3_CURR_X_COUNT(val) bfin_write16(MDMA_S3_CURR_X_COUNT,val)
946#define bfin_read_MDMA1_S1_CURR_Y_COUNT() bfin_read16(MDMA1_S1_CURR_Y_COUNT) 938#define bfin_read_MDMA_S3_CURR_Y_COUNT() bfin_read16(MDMA_S3_CURR_Y_COUNT)
947#define bfin_write_MDMA1_S1_CURR_Y_COUNT(val) bfin_write16(MDMA1_S1_CURR_Y_COUNT,val) 939#define bfin_write_MDMA_S3_CURR_Y_COUNT(val) bfin_write16(MDMA_S3_CURR_Y_COUNT,val)
948#define bfin_read_MDMA1_S1_IRQ_STATUS() bfin_read16(MDMA1_S1_IRQ_STATUS) 940#define bfin_read_MDMA_S3_IRQ_STATUS() bfin_read16(MDMA_S3_IRQ_STATUS)
949#define bfin_write_MDMA1_S1_IRQ_STATUS(val) bfin_write16(MDMA1_S1_IRQ_STATUS,val) 941#define bfin_write_MDMA_S3_IRQ_STATUS(val) bfin_write16(MDMA_S3_IRQ_STATUS,val)
950#define bfin_read_MDMA1_S1_PERIPHERAL_MAP() bfin_read16(MDMA1_S1_PERIPHERAL_MAP) 942#define bfin_read_MDMA_S3_PERIPHERAL_MAP() bfin_read16(MDMA_S3_PERIPHERAL_MAP)
951#define bfin_write_MDMA1_S1_PERIPHERAL_MAP(val) bfin_write16(MDMA1_S1_PERIPHERAL_MAP,val) 943#define bfin_write_MDMA_S3_PERIPHERAL_MAP(val) bfin_write16(MDMA_S3_PERIPHERAL_MAP,val)
952/* DMA2 Controller registers (0xFFC0 0C00-0xFFC0 0DFF) */ 944/* DMA2 Controller registers (0xFFC0 0C00-0xFFC0 0DFF) */
953#define bfin_read_DMA2_0_CONFIG() bfin_read16(DMA2_0_CONFIG) 945#define bfin_read_DMA2_0_CONFIG() bfin_read16(DMA2_0_CONFIG)
954#define bfin_write_DMA2_0_CONFIG(val) bfin_write16(DMA2_0_CONFIG,val) 946#define bfin_write_DMA2_0_CONFIG(val) bfin_write16(DMA2_0_CONFIG,val)
@@ -1263,110 +1255,110 @@
1263#define bfin_read_DMA2_11_PERIPHERAL_MAP() bfin_read16(DMA2_11_PERIPHERAL_MAP) 1255#define bfin_read_DMA2_11_PERIPHERAL_MAP() bfin_read16(DMA2_11_PERIPHERAL_MAP)
1264#define bfin_write_DMA2_11_PERIPHERAL_MAP(val) bfin_write16(DMA2_11_PERIPHERAL_MAP,val) 1256#define bfin_write_DMA2_11_PERIPHERAL_MAP(val) bfin_write16(DMA2_11_PERIPHERAL_MAP,val)
1265/* Memory DMA2 Controller registers (0xFFC0 0E80-0xFFC0 0FFF) */ 1257/* Memory DMA2 Controller registers (0xFFC0 0E80-0xFFC0 0FFF) */
1266#define bfin_read_MDMA2_D0_CONFIG() bfin_read16(MDMA2_D0_CONFIG) 1258#define bfin_read_MDMA_D0_CONFIG() bfin_read16(MDMA_D0_CONFIG)
1267#define bfin_write_MDMA2_D0_CONFIG(val) bfin_write16(MDMA2_D0_CONFIG,val) 1259#define bfin_write_MDMA_D0_CONFIG(val) bfin_write16(MDMA_D0_CONFIG,val)
1268#define bfin_read_MDMA2_D0_NEXT_DESC_PTR() bfin_read32(MDMA2_D0_NEXT_DESC_PTR) 1260#define bfin_read_MDMA_D0_NEXT_DESC_PTR() bfin_read32(MDMA_D0_NEXT_DESC_PTR)
1269#define bfin_write_MDMA2_D0_NEXT_DESC_PTR(val) bfin_write32(MDMA2_D0_NEXT_DESC_PTR,val) 1261#define bfin_write_MDMA_D0_NEXT_DESC_PTR(val) bfin_write32(MDMA_D0_NEXT_DESC_PTR,val)
1270#define bfin_read_MDMA2_D0_START_ADDR() bfin_read32(MDMA2_D0_START_ADDR) 1262#define bfin_read_MDMA_D0_START_ADDR() bfin_read32(MDMA_D0_START_ADDR)
1271#define bfin_write_MDMA2_D0_START_ADDR(val) bfin_write32(MDMA2_D0_START_ADDR,val) 1263#define bfin_write_MDMA_D0_START_ADDR(val) bfin_write32(MDMA_D0_START_ADDR,val)
1272#define bfin_read_MDMA2_D0_X_COUNT() bfin_read16(MDMA2_D0_X_COUNT) 1264#define bfin_read_MDMA_D0_X_COUNT() bfin_read16(MDMA_D0_X_COUNT)
1273#define bfin_write_MDMA2_D0_X_COUNT(val) bfin_write16(MDMA2_D0_X_COUNT,val) 1265#define bfin_write_MDMA_D0_X_COUNT(val) bfin_write16(MDMA_D0_X_COUNT,val)
1274#define bfin_read_MDMA2_D0_Y_COUNT() bfin_read16(MDMA2_D0_Y_COUNT) 1266#define bfin_read_MDMA_D0_Y_COUNT() bfin_read16(MDMA_D0_Y_COUNT)
1275#define bfin_write_MDMA2_D0_Y_COUNT(val) bfin_write16(MDMA2_D0_Y_COUNT,val) 1267#define bfin_write_MDMA_D0_Y_COUNT(val) bfin_write16(MDMA_D0_Y_COUNT,val)
1276#define bfin_read_MDMA2_D0_X_MODIFY() bfin_read16(MDMA2_D0_X_MODIFY) 1268#define bfin_read_MDMA_D0_X_MODIFY() bfin_read16(MDMA_D0_X_MODIFY)
1277#define bfin_write_MDMA2_D0_X_MODIFY(val) bfin_write16(MDMA2_D0_X_MODIFY,val) 1269#define bfin_write_MDMA_D0_X_MODIFY(val) bfin_write16(MDMA_D0_X_MODIFY,val)
1278#define bfin_read_MDMA2_D0_Y_MODIFY() bfin_read16(MDMA2_D0_Y_MODIFY) 1270#define bfin_read_MDMA_D0_Y_MODIFY() bfin_read16(MDMA_D0_Y_MODIFY)
1279#define bfin_write_MDMA2_D0_Y_MODIFY(val) bfin_write16(MDMA2_D0_Y_MODIFY,val) 1271#define bfin_write_MDMA_D0_Y_MODIFY(val) bfin_write16(MDMA_D0_Y_MODIFY,val)
1280#define bfin_read_MDMA2_D0_CURR_DESC_PTR() bfin_read32(MDMA2_D0_CURR_DESC_PTR) 1272#define bfin_read_MDMA_D0_CURR_DESC_PTR() bfin_read32(MDMA_D0_CURR_DESC_PTR)
1281#define bfin_write_MDMA2_D0_CURR_DESC_PTR(val) bfin_write32(MDMA2_D0_CURR_DESC_PTR,val) 1273#define bfin_write_MDMA_D0_CURR_DESC_PTR(val) bfin_write32(MDMA_D0_CURR_DESC_PTR,val)
1282#define bfin_read_MDMA2_D0_CURR_ADDR() bfin_read32(MDMA2_D0_CURR_ADDR) 1274#define bfin_read_MDMA_D0_CURR_ADDR() bfin_read32(MDMA_D0_CURR_ADDR)
1283#define bfin_write_MDMA2_D0_CURR_ADDR(val) bfin_write32(MDMA2_D0_CURR_ADDR,val) 1275#define bfin_write_MDMA_D0_CURR_ADDR(val) bfin_write32(MDMA_D0_CURR_ADDR,val)
1284#define bfin_read_MDMA2_D0_CURR_X_COUNT() bfin_read16(MDMA2_D0_CURR_X_COUNT) 1276#define bfin_read_MDMA_D0_CURR_X_COUNT() bfin_read16(MDMA_D0_CURR_X_COUNT)
1285#define bfin_write_MDMA2_D0_CURR_X_COUNT(val) bfin_write16(MDMA2_D0_CURR_X_COUNT,val) 1277#define bfin_write_MDMA_D0_CURR_X_COUNT(val) bfin_write16(MDMA_D0_CURR_X_COUNT,val)
1286#define bfin_read_MDMA2_D0_CURR_Y_COUNT() bfin_read16(MDMA2_D0_CURR_Y_COUNT) 1278#define bfin_read_MDMA_D0_CURR_Y_COUNT() bfin_read16(MDMA_D0_CURR_Y_COUNT)
1287#define bfin_write_MDMA2_D0_CURR_Y_COUNT(val) bfin_write16(MDMA2_D0_CURR_Y_COUNT,val) 1279#define bfin_write_MDMA_D0_CURR_Y_COUNT(val) bfin_write16(MDMA_D0_CURR_Y_COUNT,val)
1288#define bfin_read_MDMA2_D0_IRQ_STATUS() bfin_read16(MDMA2_D0_IRQ_STATUS) 1280#define bfin_read_MDMA_D0_IRQ_STATUS() bfin_read16(MDMA_D0_IRQ_STATUS)
1289#define bfin_write_MDMA2_D0_IRQ_STATUS(val) bfin_write16(MDMA2_D0_IRQ_STATUS,val) 1281#define bfin_write_MDMA_D0_IRQ_STATUS(val) bfin_write16(MDMA_D0_IRQ_STATUS,val)
1290#define bfin_read_MDMA2_D0_PERIPHERAL_MAP() bfin_read16(MDMA2_D0_PERIPHERAL_MAP) 1282#define bfin_read_MDMA_D0_PERIPHERAL_MAP() bfin_read16(MDMA_D0_PERIPHERAL_MAP)
1291#define bfin_write_MDMA2_D0_PERIPHERAL_MAP(val) bfin_write16(MDMA2_D0_PERIPHERAL_MAP,val) 1283#define bfin_write_MDMA_D0_PERIPHERAL_MAP(val) bfin_write16(MDMA_D0_PERIPHERAL_MAP,val)
1292#define bfin_read_MDMA2_S0_CONFIG() bfin_read16(MDMA2_S0_CONFIG) 1284#define bfin_read_MDMA_S0_CONFIG() bfin_read16(MDMA_S0_CONFIG)
1293#define bfin_write_MDMA2_S0_CONFIG(val) bfin_write16(MDMA2_S0_CONFIG,val) 1285#define bfin_write_MDMA_S0_CONFIG(val) bfin_write16(MDMA_S0_CONFIG,val)
1294#define bfin_read_MDMA2_S0_NEXT_DESC_PTR() bfin_read32(MDMA2_S0_NEXT_DESC_PTR) 1286#define bfin_read_MDMA_S0_NEXT_DESC_PTR() bfin_read32(MDMA_S0_NEXT_DESC_PTR)
1295#define bfin_write_MDMA2_S0_NEXT_DESC_PTR(val) bfin_write32(MDMA2_S0_NEXT_DESC_PTR,val) 1287#define bfin_write_MDMA_S0_NEXT_DESC_PTR(val) bfin_write32(MDMA_S0_NEXT_DESC_PTR,val)
1296#define bfin_read_MDMA2_S0_START_ADDR() bfin_read32(MDMA2_S0_START_ADDR) 1288#define bfin_read_MDMA_S0_START_ADDR() bfin_read32(MDMA_S0_START_ADDR)
1297#define bfin_write_MDMA2_S0_START_ADDR(val) bfin_write32(MDMA2_S0_START_ADDR,val) 1289#define bfin_write_MDMA_S0_START_ADDR(val) bfin_write32(MDMA_S0_START_ADDR,val)
1298#define bfin_read_MDMA2_S0_X_COUNT() bfin_read16(MDMA2_S0_X_COUNT) 1290#define bfin_read_MDMA_S0_X_COUNT() bfin_read16(MDMA_S0_X_COUNT)
1299#define bfin_write_MDMA2_S0_X_COUNT(val) bfin_write16(MDMA2_S0_X_COUNT,val) 1291#define bfin_write_MDMA_S0_X_COUNT(val) bfin_write16(MDMA_S0_X_COUNT,val)
1300#define bfin_read_MDMA2_S0_Y_COUNT() bfin_read16(MDMA2_S0_Y_COUNT) 1292#define bfin_read_MDMA_S0_Y_COUNT() bfin_read16(MDMA_S0_Y_COUNT)
1301#define bfin_write_MDMA2_S0_Y_COUNT(val) bfin_write16(MDMA2_S0_Y_COUNT,val) 1293#define bfin_write_MDMA_S0_Y_COUNT(val) bfin_write16(MDMA_S0_Y_COUNT,val)
1302#define bfin_read_MDMA2_S0_X_MODIFY() bfin_read16(MDMA2_S0_X_MODIFY) 1294#define bfin_read_MDMA_S0_X_MODIFY() bfin_read16(MDMA_S0_X_MODIFY)
1303#define bfin_write_MDMA2_S0_X_MODIFY(val) bfin_write16(MDMA2_S0_X_MODIFY,val) 1295#define bfin_write_MDMA_S0_X_MODIFY(val) bfin_write16(MDMA_S0_X_MODIFY,val)
1304#define bfin_read_MDMA2_S0_Y_MODIFY() bfin_read16(MDMA2_S0_Y_MODIFY) 1296#define bfin_read_MDMA_S0_Y_MODIFY() bfin_read16(MDMA_S0_Y_MODIFY)
1305#define bfin_write_MDMA2_S0_Y_MODIFY(val) bfin_write16(MDMA2_S0_Y_MODIFY,val) 1297#define bfin_write_MDMA_S0_Y_MODIFY(val) bfin_write16(MDMA_S0_Y_MODIFY,val)
1306#define bfin_read_MDMA2_S0_CURR_DESC_PTR() bfin_read32(MDMA2_S0_CURR_DESC_PTR) 1298#define bfin_read_MDMA_S0_CURR_DESC_PTR() bfin_read32(MDMA_S0_CURR_DESC_PTR)
1307#define bfin_write_MDMA2_S0_CURR_DESC_PTR(val) bfin_write32(MDMA2_S0_CURR_DESC_PTR,val) 1299#define bfin_write_MDMA_S0_CURR_DESC_PTR(val) bfin_write32(MDMA_S0_CURR_DESC_PTR,val)
1308#define bfin_read_MDMA2_S0_CURR_ADDR() bfin_read32(MDMA2_S0_CURR_ADDR) 1300#define bfin_read_MDMA_S0_CURR_ADDR() bfin_read32(MDMA_S0_CURR_ADDR)
1309#define bfin_write_MDMA2_S0_CURR_ADDR(val) bfin_write32(MDMA2_S0_CURR_ADDR,val) 1301#define bfin_write_MDMA_S0_CURR_ADDR(val) bfin_write32(MDMA_S0_CURR_ADDR,val)
1310#define bfin_read_MDMA2_S0_CURR_X_COUNT() bfin_read16(MDMA2_S0_CURR_X_COUNT) 1302#define bfin_read_MDMA_S0_CURR_X_COUNT() bfin_read16(MDMA_S0_CURR_X_COUNT)
1311#define bfin_write_MDMA2_S0_CURR_X_COUNT(val) bfin_write16(MDMA2_S0_CURR_X_COUNT,val) 1303#define bfin_write_MDMA_S0_CURR_X_COUNT(val) bfin_write16(MDMA_S0_CURR_X_COUNT,val)
1312#define bfin_read_MDMA2_S0_CURR_Y_COUNT() bfin_read16(MDMA2_S0_CURR_Y_COUNT) 1304#define bfin_read_MDMA_S0_CURR_Y_COUNT() bfin_read16(MDMA_S0_CURR_Y_COUNT)
1313#define bfin_write_MDMA2_S0_CURR_Y_COUNT(val) bfin_write16(MDMA2_S0_CURR_Y_COUNT,val) 1305#define bfin_write_MDMA_S0_CURR_Y_COUNT(val) bfin_write16(MDMA_S0_CURR_Y_COUNT,val)
1314#define bfin_read_MDMA2_S0_IRQ_STATUS() bfin_read16(MDMA2_S0_IRQ_STATUS) 1306#define bfin_read_MDMA_S0_IRQ_STATUS() bfin_read16(MDMA_S0_IRQ_STATUS)
1315#define bfin_write_MDMA2_S0_IRQ_STATUS(val) bfin_write16(MDMA2_S0_IRQ_STATUS,val) 1307#define bfin_write_MDMA_S0_IRQ_STATUS(val) bfin_write16(MDMA_S0_IRQ_STATUS,val)
1316#define bfin_read_MDMA2_S0_PERIPHERAL_MAP() bfin_read16(MDMA2_S0_PERIPHERAL_MAP) 1308#define bfin_read_MDMA_S0_PERIPHERAL_MAP() bfin_read16(MDMA_S0_PERIPHERAL_MAP)
1317#define bfin_write_MDMA2_S0_PERIPHERAL_MAP(val) bfin_write16(MDMA2_S0_PERIPHERAL_MAP,val) 1309#define bfin_write_MDMA_S0_PERIPHERAL_MAP(val) bfin_write16(MDMA_S0_PERIPHERAL_MAP,val)
1318#define bfin_read_MDMA2_D1_CONFIG() bfin_read16(MDMA2_D1_CONFIG) 1310#define bfin_read_MDMA_D1_CONFIG() bfin_read16(MDMA_D1_CONFIG)
1319#define bfin_write_MDMA2_D1_CONFIG(val) bfin_write16(MDMA2_D1_CONFIG,val) 1311#define bfin_write_MDMA_D1_CONFIG(val) bfin_write16(MDMA_D1_CONFIG,val)
1320#define bfin_read_MDMA2_D1_NEXT_DESC_PTR() bfin_read32(MDMA2_D1_NEXT_DESC_PTR) 1312#define bfin_read_MDMA_D1_NEXT_DESC_PTR() bfin_read32(MDMA_D1_NEXT_DESC_PTR)
1321#define bfin_write_MDMA2_D1_NEXT_DESC_PTR(val) bfin_write32(MDMA2_D1_NEXT_DESC_PTR,val) 1313#define bfin_write_MDMA_D1_NEXT_DESC_PTR(val) bfin_write32(MDMA_D1_NEXT_DESC_PTR,val)
1322#define bfin_read_MDMA2_D1_START_ADDR() bfin_read32(MDMA2_D1_START_ADDR) 1314#define bfin_read_MDMA_D1_START_ADDR() bfin_read32(MDMA_D1_START_ADDR)
1323#define bfin_write_MDMA2_D1_START_ADDR(val) bfin_write32(MDMA2_D1_START_ADDR,val) 1315#define bfin_write_MDMA_D1_START_ADDR(val) bfin_write32(MDMA_D1_START_ADDR,val)
1324#define bfin_read_MDMA2_D1_X_COUNT() bfin_read16(MDMA2_D1_X_COUNT) 1316#define bfin_read_MDMA_D1_X_COUNT() bfin_read16(MDMA_D1_X_COUNT)
1325#define bfin_write_MDMA2_D1_X_COUNT(val) bfin_write16(MDMA2_D1_X_COUNT,val) 1317#define bfin_write_MDMA_D1_X_COUNT(val) bfin_write16(MDMA_D1_X_COUNT,val)
1326#define bfin_read_MDMA2_D1_Y_COUNT() bfin_read16(MDMA2_D1_Y_COUNT) 1318#define bfin_read_MDMA_D1_Y_COUNT() bfin_read16(MDMA_D1_Y_COUNT)
1327#define bfin_write_MDMA2_D1_Y_COUNT(val) bfin_write16(MDMA2_D1_Y_COUNT,val) 1319#define bfin_write_MDMA_D1_Y_COUNT(val) bfin_write16(MDMA_D1_Y_COUNT,val)
1328#define bfin_read_MDMA2_D1_X_MODIFY() bfin_read16(MDMA2_D1_X_MODIFY) 1320#define bfin_read_MDMA_D1_X_MODIFY() bfin_read16(MDMA_D1_X_MODIFY)
1329#define bfin_write_MDMA2_D1_X_MODIFY(val) bfin_write16(MDMA2_D1_X_MODIFY,val) 1321#define bfin_write_MDMA_D1_X_MODIFY(val) bfin_write16(MDMA_D1_X_MODIFY,val)
1330#define bfin_read_MDMA2_D1_Y_MODIFY() bfin_read16(MDMA2_D1_Y_MODIFY) 1322#define bfin_read_MDMA_D1_Y_MODIFY() bfin_read16(MDMA_D1_Y_MODIFY)
1331#define bfin_write_MDMA2_D1_Y_MODIFY(val) bfin_write16(MDMA2_D1_Y_MODIFY,val) 1323#define bfin_write_MDMA_D1_Y_MODIFY(val) bfin_write16(MDMA_D1_Y_MODIFY,val)
1332#define bfin_read_MDMA2_D1_CURR_DESC_PTR() bfin_read32(MDMA2_D1_CURR_DESC_PTR) 1324#define bfin_read_MDMA_D1_CURR_DESC_PTR() bfin_read32(MDMA_D1_CURR_DESC_PTR)
1333#define bfin_write_MDMA2_D1_CURR_DESC_PTR(val) bfin_write32(MDMA2_D1_CURR_DESC_PTR,val) 1325#define bfin_write_MDMA_D1_CURR_DESC_PTR(val) bfin_write32(MDMA_D1_CURR_DESC_PTR,val)
1334#define bfin_read_MDMA2_D1_CURR_ADDR() bfin_read32(MDMA2_D1_CURR_ADDR) 1326#define bfin_read_MDMA_D1_CURR_ADDR() bfin_read32(MDMA_D1_CURR_ADDR)
1335#define bfin_write_MDMA2_D1_CURR_ADDR(val) bfin_write32(MDMA2_D1_CURR_ADDR,val) 1327#define bfin_write_MDMA_D1_CURR_ADDR(val) bfin_write32(MDMA_D1_CURR_ADDR,val)
1336#define bfin_read_MDMA2_D1_CURR_X_COUNT() bfin_read16(MDMA2_D1_CURR_X_COUNT) 1328#define bfin_read_MDMA_D1_CURR_X_COUNT() bfin_read16(MDMA_D1_CURR_X_COUNT)
1337#define bfin_write_MDMA2_D1_CURR_X_COUNT(val) bfin_write16(MDMA2_D1_CURR_X_COUNT,val) 1329#define bfin_write_MDMA_D1_CURR_X_COUNT(val) bfin_write16(MDMA_D1_CURR_X_COUNT,val)
1338#define bfin_read_MDMA2_D1_CURR_Y_COUNT() bfin_read16(MDMA2_D1_CURR_Y_COUNT) 1330#define bfin_read_MDMA_D1_CURR_Y_COUNT() bfin_read16(MDMA_D1_CURR_Y_COUNT)
1339#define bfin_write_MDMA2_D1_CURR_Y_COUNT(val) bfin_write16(MDMA2_D1_CURR_Y_COUNT,val) 1331#define bfin_write_MDMA_D1_CURR_Y_COUNT(val) bfin_write16(MDMA_D1_CURR_Y_COUNT,val)
1340#define bfin_read_MDMA2_D1_IRQ_STATUS() bfin_read16(MDMA2_D1_IRQ_STATUS) 1332#define bfin_read_MDMA_D1_IRQ_STATUS() bfin_read16(MDMA_D1_IRQ_STATUS)
1341#define bfin_write_MDMA2_D1_IRQ_STATUS(val) bfin_write16(MDMA2_D1_IRQ_STATUS,val) 1333#define bfin_write_MDMA_D1_IRQ_STATUS(val) bfin_write16(MDMA_D1_IRQ_STATUS,val)
1342#define bfin_read_MDMA2_D1_PERIPHERAL_MAP() bfin_read16(MDMA2_D1_PERIPHERAL_MAP) 1334#define bfin_read_MDMA_D1_PERIPHERAL_MAP() bfin_read16(MDMA_D1_PERIPHERAL_MAP)
1343#define bfin_write_MDMA2_D1_PERIPHERAL_MAP(val) bfin_write16(MDMA2_D1_PERIPHERAL_MAP,val) 1335#define bfin_write_MDMA_D1_PERIPHERAL_MAP(val) bfin_write16(MDMA_D1_PERIPHERAL_MAP,val)
1344#define bfin_read_MDMA2_S1_CONFIG() bfin_read16(MDMA2_S1_CONFIG) 1336#define bfin_read_MDMA_S1_CONFIG() bfin_read16(MDMA_S1_CONFIG)
1345#define bfin_write_MDMA2_S1_CONFIG(val) bfin_write16(MDMA2_S1_CONFIG,val) 1337#define bfin_write_MDMA_S1_CONFIG(val) bfin_write16(MDMA_S1_CONFIG,val)
1346#define bfin_read_MDMA2_S1_NEXT_DESC_PTR() bfin_read32(MDMA2_S1_NEXT_DESC_PTR) 1338#define bfin_read_MDMA_S1_NEXT_DESC_PTR() bfin_read32(MDMA_S1_NEXT_DESC_PTR)
1347#define bfin_write_MDMA2_S1_NEXT_DESC_PTR(val) bfin_write32(MDMA2_S1_NEXT_DESC_PTR,val) 1339#define bfin_write_MDMA_S1_NEXT_DESC_PTR(val) bfin_write32(MDMA_S1_NEXT_DESC_PTR,val)
1348#define bfin_read_MDMA2_S1_START_ADDR() bfin_read32(MDMA2_S1_START_ADDR) 1340#define bfin_read_MDMA_S1_START_ADDR() bfin_read32(MDMA_S1_START_ADDR)
1349#define bfin_write_MDMA2_S1_START_ADDR(val) bfin_write32(MDMA2_S1_START_ADDR,val) 1341#define bfin_write_MDMA_S1_START_ADDR(val) bfin_write32(MDMA_S1_START_ADDR,val)
1350#define bfin_read_MDMA2_S1_X_COUNT() bfin_read16(MDMA2_S1_X_COUNT) 1342#define bfin_read_MDMA_S1_X_COUNT() bfin_read16(MDMA_S1_X_COUNT)
1351#define bfin_write_MDMA2_S1_X_COUNT(val) bfin_write16(MDMA2_S1_X_COUNT,val) 1343#define bfin_write_MDMA_S1_X_COUNT(val) bfin_write16(MDMA_S1_X_COUNT,val)
1352#define bfin_read_MDMA2_S1_Y_COUNT() bfin_read16(MDMA2_S1_Y_COUNT) 1344#define bfin_read_MDMA_S1_Y_COUNT() bfin_read16(MDMA_S1_Y_COUNT)
1353#define bfin_write_MDMA2_S1_Y_COUNT(val) bfin_write16(MDMA2_S1_Y_COUNT,val) 1345#define bfin_write_MDMA_S1_Y_COUNT(val) bfin_write16(MDMA_S1_Y_COUNT,val)
1354#define bfin_read_MDMA2_S1_X_MODIFY() bfin_read16(MDMA2_S1_X_MODIFY) 1346#define bfin_read_MDMA_S1_X_MODIFY() bfin_read16(MDMA_S1_X_MODIFY)
1355#define bfin_write_MDMA2_S1_X_MODIFY(val) bfin_write16(MDMA2_S1_X_MODIFY,val) 1347#define bfin_write_MDMA_S1_X_MODIFY(val) bfin_write16(MDMA_S1_X_MODIFY,val)
1356#define bfin_read_MDMA2_S1_Y_MODIFY() bfin_read16(MDMA2_S1_Y_MODIFY) 1348#define bfin_read_MDMA_S1_Y_MODIFY() bfin_read16(MDMA_S1_Y_MODIFY)
1357#define bfin_write_MDMA2_S1_Y_MODIFY(val) bfin_write16(MDMA2_S1_Y_MODIFY,val) 1349#define bfin_write_MDMA_S1_Y_MODIFY(val) bfin_write16(MDMA_S1_Y_MODIFY,val)
1358#define bfin_read_MDMA2_S1_CURR_DESC_PTR() bfin_read32(MDMA2_S1_CURR_DESC_PTR) 1350#define bfin_read_MDMA_S1_CURR_DESC_PTR() bfin_read32(MDMA_S1_CURR_DESC_PTR)
1359#define bfin_write_MDMA2_S1_CURR_DESC_PTR(val) bfin_write32(MDMA2_S1_CURR_DESC_PTR,val) 1351#define bfin_write_MDMA_S1_CURR_DESC_PTR(val) bfin_write32(MDMA_S1_CURR_DESC_PTR,val)
1360#define bfin_read_MDMA2_S1_CURR_ADDR() bfin_read32(MDMA2_S1_CURR_ADDR) 1352#define bfin_read_MDMA_S1_CURR_ADDR() bfin_read32(MDMA_S1_CURR_ADDR)
1361#define bfin_write_MDMA2_S1_CURR_ADDR(val) bfin_write32(MDMA2_S1_CURR_ADDR,val) 1353#define bfin_write_MDMA_S1_CURR_ADDR(val) bfin_write32(MDMA_S1_CURR_ADDR,val)
1362#define bfin_read_MDMA2_S1_CURR_X_COUNT() bfin_read16(MDMA2_S1_CURR_X_COUNT) 1354#define bfin_read_MDMA_S1_CURR_X_COUNT() bfin_read16(MDMA_S1_CURR_X_COUNT)
1363#define bfin_write_MDMA2_S1_CURR_X_COUNT(val) bfin_write16(MDMA2_S1_CURR_X_COUNT,val) 1355#define bfin_write_MDMA_S1_CURR_X_COUNT(val) bfin_write16(MDMA_S1_CURR_X_COUNT,val)
1364#define bfin_read_MDMA2_S1_CURR_Y_COUNT() bfin_read16(MDMA2_S1_CURR_Y_COUNT) 1356#define bfin_read_MDMA_S1_CURR_Y_COUNT() bfin_read16(MDMA_S1_CURR_Y_COUNT)
1365#define bfin_write_MDMA2_S1_CURR_Y_COUNT(val) bfin_write16(MDMA2_S1_CURR_Y_COUNT,val) 1357#define bfin_write_MDMA_S1_CURR_Y_COUNT(val) bfin_write16(MDMA_S1_CURR_Y_COUNT,val)
1366#define bfin_read_MDMA2_S1_IRQ_STATUS() bfin_read16(MDMA2_S1_IRQ_STATUS) 1358#define bfin_read_MDMA_S1_IRQ_STATUS() bfin_read16(MDMA_S1_IRQ_STATUS)
1367#define bfin_write_MDMA2_S1_IRQ_STATUS(val) bfin_write16(MDMA2_S1_IRQ_STATUS,val) 1359#define bfin_write_MDMA_S1_IRQ_STATUS(val) bfin_write16(MDMA_S1_IRQ_STATUS,val)
1368#define bfin_read_MDMA2_S1_PERIPHERAL_MAP() bfin_read16(MDMA2_S1_PERIPHERAL_MAP) 1360#define bfin_read_MDMA_S1_PERIPHERAL_MAP() bfin_read16(MDMA_S1_PERIPHERAL_MAP)
1369#define bfin_write_MDMA2_S1_PERIPHERAL_MAP(val) bfin_write16(MDMA2_S1_PERIPHERAL_MAP,val) 1361#define bfin_write_MDMA_S1_PERIPHERAL_MAP(val) bfin_write16(MDMA_S1_PERIPHERAL_MAP,val)
1370/* Internal Memory DMA Registers (0xFFC0_1800 - 0xFFC0_19FF) */ 1362/* Internal Memory DMA Registers (0xFFC0_1800 - 0xFFC0_19FF) */
1371#define bfin_read_IMDMA_D0_CONFIG() bfin_read16(IMDMA_D0_CONFIG) 1363#define bfin_read_IMDMA_D0_CONFIG() bfin_read16(IMDMA_D0_CONFIG)
1372#define bfin_write_IMDMA_D0_CONFIG(val) bfin_write16(IMDMA_D0_CONFIG,val) 1364#define bfin_write_IMDMA_D0_CONFIG(val) bfin_write16(IMDMA_D0_CONFIG,val)
@@ -1465,65 +1457,4 @@
1465#define bfin_read_IMDMA_S1_IRQ_STATUS() bfin_read16(IMDMA_S1_IRQ_STATUS) 1457#define bfin_read_IMDMA_S1_IRQ_STATUS() bfin_read16(IMDMA_S1_IRQ_STATUS)
1466#define bfin_write_IMDMA_S1_IRQ_STATUS(val) bfin_write16(IMDMA_S1_IRQ_STATUS,val) 1458#define bfin_write_IMDMA_S1_IRQ_STATUS(val) bfin_write16(IMDMA_S1_IRQ_STATUS,val)
1467 1459
1468#define bfin_read_MDMA_S0_CONFIG() bfin_read_MDMA1_S0_CONFIG()
1469#define bfin_write_MDMA_S0_CONFIG(val) bfin_write_MDMA1_S0_CONFIG(val)
1470#define bfin_read_MDMA_S0_IRQ_STATUS() bfin_read_MDMA1_S0_IRQ_STATUS()
1471#define bfin_write_MDMA_S0_IRQ_STATUS(val) bfin_write_MDMA1_S0_IRQ_STATUS(val)
1472#define bfin_read_MDMA_S0_X_MODIFY() bfin_read_MDMA1_S0_X_MODIFY()
1473#define bfin_write_MDMA_S0_X_MODIFY(val) bfin_write_MDMA1_S0_X_MODIFY(val)
1474#define bfin_read_MDMA_S0_Y_MODIFY() bfin_read_MDMA1_S0_Y_MODIFY()
1475#define bfin_write_MDMA_S0_Y_MODIFY(val) bfin_write_MDMA1_S0_Y_MODIFY(val)
1476#define bfin_read_MDMA_S0_X_COUNT() bfin_read_MDMA1_S0_X_COUNT()
1477#define bfin_write_MDMA_S0_X_COUNT(val) bfin_write_MDMA1_S0_X_COUNT(val)
1478#define bfin_read_MDMA_S0_Y_COUNT() bfin_read_MDMA1_S0_Y_COUNT()
1479#define bfin_write_MDMA_S0_Y_COUNT(val) bfin_write_MDMA1_S0_Y_COUNT(val)
1480#define bfin_read_MDMA_S0_START_ADDR() bfin_read_MDMA1_S0_START_ADDR()
1481#define bfin_write_MDMA_S0_START_ADDR(val) bfin_write_MDMA1_S0_START_ADDR(val)
1482#define bfin_read_MDMA_D0_CONFIG() bfin_read_MDMA1_D0_CONFIG()
1483#define bfin_write_MDMA_D0_CONFIG(val) bfin_write_MDMA1_D0_CONFIG(val)
1484#define bfin_read_MDMA_D0_IRQ_STATUS() bfin_read_MDMA1_D0_IRQ_STATUS()
1485#define bfin_write_MDMA_D0_IRQ_STATUS(val) bfin_write_MDMA1_D0_IRQ_STATUS(val)
1486#define bfin_read_MDMA_D0_X_MODIFY() bfin_read_MDMA1_D0_X_MODIFY()
1487#define bfin_write_MDMA_D0_X_MODIFY(val) bfin_write_MDMA1_D0_X_MODIFY(val)
1488#define bfin_read_MDMA_D0_Y_MODIFY() bfin_read_MDMA1_D0_Y_MODIFY()
1489#define bfin_write_MDMA_D0_Y_MODIFY(val) bfin_write_MDMA1_D0_Y_MODIFY(val)
1490#define bfin_read_MDMA_D0_X_COUNT() bfin_read_MDMA1_D0_X_COUNT()
1491#define bfin_write_MDMA_D0_X_COUNT(val) bfin_write_MDMA1_D0_X_COUNT(val)
1492#define bfin_read_MDMA_D0_Y_COUNT() bfin_read_MDMA1_D0_Y_COUNT()
1493#define bfin_write_MDMA_D0_Y_COUNT(val) bfin_write_MDMA1_D0_Y_COUNT(val)
1494#define bfin_read_MDMA_D0_START_ADDR() bfin_read_MDMA1_D0_START_ADDR()
1495#define bfin_write_MDMA_D0_START_ADDR(val) bfin_write_MDMA1_D0_START_ADDR(val)
1496
1497#define bfin_read_MDMA_S1_CONFIG() bfin_read_MDMA1_S1_CONFIG()
1498#define bfin_write_MDMA_S1_CONFIG(val) bfin_write_MDMA1_S1_CONFIG(val)
1499#define bfin_read_MDMA_S1_IRQ_STATUS() bfin_read_MDMA1_S1_IRQ_STATUS()
1500#define bfin_write_MDMA_S1_IRQ_STATUS(val) bfin_write_MDMA1_S1_IRQ_STATUS(val)
1501#define bfin_read_MDMA_S1_X_MODIFY() bfin_read_MDMA1_S1_X_MODIFY()
1502#define bfin_write_MDMA_S1_X_MODIFY(val) bfin_write_MDMA1_S1_X_MODIFY(val)
1503#define bfin_read_MDMA_S1_Y_MODIFY() bfin_read_MDMA1_S1_Y_MODIFY()
1504#define bfin_write_MDMA_S1_Y_MODIFY(val) bfin_write_MDMA1_S1_Y_MODIFY(val)
1505#define bfin_read_MDMA_S1_X_COUNT() bfin_read_MDMA1_S1_X_COUNT()
1506#define bfin_write_MDMA_S1_X_COUNT(val) bfin_write_MDMA1_S1_X_COUNT(val)
1507#define bfin_read_MDMA_S1_Y_COUNT() bfin_read_MDMA1_S1_Y_COUNT()
1508#define bfin_write_MDMA_S1_Y_COUNT(val) bfin_write_MDMA1_S1_Y_COUNT(val)
1509#define bfin_read_MDMA_S1_START_ADDR() bfin_read_MDMA1_S1_START_ADDR()
1510#define bfin_write_MDMA_S1_START_ADDR(val) bfin_write_MDMA1_S1_START_ADDR(val)
1511#define bfin_read_MDMA_D1_CONFIG() bfin_read_MDMA1_D1_CONFIG()
1512#define bfin_write_MDMA_D1_CONFIG(val) bfin_write_MDMA1_D1_CONFIG(val)
1513#define bfin_read_MDMA_D1_IRQ_STATUS() bfin_read_MDMA1_D1_IRQ_STATUS()
1514#define bfin_write_MDMA_D1_IRQ_STATUS(val) bfin_write_MDMA1_D1_IRQ_STATUS(val)
1515#define bfin_read_MDMA_D1_X_MODIFY() bfin_read_MDMA1_D1_X_MODIFY()
1516#define bfin_write_MDMA_D1_X_MODIFY(val) bfin_write_MDMA1_D1_X_MODIFY(val)
1517#define bfin_read_MDMA_D1_Y_MODIFY() bfin_read_MDMA1_D1_Y_MODIFY()
1518#define bfin_write_MDMA_D1_Y_MODIFY(val) bfin_write_MDMA1_D1_Y_MODIFY(val)
1519#define bfin_read_MDMA_D1_X_COUNT() bfin_read_MDMA1_D1_X_COUNT()
1520#define bfin_write_MDMA_D1_X_COUNT(val) bfin_write_MDMA1_D1_X_COUNT(val)
1521#define bfin_read_MDMA_D1_Y_COUNT() bfin_read_MDMA1_D1_Y_COUNT()
1522#define bfin_write_MDMA_D1_Y_COUNT(val) bfin_write_MDMA1_D1_Y_COUNT(val)
1523#define bfin_read_MDMA_D1_START_ADDR() bfin_read_MDMA1_D1_START_ADDR()
1524#define bfin_write_MDMA_D1_START_ADDR(val) bfin_write_MDMA1_D1_START_ADDR(val)
1525
1526/* These need to be last due to the cdef/linux inter-dependencies */
1527#include <asm/irq.h>
1528
1529#endif /* _CDEF_BF561_H */ 1460#endif /* _CDEF_BF561_H */
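
Note on the block deleted above: it was an alias layer, forwarding the generic bfin_*_MDMA_* accessors to the controller-1 (MDMA1_*) versions. After this patch the generic accessors defined earlier in the file hit the renamed MMRs directly, so driver code compiles unchanged but resolves to different addresses (see the defBF561.h diff below). A small illustration of the expansion, not a new API:

    unsigned short cfg = bfin_read_MDMA_S0_CONFIG();
    /* before: -> bfin_read_MDMA1_S0_CONFIG() -> bfin_read16(MDMA1_S0_CONFIG) at 0xFFC01F48 */
    /* after:  -> bfin_read16(MDMA_S0_CONFIG), now 0xFFC00F48 (the former MDMA2 bank)       */
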
diff --git a/arch/blackfin/mach-bf561/include/mach/defBF561.h b/arch/blackfin/mach-bf561/include/mach/defBF561.h
index 79e048d452e0..71e805ea74e5 100644
--- a/arch/blackfin/mach-bf561/include/mach/defBF561.h
+++ b/arch/blackfin/mach-bf561/include/mach/defBF561.h
@@ -1,18 +1,11 @@
1/* 1/*
2 * Copyright 2005-2009 Analog Devices Inc. 2 * Copyright 2005-2010 Analog Devices Inc.
3 * 3 *
4 * Licensed under the ADI BSD license or the GPL-2 (or later) 4 * Licensed under the ADI BSD license or the GPL-2 (or later)
5 */ 5 */
6 6
7#ifndef _DEF_BF561_H 7#ifndef _DEF_BF561_H
8#define _DEF_BF561_H 8#define _DEF_BF561_H
9/*
10#if !defined(__ADSPBF561__)
11#warning defBF561.h should only be included for BF561 chip.
12#endif
13*/
14/* include all Core registers and bit definitions */
15#include <asm/def_LPBlackfin.h>
16 9
17/*********************************************************************************** */ 10/*********************************************************************************** */
18/* System MMR Register Map */ 11/* System MMR Register Map */
@@ -311,10 +304,10 @@
311#define PPI1_FRAME 0xFFC01310 /* PPI1 Frame Length register */ 304#define PPI1_FRAME 0xFFC01310 /* PPI1 Frame Length register */
312 305
313/*DMA traffic control registers */ 306/*DMA traffic control registers */
314#define DMA1_TC_PER 0xFFC01B0C /* Traffic control periods */ 307#define DMAC0_TC_PER 0xFFC00B0C /* Traffic control periods */
315#define DMA1_TC_CNT 0xFFC01B10 /* Traffic control current counts */ 308#define DMAC0_TC_CNT 0xFFC00B10 /* Traffic control current counts */
316#define DMA2_TC_PER 0xFFC00B0C /* Traffic control periods */ 309#define DMAC1_TC_PER 0xFFC01B0C /* Traffic control periods */
317#define DMA2_TC_CNT 0xFFC00B10 /* Traffic control current counts */ 310#define DMAC1_TC_CNT 0xFFC01B10 /* Traffic control current counts */
318 311
319/* DMA1 Controller registers (0xFFC0 1C00-0xFFC0 1FFF) */ 312/* DMA1 Controller registers (0xFFC0 1C00-0xFFC0 1FFF) */
320#define DMA1_0_CONFIG 0xFFC01C08 /* DMA1 Channel 0 Configuration register */ 313#define DMA1_0_CONFIG 0xFFC01C08 /* DMA1 Channel 0 Configuration register */
@@ -486,61 +479,61 @@
486#define DMA1_11_PERIPHERAL_MAP 0xFFC01EEC /* DMA1 Channel 11 Peripheral Map Register */ 479#define DMA1_11_PERIPHERAL_MAP 0xFFC01EEC /* DMA1 Channel 11 Peripheral Map Register */
487 480
488/* Memory DMA1 Controller registers (0xFFC0 1E80-0xFFC0 1FFF) */ 481/* Memory DMA1 Controller registers (0xFFC0 1E80-0xFFC0 1FFF) */
489#define MDMA1_D0_CONFIG 0xFFC01F08 /*MemDMA1 Stream 0 Destination Configuration */ 482#define MDMA_D2_CONFIG 0xFFC01F08 /*MemDMA1 Stream 0 Destination Configuration */
490#define MDMA1_D0_NEXT_DESC_PTR 0xFFC01F00 /*MemDMA1 Stream 0 Destination Next Descriptor Ptr Reg */ 483#define MDMA_D2_NEXT_DESC_PTR 0xFFC01F00 /*MemDMA1 Stream 0 Destination Next Descriptor Ptr Reg */
491#define MDMA1_D0_START_ADDR 0xFFC01F04 /*MemDMA1 Stream 0 Destination Start Address */ 484#define MDMA_D2_START_ADDR 0xFFC01F04 /*MemDMA1 Stream 0 Destination Start Address */
492#define MDMA1_D0_X_COUNT 0xFFC01F10 /*MemDMA1 Stream 0 Destination Inner-Loop Count */ 485#define MDMA_D2_X_COUNT 0xFFC01F10 /*MemDMA1 Stream 0 Destination Inner-Loop Count */
493#define MDMA1_D0_Y_COUNT 0xFFC01F18 /*MemDMA1 Stream 0 Destination Outer-Loop Count */ 486#define MDMA_D2_Y_COUNT 0xFFC01F18 /*MemDMA1 Stream 0 Destination Outer-Loop Count */
494#define MDMA1_D0_X_MODIFY 0xFFC01F14 /*MemDMA1 Stream 0 Dest Inner-Loop Address-Increment */ 487#define MDMA_D2_X_MODIFY 0xFFC01F14 /*MemDMA1 Stream 0 Dest Inner-Loop Address-Increment */
495#define MDMA1_D0_Y_MODIFY 0xFFC01F1C /*MemDMA1 Stream 0 Dest Outer-Loop Address-Increment */ 488#define MDMA_D2_Y_MODIFY 0xFFC01F1C /*MemDMA1 Stream 0 Dest Outer-Loop Address-Increment */
496#define MDMA1_D0_CURR_DESC_PTR 0xFFC01F20 /*MemDMA1 Stream 0 Dest Current Descriptor Ptr reg */ 489#define MDMA_D2_CURR_DESC_PTR 0xFFC01F20 /*MemDMA1 Stream 0 Dest Current Descriptor Ptr reg */
497#define MDMA1_D0_CURR_ADDR 0xFFC01F24 /*MemDMA1 Stream 0 Destination Current Address */ 490#define MDMA_D2_CURR_ADDR 0xFFC01F24 /*MemDMA1 Stream 0 Destination Current Address */
498#define MDMA1_D0_CURR_X_COUNT 0xFFC01F30 /*MemDMA1 Stream 0 Dest Current Inner-Loop Count */ 491#define MDMA_D2_CURR_X_COUNT 0xFFC01F30 /*MemDMA1 Stream 0 Dest Current Inner-Loop Count */
499#define MDMA1_D0_CURR_Y_COUNT 0xFFC01F38 /*MemDMA1 Stream 0 Dest Current Outer-Loop Count */ 492#define MDMA_D2_CURR_Y_COUNT 0xFFC01F38 /*MemDMA1 Stream 0 Dest Current Outer-Loop Count */
500#define MDMA1_D0_IRQ_STATUS 0xFFC01F28 /*MemDMA1 Stream 0 Destination Interrupt/Status */ 493#define MDMA_D2_IRQ_STATUS 0xFFC01F28 /*MemDMA1 Stream 0 Destination Interrupt/Status */
501#define MDMA1_D0_PERIPHERAL_MAP 0xFFC01F2C /*MemDMA1 Stream 0 Destination Peripheral Map */ 494#define MDMA_D2_PERIPHERAL_MAP 0xFFC01F2C /*MemDMA1 Stream 0 Destination Peripheral Map */
502 495
503#define MDMA1_S0_CONFIG 0xFFC01F48 /*MemDMA1 Stream 0 Source Configuration */ 496#define MDMA_S2_CONFIG 0xFFC01F48 /*MemDMA1 Stream 0 Source Configuration */
504#define MDMA1_S0_NEXT_DESC_PTR 0xFFC01F40 /*MemDMA1 Stream 0 Source Next Descriptor Ptr Reg */ 497#define MDMA_S2_NEXT_DESC_PTR 0xFFC01F40 /*MemDMA1 Stream 0 Source Next Descriptor Ptr Reg */
505#define MDMA1_S0_START_ADDR 0xFFC01F44 /*MemDMA1 Stream 0 Source Start Address */ 498#define MDMA_S2_START_ADDR 0xFFC01F44 /*MemDMA1 Stream 0 Source Start Address */
506#define MDMA1_S0_X_COUNT 0xFFC01F50 /*MemDMA1 Stream 0 Source Inner-Loop Count */ 499#define MDMA_S2_X_COUNT 0xFFC01F50 /*MemDMA1 Stream 0 Source Inner-Loop Count */
507#define MDMA1_S0_Y_COUNT 0xFFC01F58 /*MemDMA1 Stream 0 Source Outer-Loop Count */ 500#define MDMA_S2_Y_COUNT 0xFFC01F58 /*MemDMA1 Stream 0 Source Outer-Loop Count */
508#define MDMA1_S0_X_MODIFY 0xFFC01F54 /*MemDMA1 Stream 0 Source Inner-Loop Address-Increment */ 501#define MDMA_S2_X_MODIFY 0xFFC01F54 /*MemDMA1 Stream 0 Source Inner-Loop Address-Increment */
509#define MDMA1_S0_Y_MODIFY 0xFFC01F5C /*MemDMA1 Stream 0 Source Outer-Loop Address-Increment */ 502#define MDMA_S2_Y_MODIFY 0xFFC01F5C /*MemDMA1 Stream 0 Source Outer-Loop Address-Increment */
510#define MDMA1_S0_CURR_DESC_PTR 0xFFC01F60 /*MemDMA1 Stream 0 Source Current Descriptor Ptr reg */ 503#define MDMA_S2_CURR_DESC_PTR 0xFFC01F60 /*MemDMA1 Stream 0 Source Current Descriptor Ptr reg */
511#define MDMA1_S0_CURR_ADDR 0xFFC01F64 /*MemDMA1 Stream 0 Source Current Address */ 504#define MDMA_S2_CURR_ADDR 0xFFC01F64 /*MemDMA1 Stream 0 Source Current Address */
512#define MDMA1_S0_CURR_X_COUNT 0xFFC01F70 /*MemDMA1 Stream 0 Source Current Inner-Loop Count */ 505#define MDMA_S2_CURR_X_COUNT 0xFFC01F70 /*MemDMA1 Stream 0 Source Current Inner-Loop Count */
513#define MDMA1_S0_CURR_Y_COUNT 0xFFC01F78 /*MemDMA1 Stream 0 Source Current Outer-Loop Count */ 506#define MDMA_S2_CURR_Y_COUNT 0xFFC01F78 /*MemDMA1 Stream 0 Source Current Outer-Loop Count */
514#define MDMA1_S0_IRQ_STATUS 0xFFC01F68 /*MemDMA1 Stream 0 Source Interrupt/Status */ 507#define MDMA_S2_IRQ_STATUS 0xFFC01F68 /*MemDMA1 Stream 0 Source Interrupt/Status */
515#define MDMA1_S0_PERIPHERAL_MAP 0xFFC01F6C /*MemDMA1 Stream 0 Source Peripheral Map */ 508#define MDMA_S2_PERIPHERAL_MAP 0xFFC01F6C /*MemDMA1 Stream 0 Source Peripheral Map */
516 509
517#define MDMA1_D1_CONFIG 0xFFC01F88 /*MemDMA1 Stream 1 Destination Configuration */ 510#define MDMA_D3_CONFIG 0xFFC01F88 /*MemDMA1 Stream 1 Destination Configuration */
518#define MDMA1_D1_NEXT_DESC_PTR 0xFFC01F80 /*MemDMA1 Stream 1 Destination Next Descriptor Ptr Reg */ 511#define MDMA_D3_NEXT_DESC_PTR 0xFFC01F80 /*MemDMA1 Stream 1 Destination Next Descriptor Ptr Reg */
519#define MDMA1_D1_START_ADDR 0xFFC01F84 /*MemDMA1 Stream 1 Destination Start Address */ 512#define MDMA_D3_START_ADDR 0xFFC01F84 /*MemDMA1 Stream 1 Destination Start Address */
520#define MDMA1_D1_X_COUNT 0xFFC01F90 /*MemDMA1 Stream 1 Destination Inner-Loop Count */ 513#define MDMA_D3_X_COUNT 0xFFC01F90 /*MemDMA1 Stream 1 Destination Inner-Loop Count */
521#define MDMA1_D1_Y_COUNT 0xFFC01F98 /*MemDMA1 Stream 1 Destination Outer-Loop Count */ 514#define MDMA_D3_Y_COUNT 0xFFC01F98 /*MemDMA1 Stream 1 Destination Outer-Loop Count */
522#define MDMA1_D1_X_MODIFY 0xFFC01F94 /*MemDMA1 Stream 1 Dest Inner-Loop Address-Increment */ 515#define MDMA_D3_X_MODIFY 0xFFC01F94 /*MemDMA1 Stream 1 Dest Inner-Loop Address-Increment */
523#define MDMA1_D1_Y_MODIFY 0xFFC01F9C /*MemDMA1 Stream 1 Dest Outer-Loop Address-Increment */ 516#define MDMA_D3_Y_MODIFY 0xFFC01F9C /*MemDMA1 Stream 1 Dest Outer-Loop Address-Increment */
524#define MDMA1_D1_CURR_DESC_PTR 0xFFC01FA0 /*MemDMA1 Stream 1 Dest Current Descriptor Ptr reg */ 517#define MDMA_D3_CURR_DESC_PTR 0xFFC01FA0 /*MemDMA1 Stream 1 Dest Current Descriptor Ptr reg */
525#define MDMA1_D1_CURR_ADDR 0xFFC01FA4 /*MemDMA1 Stream 1 Dest Current Address */ 518#define MDMA_D3_CURR_ADDR 0xFFC01FA4 /*MemDMA1 Stream 1 Dest Current Address */
526#define MDMA1_D1_CURR_X_COUNT 0xFFC01FB0 /*MemDMA1 Stream 1 Dest Current Inner-Loop Count */ 519#define MDMA_D3_CURR_X_COUNT 0xFFC01FB0 /*MemDMA1 Stream 1 Dest Current Inner-Loop Count */
527#define MDMA1_D1_CURR_Y_COUNT 0xFFC01FB8 /*MemDMA1 Stream 1 Dest Current Outer-Loop Count */ 520#define MDMA_D3_CURR_Y_COUNT 0xFFC01FB8 /*MemDMA1 Stream 1 Dest Current Outer-Loop Count */
528#define MDMA1_D1_IRQ_STATUS 0xFFC01FA8 /*MemDMA1 Stream 1 Dest Interrupt/Status */ 521#define MDMA_D3_IRQ_STATUS 0xFFC01FA8 /*MemDMA1 Stream 1 Dest Interrupt/Status */
529#define MDMA1_D1_PERIPHERAL_MAP 0xFFC01FAC /*MemDMA1 Stream 1 Dest Peripheral Map */ 522#define MDMA_D3_PERIPHERAL_MAP 0xFFC01FAC /*MemDMA1 Stream 1 Dest Peripheral Map */
530 523
531#define MDMA1_S1_CONFIG 0xFFC01FC8 /*MemDMA1 Stream 1 Source Configuration */ 524#define MDMA_S3_CONFIG 0xFFC01FC8 /*MemDMA1 Stream 1 Source Configuration */
532#define MDMA1_S1_NEXT_DESC_PTR 0xFFC01FC0 /*MemDMA1 Stream 1 Source Next Descriptor Ptr Reg */ 525#define MDMA_S3_NEXT_DESC_PTR 0xFFC01FC0 /*MemDMA1 Stream 1 Source Next Descriptor Ptr Reg */
533#define MDMA1_S1_START_ADDR 0xFFC01FC4 /*MemDMA1 Stream 1 Source Start Address */ 526#define MDMA_S3_START_ADDR 0xFFC01FC4 /*MemDMA1 Stream 1 Source Start Address */
534#define MDMA1_S1_X_COUNT 0xFFC01FD0 /*MemDMA1 Stream 1 Source Inner-Loop Count */ 527#define MDMA_S3_X_COUNT 0xFFC01FD0 /*MemDMA1 Stream 1 Source Inner-Loop Count */
535#define MDMA1_S1_Y_COUNT 0xFFC01FD8 /*MemDMA1 Stream 1 Source Outer-Loop Count */ 528#define MDMA_S3_Y_COUNT 0xFFC01FD8 /*MemDMA1 Stream 1 Source Outer-Loop Count */
536#define MDMA1_S1_X_MODIFY 0xFFC01FD4 /*MemDMA1 Stream 1 Source Inner-Loop Address-Increment */ 529#define MDMA_S3_X_MODIFY 0xFFC01FD4 /*MemDMA1 Stream 1 Source Inner-Loop Address-Increment */
537#define MDMA1_S1_Y_MODIFY 0xFFC01FDC /*MemDMA1 Stream 1 Source Outer-Loop Address-Increment */ 530#define MDMA_S3_Y_MODIFY 0xFFC01FDC /*MemDMA1 Stream 1 Source Outer-Loop Address-Increment */
538#define MDMA1_S1_CURR_DESC_PTR 0xFFC01FE0 /*MemDMA1 Stream 1 Source Current Descriptor Ptr reg */ 531#define MDMA_S3_CURR_DESC_PTR 0xFFC01FE0 /*MemDMA1 Stream 1 Source Current Descriptor Ptr reg */
539#define MDMA1_S1_CURR_ADDR 0xFFC01FE4 /*MemDMA1 Stream 1 Source Current Address */ 532#define MDMA_S3_CURR_ADDR 0xFFC01FE4 /*MemDMA1 Stream 1 Source Current Address */
540#define MDMA1_S1_CURR_X_COUNT 0xFFC01FF0 /*MemDMA1 Stream 1 Source Current Inner-Loop Count */ 533#define MDMA_S3_CURR_X_COUNT 0xFFC01FF0 /*MemDMA1 Stream 1 Source Current Inner-Loop Count */
541#define MDMA1_S1_CURR_Y_COUNT 0xFFC01FF8 /*MemDMA1 Stream 1 Source Current Outer-Loop Count */ 534#define MDMA_S3_CURR_Y_COUNT 0xFFC01FF8 /*MemDMA1 Stream 1 Source Current Outer-Loop Count */
542#define MDMA1_S1_IRQ_STATUS 0xFFC01FE8 /*MemDMA1 Stream 1 Source Interrupt/Status */ 535#define MDMA_S3_IRQ_STATUS 0xFFC01FE8 /*MemDMA1 Stream 1 Source Interrupt/Status */
543#define MDMA1_S1_PERIPHERAL_MAP 0xFFC01FEC /*MemDMA1 Stream 1 Source Peripheral Map */ 536#define MDMA_S3_PERIPHERAL_MAP 0xFFC01FEC /*MemDMA1 Stream 1 Source Peripheral Map */
544 537
545/* DMA2 Controller registers (0xFFC0 0C00-0xFFC0 0DFF) */ 538/* DMA2 Controller registers (0xFFC0 0C00-0xFFC0 0DFF) */
546#define DMA2_0_CONFIG 0xFFC00C08 /* DMA2 Channel 0 Configuration register */ 539#define DMA2_0_CONFIG 0xFFC00C08 /* DMA2 Channel 0 Configuration register */
@@ -712,117 +705,61 @@
712#define DMA2_11_PERIPHERAL_MAP 0xFFC00EEC /* DMA2 Channel 11 Peripheral Map Register */ 705#define DMA2_11_PERIPHERAL_MAP 0xFFC00EEC /* DMA2 Channel 11 Peripheral Map Register */
713 706
714/* Memory DMA2 Controller registers (0xFFC0 0E80-0xFFC0 0FFF) */ 707/* Memory DMA2 Controller registers (0xFFC0 0E80-0xFFC0 0FFF) */
715#define MDMA2_D0_CONFIG 0xFFC00F08 /*MemDMA2 Stream 0 Destination Configuration register */ 708#define MDMA_D0_CONFIG 0xFFC00F08 /*MemDMA2 Stream 0 Destination Configuration register */
716#define MDMA2_D0_NEXT_DESC_PTR 0xFFC00F00 /*MemDMA2 Stream 0 Destination Next Descriptor Ptr Reg */ 709#define MDMA_D0_NEXT_DESC_PTR 0xFFC00F00 /*MemDMA2 Stream 0 Destination Next Descriptor Ptr Reg */
717#define MDMA2_D0_START_ADDR 0xFFC00F04 /*MemDMA2 Stream 0 Destination Start Address */ 710#define MDMA_D0_START_ADDR 0xFFC00F04 /*MemDMA2 Stream 0 Destination Start Address */
718#define MDMA2_D0_X_COUNT 0xFFC00F10 /*MemDMA2 Stream 0 Dest Inner-Loop Count register */ 711#define MDMA_D0_X_COUNT 0xFFC00F10 /*MemDMA2 Stream 0 Dest Inner-Loop Count register */
719#define MDMA2_D0_Y_COUNT 0xFFC00F18 /*MemDMA2 Stream 0 Dest Outer-Loop Count register */ 712#define MDMA_D0_Y_COUNT 0xFFC00F18 /*MemDMA2 Stream 0 Dest Outer-Loop Count register */
720#define MDMA2_D0_X_MODIFY 0xFFC00F14 /*MemDMA2 Stream 0 Dest Inner-Loop Address-Increment */ 713#define MDMA_D0_X_MODIFY 0xFFC00F14 /*MemDMA2 Stream 0 Dest Inner-Loop Address-Increment */
721#define MDMA2_D0_Y_MODIFY 0xFFC00F1C /*MemDMA2 Stream 0 Dest Outer-Loop Address-Increment */ 714#define MDMA_D0_Y_MODIFY 0xFFC00F1C /*MemDMA2 Stream 0 Dest Outer-Loop Address-Increment */
722#define MDMA2_D0_CURR_DESC_PTR 0xFFC00F20 /*MemDMA2 Stream 0 Dest Current Descriptor Ptr reg */ 715#define MDMA_D0_CURR_DESC_PTR 0xFFC00F20 /*MemDMA2 Stream 0 Dest Current Descriptor Ptr reg */
723#define MDMA2_D0_CURR_ADDR 0xFFC00F24 /*MemDMA2 Stream 0 Destination Current Address */ 716#define MDMA_D0_CURR_ADDR 0xFFC00F24 /*MemDMA2 Stream 0 Destination Current Address */
724#define MDMA2_D0_CURR_X_COUNT 0xFFC00F30 /*MemDMA2 Stream 0 Dest Current Inner-Loop Count reg */ 717#define MDMA_D0_CURR_X_COUNT 0xFFC00F30 /*MemDMA2 Stream 0 Dest Current Inner-Loop Count reg */
725#define MDMA2_D0_CURR_Y_COUNT 0xFFC00F38 /*MemDMA2 Stream 0 Dest Current Outer-Loop Count reg */ 718#define MDMA_D0_CURR_Y_COUNT 0xFFC00F38 /*MemDMA2 Stream 0 Dest Current Outer-Loop Count reg */
726#define MDMA2_D0_IRQ_STATUS 0xFFC00F28 /*MemDMA2 Stream 0 Dest Interrupt/Status Register */ 719#define MDMA_D0_IRQ_STATUS 0xFFC00F28 /*MemDMA2 Stream 0 Dest Interrupt/Status Register */
727#define MDMA2_D0_PERIPHERAL_MAP 0xFFC00F2C /*MemDMA2 Stream 0 Destination Peripheral Map register */ 720#define MDMA_D0_PERIPHERAL_MAP 0xFFC00F2C /*MemDMA2 Stream 0 Destination Peripheral Map register */
728 721
729#define MDMA2_S0_CONFIG 0xFFC00F48 /*MemDMA2 Stream 0 Source Configuration register */ 722#define MDMA_S0_CONFIG 0xFFC00F48 /*MemDMA2 Stream 0 Source Configuration register */
730#define MDMA2_S0_NEXT_DESC_PTR 0xFFC00F40 /*MemDMA2 Stream 0 Source Next Descriptor Ptr Reg */ 723#define MDMA_S0_NEXT_DESC_PTR 0xFFC00F40 /*MemDMA2 Stream 0 Source Next Descriptor Ptr Reg */
731#define MDMA2_S0_START_ADDR 0xFFC00F44 /*MemDMA2 Stream 0 Source Start Address */ 724#define MDMA_S0_START_ADDR 0xFFC00F44 /*MemDMA2 Stream 0 Source Start Address */
732#define MDMA2_S0_X_COUNT 0xFFC00F50 /*MemDMA2 Stream 0 Source Inner-Loop Count register */ 725#define MDMA_S0_X_COUNT 0xFFC00F50 /*MemDMA2 Stream 0 Source Inner-Loop Count register */
733#define MDMA2_S0_Y_COUNT 0xFFC00F58 /*MemDMA2 Stream 0 Source Outer-Loop Count register */ 726#define MDMA_S0_Y_COUNT 0xFFC00F58 /*MemDMA2 Stream 0 Source Outer-Loop Count register */
734#define MDMA2_S0_X_MODIFY 0xFFC00F54 /*MemDMA2 Stream 0 Src Inner-Loop Addr-Increment reg */ 727#define MDMA_S0_X_MODIFY 0xFFC00F54 /*MemDMA2 Stream 0 Src Inner-Loop Addr-Increment reg */
735#define MDMA2_S0_Y_MODIFY 0xFFC00F5C /*MemDMA2 Stream 0 Src Outer-Loop Addr-Increment reg */ 728#define MDMA_S0_Y_MODIFY 0xFFC00F5C /*MemDMA2 Stream 0 Src Outer-Loop Addr-Increment reg */
736#define MDMA2_S0_CURR_DESC_PTR 0xFFC00F60 /*MemDMA2 Stream 0 Source Current Descriptor Ptr reg */ 729#define MDMA_S0_CURR_DESC_PTR 0xFFC00F60 /*MemDMA2 Stream 0 Source Current Descriptor Ptr reg */
737#define MDMA2_S0_CURR_ADDR 0xFFC00F64 /*MemDMA2 Stream 0 Source Current Address */ 730#define MDMA_S0_CURR_ADDR 0xFFC00F64 /*MemDMA2 Stream 0 Source Current Address */
738#define MDMA2_S0_CURR_X_COUNT 0xFFC00F70 /*MemDMA2 Stream 0 Src Current Inner-Loop Count reg */ 731#define MDMA_S0_CURR_X_COUNT 0xFFC00F70 /*MemDMA2 Stream 0 Src Current Inner-Loop Count reg */
739#define MDMA2_S0_CURR_Y_COUNT 0xFFC00F78 /*MemDMA2 Stream 0 Src Current Outer-Loop Count reg */ 732#define MDMA_S0_CURR_Y_COUNT 0xFFC00F78 /*MemDMA2 Stream 0 Src Current Outer-Loop Count reg */
740#define MDMA2_S0_IRQ_STATUS 0xFFC00F68 /*MemDMA2 Stream 0 Source Interrupt/Status Register */ 733#define MDMA_S0_IRQ_STATUS 0xFFC00F68 /*MemDMA2 Stream 0 Source Interrupt/Status Register */
741#define MDMA2_S0_PERIPHERAL_MAP 0xFFC00F6C /*MemDMA2 Stream 0 Source Peripheral Map register */ 734#define MDMA_S0_PERIPHERAL_MAP 0xFFC00F6C /*MemDMA2 Stream 0 Source Peripheral Map register */
742 735
743#define MDMA2_D1_CONFIG 0xFFC00F88 /*MemDMA2 Stream 1 Destination Configuration register */ 736#define MDMA_D1_CONFIG 0xFFC00F88 /*MemDMA2 Stream 1 Destination Configuration register */
744#define MDMA2_D1_NEXT_DESC_PTR 0xFFC00F80 /*MemDMA2 Stream 1 Destination Next Descriptor Ptr Reg */ 737#define MDMA_D1_NEXT_DESC_PTR 0xFFC00F80 /*MemDMA2 Stream 1 Destination Next Descriptor Ptr Reg */
745#define MDMA2_D1_START_ADDR 0xFFC00F84 /*MemDMA2 Stream 1 Destination Start Address */ 738#define MDMA_D1_START_ADDR 0xFFC00F84 /*MemDMA2 Stream 1 Destination Start Address */
746#define MDMA2_D1_X_COUNT 0xFFC00F90 /*MemDMA2 Stream 1 Dest Inner-Loop Count register */ 739#define MDMA_D1_X_COUNT 0xFFC00F90 /*MemDMA2 Stream 1 Dest Inner-Loop Count register */
747#define MDMA2_D1_Y_COUNT 0xFFC00F98 /*MemDMA2 Stream 1 Dest Outer-Loop Count register */ 740#define MDMA_D1_Y_COUNT 0xFFC00F98 /*MemDMA2 Stream 1 Dest Outer-Loop Count register */
748#define MDMA2_D1_X_MODIFY 0xFFC00F94 /*MemDMA2 Stream 1 Dest Inner-Loop Address-Increment */ 741#define MDMA_D1_X_MODIFY 0xFFC00F94 /*MemDMA2 Stream 1 Dest Inner-Loop Address-Increment */
749#define MDMA2_D1_Y_MODIFY 0xFFC00F9C /*MemDMA2 Stream 1 Dest Outer-Loop Address-Increment */ 742#define MDMA_D1_Y_MODIFY 0xFFC00F9C /*MemDMA2 Stream 1 Dest Outer-Loop Address-Increment */
750#define MDMA2_D1_CURR_DESC_PTR 0xFFC00FA0 /*MemDMA2 Stream 1 Destination Current Descriptor Ptr */ 743#define MDMA_D1_CURR_DESC_PTR 0xFFC00FA0 /*MemDMA2 Stream 1 Destination Current Descriptor Ptr */
751#define MDMA2_D1_CURR_ADDR 0xFFC00FA4 /*MemDMA2 Stream 1 Destination Current Address reg */ 744#define MDMA_D1_CURR_ADDR 0xFFC00FA4 /*MemDMA2 Stream 1 Destination Current Address reg */
752#define MDMA2_D1_CURR_X_COUNT 0xFFC00FB0 /*MemDMA2 Stream 1 Dest Current Inner-Loop Count reg */ 745#define MDMA_D1_CURR_X_COUNT 0xFFC00FB0 /*MemDMA2 Stream 1 Dest Current Inner-Loop Count reg */
753#define MDMA2_D1_CURR_Y_COUNT 0xFFC00FB8 /*MemDMA2 Stream 1 Dest Current Outer-Loop Count reg */ 746#define MDMA_D1_CURR_Y_COUNT 0xFFC00FB8 /*MemDMA2 Stream 1 Dest Current Outer-Loop Count reg */
754#define MDMA2_D1_IRQ_STATUS 0xFFC00FA8 /*MemDMA2 Stream 1 Destination Interrupt/Status Reg */ 747#define MDMA_D1_IRQ_STATUS 0xFFC00FA8 /*MemDMA2 Stream 1 Destination Interrupt/Status Reg */
755#define MDMA2_D1_PERIPHERAL_MAP 0xFFC00FAC /*MemDMA2 Stream 1 Destination Peripheral Map register */ 748#define MDMA_D1_PERIPHERAL_MAP 0xFFC00FAC /*MemDMA2 Stream 1 Destination Peripheral Map register */
756 749
757#define MDMA2_S1_CONFIG 0xFFC00FC8 /*MemDMA2 Stream 1 Source Configuration register */ 750#define MDMA_S1_CONFIG 0xFFC00FC8 /*MemDMA2 Stream 1 Source Configuration register */
758#define MDMA2_S1_NEXT_DESC_PTR 0xFFC00FC0 /*MemDMA2 Stream 1 Source Next Descriptor Ptr Reg */ 751#define MDMA_S1_NEXT_DESC_PTR 0xFFC00FC0 /*MemDMA2 Stream 1 Source Next Descriptor Ptr Reg */
759#define MDMA2_S1_START_ADDR 0xFFC00FC4 /*MemDMA2 Stream 1 Source Start Address */ 752#define MDMA_S1_START_ADDR 0xFFC00FC4 /*MemDMA2 Stream 1 Source Start Address */
760#define MDMA2_S1_X_COUNT 0xFFC00FD0 /*MemDMA2 Stream 1 Source Inner-Loop Count register */ 753#define MDMA_S1_X_COUNT 0xFFC00FD0 /*MemDMA2 Stream 1 Source Inner-Loop Count register */
761#define MDMA2_S1_Y_COUNT 0xFFC00FD8 /*MemDMA2 Stream 1 Source Outer-Loop Count register */ 754#define MDMA_S1_Y_COUNT 0xFFC00FD8 /*MemDMA2 Stream 1 Source Outer-Loop Count register */
762#define MDMA2_S1_X_MODIFY 0xFFC00FD4 /*MemDMA2 Stream 1 Src Inner-Loop Address-Increment */ 755#define MDMA_S1_X_MODIFY 0xFFC00FD4 /*MemDMA2 Stream 1 Src Inner-Loop Address-Increment */
763#define MDMA2_S1_Y_MODIFY 0xFFC00FDC /*MemDMA2 Stream 1 Source Outer-Loop Address-Increment */ 756#define MDMA_S1_Y_MODIFY 0xFFC00FDC /*MemDMA2 Stream 1 Source Outer-Loop Address-Increment */
764#define MDMA2_S1_CURR_DESC_PTR 0xFFC00FE0 /*MemDMA2 Stream 1 Source Current Descriptor Ptr reg */ 757#define MDMA_S1_CURR_DESC_PTR 0xFFC00FE0 /*MemDMA2 Stream 1 Source Current Descriptor Ptr reg */
765#define MDMA2_S1_CURR_ADDR 0xFFC00FE4 /*MemDMA2 Stream 1 Source Current Address */ 758#define MDMA_S1_CURR_ADDR 0xFFC00FE4 /*MemDMA2 Stream 1 Source Current Address */
766#define MDMA2_S1_CURR_X_COUNT 0xFFC00FF0 /*MemDMA2 Stream 1 Source Current Inner-Loop Count */ 759#define MDMA_S1_CURR_X_COUNT 0xFFC00FF0 /*MemDMA2 Stream 1 Source Current Inner-Loop Count */
767#define MDMA2_S1_CURR_Y_COUNT 0xFFC00FF8 /*MemDMA2 Stream 1 Source Current Outer-Loop Count */ 760#define MDMA_S1_CURR_Y_COUNT 0xFFC00FF8 /*MemDMA2 Stream 1 Source Current Outer-Loop Count */
768#define MDMA2_S1_IRQ_STATUS 0xFFC00FE8 /*MemDMA2 Stream 1 Source Interrupt/Status Register */ 761#define MDMA_S1_IRQ_STATUS 0xFFC00FE8 /*MemDMA2 Stream 1 Source Interrupt/Status Register */
769#define MDMA2_S1_PERIPHERAL_MAP 0xFFC00FEC /*MemDMA2 Stream 1 Source Peripheral Map register */ 762#define MDMA_S1_PERIPHERAL_MAP 0xFFC00FEC /*MemDMA2 Stream 1 Source Peripheral Map register */
770
771#define MDMA_D0_NEXT_DESC_PTR MDMA1_D0_NEXT_DESC_PTR
772#define MDMA_D0_START_ADDR MDMA1_D0_START_ADDR
773#define MDMA_D0_CONFIG MDMA1_D0_CONFIG
774#define MDMA_D0_X_COUNT MDMA1_D0_X_COUNT
775#define MDMA_D0_X_MODIFY MDMA1_D0_X_MODIFY
776#define MDMA_D0_Y_COUNT MDMA1_D0_Y_COUNT
777#define MDMA_D0_Y_MODIFY MDMA1_D0_Y_MODIFY
778#define MDMA_D0_CURR_DESC_PTR MDMA1_D0_CURR_DESC_PTR
779#define MDMA_D0_CURR_ADDR MDMA1_D0_CURR_ADDR
780#define MDMA_D0_IRQ_STATUS MDMA1_D0_IRQ_STATUS
781#define MDMA_D0_PERIPHERAL_MAP MDMA1_D0_PERIPHERAL_MAP
782#define MDMA_D0_CURR_X_COUNT MDMA1_D0_CURR_X_COUNT
783#define MDMA_D0_CURR_Y_COUNT MDMA1_D0_CURR_Y_COUNT
784
785#define MDMA_S0_NEXT_DESC_PTR MDMA1_S0_NEXT_DESC_PTR
786#define MDMA_S0_START_ADDR MDMA1_S0_START_ADDR
787#define MDMA_S0_CONFIG MDMA1_S0_CONFIG
788#define MDMA_S0_X_COUNT MDMA1_S0_X_COUNT
789#define MDMA_S0_X_MODIFY MDMA1_S0_X_MODIFY
790#define MDMA_S0_Y_COUNT MDMA1_S0_Y_COUNT
791#define MDMA_S0_Y_MODIFY MDMA1_S0_Y_MODIFY
792#define MDMA_S0_CURR_DESC_PTR MDMA1_S0_CURR_DESC_PTR
793#define MDMA_S0_CURR_ADDR MDMA1_S0_CURR_ADDR
794#define MDMA_S0_IRQ_STATUS MDMA1_S0_IRQ_STATUS
795#define MDMA_S0_PERIPHERAL_MAP MDMA1_S0_PERIPHERAL_MAP
796#define MDMA_S0_CURR_X_COUNT MDMA1_S0_CURR_X_COUNT
797#define MDMA_S0_CURR_Y_COUNT MDMA1_S0_CURR_Y_COUNT
798
799#define MDMA_D1_NEXT_DESC_PTR MDMA1_D1_NEXT_DESC_PTR
800#define MDMA_D1_START_ADDR MDMA1_D1_START_ADDR
801#define MDMA_D1_CONFIG MDMA1_D1_CONFIG
802#define MDMA_D1_X_COUNT MDMA1_D1_X_COUNT
803#define MDMA_D1_X_MODIFY MDMA1_D1_X_MODIFY
804#define MDMA_D1_Y_COUNT MDMA1_D1_Y_COUNT
805#define MDMA_D1_Y_MODIFY MDMA1_D1_Y_MODIFY
806#define MDMA_D1_CURR_DESC_PTR MDMA1_D1_CURR_DESC_PTR
807#define MDMA_D1_CURR_ADDR MDMA1_D1_CURR_ADDR
808#define MDMA_D1_IRQ_STATUS MDMA1_D1_IRQ_STATUS
809#define MDMA_D1_PERIPHERAL_MAP MDMA1_D1_PERIPHERAL_MAP
810#define MDMA_D1_CURR_X_COUNT MDMA1_D1_CURR_X_COUNT
811#define MDMA_D1_CURR_Y_COUNT MDMA1_D1_CURR_Y_COUNT
812
813#define MDMA_S1_NEXT_DESC_PTR MDMA1_S1_NEXT_DESC_PTR
814#define MDMA_S1_START_ADDR MDMA1_S1_START_ADDR
815#define MDMA_S1_CONFIG MDMA1_S1_CONFIG
816#define MDMA_S1_X_COUNT MDMA1_S1_X_COUNT
817#define MDMA_S1_X_MODIFY MDMA1_S1_X_MODIFY
818#define MDMA_S1_Y_COUNT MDMA1_S1_Y_COUNT
819#define MDMA_S1_Y_MODIFY MDMA1_S1_Y_MODIFY
820#define MDMA_S1_CURR_DESC_PTR MDMA1_S1_CURR_DESC_PTR
821#define MDMA_S1_CURR_ADDR MDMA1_S1_CURR_ADDR
822#define MDMA_S1_IRQ_STATUS MDMA1_S1_IRQ_STATUS
823#define MDMA_S1_PERIPHERAL_MAP MDMA1_S1_PERIPHERAL_MAP
824#define MDMA_S1_CURR_X_COUNT MDMA1_S1_CURR_X_COUNT
825#define MDMA_S1_CURR_Y_COUNT MDMA1_S1_CURR_Y_COUNT
826 763
827/* Internal Memory DMA Registers (0xFFC0_1800 - 0xFFC0_19FF) */ 764/* Internal Memory DMA Registers (0xFFC0_1800 - 0xFFC0_19FF) */
828#define IMDMA_D0_CONFIG 0xFFC01808 /*IMDMA Stream 0 Destination Configuration */ 765#define IMDMA_D0_CONFIG 0xFFC01808 /*IMDMA Stream 0 Destination Configuration */
@@ -927,83 +864,6 @@
927#define IWR_ENABLE(x) (1 << (x)) /* Wakeup Enable Peripheral #x */ 864#define IWR_ENABLE(x) (1 << (x)) /* Wakeup Enable Peripheral #x */
928#define IWR_DISABLE(x) (0xFFFFFFFF ^ (1 << (x))) /* Wakeup Disable Peripheral #x */ 865#define IWR_DISABLE(x) (0xFFFFFFFF ^ (1 << (x))) /* Wakeup Disable Peripheral #x */
929 866
930/* ***************************** UART CONTROLLER MASKS ********************** */
931
932/* UART_LCR Register */
933
934#define DLAB 0x80
935#define SB 0x40
936#define STP 0x20
937#define EPS 0x10
938#define PEN 0x08
939#define STB 0x04
940#define WLS(x) ((x-5) & 0x03)
941
942#define DLAB_P 0x07
943#define SB_P 0x06
944#define STP_P 0x05
945#define EPS_P 0x04
946#define PEN_P 0x03
947#define STB_P 0x02
948#define WLS_P1 0x01
949#define WLS_P0 0x00
950
951/* UART_MCR Register */
952#define LOOP_ENA 0x10
953#define LOOP_ENA_P 0x04
954
955/* UART_LSR Register */
956#define TEMT 0x40
957#define THRE 0x20
958#define BI 0x10
959#define FE 0x08
960#define PE 0x04
961#define OE 0x02
962#define DR 0x01
963
964#define TEMP_P 0x06
965#define THRE_P 0x05
966#define BI_P 0x04
967#define FE_P 0x03
968#define PE_P 0x02
969#define OE_P 0x01
970#define DR_P 0x00
971
972/* UART_IER Register */
973#define ELSI 0x04
974#define ETBEI 0x02
975#define ERBFI 0x01
976
977#define ELSI_P 0x02
978#define ETBEI_P 0x01
979#define ERBFI_P 0x00
980
981/* UART_IIR Register */
982#define STATUS(x) ((x << 1) & 0x06)
983#define NINT 0x01
984#define STATUS_P1 0x02
985#define STATUS_P0 0x01
986#define NINT_P 0x00
987#define IIR_TX_READY 0x02 /* UART_THR empty */
988#define IIR_RX_READY 0x04 /* Receive data ready */
989#define IIR_LINE_CHANGE 0x06 /* Receive line status */
990#define IIR_STATUS 0x06
991
992/* UART_GCTL Register */
993#define FFE 0x20
994#define FPE 0x10
995#define RPOLC 0x08
996#define TPOLC 0x04
997#define IREN 0x02
998#define UCEN 0x01
999
1000#define FFE_P 0x05
1001#define FPE_P 0x04
1002#define RPOLC_P 0x03
1003#define TPOLC_P 0x02
1004#define IREN_P 0x01
1005#define UCEN_P 0x00
1006
1007/* ********* PARALLEL PERIPHERAL INTERFACE (PPI) MASKS **************** */ 867/* ********* PARALLEL PERIPHERAL INTERFACE (PPI) MASKS **************** */
1008 868
1009/* PPI_CONTROL Masks */ 869/* PPI_CONTROL Masks */
@@ -1230,44 +1090,6 @@
1230#define ERR_TYP_P0 0x0E 1090#define ERR_TYP_P0 0x0E
1231#define ERR_TYP_P1 0x0F 1091#define ERR_TYP_P1 0x0F
1232 1092
1233/*/ ****************** PROGRAMMABLE FLAG MASKS ********************* */
1234
1235/* General Purpose IO (0xFFC00700 - 0xFFC007FF) Masks */
1236#define PF0 0x0001
1237#define PF1 0x0002
1238#define PF2 0x0004
1239#define PF3 0x0008
1240#define PF4 0x0010
1241#define PF5 0x0020
1242#define PF6 0x0040
1243#define PF7 0x0080
1244#define PF8 0x0100
1245#define PF9 0x0200
1246#define PF10 0x0400
1247#define PF11 0x0800
1248#define PF12 0x1000
1249#define PF13 0x2000
1250#define PF14 0x4000
1251#define PF15 0x8000
1252
1253/* General Purpose IO (0xFFC00700 - 0xFFC007FF) BIT POSITIONS */
1254#define PF0_P 0
1255#define PF1_P 1
1256#define PF2_P 2
1257#define PF3_P 3
1258#define PF4_P 4
1259#define PF5_P 5
1260#define PF6_P 6
1261#define PF7_P 7
1262#define PF8_P 8
1263#define PF9_P 9
1264#define PF10_P 10
1265#define PF11_P 11
1266#define PF12_P 12
1267#define PF13_P 13
1268#define PF14_P 14
1269#define PF15_P 15
1270
1271/* ********************* ASYNCHRONOUS MEMORY CONTROLLER MASKS ************* */ 1093/* ********************* ASYNCHRONOUS MEMORY CONTROLLER MASKS ************* */
1272 1094
1273/* AMGCTL Masks */ 1095/* AMGCTL Masks */
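
Two consequences of the renames in this file: the DMA block at 0xFFC00xxx (old "DMA2") is now controller 0 (DMAC0_TC_*), and the generic MDMA_D0/S0/D1/S1 streams land on that block's mem-DMA registers (old MDMA2_*), while the old MDMA1_* bank becomes streams 2/3 — so the default streams change physical controller. A register-level memcpy sketch on stream 0 with the new names; DMAEN, WNR, WDSIZE_8 and DMA_DONE are assumed to be the standard Blackfin DMA config/status masks, not something this patch defines:

    /* Hedged sketch: stop-mode (descriptor-less) byte copy on mem-DMA stream 0 */
    static void mdma0_copy(void *dst, const void *src, unsigned short len)
    {
        bfin_write_MDMA_S0_START_ADDR((unsigned long)src);
        bfin_write_MDMA_S0_X_COUNT(len);
        bfin_write_MDMA_S0_X_MODIFY(1);          /* byte-wide, stride 1 */
        bfin_write_MDMA_D0_START_ADDR((unsigned long)dst);
        bfin_write_MDMA_D0_X_COUNT(len);
        bfin_write_MDMA_D0_X_MODIFY(1);

        bfin_write_MDMA_S0_CONFIG(DMAEN | WDSIZE_8);
        bfin_write_MDMA_D0_CONFIG(DMAEN | WNR | WDSIZE_8);
        SSYNC();

        /* DMA_DONE latches in IRQ_STATUS even with interrupts disabled */
        while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE))
            continue;
        bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE); /* status bits are W1C */
    }
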
diff --git a/arch/blackfin/mach-bf561/include/mach/gpio.h b/arch/blackfin/mach-bf561/include/mach/gpio.h
index 4f8aa5d08802..57d5eab59faf 100644
--- a/arch/blackfin/mach-bf561/include/mach/gpio.h
+++ b/arch/blackfin/mach-bf561/include/mach/gpio.h
@@ -62,4 +62,6 @@
62#define PORT_FIO1 GPIO_16 62#define PORT_FIO1 GPIO_16
63#define PORT_FIO2 GPIO_32 63#define PORT_FIO2 GPIO_32
64 64
65#include <mach-common/ports-f.h>
66
65#endif /* _MACH_GPIO_H_ */ 67#endif /* _MACH_GPIO_H_ */
diff --git a/arch/blackfin/mach-bf561/include/mach/mem_map.h b/arch/blackfin/mach-bf561/include/mach/mem_map.h
index 5b96ea549a04..4cc91995f781 100644
--- a/arch/blackfin/mach-bf561/include/mach/mem_map.h
+++ b/arch/blackfin/mach-bf561/include/mach/mem_map.h
@@ -106,7 +106,7 @@
106#define COREA_L1_SCRATCH_START 0xFFB00000 106#define COREA_L1_SCRATCH_START 0xFFB00000
107#define COREB_L1_SCRATCH_START 0xFF700000 107#define COREB_L1_SCRATCH_START 0xFF700000
108 108
109#ifdef __ASSEMBLY__ 109#ifdef CONFIG_SMP
110 110
111/* 111/*
112 * The following macros both return the address of the PDA for the 112 * The following macros both return the address of the PDA for the
@@ -121,8 +121,7 @@
121 * is allowed to use the specified Dreg for determining the PDA 121 * is allowed to use the specified Dreg for determining the PDA
122 * address to be returned into Preg. 122 * address to be returned into Preg.
123 */ 123 */
124#ifdef CONFIG_SMP 124# define GET_PDA_SAFE(preg) \
125#define GET_PDA_SAFE(preg) \
126 preg.l = lo(DSPID); \ 125 preg.l = lo(DSPID); \
127 preg.h = hi(DSPID); \ 126 preg.h = hi(DSPID); \
128 preg = [preg]; \ 127 preg = [preg]; \
@@ -158,7 +157,7 @@
158 preg = [preg]; \ 157 preg = [preg]; \
1594: 1584:
160 159
161#define GET_PDA(preg, dreg) \ 160# define GET_PDA(preg, dreg) \
162 preg.l = lo(DSPID); \ 161 preg.l = lo(DSPID); \
163 preg.h = hi(DSPID); \ 162 preg.h = hi(DSPID); \
164 dreg = [preg]; \ 163 dreg = [preg]; \
@@ -169,13 +168,17 @@
169 preg = [preg]; \ 168 preg = [preg]; \
1701: \ 1691: \
171 170
172#define GET_CPUID(preg, dreg) \ 171# define GET_CPUID(preg, dreg) \
173 preg.l = lo(DSPID); \ 172 preg.l = lo(DSPID); \
174 preg.h = hi(DSPID); \ 173 preg.h = hi(DSPID); \
175 dreg = [preg]; \ 174 dreg = [preg]; \
176 dreg = ROT dreg BY -1; \ 175 dreg = ROT dreg BY -1; \
177 dreg = CC; 176 dreg = CC;
178 177
178# ifndef __ASSEMBLY__
179
180# include <asm/processor.h>
181
179static inline unsigned long get_l1_scratch_start_cpu(int cpu) 182static inline unsigned long get_l1_scratch_start_cpu(int cpu)
180{ 183{
181 return cpu ? COREB_L1_SCRATCH_START : COREA_L1_SCRATCH_START; 184 return cpu ? COREB_L1_SCRATCH_START : COREA_L1_SCRATCH_START;
@@ -210,8 +213,7 @@ static inline unsigned long get_l1_data_b_start(void)
210 return get_l1_data_b_start_cpu(blackfin_core_id()); 213 return get_l1_data_b_start_cpu(blackfin_core_id());
211} 214}
212 215
216# endif /* __ASSEMBLY__ */
213#endif /* CONFIG_SMP */ 217#endif /* CONFIG_SMP */
214 218
215#endif /* __ASSEMBLY__ */
216
217#endif 219#endif
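
The mem_map.h reshuffle above changes the guard nesting so that the assembly GET_PDA* macros and the C inline helpers both sit under a single CONFIG_SMP block, with only the C parts fenced off from assembly. The resulting file shape, details elided:

    #ifdef CONFIG_SMP
    # define GET_PDA_SAFE(preg)  /* asm macro, usable from .S files */ ...
    # define GET_PDA(preg, dreg) ...
    # define GET_CPUID(preg, dreg) ...
    # ifndef __ASSEMBLY__
    #  include <asm/processor.h>
    static inline unsigned long get_l1_scratch_start_cpu(int cpu) { /* ... */ }
    /* ... remaining C-only helpers ... */
    # endif /* __ASSEMBLY__ */
    #endif /* CONFIG_SMP */
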
diff --git a/arch/blackfin/mach-bf561/include/mach/pll.h b/arch/blackfin/mach-bf561/include/mach/pll.h
index f2b1fbdb8e72..7977db2f1c12 100644
--- a/arch/blackfin/mach-bf561/include/mach/pll.h
+++ b/arch/blackfin/mach-bf561/include/mach/pll.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2005-2009 Analog Devices Inc. 2 * Copyright 2005-2010 Analog Devices Inc.
3 * 3 *
4 * Licensed under the GPL-2 or later. 4 * Licensed under the GPL-2 or later.
5 */ 5 */
@@ -7,57 +7,48 @@
7#ifndef _MACH_PLL_H 7#ifndef _MACH_PLL_H
8#define _MACH_PLL_H 8#define _MACH_PLL_H
9 9
10#ifndef __ASSEMBLY__
11
12#ifdef CONFIG_SMP
13
10#include <asm/blackfin.h> 14#include <asm/blackfin.h>
11#include <asm/irqflags.h> 15#include <asm/irqflags.h>
16#include <mach/irq.h>
17
18#define SUPPLE_0_WAKEUP ((IRQ_SUPPLE_0 - (IRQ_CORETMR + 1)) % 32)
12 19
13/* Writing to PLL_CTL initiates a PLL relock sequence. */ 20static inline void
14static __inline__ void bfin_write_PLL_CTL(unsigned int val) 21bfin_iwr_restore(unsigned long iwr0, unsigned long iwr1, unsigned long iwr2)
15{ 22{
16 unsigned long flags, iwr0, iwr1; 23 unsigned long SICA_SICB_OFF = ((bfin_read_DSPID() & 0xff) ? 0x1000 : 0);
17 24
18 if (val == bfin_read_PLL_CTL()) 25 bfin_write32(SIC_IWR0 + SICA_SICB_OFF, iwr0);
19 return; 26 bfin_write32(SIC_IWR1 + SICA_SICB_OFF, iwr1);
20
21 flags = hard_local_irq_save();
22 /* Enable the PLL Wakeup bit in SIC IWR */
23 iwr0 = bfin_read32(SICA_IWR0);
24 iwr1 = bfin_read32(SICA_IWR1);
25 /* Only allow PPL Wakeup) */
26 bfin_write32(SICA_IWR0, IWR_ENABLE(0));
27 bfin_write32(SICA_IWR1, 0);
28
29 bfin_write16(PLL_CTL, val);
30 SSYNC();
31 asm("IDLE;");
32
33 bfin_write32(SICA_IWR0, iwr0);
34 bfin_write32(SICA_IWR1, iwr1);
35 hard_local_irq_restore(flags);
36} 27}
28#define bfin_iwr_restore bfin_iwr_restore
37 29
38/* Writing to VR_CTL initiates a PLL relock sequence. */ 30static inline void
39static __inline__ void bfin_write_VR_CTL(unsigned int val) 31bfin_iwr_save(unsigned long niwr0, unsigned long niwr1, unsigned long niwr2,
32 unsigned long *iwr0, unsigned long *iwr1, unsigned long *iwr2)
40{ 33{
41 unsigned long flags, iwr0, iwr1; 34 unsigned long SICA_SICB_OFF = ((bfin_read_DSPID() & 0xff) ? 0x1000 : 0);
42 35
43 if (val == bfin_read_VR_CTL()) 36 *iwr0 = bfin_read32(SIC_IWR0 + SICA_SICB_OFF);
44 return; 37 *iwr1 = bfin_read32(SIC_IWR1 + SICA_SICB_OFF);
45 38 bfin_iwr_restore(niwr0, niwr1, niwr2);
46 flags = hard_local_irq_save();
47 /* Enable the PLL Wakeup bit in SIC IWR */
48 iwr0 = bfin_read32(SICA_IWR0);
49 iwr1 = bfin_read32(SICA_IWR1);
50 /* Only allow PPL Wakeup) */
51 bfin_write32(SICA_IWR0, IWR_ENABLE(0));
52 bfin_write32(SICA_IWR1, 0);
53
54 bfin_write16(VR_CTL, val);
55 SSYNC();
56 asm("IDLE;");
57
58 bfin_write32(SICA_IWR0, iwr0);
59 bfin_write32(SICA_IWR1, iwr1);
60 hard_local_irq_restore(flags);
61} 39}
40#define bfin_iwr_save bfin_iwr_save
41
42static inline void
43bfin_iwr_set_sup0(unsigned long *iwr0, unsigned long *iwr1, unsigned long *iwr2)
44{
45 bfin_iwr_save(0, IWR_ENABLE(SUPPLE_0_WAKEUP), 0, iwr0, iwr1, iwr2);
46}
47
48#endif
49
50#endif
51
52#include <mach-common/pll.h>
62 53
63#endif /* _MACH_PLL_H */ 54#endif
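
The open-coded PLL_CTL/VR_CTL writers deleted above are replaced by three small IWR hooks plus an include of <mach-common/pll.h>. Presumably the common header re-implements the relock sequence on top of those hooks; a sketch of what that factored-out writer likely looks like, reconstructed from the code being deleted here rather than copied from the mach-common file:

    static inline void bfin_write_PLL_CTL(unsigned int val)
    {
        unsigned long flags, iwr0, iwr1, iwr2;

        if (val == bfin_read_PLL_CTL())
            return;

        flags = hard_local_irq_save();
        /* mask all wakeups except the one that ends the IDLE below
         * (SUPPLE_0 on SMP BF561, per bfin_iwr_set_sup0 above) */
        bfin_iwr_set_sup0(&iwr0, &iwr1, &iwr2);

        bfin_write16(PLL_CTL, val);
        SSYNC();
        asm("IDLE;");            /* sleep until the PLL relocks */

        bfin_iwr_restore(iwr0, iwr1, iwr2);
        hard_local_irq_restore(flags);
    }
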
diff --git a/arch/blackfin/mach-bf561/include/mach/smp.h b/arch/blackfin/mach-bf561/include/mach/smp.h
index 2c8c514dd386..346c60589be6 100644
--- a/arch/blackfin/mach-bf561/include/mach/smp.h
+++ b/arch/blackfin/mach-bf561/include/mach/smp.h
@@ -7,6 +7,8 @@
7#ifndef _MACH_BF561_SMP 7#ifndef _MACH_BF561_SMP
8#define _MACH_BF561_SMP 8#define _MACH_BF561_SMP
9 9
10/* This header has to stand alone to avoid circular deps */
11
10struct task_struct; 12struct task_struct;
11 13
12void platform_init_cpus(void); 14void platform_init_cpus(void);
@@ -17,13 +19,13 @@ int platform_boot_secondary(unsigned int cpu, struct task_struct *idle);
17 19
18void platform_secondary_init(unsigned int cpu); 20void platform_secondary_init(unsigned int cpu);
19 21
20void platform_request_ipi(int (*handler)(int, void *)); 22void platform_request_ipi(int irq, /*irq_handler_t*/ void *handler);
21 23
22void platform_send_ipi(cpumask_t callmap); 24void platform_send_ipi(cpumask_t callmap, int irq);
23 25
24void platform_send_ipi_cpu(unsigned int cpu); 26void platform_send_ipi_cpu(unsigned int cpu, int irq);
25 27
26void platform_clear_ipi(unsigned int cpu); 28void platform_clear_ipi(unsigned int cpu, int irq);
27 29
28void bfin_local_timer_setup(void); 30void bfin_local_timer_setup(void);
29 31
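
Every IPI entry point in this header now names the supplemental interrupt explicitly instead of hard-coding IRQ_SUPPLE_0. Caller side, as the mach-common code later in this patch uses it (sketch):

    /* registration, typically from platform SMP bring-up */
    platform_request_ipi(IRQ_SUPPLE_0, ipi_handler_int0);  /* reschedule kick  */
    platform_request_ipi(IRQ_SUPPLE_1, ipi_handler_int1);  /* message-carrying */

    /* cheap reschedule: no queueing, just the interrupt itself */
    platform_send_ipi_cpu(cpu, IRQ_SUPPLE_0);

    /* call-function and cpu-stop messages travel over SUPPLE_1 */
    platform_send_ipi(callmap, IRQ_SUPPLE_1);
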
diff --git a/arch/blackfin/mach-bf561/smp.c b/arch/blackfin/mach-bf561/smp.c
index f540ed1257d6..1074a7ef81c7 100644
--- a/arch/blackfin/mach-bf561/smp.c
+++ b/arch/blackfin/mach-bf561/smp.c
@@ -86,12 +86,12 @@ int __cpuinit platform_boot_secondary(unsigned int cpu, struct task_struct *idle
86 86
87 spin_lock(&boot_lock); 87 spin_lock(&boot_lock);
88 88
89 if ((bfin_read_SIC_SYSCR() & COREB_SRAM_INIT) == 0) { 89 if ((bfin_read_SYSCR() & COREB_SRAM_INIT) == 0) {
90 /* CoreB already running, sending ipi to wakeup it */ 90 /* CoreB already running, sending ipi to wakeup it */
91 platform_send_ipi_cpu(cpu, IRQ_SUPPLE_0); 91 platform_send_ipi_cpu(cpu, IRQ_SUPPLE_0);
92 } else { 92 } else {
93 /* Kick CoreB, which should start execution from CORE_SRAM_BASE. */ 93 /* Kick CoreB, which should start execution from CORE_SRAM_BASE. */
94 bfin_write_SIC_SYSCR(bfin_read_SIC_SYSCR() & ~COREB_SRAM_INIT); 94 bfin_write_SYSCR(bfin_read_SYSCR() & ~COREB_SRAM_INIT);
95 SSYNC(); 95 SSYNC();
96 } 96 }
97 97
@@ -111,41 +111,46 @@ int __cpuinit platform_boot_secondary(unsigned int cpu, struct task_struct *idle
111 panic("CPU%u: processor failed to boot\n", cpu); 111 panic("CPU%u: processor failed to boot\n", cpu);
112} 112}
113 113
114void __init platform_request_ipi(irq_handler_t handler) 114static const char supple0[] = "IRQ_SUPPLE_0";
115static const char supple1[] = "IRQ_SUPPLE_1";
116void __init platform_request_ipi(int irq, void *handler)
115{ 117{
116 int ret; 118 int ret;
119 const char *name = (irq == IRQ_SUPPLE_0) ? supple0 : supple1;
117 120
118 ret = request_irq(IRQ_SUPPLE_0, handler, IRQF_DISABLED, 121 ret = request_irq(irq, handler, IRQF_DISABLED | IRQF_PERCPU, name, handler);
119 "Supplemental Interrupt0", handler);
120 if (ret) 122 if (ret)
121 panic("Cannot request supplemental interrupt 0 for IPI service"); 123 panic("Cannot request %s for IPI service", name);
122} 124}
123 125
124void platform_send_ipi(cpumask_t callmap) 126void platform_send_ipi(cpumask_t callmap, int irq)
125{ 127{
126 unsigned int cpu; 128 unsigned int cpu;
129 int offset = (irq == IRQ_SUPPLE_0) ? 6 : 8;
127 130
128 for_each_cpu_mask(cpu, callmap) { 131 for_each_cpu_mask(cpu, callmap) {
129 BUG_ON(cpu >= 2); 132 BUG_ON(cpu >= 2);
130 SSYNC(); 133 SSYNC();
131 bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | (1 << (6 + cpu))); 134 bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | (1 << (offset + cpu)));
132 SSYNC(); 135 SSYNC();
133 } 136 }
134} 137}
135 138
136void platform_send_ipi_cpu(unsigned int cpu) 139void platform_send_ipi_cpu(unsigned int cpu, int irq)
137{ 140{
141 int offset = (irq == IRQ_SUPPLE_0) ? 6 : 8;
138 BUG_ON(cpu >= 2); 142 BUG_ON(cpu >= 2);
139 SSYNC(); 143 SSYNC();
140 bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | (1 << (6 + cpu))); 144 bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | (1 << (offset + cpu)));
141 SSYNC(); 145 SSYNC();
142} 146}
143 147
144void platform_clear_ipi(unsigned int cpu) 148void platform_clear_ipi(unsigned int cpu, int irq)
145{ 149{
150 int offset = (irq == IRQ_SUPPLE_0) ? 10 : 12;
146 BUG_ON(cpu >= 2); 151 BUG_ON(cpu >= 2);
147 SSYNC(); 152 SSYNC();
148 bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | (1 << (10 + cpu))); 153 bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | (1 << (offset + cpu)));
149 SSYNC(); 154 SSYNC();
150} 155}
151 156
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index 2ca915ee181f..bc08c98d008d 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -615,7 +615,7 @@ ENTRY(_system_call)
615#ifdef CONFIG_IPIPE 615#ifdef CONFIG_IPIPE
616 r0 = sp; 616 r0 = sp;
617 SP += -12; 617 SP += -12;
618 call ___ipipe_syscall_root; 618 pseudo_long_call ___ipipe_syscall_root, p0;
619 SP += 12; 619 SP += 12;
620 cc = r0 == 1; 620 cc = r0 == 1;
621 if cc jump .Lsyscall_really_exit; 621 if cc jump .Lsyscall_really_exit;
@@ -692,7 +692,7 @@ ENTRY(_system_call)
692 [--sp] = reti; 692 [--sp] = reti;
693 SP += 4; /* don't merge with next insn to keep the pattern obvious */ 693 SP += 4; /* don't merge with next insn to keep the pattern obvious */
694 SP += -12; 694 SP += -12;
695 call ___ipipe_sync_root; 695 pseudo_long_call ___ipipe_sync_root, p4;
696 SP += 12; 696 SP += 12;
697 jump .Lresume_userspace_1; 697 jump .Lresume_userspace_1;
698.Lsyscall_no_irqsync: 698.Lsyscall_no_irqsync:
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index da7e3c63746b..a604f19d8dc3 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -866,7 +866,6 @@ static void bfin_gpio_unmask_irq(unsigned int irq)
866 u32 pintbit = PINT_BIT(pint_val); 866 u32 pintbit = PINT_BIT(pint_val);
867 u32 bank = PINT_2_BANK(pint_val); 867 u32 bank = PINT_2_BANK(pint_val);
868 868
869 pint[bank]->request = pintbit;
870 pint[bank]->mask_set = pintbit; 869 pint[bank]->mask_set = pintbit;
871} 870}
872 871
diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
index 80884b136a0c..42fa87e8375c 100644
--- a/arch/blackfin/mach-common/pm.c
+++ b/arch/blackfin/mach-common/pm.c
@@ -23,9 +23,6 @@
23 23
24void bfin_pm_suspend_standby_enter(void) 24void bfin_pm_suspend_standby_enter(void)
25{ 25{
26 unsigned long flags;
27
28 flags = hard_local_irq_save();
29 bfin_pm_standby_setup(); 26 bfin_pm_standby_setup();
30 27
31#ifdef CONFIG_PM_BFIN_SLEEP_DEEPER 28#ifdef CONFIG_PM_BFIN_SLEEP_DEEPER
@@ -55,8 +52,6 @@ void bfin_pm_suspend_standby_enter(void)
55#else 52#else
56 bfin_write_SIC_IWR(IWR_DISABLE_ALL); 53 bfin_write_SIC_IWR(IWR_DISABLE_ALL);
57#endif 54#endif
58
59 hard_local_irq_restore(flags);
60} 55}
61 56
62int bf53x_suspend_l1_mem(unsigned char *memptr) 57int bf53x_suspend_l1_mem(unsigned char *memptr)
@@ -127,7 +122,6 @@ static void flushinv_all_dcache(void)
127 122
128int bfin_pm_suspend_mem_enter(void) 123int bfin_pm_suspend_mem_enter(void)
129{ 124{
130 unsigned long flags;
131 int wakeup, ret; 125 int wakeup, ret;
132 126
133 unsigned char *memptr = kmalloc(L1_CODE_LENGTH + L1_DATA_A_LENGTH 127 unsigned char *memptr = kmalloc(L1_CODE_LENGTH + L1_DATA_A_LENGTH
@@ -149,12 +143,9 @@ int bfin_pm_suspend_mem_enter(void)
149 wakeup |= GPWE; 143 wakeup |= GPWE;
150#endif 144#endif
151 145
152 flags = hard_local_irq_save();
153
154 ret = blackfin_dma_suspend(); 146 ret = blackfin_dma_suspend();
155 147
156 if (ret) { 148 if (ret) {
157 hard_local_irq_restore(flags);
158 kfree(memptr); 149 kfree(memptr);
159 return ret; 150 return ret;
160 } 151 }
@@ -178,7 +169,6 @@ int bfin_pm_suspend_mem_enter(void)
178 bfin_gpio_pm_hibernate_restore(); 169 bfin_gpio_pm_hibernate_restore();
179 blackfin_dma_resume(); 170 blackfin_dma_resume();
180 171
181 hard_local_irq_restore(flags);
182 kfree(memptr); 172 kfree(memptr);
183 173
184 return 0; 174 return 0;
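
With the hard_local_irq_save()/restore() pairs stripped out of both suspend helpers, interrupt masking presumably moves up to the common suspend path that invokes them. Under that assumption, the call site ends up looking roughly like:

    unsigned long flags = hard_local_irq_save();
    bfin_pm_suspend_standby_enter();
    hard_local_irq_restore(flags);
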
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index a17107a700d5..9f251406a76a 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -19,6 +19,7 @@
19#include <linux/mm.h> 19#include <linux/mm.h>
20#include <linux/cpu.h> 20#include <linux/cpu.h>
21#include <linux/smp.h> 21#include <linux/smp.h>
22#include <linux/cpumask.h>
22#include <linux/seq_file.h> 23#include <linux/seq_file.h>
23#include <linux/irq.h> 24#include <linux/irq.h>
24#include <linux/slab.h> 25#include <linux/slab.h>
@@ -43,12 +44,6 @@ void __cpuinitdata *init_retx_coreb, *init_saved_retx_coreb,
43 *init_saved_seqstat_coreb, *init_saved_icplb_fault_addr_coreb, 44 *init_saved_seqstat_coreb, *init_saved_icplb_fault_addr_coreb,
44 *init_saved_dcplb_fault_addr_coreb; 45 *init_saved_dcplb_fault_addr_coreb;
45 46
46cpumask_t cpu_possible_map;
47EXPORT_SYMBOL(cpu_possible_map);
48
49cpumask_t cpu_online_map;
50EXPORT_SYMBOL(cpu_online_map);
51
52#define BFIN_IPI_RESCHEDULE 0 47#define BFIN_IPI_RESCHEDULE 0
53#define BFIN_IPI_CALL_FUNC 1 48#define BFIN_IPI_CALL_FUNC 1
54#define BFIN_IPI_CPU_STOP 2 49#define BFIN_IPI_CPU_STOP 2
@@ -65,8 +60,7 @@ struct smp_call_struct {
65 void (*func)(void *info); 60 void (*func)(void *info);
66 void *info; 61 void *info;
67 int wait; 62 int wait;
68 cpumask_t pending; 63 cpumask_t *waitmask;
69 cpumask_t waitmask;
70}; 64};
71 65
72static struct blackfin_flush_data smp_flush_data; 66static struct blackfin_flush_data smp_flush_data;
@@ -74,15 +68,19 @@ static struct blackfin_flush_data smp_flush_data;
74static DEFINE_SPINLOCK(stop_lock); 68static DEFINE_SPINLOCK(stop_lock);
75 69
76struct ipi_message { 70struct ipi_message {
77 struct list_head list;
78 unsigned long type; 71 unsigned long type;
79 struct smp_call_struct call_struct; 72 struct smp_call_struct call_struct;
80}; 73};
81 74
75/* A magic number - stress test shows this is safe for common cases */
76#define BFIN_IPI_MSGQ_LEN 5
77
78/* Simple FIFO buffer, overflow leads to panic */
82struct ipi_message_queue { 79struct ipi_message_queue {
83 struct list_head head;
84 spinlock_t lock; 80 spinlock_t lock;
85 unsigned long count; 81 unsigned long count;
82 unsigned long head; /* head of the queue */
83 struct ipi_message ipi_message[BFIN_IPI_MSGQ_LEN];
86}; 84};
87 85
88static DEFINE_PER_CPU(struct ipi_message_queue, ipi_msg_queue); 86static DEFINE_PER_CPU(struct ipi_message_queue, ipi_msg_queue);
@@ -121,7 +119,6 @@ static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
121 func = msg->call_struct.func; 119 func = msg->call_struct.func;
122 info = msg->call_struct.info; 120 info = msg->call_struct.info;
123 wait = msg->call_struct.wait; 121 wait = msg->call_struct.wait;
124 cpu_clear(cpu, msg->call_struct.pending);
125 func(info); 122 func(info);
126 if (wait) { 123 if (wait) {
127#ifdef __ARCH_SYNC_CORE_DCACHE 124#ifdef __ARCH_SYNC_CORE_DCACHE
@@ -132,51 +129,57 @@ static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
132 */ 129 */
133 resync_core_dcache(); 130 resync_core_dcache();
134#endif 131#endif
135 cpu_clear(cpu, msg->call_struct.waitmask); 132 cpu_clear(cpu, *msg->call_struct.waitmask);
136 } else 133 }
137 kfree(msg); 134}
135
136/* Use IRQ_SUPPLE_0 to request reschedule.
137 * When returning from interrupt to user space,
138 * there is chance to reschedule */
139static irqreturn_t ipi_handler_int0(int irq, void *dev_instance)
140{
141 unsigned int cpu = smp_processor_id();
142
143 platform_clear_ipi(cpu, IRQ_SUPPLE_0);
144 return IRQ_HANDLED;
138} 145}
139 146
140static irqreturn_t ipi_handler(int irq, void *dev_instance) 147static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
141{ 148{
142 struct ipi_message *msg; 149 struct ipi_message *msg;
143 struct ipi_message_queue *msg_queue; 150 struct ipi_message_queue *msg_queue;
144 unsigned int cpu = smp_processor_id(); 151 unsigned int cpu = smp_processor_id();
152 unsigned long flags;
145 153
146 platform_clear_ipi(cpu); 154 platform_clear_ipi(cpu, IRQ_SUPPLE_1);
147 155
148 msg_queue = &__get_cpu_var(ipi_msg_queue); 156 msg_queue = &__get_cpu_var(ipi_msg_queue);
149 msg_queue->count++;
150 157
151 spin_lock(&msg_queue->lock); 158 spin_lock_irqsave(&msg_queue->lock, flags);
152 while (!list_empty(&msg_queue->head)) { 159
153 msg = list_entry(msg_queue->head.next, typeof(*msg), list); 160 while (msg_queue->count) {
154 list_del(&msg->list); 161 msg = &msg_queue->ipi_message[msg_queue->head];
155 switch (msg->type) { 162 switch (msg->type) {
156 case BFIN_IPI_RESCHEDULE:
157 /* That's the easiest one; leave it to
158 * return_from_int. */
159 kfree(msg);
160 break;
161 case BFIN_IPI_CALL_FUNC: 163 case BFIN_IPI_CALL_FUNC:
162 spin_unlock(&msg_queue->lock); 164 spin_unlock_irqrestore(&msg_queue->lock, flags);
163 ipi_call_function(cpu, msg); 165 ipi_call_function(cpu, msg);
164 spin_lock(&msg_queue->lock); 166 spin_lock_irqsave(&msg_queue->lock, flags);
165 break; 167 break;
166 case BFIN_IPI_CPU_STOP: 168 case BFIN_IPI_CPU_STOP:
167 spin_unlock(&msg_queue->lock); 169 spin_unlock_irqrestore(&msg_queue->lock, flags);
168 ipi_cpu_stop(cpu); 170 ipi_cpu_stop(cpu);
169 spin_lock(&msg_queue->lock); 171 spin_lock_irqsave(&msg_queue->lock, flags);
170 kfree(msg);
171 break; 172 break;
172 default: 173 default:
173 printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%lx\n", 174 printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%lx\n",
174 cpu, msg->type); 175 cpu, msg->type);
175 kfree(msg);
176 break; 176 break;
177 } 177 }
178 msg_queue->head++;
179 msg_queue->head %= BFIN_IPI_MSGQ_LEN;
180 msg_queue->count--;
178 } 181 }
179 spin_unlock(&msg_queue->lock); 182 spin_unlock_irqrestore(&msg_queue->lock, flags);
180 return IRQ_HANDLED; 183 return IRQ_HANDLED;
181} 184}
182 185
@@ -186,48 +189,47 @@ static void ipi_queue_init(void)
186 struct ipi_message_queue *msg_queue; 189 struct ipi_message_queue *msg_queue;
187 for_each_possible_cpu(cpu) { 190 for_each_possible_cpu(cpu) {
188 msg_queue = &per_cpu(ipi_msg_queue, cpu); 191 msg_queue = &per_cpu(ipi_msg_queue, cpu);
189 INIT_LIST_HEAD(&msg_queue->head);
190 spin_lock_init(&msg_queue->lock); 192 spin_lock_init(&msg_queue->lock);
191 msg_queue->count = 0; 193 msg_queue->count = 0;
194 msg_queue->head = 0;
192 } 195 }
193} 196}
194 197
195int smp_call_function(void (*func)(void *info), void *info, int wait) 198static inline void smp_send_message(cpumask_t callmap, unsigned long type,
199 void (*func) (void *info), void *info, int wait)
196{ 200{
197 unsigned int cpu; 201 unsigned int cpu;
198 cpumask_t callmap;
199 unsigned long flags;
200 struct ipi_message_queue *msg_queue; 202 struct ipi_message_queue *msg_queue;
201 struct ipi_message *msg; 203 struct ipi_message *msg;
202 204 unsigned long flags, next_msg;
203 callmap = cpu_online_map; 205 cpumask_t waitmask = callmap; /* waitmask is shared by all cpus */
204 cpu_clear(smp_processor_id(), callmap);
205 if (cpus_empty(callmap))
206 return 0;
207
208 msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
209 if (!msg)
210 return -ENOMEM;
211 INIT_LIST_HEAD(&msg->list);
212 msg->call_struct.func = func;
213 msg->call_struct.info = info;
214 msg->call_struct.wait = wait;
215 msg->call_struct.pending = callmap;
216 msg->call_struct.waitmask = callmap;
217 msg->type = BFIN_IPI_CALL_FUNC;
218 206
219 for_each_cpu_mask(cpu, callmap) { 207 for_each_cpu_mask(cpu, callmap) {
220 msg_queue = &per_cpu(ipi_msg_queue, cpu); 208 msg_queue = &per_cpu(ipi_msg_queue, cpu);
221 spin_lock_irqsave(&msg_queue->lock, flags); 209 spin_lock_irqsave(&msg_queue->lock, flags);
222 list_add_tail(&msg->list, &msg_queue->head); 210 if (msg_queue->count < BFIN_IPI_MSGQ_LEN) {
211 next_msg = (msg_queue->head + msg_queue->count)
212 % BFIN_IPI_MSGQ_LEN;
213 msg = &msg_queue->ipi_message[next_msg];
214 msg->type = type;
215 if (type == BFIN_IPI_CALL_FUNC) {
216 msg->call_struct.func = func;
217 msg->call_struct.info = info;
218 msg->call_struct.wait = wait;
219 msg->call_struct.waitmask = &waitmask;
220 }
221 msg_queue->count++;
222 } else
223 panic("IPI message queue overflow\n");
223 spin_unlock_irqrestore(&msg_queue->lock, flags); 224 spin_unlock_irqrestore(&msg_queue->lock, flags);
224 platform_send_ipi_cpu(cpu); 225 platform_send_ipi_cpu(cpu, IRQ_SUPPLE_1);
225 } 226 }
227
226 if (wait) { 228 if (wait) {
227 while (!cpus_empty(msg->call_struct.waitmask)) 229 while (!cpus_empty(waitmask))
228 blackfin_dcache_invalidate_range( 230 blackfin_dcache_invalidate_range(
229 (unsigned long)(&msg->call_struct.waitmask), 231 (unsigned long)(&waitmask),
230 (unsigned long)(&msg->call_struct.waitmask)); 232 (unsigned long)(&waitmask));
231#ifdef __ARCH_SYNC_CORE_DCACHE 233#ifdef __ARCH_SYNC_CORE_DCACHE
232 /* 234 /*
233 * Invalidate D cache in case shared data was changed by 235 * Invalidate D cache in case shared data was changed by
@@ -235,8 +237,20 @@ int smp_call_function(void (*func)(void *info), void *info, int wait)
235 */ 237 */
236 resync_core_dcache(); 238 resync_core_dcache();
237#endif 239#endif
238 kfree(msg);
239 } 240 }
241}
242
243int smp_call_function(void (*func)(void *info), void *info, int wait)
244{
245 cpumask_t callmap;
246
247 callmap = cpu_online_map;
248 cpu_clear(smp_processor_id(), callmap);
249 if (cpus_empty(callmap))
250 return 0;
251
252 smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
253
240 return 0; 254 return 0;
241} 255}
242EXPORT_SYMBOL_GPL(smp_call_function); 256EXPORT_SYMBOL_GPL(smp_call_function);
@@ -246,100 +260,39 @@ int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
246{ 260{
247 unsigned int cpu = cpuid; 261 unsigned int cpu = cpuid;
248 cpumask_t callmap; 262 cpumask_t callmap;
249 unsigned long flags;
250 struct ipi_message_queue *msg_queue;
251 struct ipi_message *msg;
252 263
253 if (cpu_is_offline(cpu)) 264 if (cpu_is_offline(cpu))
254 return 0; 265 return 0;
255 cpus_clear(callmap); 266 cpus_clear(callmap);
256 cpu_set(cpu, callmap); 267 cpu_set(cpu, callmap);
257 268
258 msg = kmalloc(sizeof(*msg), GFP_ATOMIC); 269 smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
259 if (!msg)
260 return -ENOMEM;
261 INIT_LIST_HEAD(&msg->list);
262 msg->call_struct.func = func;
263 msg->call_struct.info = info;
264 msg->call_struct.wait = wait;
265 msg->call_struct.pending = callmap;
266 msg->call_struct.waitmask = callmap;
267 msg->type = BFIN_IPI_CALL_FUNC;
268
269 msg_queue = &per_cpu(ipi_msg_queue, cpu);
270 spin_lock_irqsave(&msg_queue->lock, flags);
271 list_add_tail(&msg->list, &msg_queue->head);
272 spin_unlock_irqrestore(&msg_queue->lock, flags);
273 platform_send_ipi_cpu(cpu);
274 270
275 if (wait) {
276 while (!cpus_empty(msg->call_struct.waitmask))
277 blackfin_dcache_invalidate_range(
278 (unsigned long)(&msg->call_struct.waitmask),
279 (unsigned long)(&msg->call_struct.waitmask));
280#ifdef __ARCH_SYNC_CORE_DCACHE
281 /*
282 * Invalidate D cache in case shared data was changed by
283 * other processors to ensure cache coherence.
284 */
285 resync_core_dcache();
286#endif
287 kfree(msg);
288 }
289 return 0; 271 return 0;
290} 272}
291EXPORT_SYMBOL_GPL(smp_call_function_single); 273EXPORT_SYMBOL_GPL(smp_call_function_single);
292 274
293void smp_send_reschedule(int cpu) 275void smp_send_reschedule(int cpu)
294{ 276{
295 unsigned long flags; 277 /* simply trigger an ipi */
296 struct ipi_message_queue *msg_queue;
297 struct ipi_message *msg;
298
299 if (cpu_is_offline(cpu)) 278 if (cpu_is_offline(cpu))
300 return; 279 return;
301 280 platform_send_ipi_cpu(cpu, IRQ_SUPPLE_0);
302 msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
303 if (!msg)
304 return;
305 INIT_LIST_HEAD(&msg->list);
306 msg->type = BFIN_IPI_RESCHEDULE;
307
308 msg_queue = &per_cpu(ipi_msg_queue, cpu);
309 spin_lock_irqsave(&msg_queue->lock, flags);
310 list_add_tail(&msg->list, &msg_queue->head);
311 spin_unlock_irqrestore(&msg_queue->lock, flags);
312 platform_send_ipi_cpu(cpu);
313 281
314 return; 282 return;
315} 283}
316 284
317void smp_send_stop(void) 285void smp_send_stop(void)
318{ 286{
319 unsigned int cpu;
320 cpumask_t callmap; 287 cpumask_t callmap;
321 unsigned long flags;
322 struct ipi_message_queue *msg_queue;
323 struct ipi_message *msg;
324 288
325 callmap = cpu_online_map; 289 callmap = cpu_online_map;
326 cpu_clear(smp_processor_id(), callmap); 290 cpu_clear(smp_processor_id(), callmap);
327 if (cpus_empty(callmap)) 291 if (cpus_empty(callmap))
328 return; 292 return;
329 293
330 msg = kzalloc(sizeof(*msg), GFP_ATOMIC); 294 smp_send_message(callmap, BFIN_IPI_CPU_STOP, NULL, NULL, 0);
331 if (!msg)
332 return;
333 INIT_LIST_HEAD(&msg->list);
334 msg->type = BFIN_IPI_CPU_STOP;
335 295
336 for_each_cpu_mask(cpu, callmap) {
337 msg_queue = &per_cpu(ipi_msg_queue, cpu);
338 spin_lock_irqsave(&msg_queue->lock, flags);
339 list_add_tail(&msg->list, &msg_queue->head);
340 spin_unlock_irqrestore(&msg_queue->lock, flags);
341 platform_send_ipi_cpu(cpu);
342 }
343 return; 296 return;
344} 297}
345 298
@@ -446,7 +399,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
446{ 399{
447 platform_prepare_cpus(max_cpus); 400 platform_prepare_cpus(max_cpus);
448 ipi_queue_init(); 401 ipi_queue_init();
449 platform_request_ipi(&ipi_handler); 402 platform_request_ipi(IRQ_SUPPLE_0, ipi_handler_int0);
403 platform_request_ipi(IRQ_SUPPLE_1, ipi_handler_int1);
450} 404}
451 405
452void __init smp_cpus_done(unsigned int max_cpus) 406void __init smp_cpus_done(unsigned int max_cpus)
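
[Editor's note: the smp.c hunks above replace the kmalloc'd, list-based IPI mailbox with a fixed per-CPU ring of BFIN_IPI_MSGQ_LEN slots and a stack-allocated shared waitmask, so IPIs no longer need GFP_ATOMIC allocations in interrupt context. Below is a minimal, self-contained sketch of that ring discipline; names such as `msgq` are illustrative, not the kernel's, and the spinlock/IRQ handling of the real code is omitted.]

#include <stdio.h>
#include <stdlib.h>

#define MSGQ_LEN 5   /* mirrors BFIN_IPI_MSGQ_LEN */

struct msg { unsigned long type; };

/* Fixed-slot FIFO: head indexes the oldest message, count how many are
 * queued; the producer writes slot (head + count) % LEN, the consumer
 * reads slot head. Overflow is fatal, as in the patch. */
struct msgq {
    unsigned long head, count;
    struct msg ring[MSGQ_LEN];
};

static void enqueue(struct msgq *q, unsigned long type)
{
    if (q->count >= MSGQ_LEN) {
        fprintf(stderr, "IPI message queue overflow\n");
        exit(1);
    }
    q->ring[(q->head + q->count) % MSGQ_LEN].type = type;
    q->count++;
}

static void drain(struct msgq *q)
{
    while (q->count) {
        printf("handle type %lu\n", q->ring[q->head].type);
        q->head = (q->head + 1) % MSGQ_LEN;
        q->count--;
    }
}

int main(void)
{
    struct msgq q = { 0 };

    enqueue(&q, 1);   /* BFIN_IPI_CALL_FUNC */
    enqueue(&q, 2);   /* BFIN_IPI_CPU_STOP */
    drain(&q);
    return 0;
}
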
diff --git a/arch/blackfin/mm/sram-alloc.c b/arch/blackfin/mm/sram-alloc.c
index 627e04b5ba9a..dfd304a4a3ea 100644
--- a/arch/blackfin/mm/sram-alloc.c
+++ b/arch/blackfin/mm/sram-alloc.c
@@ -704,18 +704,18 @@ int sram_free_with_lsl(const void *addr)
704{ 704{
705 struct sram_list_struct *lsl, **tmp; 705 struct sram_list_struct *lsl, **tmp;
706 struct mm_struct *mm = current->mm; 706 struct mm_struct *mm = current->mm;
707 int ret = -1;
707 708
708 for (tmp = &mm->context.sram_list; *tmp; tmp = &(*tmp)->next) 709 for (tmp = &mm->context.sram_list; *tmp; tmp = &(*tmp)->next)
709 if ((*tmp)->addr == addr) 710 if ((*tmp)->addr == addr) {
710 goto found; 711 lsl = *tmp;
711 return -1; 712 ret = sram_free(addr);
712found: 713 *tmp = lsl->next;
713 lsl = *tmp; 714 kfree(lsl);
714 sram_free(addr); 715 break;
715 *tmp = lsl->next; 716 }
716 kfree(lsl);
717 717
718 return 0; 718 return ret;
719} 719}
720EXPORT_SYMBOL(sram_free_with_lsl); 720EXPORT_SYMBOL(sram_free_with_lsl);
721 721
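
[Editor's note: the sram_free_with_lsl() rewrite above drops the goto by folding the unlink into the scan with a pointer-to-pointer, and propagates sram_free()'s status instead of always returning 0. A standalone sketch of the same idiom follows; the list type and names are illustrative.]

#include <stdio.h>
#include <stdlib.h>

struct node { const void *addr; struct node *next; };

/* Walk with a pointer to the link itself so a match can be unlinked
 * without tracking a separate "prev" node - the same shape as the
 * reworked sram_free_with_lsl(). Returns -1 if addr is absent. */
static int list_free(struct node **list, const void *addr)
{
    struct node **tmp, *hit;
    int ret = -1;

    for (tmp = list; *tmp; tmp = &(*tmp)->next)
        if ((*tmp)->addr == addr) {
            hit = *tmp;
            ret = 0;           /* stands in for sram_free(addr) */
            *tmp = hit->next;  /* unlink */
            free(hit);
            break;
        }
    return ret;
}

int main(void)
{
    int a, b;
    struct node *n1 = malloc(sizeof(*n1)), *n2 = malloc(sizeof(*n2));
    struct node *head = n1;

    n1->addr = &a; n1->next = n2;
    n2->addr = &b; n2->next = NULL;

    printf("%d\n", list_free(&head, &b));  /* 0: found and unlinked */
    printf("%d\n", list_free(&head, &b));  /* -1: already gone */
    return 0;
}
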
diff --git a/arch/microblaze/Kconfig.debug b/arch/microblaze/Kconfig.debug
index e66e25c4b0b2..012e377330cd 100644
--- a/arch/microblaze/Kconfig.debug
+++ b/arch/microblaze/Kconfig.debug
@@ -23,8 +23,4 @@ config HEART_BEAT
23 This option turns on/off heart beat kernel functionality. 23 This option turns on/off heart beat kernel functionality.
24 First GPIO node is taken. 24 First GPIO node is taken.
25 25
26config DEBUG_BOOTMEM
27 depends on DEBUG_KERNEL
28 bool "Debug BOOTMEM initialization"
29
30endmenu 26endmenu
diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile
index 15f1f1d1840d..6f432e6df9af 100644
--- a/arch/microblaze/Makefile
+++ b/arch/microblaze/Makefile
@@ -17,7 +17,7 @@ export CPU_VER CPU_MAJOR CPU_MINOR CPU_REV
17# The various CONFIG_XILINX cpu features options are integers 0/1/2... 17# The various CONFIG_XILINX cpu features options are integers 0/1/2...
18# rather than bools y/n 18# rather than bools y/n
19 19
20# Work out HW multipler support. This is icky. 20# Work out HW multipler support. This is tricky.
21# 1. Spartan2 has no HW multiplers. 21# 1. Spartan2 has no HW multiplers.
22# 2. MicroBlaze v3.x always uses them, except in Spartan 2 22# 2. MicroBlaze v3.x always uses them, except in Spartan 2
23# 3. All other FPGa/CPU ver combos, we can trust the CONFIG_ settings 23# 3. All other FPGa/CPU ver combos, we can trust the CONFIG_ settings
diff --git a/arch/microblaze/configs/mmu_defconfig b/arch/microblaze/configs/mmu_defconfig
index 8b422b12ef78..ab8fbe7ad90b 100644
--- a/arch/microblaze/configs/mmu_defconfig
+++ b/arch/microblaze/configs/mmu_defconfig
@@ -66,5 +66,4 @@ CONFIG_DEBUG_SPINLOCK=y
66CONFIG_DEBUG_INFO=y 66CONFIG_DEBUG_INFO=y
67# CONFIG_RCU_CPU_STALL_DETECTOR is not set 67# CONFIG_RCU_CPU_STALL_DETECTOR is not set
68CONFIG_EARLY_PRINTK=y 68CONFIG_EARLY_PRINTK=y
69CONFIG_DEBUG_BOOTMEM=y
70# CONFIG_CRYPTO_ANSI_CPRNG is not set 69# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/microblaze/include/asm/pvr.h b/arch/microblaze/include/asm/pvr.h
index 37db96a15b45..a10bec62e857 100644
--- a/arch/microblaze/include/asm/pvr.h
+++ b/arch/microblaze/include/asm/pvr.h
@@ -1,9 +1,9 @@
1/* 1/*
2 * Support for the MicroBlaze PVR (Processor Version Register) 2 * Support for the MicroBlaze PVR (Processor Version Register)
3 * 3 *
4 * Copyright (C) 2009 Michal Simek <monstr@monstr.eu> 4 * Copyright (C) 2009 - 2011 Michal Simek <monstr@monstr.eu>
5 * Copyright (C) 2007 John Williams <john.williams@petalogix.com> 5 * Copyright (C) 2007 John Williams <john.williams@petalogix.com>
6 * Copyright (C) 2007 - 2009 PetaLogix 6 * Copyright (C) 2007 - 2011 PetaLogix
7 * 7 *
8 * This file is subject to the terms and conditions of the GNU General 8 * This file is subject to the terms and conditions of the GNU General
9 * Public License. See the file COPYING in the main directory of this 9 * Public License. See the file COPYING in the main directory of this
@@ -46,11 +46,11 @@ struct pvr_s {
46#define PVR2_I_LMB_MASK 0x10000000 46#define PVR2_I_LMB_MASK 0x10000000
47#define PVR2_INTERRUPT_IS_EDGE_MASK 0x08000000 47#define PVR2_INTERRUPT_IS_EDGE_MASK 0x08000000
48#define PVR2_EDGE_IS_POSITIVE_MASK 0x04000000 48#define PVR2_EDGE_IS_POSITIVE_MASK 0x04000000
49#define PVR2_D_PLB_MASK 0x02000000 /* new */ 49#define PVR2_D_PLB_MASK 0x02000000 /* new */
50#define PVR2_I_PLB_MASK 0x01000000 /* new */ 50#define PVR2_I_PLB_MASK 0x01000000 /* new */
51#define PVR2_INTERCONNECT 0x00800000 /* new */ 51#define PVR2_INTERCONNECT 0x00800000 /* new */
52#define PVR2_USE_EXTEND_FSL 0x00080000 /* new */ 52#define PVR2_USE_EXTEND_FSL 0x00080000 /* new */
53#define PVR2_USE_FSL_EXC 0x00040000 /* new */ 53#define PVR2_USE_FSL_EXC 0x00040000 /* new */
54#define PVR2_USE_MSR_INSTR 0x00020000 54#define PVR2_USE_MSR_INSTR 0x00020000
55#define PVR2_USE_PCMP_INSTR 0x00010000 55#define PVR2_USE_PCMP_INSTR 0x00010000
56#define PVR2_AREA_OPTIMISED 0x00008000 56#define PVR2_AREA_OPTIMISED 0x00008000
@@ -59,7 +59,7 @@ struct pvr_s {
59#define PVR2_USE_HW_MUL_MASK 0x00001000 59#define PVR2_USE_HW_MUL_MASK 0x00001000
60#define PVR2_USE_FPU_MASK 0x00000800 60#define PVR2_USE_FPU_MASK 0x00000800
61#define PVR2_USE_MUL64_MASK 0x00000400 61#define PVR2_USE_MUL64_MASK 0x00000400
62#define PVR2_USE_FPU2_MASK 0x00000200 /* new */ 62#define PVR2_USE_FPU2_MASK 0x00000200 /* new */
63#define PVR2_USE_IPLBEXC 0x00000100 63#define PVR2_USE_IPLBEXC 0x00000100
64#define PVR2_USE_DPLBEXC 0x00000080 64#define PVR2_USE_DPLBEXC 0x00000080
65#define PVR2_OPCODE_0x0_ILL_MASK 0x00000040 65#define PVR2_OPCODE_0x0_ILL_MASK 0x00000040
@@ -122,96 +122,103 @@ struct pvr_s {
122 122
123 123
124/* PVR access macros */ 124/* PVR access macros */
125#define PVR_IS_FULL(pvr) (pvr.pvr[0] & PVR0_PVR_FULL_MASK) 125#define PVR_IS_FULL(_pvr) (_pvr.pvr[0] & PVR0_PVR_FULL_MASK)
126#define PVR_USE_BARREL(pvr) (pvr.pvr[0] & PVR0_USE_BARREL_MASK) 126#define PVR_USE_BARREL(_pvr) (_pvr.pvr[0] & PVR0_USE_BARREL_MASK)
127#define PVR_USE_DIV(pvr) (pvr.pvr[0] & PVR0_USE_DIV_MASK) 127#define PVR_USE_DIV(_pvr) (_pvr.pvr[0] & PVR0_USE_DIV_MASK)
128#define PVR_USE_HW_MUL(pvr) (pvr.pvr[0] & PVR0_USE_HW_MUL_MASK) 128#define PVR_USE_HW_MUL(_pvr) (_pvr.pvr[0] & PVR0_USE_HW_MUL_MASK)
129#define PVR_USE_FPU(pvr) (pvr.pvr[0] & PVR0_USE_FPU_MASK) 129#define PVR_USE_FPU(_pvr) (_pvr.pvr[0] & PVR0_USE_FPU_MASK)
130#define PVR_USE_FPU2(pvr) (pvr.pvr[2] & PVR2_USE_FPU2_MASK) 130#define PVR_USE_FPU2(_pvr) (_pvr.pvr[2] & PVR2_USE_FPU2_MASK)
131#define PVR_USE_ICACHE(pvr) (pvr.pvr[0] & PVR0_USE_ICACHE_MASK) 131#define PVR_USE_ICACHE(_pvr) (_pvr.pvr[0] & PVR0_USE_ICACHE_MASK)
132#define PVR_USE_DCACHE(pvr) (pvr.pvr[0] & PVR0_USE_DCACHE_MASK) 132#define PVR_USE_DCACHE(_pvr) (_pvr.pvr[0] & PVR0_USE_DCACHE_MASK)
133#define PVR_VERSION(pvr) ((pvr.pvr[0] & PVR0_VERSION_MASK) >> 8) 133#define PVR_VERSION(_pvr) ((_pvr.pvr[0] & PVR0_VERSION_MASK) >> 8)
134#define PVR_USER1(pvr) (pvr.pvr[0] & PVR0_USER1_MASK) 134#define PVR_USER1(_pvr) (_pvr.pvr[0] & PVR0_USER1_MASK)
135#define PVR_USER2(pvr) (pvr.pvr[1] & PVR1_USER2_MASK) 135#define PVR_USER2(_pvr) (_pvr.pvr[1] & PVR1_USER2_MASK)
136 136
137#define PVR_D_OPB(pvr) (pvr.pvr[2] & PVR2_D_OPB_MASK) 137#define PVR_D_OPB(_pvr) (_pvr.pvr[2] & PVR2_D_OPB_MASK)
138#define PVR_D_LMB(pvr) (pvr.pvr[2] & PVR2_D_LMB_MASK) 138#define PVR_D_LMB(_pvr) (_pvr.pvr[2] & PVR2_D_LMB_MASK)
139#define PVR_I_OPB(pvr) (pvr.pvr[2] & PVR2_I_OPB_MASK) 139#define PVR_I_OPB(_pvr) (_pvr.pvr[2] & PVR2_I_OPB_MASK)
140#define PVR_I_LMB(pvr) (pvr.pvr[2] & PVR2_I_LMB_MASK) 140#define PVR_I_LMB(_pvr) (_pvr.pvr[2] & PVR2_I_LMB_MASK)
141#define PVR_INTERRUPT_IS_EDGE(pvr) \ 141#define PVR_INTERRUPT_IS_EDGE(_pvr) \
142 (pvr.pvr[2] & PVR2_INTERRUPT_IS_EDGE_MASK) 142 (_pvr.pvr[2] & PVR2_INTERRUPT_IS_EDGE_MASK)
143#define PVR_EDGE_IS_POSITIVE(pvr) \ 143#define PVR_EDGE_IS_POSITIVE(_pvr) \
144 (pvr.pvr[2] & PVR2_EDGE_IS_POSITIVE_MASK) 144 (_pvr.pvr[2] & PVR2_EDGE_IS_POSITIVE_MASK)
145#define PVR_USE_MSR_INSTR(pvr) (pvr.pvr[2] & PVR2_USE_MSR_INSTR) 145#define PVR_USE_MSR_INSTR(_pvr) (_pvr.pvr[2] & PVR2_USE_MSR_INSTR)
146#define PVR_USE_PCMP_INSTR(pvr) (pvr.pvr[2] & PVR2_USE_PCMP_INSTR) 146#define PVR_USE_PCMP_INSTR(_pvr) (_pvr.pvr[2] & PVR2_USE_PCMP_INSTR)
147#define PVR_AREA_OPTIMISED(pvr) (pvr.pvr[2] & PVR2_AREA_OPTIMISED) 147#define PVR_AREA_OPTIMISED(_pvr) (_pvr.pvr[2] & PVR2_AREA_OPTIMISED)
148#define PVR_USE_MUL64(pvr) (pvr.pvr[2] & PVR2_USE_MUL64_MASK) 148#define PVR_USE_MUL64(_pvr) (_pvr.pvr[2] & PVR2_USE_MUL64_MASK)
149#define PVR_OPCODE_0x0_ILLEGAL(pvr) \ 149#define PVR_OPCODE_0x0_ILLEGAL(_pvr) \
150 (pvr.pvr[2] & PVR2_OPCODE_0x0_ILL_MASK) 150 (_pvr.pvr[2] & PVR2_OPCODE_0x0_ILL_MASK)
151#define PVR_UNALIGNED_EXCEPTION(pvr) \ 151#define PVR_UNALIGNED_EXCEPTION(_pvr) \
152 (pvr.pvr[2] & PVR2_UNALIGNED_EXC_MASK) 152 (_pvr.pvr[2] & PVR2_UNALIGNED_EXC_MASK)
153#define PVR_ILL_OPCODE_EXCEPTION(pvr) \ 153#define PVR_ILL_OPCODE_EXCEPTION(_pvr) \
154 (pvr.pvr[2] & PVR2_ILL_OPCODE_EXC_MASK) 154 (_pvr.pvr[2] & PVR2_ILL_OPCODE_EXC_MASK)
155#define PVR_IOPB_BUS_EXCEPTION(pvr) \ 155#define PVR_IOPB_BUS_EXCEPTION(_pvr) \
156 (pvr.pvr[2] & PVR2_IOPB_BUS_EXC_MASK) 156 (_pvr.pvr[2] & PVR2_IOPB_BUS_EXC_MASK)
157#define PVR_DOPB_BUS_EXCEPTION(pvr) \ 157#define PVR_DOPB_BUS_EXCEPTION(_pvr) \
158 (pvr.pvr[2] & PVR2_DOPB_BUS_EXC_MASK) 158 (_pvr.pvr[2] & PVR2_DOPB_BUS_EXC_MASK)
159#define PVR_DIV_ZERO_EXCEPTION(pvr) \ 159#define PVR_DIV_ZERO_EXCEPTION(_pvr) \
160 (pvr.pvr[2] & PVR2_DIV_ZERO_EXC_MASK) 160 (_pvr.pvr[2] & PVR2_DIV_ZERO_EXC_MASK)
161#define PVR_FPU_EXCEPTION(pvr) (pvr.pvr[2] & PVR2_FPU_EXC_MASK) 161#define PVR_FPU_EXCEPTION(_pvr) (_pvr.pvr[2] & PVR2_FPU_EXC_MASK)
162#define PVR_FSL_EXCEPTION(pvr) (pvr.pvr[2] & PVR2_USE_EXTEND_FSL) 162#define PVR_FSL_EXCEPTION(_pvr) (_pvr.pvr[2] & PVR2_USE_EXTEND_FSL)
163 163
164#define PVR_DEBUG_ENABLED(pvr) (pvr.pvr[3] & PVR3_DEBUG_ENABLED_MASK) 164#define PVR_DEBUG_ENABLED(_pvr) (_pvr.pvr[3] & PVR3_DEBUG_ENABLED_MASK)
165#define PVR_NUMBER_OF_PC_BRK(pvr) \ 165#define PVR_NUMBER_OF_PC_BRK(_pvr) \
166 ((pvr.pvr[3] & PVR3_NUMBER_OF_PC_BRK_MASK) >> 25) 166 ((_pvr.pvr[3] & PVR3_NUMBER_OF_PC_BRK_MASK) >> 25)
167#define PVR_NUMBER_OF_RD_ADDR_BRK(pvr) \ 167#define PVR_NUMBER_OF_RD_ADDR_BRK(_pvr) \
168 ((pvr.pvr[3] & PVR3_NUMBER_OF_RD_ADDR_BRK_MASK) >> 19) 168 ((_pvr.pvr[3] & PVR3_NUMBER_OF_RD_ADDR_BRK_MASK) >> 19)
169#define PVR_NUMBER_OF_WR_ADDR_BRK(pvr) \ 169#define PVR_NUMBER_OF_WR_ADDR_BRK(_pvr) \
170 ((pvr.pvr[3] & PVR3_NUMBER_OF_WR_ADDR_BRK_MASK) >> 13) 170 ((_pvr.pvr[3] & PVR3_NUMBER_OF_WR_ADDR_BRK_MASK) >> 13)
171#define PVR_FSL_LINKS(pvr) ((pvr.pvr[3] & PVR3_FSL_LINKS_MASK) >> 7) 171#define PVR_FSL_LINKS(_pvr) ((_pvr.pvr[3] & PVR3_FSL_LINKS_MASK) >> 7)
172 172
173#define PVR_ICACHE_ADDR_TAG_BITS(pvr) \ 173#define PVR_ICACHE_ADDR_TAG_BITS(_pvr) \
174 ((pvr.pvr[4] & PVR4_ICACHE_ADDR_TAG_BITS_MASK) >> 26) 174 ((_pvr.pvr[4] & PVR4_ICACHE_ADDR_TAG_BITS_MASK) >> 26)
175#define PVR_ICACHE_USE_FSL(pvr) (pvr.pvr[4] & PVR4_ICACHE_USE_FSL_MASK) 175#define PVR_ICACHE_USE_FSL(_pvr) \
176#define PVR_ICACHE_ALLOW_WR(pvr) (pvr.pvr[4] & PVR4_ICACHE_ALLOW_WR_MASK) 176 (_pvr.pvr[4] & PVR4_ICACHE_USE_FSL_MASK)
177#define PVR_ICACHE_LINE_LEN(pvr) \ 177#define PVR_ICACHE_ALLOW_WR(_pvr) \
178 (1 << ((pvr.pvr[4] & PVR4_ICACHE_LINE_LEN_MASK) >> 21)) 178 (_pvr.pvr[4] & PVR4_ICACHE_ALLOW_WR_MASK)
179#define PVR_ICACHE_BYTE_SIZE(pvr) \ 179#define PVR_ICACHE_LINE_LEN(_pvr) \
180 (1 << ((pvr.pvr[4] & PVR4_ICACHE_BYTE_SIZE_MASK) >> 16)) 180 (1 << ((_pvr.pvr[4] & PVR4_ICACHE_LINE_LEN_MASK) >> 21))
181 181#define PVR_ICACHE_BYTE_SIZE(_pvr) \
182#define PVR_DCACHE_ADDR_TAG_BITS(pvr) \ 182 (1 << ((_pvr.pvr[4] & PVR4_ICACHE_BYTE_SIZE_MASK) >> 16))
183 ((pvr.pvr[5] & PVR5_DCACHE_ADDR_TAG_BITS_MASK) >> 26) 183
184#define PVR_DCACHE_USE_FSL(pvr) (pvr.pvr[5] & PVR5_DCACHE_USE_FSL_MASK) 184#define PVR_DCACHE_ADDR_TAG_BITS(_pvr) \
185#define PVR_DCACHE_ALLOW_WR(pvr) (pvr.pvr[5] & PVR5_DCACHE_ALLOW_WR_MASK) 185 ((_pvr.pvr[5] & PVR5_DCACHE_ADDR_TAG_BITS_MASK) >> 26)
186#define PVR_DCACHE_USE_FSL(_pvr) (_pvr.pvr[5] & PVR5_DCACHE_USE_FSL_MASK)
187#define PVR_DCACHE_ALLOW_WR(_pvr) \
188 (_pvr.pvr[5] & PVR5_DCACHE_ALLOW_WR_MASK)
186/* FIXME two shifts on one line needs any comment */ 189/* FIXME two shifts on one line needs any comment */
187#define PVR_DCACHE_LINE_LEN(pvr) \ 190#define PVR_DCACHE_LINE_LEN(_pvr) \
188 (1 << ((pvr.pvr[5] & PVR5_DCACHE_LINE_LEN_MASK) >> 21)) 191 (1 << ((_pvr.pvr[5] & PVR5_DCACHE_LINE_LEN_MASK) >> 21))
189#define PVR_DCACHE_BYTE_SIZE(pvr) \ 192#define PVR_DCACHE_BYTE_SIZE(_pvr) \
190 (1 << ((pvr.pvr[5] & PVR5_DCACHE_BYTE_SIZE_MASK) >> 16)) 193 (1 << ((_pvr.pvr[5] & PVR5_DCACHE_BYTE_SIZE_MASK) >> 16))
191 194
192#define PVR_DCACHE_USE_WRITEBACK(pvr) \ 195#define PVR_DCACHE_USE_WRITEBACK(_pvr) \
193 ((pvr.pvr[5] & PVR5_DCACHE_USE_WRITEBACK) >> 14) 196 ((_pvr.pvr[5] & PVR5_DCACHE_USE_WRITEBACK) >> 14)
194 197
195#define PVR_ICACHE_BASEADDR(pvr) (pvr.pvr[6] & PVR6_ICACHE_BASEADDR_MASK) 198#define PVR_ICACHE_BASEADDR(_pvr) \
196#define PVR_ICACHE_HIGHADDR(pvr) (pvr.pvr[7] & PVR7_ICACHE_HIGHADDR_MASK) 199 (_pvr.pvr[6] & PVR6_ICACHE_BASEADDR_MASK)
200#define PVR_ICACHE_HIGHADDR(_pvr) \
201 (_pvr.pvr[7] & PVR7_ICACHE_HIGHADDR_MASK)
202#define PVR_DCACHE_BASEADDR(_pvr) \
203 (_pvr.pvr[8] & PVR8_DCACHE_BASEADDR_MASK)
204#define PVR_DCACHE_HIGHADDR(_pvr) \
205 (_pvr.pvr[9] & PVR9_DCACHE_HIGHADDR_MASK)
197 206
198#define PVR_DCACHE_BASEADDR(pvr) (pvr.pvr[8] & PVR8_DCACHE_BASEADDR_MASK) 207#define PVR_TARGET_FAMILY(_pvr) \
199#define PVR_DCACHE_HIGHADDR(pvr) (pvr.pvr[9] & PVR9_DCACHE_HIGHADDR_MASK) 208 ((_pvr.pvr[10] & PVR10_TARGET_FAMILY_MASK) >> 24)
200 209
201#define PVR_TARGET_FAMILY(pvr) ((pvr.pvr[10] & PVR10_TARGET_FAMILY_MASK) >> 24) 210#define PVR_MSR_RESET_VALUE(_pvr) \
202 211 (_pvr.pvr[11] & PVR11_MSR_RESET_VALUE_MASK)
203#define PVR_MSR_RESET_VALUE(pvr) \
204 (pvr.pvr[11] & PVR11_MSR_RESET_VALUE_MASK)
205 212
206/* mmu */ 213/* mmu */
207#define PVR_USE_MMU(pvr) ((pvr.pvr[11] & PVR11_USE_MMU) >> 30) 214#define PVR_USE_MMU(_pvr) ((_pvr.pvr[11] & PVR11_USE_MMU) >> 30)
208#define PVR_MMU_ITLB_SIZE(pvr) (pvr.pvr[11] & PVR11_MMU_ITLB_SIZE) 215#define PVR_MMU_ITLB_SIZE(_pvr) (_pvr.pvr[11] & PVR11_MMU_ITLB_SIZE)
209#define PVR_MMU_DTLB_SIZE(pvr) (pvr.pvr[11] & PVR11_MMU_DTLB_SIZE) 216#define PVR_MMU_DTLB_SIZE(_pvr) (_pvr.pvr[11] & PVR11_MMU_DTLB_SIZE)
210#define PVR_MMU_TLB_ACCESS(pvr) (pvr.pvr[11] & PVR11_MMU_TLB_ACCESS) 217#define PVR_MMU_TLB_ACCESS(_pvr) (_pvr.pvr[11] & PVR11_MMU_TLB_ACCESS)
211#define PVR_MMU_ZONES(pvr) (pvr.pvr[11] & PVR11_MMU_ZONES) 218#define PVR_MMU_ZONES(_pvr) (_pvr.pvr[11] & PVR11_MMU_ZONES)
212 219
213/* endian */ 220/* endian */
214#define PVR_ENDIAN(pvr) (pvr.pvr[0] & PVR0_ENDI) 221#define PVR_ENDIAN(_pvr) (_pvr.pvr[0] & PVR0_ENDI)
215 222
216int cpu_has_pvr(void); 223int cpu_has_pvr(void);
217void get_pvr(struct pvr_s *pvr); 224void get_pvr(struct pvr_s *pvr);
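
[Editor's note: the wholesale pvr -> _pvr rename above is not cosmetic. In `(pvr.pvr[0] & ...)` the macro parameter and the struct member share a token, so the preprocessor rewrote both, and the macros only compiled when the caller's variable happened to be named `pvr`. A small demonstration follows; the 0x0000ff00 mask is made up for the example, since PVR0_VERSION_MASK's value is not shown here.]

#include <stdio.h>

struct pvr_s { unsigned int pvr[12]; };

/* Old form: PVR_VERSION_OLD(cpu) expands to ((cpu.cpu[0] & ...) >> 8),
 * because the parameter token matches the member token - a compile
 * error for any argument not literally named "pvr". */
#define PVR_VERSION_OLD(pvr)   ((pvr.pvr[0] & 0x0000ff00) >> 8)

/* New form: the underscore keeps parameter and member distinct. */
#define PVR_VERSION_NEW(_pvr)  ((_pvr.pvr[0] & 0x0000ff00) >> 8)

int main(void)
{
    struct pvr_s cpu = { .pvr = { 0x00001300 } };  /* 0x13 = "8.00.b" per cpuinfo.c below */

    /* PVR_VERSION_OLD(cpu) would fail: "cpu has no member cpu". */
    printf("version code: 0x%02x\n", PVR_VERSION_NEW(cpu));
    return 0;
}
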
diff --git a/arch/microblaze/kernel/cpu/cpuinfo.c b/arch/microblaze/kernel/cpu/cpuinfo.c
index 87c79fa275c3..2c309fccf230 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo.c
@@ -32,6 +32,7 @@ const struct cpu_ver_key cpu_ver_lookup[] = {
32 {"7.30.a", 0x10}, 32 {"7.30.a", 0x10},
33 {"7.30.b", 0x11}, 33 {"7.30.b", 0x11},
34 {"8.00.a", 0x12}, 34 {"8.00.a", 0x12},
35 {"8.00.b", 0x13},
35 {NULL, 0}, 36 {NULL, 0},
36}; 37};
37 38
diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S
index 819238b8a429..41c30cdb2704 100644
--- a/arch/microblaze/kernel/entry.S
+++ b/arch/microblaze/kernel/entry.S
@@ -287,25 +287,44 @@
287 * are masked. This is nice, means we don't have to CLI before state save 287 * are masked. This is nice, means we don't have to CLI before state save
288 */ 288 */
289C_ENTRY(_user_exception): 289C_ENTRY(_user_exception):
290 addi r14, r14, 4 /* return address is 4 byte after call */
291 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */ 290 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
291 addi r14, r14, 4 /* return address is 4 byte after call */
292
293 mfs r1, rmsr
294 nop
295 andi r1, r1, MSR_UMS
296 bnei r1, 1f
297
298/* Kernel-mode state save - kernel execve */
299 lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
300 tophys(r1,r1);
301
302 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
303 SAVE_REGS
292 304
305 swi r1, r1, PTO + PT_MODE; /* pt_regs -> kernel mode */
306 brid 2f;
307 nop; /* Fill delay slot */
308
309/* User-mode state save. */
3101:
293 lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */ 311 lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
294 tophys(r1,r1); 312 tophys(r1,r1);
295 lwi r1, r1, TS_THREAD_INFO; /* get stack from task_struct */ 313 lwi r1, r1, TS_THREAD_INFO; /* get stack from task_struct */
296 /* MS these three instructions can be added to one */ 314/* calculate kernel stack pointer from task struct 8k */
297 /* addik r1, r1, THREAD_SIZE; */ 315 addik r1, r1, THREAD_SIZE;
298 /* tophys(r1,r1); */ 316 tophys(r1,r1);
299 /* addik r1, r1, -STATE_SAVE_SIZE; */ 317
300 addik r1, r1, THREAD_SIZE + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE; 318 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
301 SAVE_REGS 319 SAVE_REGS
302 swi r0, r1, PTO + PT_R3 320 swi r0, r1, PTO + PT_R3
303 swi r0, r1, PTO + PT_R4 321 swi r0, r1, PTO + PT_R4
304 322
323 swi r0, r1, PTO + PT_MODE; /* Was in user-mode. */
305 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); 324 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
306 swi r11, r1, PTO+PT_R1; /* Store user SP. */ 325 swi r11, r1, PTO+PT_R1; /* Store user SP. */
307 clear_ums; 326 clear_ums;
308 lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); 3272: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
309 /* Save away the syscall number. */ 328 /* Save away the syscall number. */
310 swi r12, r1, PTO+PT_R0; 329 swi r12, r1, PTO+PT_R0;
311 tovirt(r1,r1) 330 tovirt(r1,r1)
@@ -375,6 +394,9 @@ C_ENTRY(ret_from_trap):
375 swi r3, r1, PTO + PT_R3 394 swi r3, r1, PTO + PT_R3
376 swi r4, r1, PTO + PT_R4 395 swi r4, r1, PTO + PT_R4
377 396
397 lwi r11, r1, PTO + PT_MODE;
398/* See if returning to kernel mode, if so, skip resched &c. */
399 bnei r11, 2f;
378 /* We're returning to user mode, so check for various conditions that 400 /* We're returning to user mode, so check for various conditions that
379 * trigger rescheduling. */ 401 * trigger rescheduling. */
380 /* FIXME: Restructure all these flag checks. */ 402 /* FIXME: Restructure all these flag checks. */
@@ -417,6 +439,16 @@ C_ENTRY(ret_from_trap):
417 RESTORE_REGS; 439 RESTORE_REGS;
418 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */ 440 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
419 lwi r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */ 441 lwi r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */
442 bri 6f;
443
444/* Return to kernel state. */
4452: set_bip; /* Ints masked for state restore */
446 VM_OFF;
447 tophys(r1,r1);
448 RESTORE_REGS;
449 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
450 tovirt(r1,r1);
4516:
420TRAP_return: /* Make global symbol for debugging */ 452TRAP_return: /* Make global symbol for debugging */
421 rtbd r14, 0; /* Instructions to return from an IRQ */ 453 rtbd r14, 0; /* Instructions to return from an IRQ */
422 nop; 454 nop;
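
[Editor's note: the new _user_exception prologue above tests MSR[UMS] to learn where the trap came from, saves state on either the current kernel stack (kernel mode) or the task's kernel stack (user mode), and records the choice in PT_MODE so that ret_from_trap can skip signal delivery and rescheduling for kernel-mode traps. A C-level model of that decision, with an illustrative MSR_UMS value:]

#include <stdio.h>

#define MSR_UMS 0x1000u  /* illustrative: "user mode saved" bit in the MSR */

enum pt_mode { MODE_USER = 0, MODE_KERNEL = 1 };  /* PT_MODE: 0 = user, nonzero = kernel */

/* Model of the added branch: classify the trap by the saved MSR; the
 * return path only runs the user-mode work when PT_MODE is zero. */
static enum pt_mode classify_trap(unsigned long saved_msr)
{
    return (saved_msr & MSR_UMS) ? MODE_USER : MODE_KERNEL;
}

int main(void)
{
    printf("%d %d\n", classify_trap(MSR_UMS), classify_trap(0));  /* 0 1 */
    return 0;
}
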
diff --git a/arch/microblaze/kernel/exceptions.c b/arch/microblaze/kernel/exceptions.c
index 478f2943ede7..a7fa6ae76d89 100644
--- a/arch/microblaze/kernel/exceptions.c
+++ b/arch/microblaze/kernel/exceptions.c
@@ -25,6 +25,7 @@
25#include <linux/errno.h> 25#include <linux/errno.h>
26#include <linux/ptrace.h> 26#include <linux/ptrace.h>
27#include <asm/current.h> 27#include <asm/current.h>
28#include <asm/cacheflush.h>
28 29
29#define MICROBLAZE_ILL_OPCODE_EXCEPTION 0x02 30#define MICROBLAZE_ILL_OPCODE_EXCEPTION 0x02
30#define MICROBLAZE_IBUS_EXCEPTION 0x03 31#define MICROBLAZE_IBUS_EXCEPTION 0x03
@@ -52,6 +53,8 @@ void die(const char *str, struct pt_regs *fp, long err)
52void sw_exception(struct pt_regs *regs) 53void sw_exception(struct pt_regs *regs)
53{ 54{
54 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->r16); 55 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->r16);
56 flush_dcache_range(regs->r16, regs->r16 + 0x4);
57 flush_icache_range(regs->r16, regs->r16 + 0x4);
55} 58}
56 59
57void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) 60void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S
index 781195438ee6..25f6e07d8de8 100644
--- a/arch/microblaze/kernel/hw_exception_handler.S
+++ b/arch/microblaze/kernel/hw_exception_handler.S
@@ -945,11 +945,20 @@ store3: sbi r3, r4, 2;
945store4: sbi r3, r4, 3; /* Delay slot */ 945store4: sbi r3, r4, 3; /* Delay slot */
946ex_shw_vm: 946ex_shw_vm:
947 /* Store the lower half-word, byte-by-byte into destination address */ 947 /* Store the lower half-word, byte-by-byte into destination address */
948#ifdef __MICROBLAZEEL__
949 lbui r3, r5, 0;
950store5: sbi r3, r4, 0;
951 lbui r3, r5, 1;
952 brid ret_from_exc;
953store6: sbi r3, r4, 1; /* Delay slot */
954#else
948 lbui r3, r5, 2; 955 lbui r3, r5, 2;
949store5: sbi r3, r4, 0; 956store5: sbi r3, r4, 0;
950 lbui r3, r5, 3; 957 lbui r3, r5, 3;
951 brid ret_from_exc; 958 brid ret_from_exc;
952store6: sbi r3, r4, 1; /* Delay slot */ 959store6: sbi r3, r4, 1; /* Delay slot */
960#endif
961
953ex_sw_end_vm: /* Exception handling of store word, ends. */ 962ex_sw_end_vm: /* Exception handling of store word, ends. */
954 963
955/* We have to prevent cases that get/put_user macros get unaligned pointer 964/* We have to prevent cases that get/put_user macros get unaligned pointer
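
[Editor's note: the hunk above makes the unaligned half-word store fixup endian-aware: a big-endian CPU keeps the low half-word of a 32-bit value in bytes 2-3, a little-endian one in bytes 0-1, which is what the new __MICROBLAZEEL__ branch selects. A C analogue, deterministic on any host because the source bytes are given explicitly:]

#include <stdint.h>
#include <stdio.h>

/* Copy the low half-word of a 32-bit value to an unaligned destination
 * one byte at a time; which source bytes hold it depends on endianness. */
static void store_low_halfword(uint8_t *dst, const uint8_t *src)
{
#ifdef __MICROBLAZEEL__
    dst[0] = src[0];   /* little-endian: low half-word in bytes 0,1 */
    dst[1] = src[1];
#else
    dst[0] = src[2];   /* big-endian: low half-word in bytes 2,3 */
    dst[1] = src[3];
#endif
}

int main(void)
{
    /* 0x11223344 as laid out in memory by a big-endian CPU */
    uint8_t be_word[4] = { 0x11, 0x22, 0x33, 0x44 };
    uint8_t out[2];

    store_low_halfword(out, be_word);       /* takes the BE branch here */
    printf("%02x %02x\n", out[0], out[1]);  /* 33 44 */
    return 0;
}
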
diff --git a/arch/microblaze/kernel/prom.c b/arch/microblaze/kernel/prom.c
index a105301e2b7f..c881393f07fd 100644
--- a/arch/microblaze/kernel/prom.c
+++ b/arch/microblaze/kernel/prom.c
@@ -61,14 +61,12 @@ static int __init early_init_dt_scan_serial(unsigned long node,
61 char *p; 61 char *p;
62 int *addr; 62 int *addr;
63 63
64 pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname); 64 pr_debug("search \"serial\", depth: %d, uname: %s\n", depth, uname);
65 65
66/* find all serial nodes */ 66/* find all serial nodes */
67 if (strncmp(uname, "serial", 6) != 0) 67 if (strncmp(uname, "serial", 6) != 0)
68 return 0; 68 return 0;
69 69
70 early_init_dt_check_for_initrd(node);
71
72/* find compatible node with uartlite */ 70/* find compatible node with uartlite */
73 p = of_get_flat_dt_prop(node, "compatible", &l); 71 p = of_get_flat_dt_prop(node, "compatible", &l);
74 if ((strncmp(p, "xlnx,xps-uartlite", 17) != 0) && 72 if ((strncmp(p, "xlnx,xps-uartlite", 17) != 0) &&
diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S
index 96a88c31fe48..3451bdec9f05 100644
--- a/arch/microblaze/kernel/vmlinux.lds.S
+++ b/arch/microblaze/kernel/vmlinux.lds.S
@@ -123,20 +123,10 @@ SECTIONS {
123 123
124 __init_end_before_initramfs = .; 124 __init_end_before_initramfs = .;
125 125
126 .init.ramfs ALIGN(PAGE_SIZE) : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { 126 .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
127 __initramfs_start = .; 127 INIT_RAM_FS
128 *(.init.ramfs)
129 __initramfs_end = .;
130 . = ALIGN(4);
131 LONG(0);
132/*
133 * FIXME this can break initramfs for MMU.
134 * Pad init.ramfs up to page boundary,
135 * so that __init_end == __bss_start. This will make image.elf
136 * consistent with the image.bin
137 */
138 /* . = ALIGN(PAGE_SIZE); */
139 } 128 }
129
140 __init_end = .; 130 __init_end = .;
141 131
142 .bss ALIGN (PAGE_SIZE) : AT(ADDR(.bss) - LOAD_OFFSET) { 132 .bss ALIGN (PAGE_SIZE) : AT(ADDR(.bss) - LOAD_OFFSET) {
diff --git a/arch/microblaze/lib/memmove.c b/arch/microblaze/lib/memmove.c
index 123e3616f2dd..810fd68775e3 100644
--- a/arch/microblaze/lib/memmove.c
+++ b/arch/microblaze/lib/memmove.c
@@ -182,7 +182,7 @@ void *memmove(void *v_dst, const void *v_src, __kernel_size_t c)
182 for (; c >= 4; c -= 4) { 182 for (; c >= 4; c -= 4) {
183 value = *--i_src; 183 value = *--i_src;
184 *--i_dst = buf_hold | ((value & 0xFF000000)>> 24); 184 *--i_dst = buf_hold | ((value & 0xFF000000)>> 24);
185 buf_hold = (value & 0xFFFFFF) << 8;; 185 buf_hold = (value & 0xFFFFFF) << 8;
186 } 186 }
187#endif 187#endif
188 /* Realign the source */ 188 /* Realign the source */
diff --git a/arch/microblaze/lib/muldi3.S b/arch/microblaze/lib/muldi3.S
deleted file mode 100644
index ceeaa8c407f2..000000000000
--- a/arch/microblaze/lib/muldi3.S
+++ /dev/null
@@ -1,121 +0,0 @@
1#include <linux/linkage.h>
2
3/*
4 * Multiply operation for 64 bit integers, for devices with hard multiply
5 * Input : Operand1[H] in Reg r5
6 * Operand1[L] in Reg r6
7 * Operand2[H] in Reg r7
8 * Operand2[L] in Reg r8
9 * Output: Result[H] in Reg r3
10 * Result[L] in Reg r4
11 *
12 * Explaination:
13 *
14 * Both the input numbers are divided into 16 bit number as follows
15 * op1 = A B C D
16 * op2 = E F G H
17 * result = D * H
18 * + (C * H + D * G) << 16
19 * + (B * H + C * G + D * F) << 32
20 * + (A * H + B * G + C * F + D * E) << 48
21 *
22 * Only 64 bits of the output are considered
23 */
24
25 .text
26 .globl __muldi3
27 .type __muldi3, @function
28 .ent __muldi3
29
30__muldi3:
31 addi r1, r1, -40
32
33/* Save the input operands on the caller's stack */
34 swi r5, r1, 44
35 swi r6, r1, 48
36 swi r7, r1, 52
37 swi r8, r1, 56
38
39/* Store all the callee saved registers */
40 sw r20, r1, r0
41 swi r21, r1, 4
42 swi r22, r1, 8
43 swi r23, r1, 12
44 swi r24, r1, 16
45 swi r25, r1, 20
46 swi r26, r1, 24
47 swi r27, r1, 28
48
49/* Load all the 16 bit values for A thru H */
50 lhui r20, r1, 44 /* A */
51 lhui r21, r1, 46 /* B */
52 lhui r22, r1, 48 /* C */
53 lhui r23, r1, 50 /* D */
54 lhui r24, r1, 52 /* E */
55 lhui r25, r1, 54 /* F */
56 lhui r26, r1, 56 /* G */
57 lhui r27, r1, 58 /* H */
58
59/* D * H ==> LSB of the result on stack ==> Store1 */
60 mul r9, r23, r27
61 swi r9, r1, 36 /* Pos2 and Pos3 */
62
63/* Hi (Store1) + C * H + D * G ==> Store2 ==> Pos1 and Pos2 */
64/* Store the carry generated in position 2 for Pos 3 */
65 lhui r11, r1, 36 /* Pos2 */
66 mul r9, r22, r27 /* C * H */
67 mul r10, r23, r26 /* D * G */
68 add r9, r9, r10
69 addc r12, r0, r0
70 add r9, r9, r11
71 addc r12, r12, r0 /* Store the Carry */
72 shi r9, r1, 36 /* Store Pos2 */
73 swi r9, r1, 32
74 lhui r11, r1, 32
75 shi r11, r1, 34 /* Store Pos1 */
76
77/* Hi (Store2) + B * H + C * G + D * F ==> Store3 ==> Pos0 and Pos1 */
78 mul r9, r21, r27 /* B * H */
79 mul r10, r22, r26 /* C * G */
80 mul r7, r23, r25 /* D * F */
81 add r9, r9, r11
82 add r9, r9, r10
83 add r9, r9, r7
84 swi r9, r1, 32 /* Pos0 and Pos1 */
85
86/* Hi (Store3) + A * H + B * G + C * F + D * E ==> Store3 ==> Pos0 */
87 lhui r11, r1, 32 /* Pos0 */
88 mul r9, r20, r27 /* A * H */
89 mul r10, r21, r26 /* B * G */
90 mul r7, r22, r25 /* C * F */
91 mul r8, r23, r24 /* D * E */
92 add r9, r9, r11
93 add r9, r9, r10
94 add r9, r9, r7
95 add r9, r9, r8
96 sext16 r9, r9 /* Sign extend the MSB */
97 shi r9, r1, 32
98
99/* Move results to r3 and r4 */
100 lhui r3, r1, 32
101 add r3, r3, r12
102 shi r3, r1, 32
103 lwi r3, r1, 32 /* Hi Part */
104 lwi r4, r1, 36 /* Lo Part */
105
106/* Restore Callee saved registers */
107 lw r20, r1, r0
108 lwi r21, r1, 4
109 lwi r22, r1, 8
110 lwi r23, r1, 12
111 lwi r24, r1, 16
112 lwi r25, r1, 20
113 lwi r26, r1, 24
114 lwi r27, r1, 28
115
116/* Restore Frame and return */
117 rtsd r15, 8
118 addi r1, r1, 40
119
120.size __muldi3, . - __muldi3
121.end __muldi3
diff --git a/arch/microblaze/lib/muldi3.c b/arch/microblaze/lib/muldi3.c
new file mode 100644
index 000000000000..d4860e154d29
--- /dev/null
+++ b/arch/microblaze/lib/muldi3.c
@@ -0,0 +1,60 @@
1#include <linux/module.h>
2
3#include "libgcc.h"
4
5#define DWtype long long
6#define UWtype unsigned long
7#define UHWtype unsigned short
8
9#define W_TYPE_SIZE 32
10
11#define __ll_B ((UWtype) 1 << (W_TYPE_SIZE / 2))
12#define __ll_lowpart(t) ((UWtype) (t) & (__ll_B - 1))
13#define __ll_highpart(t) ((UWtype) (t) >> (W_TYPE_SIZE / 2))
14
15/* If we still don't have umul_ppmm, define it using plain C. */
16#if !defined(umul_ppmm)
17#define umul_ppmm(w1, w0, u, v) \
18 do { \
19 UWtype __x0, __x1, __x2, __x3; \
20 UHWtype __ul, __vl, __uh, __vh; \
21 \
22 __ul = __ll_lowpart(u); \
23 __uh = __ll_highpart(u); \
24 __vl = __ll_lowpart(v); \
25 __vh = __ll_highpart(v); \
26 \
27 __x0 = (UWtype) __ul * __vl; \
28 __x1 = (UWtype) __ul * __vh; \
29 __x2 = (UWtype) __uh * __vl; \
30 __x3 = (UWtype) __uh * __vh; \
31 \
32 __x1 += __ll_highpart(__x0); /* this can't give carry */\
33 __x1 += __x2; /* but this indeed can */ \
34 if (__x1 < __x2) /* did we get it? */ \
35 __x3 += __ll_B; /* yes, add it in the proper pos */ \
36 \
37 (w1) = __x3 + __ll_highpart(__x1); \
38 (w0) = __ll_lowpart(__x1) * __ll_B + __ll_lowpart(__x0);\
39 } while (0)
40#endif
41
42#if !defined(__umulsidi3)
43#define __umulsidi3(u, v) ({ \
44 DWunion __w; \
45 umul_ppmm(__w.s.high, __w.s.low, u, v); \
46 __w.ll; \
47 })
48#endif
49
50DWtype __muldi3(DWtype u, DWtype v)
51{
52 const DWunion uu = {.ll = u};
53 const DWunion vv = {.ll = v};
54 DWunion w = {.ll = __umulsidi3(uu.s.low, vv.s.low)};
55
56 w.s.high += ((UWtype) uu.s.low * (UWtype) vv.s.high
57 + (UWtype) uu.s.high * (UWtype) vv.s.low);
58
59 return w.ll;
60}
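
[Editor's note: the new muldi3.c above rebuilds a truncated 64-bit product from 32x32 partial products: a full-width multiply of the low words (umul_ppmm, itself built from 16-bit pieces) plus the two cross terms added into the high word. The sketch below models the same decomposition without the kernel's DWunion/libgcc.h, cheating only in that it uses the host's 64-bit multiply for the low product:]

#include <stdint.h>
#include <stdio.h>

static int64_t muldi3_model(int64_t u, int64_t v)
{
    uint32_t ul = (uint32_t)u, vl = (uint32_t)v;
    uint32_t uh = (uint32_t)((uint64_t)u >> 32);
    uint32_t vh = (uint32_t)((uint64_t)v >> 32);

    /* Full 64-bit product of the low words (what umul_ppmm computes)... */
    uint64_t w = (uint64_t)ul * vl;
    /* ...plus the cross terms shifted into the high word; uh*vh and any
     * carries past bit 63 are discarded, exactly as in a truncated
     * 64-bit multiplication. */
    w += ((uint64_t)(ul * vh + uh * vl)) << 32;
    return (int64_t)w;
}

int main(void)
{
    int64_t a = -123456789LL, b = 987654321LL;

    printf("%lld\n%lld\n", (long long)muldi3_model(a, b),
           (long long)(a * b));  /* both lines print the same value */
    return 0;
}
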
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 48fb4790bfec..959f38ccb9a7 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -20,6 +20,9 @@ config WORD_SIZE
20config ARCH_PHYS_ADDR_T_64BIT 20config ARCH_PHYS_ADDR_T_64BIT
21 def_bool PPC64 || PHYS_64BIT 21 def_bool PPC64 || PHYS_64BIT
22 22
23config ARCH_DMA_ADDR_T_64BIT
24 def_bool ARCH_PHYS_ADDR_T_64BIT
25
23config MMU 26config MMU
24 bool 27 bool
25 default y 28 default y
@@ -209,7 +212,7 @@ config ARCH_HIBERNATION_POSSIBLE
209config ARCH_SUSPEND_POSSIBLE 212config ARCH_SUSPEND_POSSIBLE
210 def_bool y 213 def_bool y
211 depends on ADB_PMU || PPC_EFIKA || PPC_LITE5200 || PPC_83xx || \ 214 depends on ADB_PMU || PPC_EFIKA || PPC_LITE5200 || PPC_83xx || \
212 PPC_85xx || PPC_86xx || PPC_PSERIES 215 PPC_85xx || PPC_86xx || PPC_PSERIES || 44x || 40x
213 216
214config PPC_DCR_NATIVE 217config PPC_DCR_NATIVE
215 bool 218 bool
@@ -595,13 +598,11 @@ config EXTRA_TARGETS
595 598
596 If unsure, leave blank 599 If unsure, leave blank
597 600
598if !44x || BROKEN
599config ARCH_WANTS_FREEZER_CONTROL 601config ARCH_WANTS_FREEZER_CONTROL
600 def_bool y 602 def_bool y
601 depends on ADB_PMU 603 depends on ADB_PMU
602 604
603source kernel/power/Kconfig 605source kernel/power/Kconfig
604endif
605 606
606config SECCOMP 607config SECCOMP
607 bool "Enable seccomp to safely compute untrusted bytecode" 608 bool "Enable seccomp to safely compute untrusted bytecode"
@@ -682,6 +683,15 @@ config FSL_PMC
682 Freescale MPC85xx/MPC86xx power management controller support 683 Freescale MPC85xx/MPC86xx power management controller support
683 (suspend/resume). For MPC83xx see platforms/83xx/suspend.c 684 (suspend/resume). For MPC83xx see platforms/83xx/suspend.c
684 685
686config PPC4xx_CPM
687 bool
688 default y
689 depends on SUSPEND && (44x || 40x)
690 help
691 PPC4xx Clock Power Management (CPM) support (suspend/resume).
692 It also enables support for two different idle states (idle-wait
693 and idle-doze).
694
685config 4xx_SOC 695config 4xx_SOC
686 bool 696 bool
687 697
diff --git a/arch/powerpc/boot/dts/canyonlands.dts b/arch/powerpc/boot/dts/canyonlands.dts
index a30370396250..5b27a4b74b79 100644
--- a/arch/powerpc/boot/dts/canyonlands.dts
+++ b/arch/powerpc/boot/dts/canyonlands.dts
@@ -105,6 +105,15 @@
105 dcr-reg = <0x00c 0x002>; 105 dcr-reg = <0x00c 0x002>;
106 }; 106 };
107 107
108 CPM0: cpm {
109 compatible = "ibm,cpm";
110 dcr-access-method = "native";
111 dcr-reg = <0x160 0x003>;
112 unused-units = <0x00000100>;
113 idle-doze = <0x02000000>;
114 standby = <0xfeff791d>;
115 };
116
108 L2C0: l2c { 117 L2C0: l2c {
109 compatible = "ibm,l2-cache-460ex", "ibm,l2-cache"; 118 compatible = "ibm,l2-cache-460ex", "ibm,l2-cache";
110 dcr-reg = <0x020 0x008 /* Internal SRAM DCR's */ 119 dcr-reg = <0x020 0x008 /* Internal SRAM DCR's */
@@ -270,28 +279,6 @@
270 interrupts = <0x1 0x4>; 279 interrupts = <0x1 0x4>;
271 }; 280 };
272 281
273 UART2: serial@ef600500 {
274 device_type = "serial";
275 compatible = "ns16550";
276 reg = <0xef600500 0x00000008>;
277 virtual-reg = <0xef600500>;
278 clock-frequency = <0>; /* Filled in by U-Boot */
279 current-speed = <0>; /* Filled in by U-Boot */
280 interrupt-parent = <&UIC1>;
281 interrupts = <28 0x4>;
282 };
283
284 UART3: serial@ef600600 {
285 device_type = "serial";
286 compatible = "ns16550";
287 reg = <0xef600600 0x00000008>;
288 virtual-reg = <0xef600600>;
289 clock-frequency = <0>; /* Filled in by U-Boot */
290 current-speed = <0>; /* Filled in by U-Boot */
291 interrupt-parent = <&UIC1>;
292 interrupts = <29 0x4>;
293 };
294
295 IIC0: i2c@ef600700 { 282 IIC0: i2c@ef600700 {
296 compatible = "ibm,iic-460ex", "ibm,iic"; 283 compatible = "ibm,iic-460ex", "ibm,iic";
297 reg = <0xef600700 0x00000014>; 284 reg = <0xef600700 0x00000014>;
diff --git a/arch/powerpc/boot/dts/kilauea.dts b/arch/powerpc/boot/dts/kilauea.dts
index 083e68eeaca4..89edb16649c3 100644
--- a/arch/powerpc/boot/dts/kilauea.dts
+++ b/arch/powerpc/boot/dts/kilauea.dts
@@ -82,6 +82,15 @@
82 interrupt-parent = <&UIC0>; 82 interrupt-parent = <&UIC0>;
83 }; 83 };
84 84
85 CPM0: cpm {
86 compatible = "ibm,cpm";
87 dcr-access-method = "native";
88 dcr-reg = <0x0b0 0x003>;
89 unused-units = <0x00000000>;
90 idle-doze = <0x02000000>;
91 standby = <0xe3e74800>;
92 };
93
85 plb { 94 plb {
86 compatible = "ibm,plb-405ex", "ibm,plb4"; 95 compatible = "ibm,plb-405ex", "ibm,plb4";
87 #address-cells = <1>; 96 #address-cells = <1>;
diff --git a/arch/powerpc/boot/dts/mpc8308_p1m.dts b/arch/powerpc/boot/dts/mpc8308_p1m.dts
index 05a76ccfd499..697b3f6b78bf 100644
--- a/arch/powerpc/boot/dts/mpc8308_p1m.dts
+++ b/arch/powerpc/boot/dts/mpc8308_p1m.dts
@@ -297,6 +297,14 @@
297 interrupt-parent = < &ipic >; 297 interrupt-parent = < &ipic >;
298 }; 298 };
299 299
300 dma@2c000 {
301 compatible = "fsl,mpc8308-dma", "fsl,mpc5121-dma";
302 reg = <0x2c000 0x1800>;
303 interrupts = <3 0x8
304 94 0x8>;
305 interrupt-parent = < &ipic >;
306 };
307
300 }; 308 };
301 309
302 pci0: pcie@e0009000 { 310 pci0: pcie@e0009000 {
diff --git a/arch/powerpc/boot/dts/mpc8308rdb.dts b/arch/powerpc/boot/dts/mpc8308rdb.dts
index a97eb2db5a18..d3db02f98ddd 100644
--- a/arch/powerpc/boot/dts/mpc8308rdb.dts
+++ b/arch/powerpc/boot/dts/mpc8308rdb.dts
@@ -265,6 +265,14 @@
265 interrupt-parent = < &ipic >; 265 interrupt-parent = < &ipic >;
266 }; 266 };
267 267
268 dma@2c000 {
269 compatible = "fsl,mpc8308-dma", "fsl,mpc5121-dma";
270 reg = <0x2c000 0x1800>;
271 interrupts = <3 0x8
272 94 0x8>;
273 interrupt-parent = < &ipic >;
274 };
275
268 }; 276 };
269 277
270 pci0: pcie@e0009000 { 278 pci0: pcie@e0009000 {
diff --git a/arch/powerpc/configs/40x/kilauea_defconfig b/arch/powerpc/configs/40x/kilauea_defconfig
index 4e19ee7ce4ee..34b8c1a1e752 100644
--- a/arch/powerpc/configs/40x/kilauea_defconfig
+++ b/arch/powerpc/configs/40x/kilauea_defconfig
@@ -12,6 +12,8 @@ CONFIG_MODULES=y
12CONFIG_MODULE_UNLOAD=y 12CONFIG_MODULE_UNLOAD=y
13# CONFIG_BLK_DEV_BSG is not set 13# CONFIG_BLK_DEV_BSG is not set
14CONFIG_KILAUEA=y 14CONFIG_KILAUEA=y
15CONFIG_NO_HZ=y
16CONFIG_HIGH_RES_TIMERS=y
15# CONFIG_WALNUT is not set 17# CONFIG_WALNUT is not set
16CONFIG_SPARSE_IRQ=y 18CONFIG_SPARSE_IRQ=y
17CONFIG_PCI=y 19CONFIG_PCI=y
@@ -42,6 +44,9 @@ CONFIG_MTD_PHYSMAP_OF=y
42CONFIG_MTD_NAND=y 44CONFIG_MTD_NAND=y
43CONFIG_MTD_NAND_NDFC=y 45CONFIG_MTD_NAND_NDFC=y
44CONFIG_PROC_DEVICETREE=y 46CONFIG_PROC_DEVICETREE=y
47CONFIG_PM=y
48CONFIG_SUSPEND=y
49CONFIG_PPC4xx_CPM=y
45CONFIG_BLK_DEV_RAM=y 50CONFIG_BLK_DEV_RAM=y
46CONFIG_BLK_DEV_RAM_SIZE=35000 51CONFIG_BLK_DEV_RAM_SIZE=35000
47# CONFIG_MISC_DEVICES is not set 52# CONFIG_MISC_DEVICES is not set
diff --git a/arch/powerpc/configs/44x/canyonlands_defconfig b/arch/powerpc/configs/44x/canyonlands_defconfig
index 45c64d818b2a..17e4dd98eed7 100644
--- a/arch/powerpc/configs/44x/canyonlands_defconfig
+++ b/arch/powerpc/configs/44x/canyonlands_defconfig
@@ -42,6 +42,9 @@ CONFIG_MTD_PHYSMAP_OF=y
42CONFIG_MTD_NAND=y 42CONFIG_MTD_NAND=y
43CONFIG_MTD_NAND_NDFC=y 43CONFIG_MTD_NAND_NDFC=y
44CONFIG_PROC_DEVICETREE=y 44CONFIG_PROC_DEVICETREE=y
45CONFIG_PM=y
46CONFIG_SUSPEND=y
47CONFIG_PPC4xx_CPM=y
45CONFIG_BLK_DEV_RAM=y 48CONFIG_BLK_DEV_RAM=y
46CONFIG_BLK_DEV_RAM_SIZE=35000 49CONFIG_BLK_DEV_RAM_SIZE=35000
47# CONFIG_MISC_DEVICES is not set 50# CONFIG_MISC_DEVICES is not set
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index 30964ae2d096..8a7e9314c68a 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -267,7 +267,16 @@ static __inline__ int fls64(__u64 x)
267#include <asm-generic/bitops/fls64.h> 267#include <asm-generic/bitops/fls64.h>
268#endif /* __powerpc64__ */ 268#endif /* __powerpc64__ */
269 269
270#ifdef CONFIG_PPC64
271unsigned int __arch_hweight8(unsigned int w);
272unsigned int __arch_hweight16(unsigned int w);
273unsigned int __arch_hweight32(unsigned int w);
274unsigned long __arch_hweight64(__u64 w);
275#include <asm-generic/bitops/const_hweight.h>
276#else
270#include <asm-generic/bitops/hweight.h> 277#include <asm-generic/bitops/hweight.h>
278#endif
279
271#include <asm-generic/bitops/find.h> 280#include <asm-generic/bitops/find.h>
272 281
273/* Little-endian versions */ 282/* Little-endian versions */
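
[Editor's note: the hunk above gives PPC64 its own hweight ("Hamming weight") implementations, paired with the CPU_FTR_POPCNTB/POPCNTD feature bits added in cputable.h below so the kernel can use the popcntb/popcntd instructions where available. A reference model of what any __arch_hweight32 must return:]

#include <stdio.h>

/* Kernighan's trick: each iteration clears the lowest set bit, so the
 * loop runs once per set bit - the value the hardware popcount forms
 * must also produce. */
static unsigned int hweight32_model(unsigned int w)
{
    unsigned int n = 0;

    while (w) {
        w &= w - 1;  /* clear the lowest set bit */
        n++;
    }
    return n;
}

int main(void)
{
    printf("%u\n", hweight32_model(0xf0f0f0f0));  /* 16 */
    return 0;
}
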
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index f3a1fdd9cf08..f0a211d96923 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -199,6 +199,8 @@ extern const char *powerpc_base_platform;
199#define CPU_FTR_UNALIGNED_LD_STD LONG_ASM_CONST(0x0080000000000000) 199#define CPU_FTR_UNALIGNED_LD_STD LONG_ASM_CONST(0x0080000000000000)
200#define CPU_FTR_ASYM_SMT LONG_ASM_CONST(0x0100000000000000) 200#define CPU_FTR_ASYM_SMT LONG_ASM_CONST(0x0100000000000000)
201#define CPU_FTR_STCX_CHECKS_ADDRESS LONG_ASM_CONST(0x0200000000000000) 201#define CPU_FTR_STCX_CHECKS_ADDRESS LONG_ASM_CONST(0x0200000000000000)
202#define CPU_FTR_POPCNTB LONG_ASM_CONST(0x0400000000000000)
203#define CPU_FTR_POPCNTD LONG_ASM_CONST(0x0800000000000000)
202 204
203#ifndef __ASSEMBLY__ 205#ifndef __ASSEMBLY__
204 206
@@ -403,21 +405,22 @@ extern const char *powerpc_base_platform;
403 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ 405 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
404 CPU_FTR_MMCRA | CPU_FTR_SMT | \ 406 CPU_FTR_MMCRA | CPU_FTR_SMT | \
405 CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \ 407 CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
406 CPU_FTR_PURR | CPU_FTR_STCX_CHECKS_ADDRESS) 408 CPU_FTR_PURR | CPU_FTR_STCX_CHECKS_ADDRESS | \
409 CPU_FTR_POPCNTB)
407#define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ 410#define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
408 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ 411 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
409 CPU_FTR_MMCRA | CPU_FTR_SMT | \ 412 CPU_FTR_MMCRA | CPU_FTR_SMT | \
410 CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \ 413 CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
411 CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ 414 CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
412 CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD | \ 415 CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD | \
413 CPU_FTR_STCX_CHECKS_ADDRESS) 416 CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB)
414#define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ 417#define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
415 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ 418 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
416 CPU_FTR_MMCRA | CPU_FTR_SMT | \ 419 CPU_FTR_MMCRA | CPU_FTR_SMT | \
417 CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \ 420 CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
418 CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ 421 CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
419 CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \ 422 CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \
420 CPU_FTR_STCX_CHECKS_ADDRESS) 423 CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD)
421#define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ 424#define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
422 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ 425 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
423 CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \ 426 CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h
index a8e18447c62b..f71bb4c118b4 100644
--- a/arch/powerpc/include/asm/cputhreads.h
+++ b/arch/powerpc/include/asm/cputhreads.h
@@ -61,22 +61,25 @@ static inline cpumask_t cpu_online_cores_map(void)
61 return cpu_thread_mask_to_cores(cpu_online_map); 61 return cpu_thread_mask_to_cores(cpu_online_map);
62} 62}
63 63
64static inline int cpu_thread_to_core(int cpu) 64#ifdef CONFIG_SMP
65{ 65int cpu_core_index_of_thread(int cpu);
66 return cpu >> threads_shift; 66int cpu_first_thread_of_core(int core);
67} 67#else
68static inline int cpu_core_index_of_thread(int cpu) { return cpu; }
69static inline int cpu_first_thread_of_core(int core) { return core; }
70#endif
68 71
69static inline int cpu_thread_in_core(int cpu) 72static inline int cpu_thread_in_core(int cpu)
70{ 73{
71 return cpu & (threads_per_core - 1); 74 return cpu & (threads_per_core - 1);
72} 75}
73 76
74static inline int cpu_first_thread_in_core(int cpu) 77static inline int cpu_first_thread_sibling(int cpu)
75{ 78{
76 return cpu & ~(threads_per_core - 1); 79 return cpu & ~(threads_per_core - 1);
77} 80}
78 81
79static inline int cpu_last_thread_in_core(int cpu) 82static inline int cpu_last_thread_sibling(int cpu)
80{ 83{
81 return cpu | (threads_per_core - 1); 84 return cpu | (threads_per_core - 1);
82} 85}
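
[Editor's note: the cputhreads.h hunk renames the helpers to say what they return (siblings are threads in the same core's group) and moves the thread-to-core-index mapping out of line under SMP, where sparse thread numbering means a plain shift is no longer enough. A worked example of the remaining mask arithmetic, with threads_per_core fixed at 4 for the demo (the kernel reads it from the device tree):]

#include <stdio.h>

static const int threads_per_core = 4;

static int thread_in_core(int cpu)       { return cpu & (threads_per_core - 1); }
static int first_thread_sibling(int cpu) { return cpu & ~(threads_per_core - 1); }
static int last_thread_sibling(int cpu)  { return cpu | (threads_per_core - 1); }

int main(void)
{
    int cpu = 6;  /* third thread of core 1 when threads are dense */

    printf("%d %d %d\n", thread_in_core(cpu),
           first_thread_sibling(cpu), last_thread_sibling(cpu));  /* 2 4 7 */
    return 0;
}
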
diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
index a3954e4fcbe2..16d25c0974be 100644
--- a/arch/powerpc/include/asm/device.h
+++ b/arch/powerpc/include/asm/device.h
@@ -9,6 +9,12 @@
9struct dma_map_ops; 9struct dma_map_ops;
10struct device_node; 10struct device_node;
11 11
12/*
13 * Arch extensions to struct device.
14 *
15 * When adding fields, consider macio_add_one_device in
16 * drivers/macintosh/macio_asic.c
17 */
12struct dev_archdata { 18struct dev_archdata {
13 /* DMA operations on that device */ 19 /* DMA operations on that device */
14 struct dma_map_ops *dma_ops; 20 struct dma_map_ops *dma_ops;
diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h
index 20778a405d7a..4ef662e4a31d 100644
--- a/arch/powerpc/include/asm/firmware.h
+++ b/arch/powerpc/include/asm/firmware.h
@@ -46,6 +46,7 @@
46#define FW_FEATURE_PS3_LV1 ASM_CONST(0x0000000000800000) 46#define FW_FEATURE_PS3_LV1 ASM_CONST(0x0000000000800000)
47#define FW_FEATURE_BEAT ASM_CONST(0x0000000001000000) 47#define FW_FEATURE_BEAT ASM_CONST(0x0000000001000000)
48#define FW_FEATURE_CMO ASM_CONST(0x0000000002000000) 48#define FW_FEATURE_CMO ASM_CONST(0x0000000002000000)
49#define FW_FEATURE_VPHN ASM_CONST(0x0000000004000000)
49 50
50#ifndef __ASSEMBLY__ 51#ifndef __ASSEMBLY__
51 52
@@ -59,7 +60,7 @@ enum {
59 FW_FEATURE_VIO | FW_FEATURE_RDMA | FW_FEATURE_LLAN | 60 FW_FEATURE_VIO | FW_FEATURE_RDMA | FW_FEATURE_LLAN |
60 FW_FEATURE_BULK_REMOVE | FW_FEATURE_XDABR | 61 FW_FEATURE_BULK_REMOVE | FW_FEATURE_XDABR |
61 FW_FEATURE_MULTITCE | FW_FEATURE_SPLPAR | FW_FEATURE_LPAR | 62 FW_FEATURE_MULTITCE | FW_FEATURE_SPLPAR | FW_FEATURE_LPAR |
62 FW_FEATURE_CMO, 63 FW_FEATURE_CMO | FW_FEATURE_VPHN,
63 FW_FEATURE_PSERIES_ALWAYS = 0, 64 FW_FEATURE_PSERIES_ALWAYS = 0,
64 FW_FEATURE_ISERIES_POSSIBLE = FW_FEATURE_ISERIES | FW_FEATURE_LPAR, 65 FW_FEATURE_ISERIES_POSSIBLE = FW_FEATURE_ISERIES | FW_FEATURE_LPAR,
65 FW_FEATURE_ISERIES_ALWAYS = FW_FEATURE_ISERIES | FW_FEATURE_LPAR, 66 FW_FEATURE_ISERIES_ALWAYS = FW_FEATURE_ISERIES | FW_FEATURE_LPAR,
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index de03ca58db5d..ec089acfa56b 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -232,7 +232,9 @@
232#define H_GET_EM_PARMS 0x2B8 232#define H_GET_EM_PARMS 0x2B8
233#define H_SET_MPP 0x2D0 233#define H_SET_MPP 0x2D0
234#define H_GET_MPP 0x2D4 234#define H_GET_MPP 0x2D4
235#define MAX_HCALL_OPCODE H_GET_MPP 235#define H_HOME_NODE_ASSOCIATIVITY 0x2EC
236#define H_BEST_ENERGY 0x2F4
237#define MAX_HCALL_OPCODE H_BEST_ENERGY
236 238
237#ifndef __ASSEMBLY__ 239#ifndef __ASSEMBLY__
238 240
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index 7f5e0fefebb0..380d48bacd16 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -62,7 +62,10 @@ struct lppaca {
62 volatile u32 dyn_pir; // Dynamic ProcIdReg value x20-x23 62 volatile u32 dyn_pir; // Dynamic ProcIdReg value x20-x23
63 u32 dsei_data; // DSEI data x24-x27 63 u32 dsei_data; // DSEI data x24-x27
64 u64 sprg3; // SPRG3 value x28-x2F 64 u64 sprg3; // SPRG3 value x28-x2F
65 u8 reserved3[80]; // Reserved x30-x7F 65 u8 reserved3[40]; // Reserved x30-x57
66 volatile u8 vphn_assoc_counts[8]; // Virtual processor home node
67 // associativity change counters x58-x5F
68 u8 reserved4[32]; // Reserved x60-x7F
66 69
67//============================================================================= 70//=============================================================================
68// CACHE_LINE_2 0x0080 - 0x00FF Contains local read-write data 71// CACHE_LINE_2 0x0080 - 0x00FF Contains local read-write data
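
[Editor's note: the lppaca hunk above carves the VPHN associativity-change counters out of a reserved region without moving anything else; since the offsets (x30-x7F) are the hypervisor ABI, the replacement fields must cover exactly the same bytes. A toy model of that layout-preserving split, with a compile-time size check:]

#include <stddef.h>

struct lppaca_old {
    unsigned char reserved3[80];                  /* x30-x7F */
};

struct lppaca_new {
    unsigned char reserved3[40];                  /* x30-x57 */
    volatile unsigned char vphn_assoc_counts[8];  /* x58-x5F */
    unsigned char reserved4[32];                  /* x60-x7F */
};

/* 40 + 8 + 32 == 80, so the struct size and every following offset are
 * unchanged; a negative array size would make this fail to compile. */
typedef char size_check[sizeof(struct lppaca_new) ==
                        sizeof(struct lppaca_old) ? 1 : -1];
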
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index d045b0145537..8433d36619a1 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -27,9 +27,7 @@ struct iommu_table;
 struct rtc_time;
 struct file;
 struct pci_controller;
-#ifdef CONFIG_KEXEC
 struct kimage;
-#endif
 
 #ifdef CONFIG_SMP
 struct smp_ops_t {
@@ -72,7 +70,7 @@ struct machdep_calls {
                                        int psize, int ssize);
         void (*flush_hash_range)(unsigned long number, int local);
 
-        /* special for kexec, to be called in real mode, linar mapping is
+        /* special for kexec, to be called in real mode, linear mapping is
          * destroyed as well */
         void (*hpte_clear_all)(void);
 
@@ -324,8 +322,6 @@ extern sys_ctrler_t sys_ctrler;
 
 #endif /* CONFIG_PPC_PMAC */
 
-extern void setup_pci_ptrs(void);
-
 #ifdef CONFIG_SMP
 /* Poor default implementations */
 extern void __devinit smp_generic_give_timebase(void);
diff --git a/arch/powerpc/include/asm/mmzone.h b/arch/powerpc/include/asm/mmzone.h
index aac87cbceb57..fd3fd58bad84 100644
--- a/arch/powerpc/include/asm/mmzone.h
+++ b/arch/powerpc/include/asm/mmzone.h
@@ -33,6 +33,9 @@ extern int numa_cpu_lookup_table[];
 extern cpumask_var_t node_to_cpumask_map[];
 #ifdef CONFIG_MEMORY_HOTPLUG
 extern unsigned long max_pfn;
+u64 memory_hotplug_max(void);
+#else
+#define memory_hotplug_max() memblock_end_of_DRAM()
 #endif
 
 /*
@@ -42,6 +45,8 @@ extern unsigned long max_pfn;
 #define node_start_pfn(nid)     (NODE_DATA(nid)->node_start_pfn)
 #define node_end_pfn(nid)       (NODE_DATA(nid)->node_end_pfn)
 
+#else
+#define memory_hotplug_max() memblock_end_of_DRAM()
 #endif /* CONFIG_NEED_MULTIPLE_NODES */
 
 #endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/nvram.h b/arch/powerpc/include/asm/nvram.h
index 850b72f27445..92efe67d1c57 100644
--- a/arch/powerpc/include/asm/nvram.h
+++ b/arch/powerpc/include/asm/nvram.h
@@ -10,31 +10,7 @@
 #ifndef _ASM_POWERPC_NVRAM_H
 #define _ASM_POWERPC_NVRAM_H
 
-#include <linux/errno.h>
-
-#define NVRW_CNT 0x20
-#define NVRAM_HEADER_LEN 16 /* sizeof(struct nvram_header) */
-#define NVRAM_BLOCK_LEN 16
-#define NVRAM_MAX_REQ (2080/NVRAM_BLOCK_LEN)
-#define NVRAM_MIN_REQ (1056/NVRAM_BLOCK_LEN)
-
-#define NVRAM_AS0  0x74
-#define NVRAM_AS1  0x75
-#define NVRAM_DATA 0x77
-
-
-/* RTC Offsets */
-
-#define MOTO_RTC_SECONDS        0x1FF9
-#define MOTO_RTC_MINUTES        0x1FFA
-#define MOTO_RTC_HOURS          0x1FFB
-#define MOTO_RTC_DAY_OF_WEEK    0x1FFC
-#define MOTO_RTC_DAY_OF_MONTH   0x1FFD
-#define MOTO_RTC_MONTH          0x1FFE
-#define MOTO_RTC_YEAR           0x1FFF
-#define MOTO_RTC_CONTROLA       0x1FF8
-#define MOTO_RTC_CONTROLB       0x1FF9
-
+/* Signatures for nvram partitions */
 #define NVRAM_SIG_SP    0x02    /* support processor */
 #define NVRAM_SIG_OF    0x50    /* open firmware config */
 #define NVRAM_SIG_FW    0x51    /* general firmware */
@@ -49,32 +25,19 @@
 #define NVRAM_SIG_OS    0xa0    /* OS defined */
 #define NVRAM_SIG_PANIC 0xa1    /* Apple OSX "panic" */
 
-/* If change this size, then change the size of NVNAME_LEN */
-struct nvram_header {
-        unsigned char signature;
-        unsigned char checksum;
-        unsigned short length;
-        char name[12];
-};
-
 #ifdef __KERNEL__
 
+#include <linux/errno.h>
 #include <linux/list.h>
 
-struct nvram_partition {
-        struct list_head partition;
-        struct nvram_header header;
-        unsigned int index;
-};
-
-
+#ifdef CONFIG_PPC_PSERIES
 extern int nvram_write_error_log(char * buff, int length,
                                  unsigned int err_type, unsigned int err_seq);
 extern int nvram_read_error_log(char * buff, int length,
                                 unsigned int * err_type, unsigned int *err_seq);
 extern int nvram_clear_error_log(void);
-
 extern int pSeries_nvram_init(void);
+#endif /* CONFIG_PPC_PSERIES */
 
 #ifdef CONFIG_MMIO_NVRAM
 extern int mmio_nvram_init(void);
@@ -85,6 +48,13 @@ static inline int mmio_nvram_init(void)
 }
 #endif
 
+extern int __init nvram_scan_partitions(void);
+extern loff_t nvram_create_partition(const char *name, int sig,
+                                     int req_size, int min_size);
+extern int nvram_remove_partition(const char *name, int sig);
+extern int nvram_get_partition_size(loff_t data_index);
+extern loff_t nvram_find_partition(const char *name, int sig, int *out_size);
+
 #endif /* __KERNEL__ */
 
 /* PowerMac specific nvram stuffs */
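With the private details moved out, the header now carries only the partition API that nvram_64.c implements below. A hedged sketch of how platform code might consume it; the partition name and the 2080/1056 byte sizes are illustrative (they echo the old NVRAM_MAX_REQ/NVRAM_MIN_REQ byte counts), and example_nvram_setup() is hypothetical:

    /* Sketch: reserve, or reuse, an OS partition for error logging. */
    #include <asm/nvram.h>

    static int __init example_nvram_setup(void)
    {
            loff_t index;
            int size;

            /* Reuse an existing partition if one matches name and sig... */
            index = nvram_find_partition("ppc64,linux", NVRAM_SIG_OS, &size);
            if (index == 0) {
                    /* ...else carve one from free space (0 = not found). */
                    index = nvram_create_partition("ppc64,linux",
                                                   NVRAM_SIG_OS, 2080, 1056);
                    if (index < 0)
                            return index;
                    size = nvram_get_partition_size(index);
            }
            /* 'index' is the nvram offset of the data area; 'size' its length. */
            return 0;
    }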
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 43adc8b819ed..1255569387b6 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -36,6 +36,8 @@
 #define PPC_INST_NOP                    0x60000000
 #define PPC_INST_POPCNTB                0x7c0000f4
 #define PPC_INST_POPCNTB_MASK           0xfc0007fe
+#define PPC_INST_POPCNTD                0x7c0003f4
+#define PPC_INST_POPCNTW                0x7c0002f4
 #define PPC_INST_RFCI                   0x4c000066
 #define PPC_INST_RFDI                   0x4c00004e
 #define PPC_INST_RFMCI                  0x4c00004c
@@ -88,6 +90,12 @@
                                         __PPC_RB(b) | __PPC_EH(eh))
 #define PPC_MSGSND(b)           stringify_in_c(.long PPC_INST_MSGSND | \
                                         __PPC_RB(b))
+#define PPC_POPCNTB(a, s)       stringify_in_c(.long PPC_INST_POPCNTB | \
+                                        __PPC_RA(a) | __PPC_RS(s))
+#define PPC_POPCNTD(a, s)       stringify_in_c(.long PPC_INST_POPCNTD | \
+                                        __PPC_RA(a) | __PPC_RS(s))
+#define PPC_POPCNTW(a, s)       stringify_in_c(.long PPC_INST_POPCNTW | \
+                                        __PPC_RA(a) | __PPC_RS(s))
 #define PPC_RFCI                stringify_in_c(.long PPC_INST_RFCI)
 #define PPC_RFDI                stringify_in_c(.long PPC_INST_RFDI)
 #define PPC_RFMCI               stringify_in_c(.long PPC_INST_RFMCI)
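Assemblers that predate POWER7 may reject the popcntd/popcntw mnemonics, so the macros above hand-assemble the instructions as .long words; the (a, s) operands are raw register numbers passed through __PPC_RA()/__PPC_RS(). A sketch of use from C, pinning the value to r3 so the hard-coded register number matches; real users (the __arch_hweight* routines exported later in this series) would also guard with CPU feature sections, omitted here:

    /* Sketch: popcntd via the hand-assembled encoding, result in r3. */
    static inline unsigned long example_popcntd(unsigned long w)
    {
            register unsigned long val asm("r3") = w;

            /* PPC_POPCNTD(RA=3, RS=3): count set bits of r3 into r3 */
            asm (PPC_POPCNTD(3, 3) : "+r" (val));
            return val;
    }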
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 4c14187ba02d..de1967a1ff57 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -122,7 +122,6 @@ extern struct task_struct *last_task_used_spe;
                 TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 )
 #endif
 
-#ifdef __KERNEL__
 #ifdef __powerpc64__
 
 #define STACK_TOP_USER64 TASK_SIZE_USER64
@@ -139,7 +138,6 @@ extern struct task_struct *last_task_used_spe;
 #define STACK_TOP_MAX STACK_TOP
 
 #endif /* __powerpc64__ */
-#endif /* __KERNEL__ */
 
 typedef struct {
         unsigned long seg;
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index afe4aaa65c3b..7ef0d90defc8 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -106,9 +106,22 @@ static inline void sysfs_remove_device_from_node(struct sys_device *dev,
                                           int nid)
 {
 }
-
 #endif /* CONFIG_NUMA */
 
+#if defined(CONFIG_NUMA) && defined(CONFIG_PPC_SPLPAR)
+extern int start_topology_update(void);
+extern int stop_topology_update(void);
+#else
+static inline int start_topology_update(void)
+{
+        return 0;
+}
+static inline int stop_topology_update(void)
+{
+        return 0;
+}
+#endif /* CONFIG_NUMA && CONFIG_PPC_SPLPAR */
+
 #include <asm-generic/topology.h>
 
 #ifdef CONFIG_SMP
diff --git a/arch/powerpc/include/asm/vdso_datapage.h b/arch/powerpc/include/asm/vdso_datapage.h
index 08679c5319b8..25e39220e89c 100644
--- a/arch/powerpc/include/asm/vdso_datapage.h
+++ b/arch/powerpc/include/asm/vdso_datapage.h
@@ -116,9 +116,7 @@ struct vdso_data {
 
 #endif /* CONFIG_PPC64 */
 
-#ifdef __KERNEL__
 extern struct vdso_data *vdso_data;
-#endif
 
 #endif /* __ASSEMBLY__ */
 
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 36c30f31ec93..3bb2a3e6a337 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -29,8 +29,10 @@ endif
 obj-y                           := cputable.o ptrace.o syscalls.o \
                                    irq.o align.o signal_32.o pmc.o vdso.o \
                                    init_task.o process.o systbl.o idle.o \
-                                   signal.o sysfs.o cacheinfo.o
-obj-y                           += vdso32/
+                                   signal.o sysfs.o cacheinfo.o time.o \
+                                   prom.o traps.o setup-common.o \
+                                   udbg.o misc.o io.o dma.o \
+                                   misc_$(CONFIG_WORD_SIZE).o vdso32/
 obj-$(CONFIG_PPC64)             += setup_64.o sys_ppc32.o \
                                    signal_64.o ptrace32.o \
                                    paca.o nvram_64.o firmware.o
@@ -80,9 +82,6 @@ extra-$(CONFIG_FSL_BOOKE)	:= head_fsl_booke.o
 extra-$(CONFIG_8xx)             := head_8xx.o
 extra-y                         += vmlinux.lds
 
-obj-y                           += time.o prom.o traps.o setup-common.o \
-                                   udbg.o misc.o io.o dma.o \
-                                   misc_$(CONFIG_WORD_SIZE).o
 obj-$(CONFIG_PPC32)             += entry_32.o setup_32.o
 obj-$(CONFIG_PPC64)             += dma-iommu.o iommu.o
 obj-$(CONFIG_KGDB)              += kgdb.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index bd0df2e6aa8f..23e6a93145ab 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -209,7 +209,6 @@ int main(void)
         DEFINE(RTASENTRY, offsetof(struct rtas_t, entry));
 
         /* Interrupt register frame */
-        DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD);
         DEFINE(INT_FRAME_SIZE, STACK_INT_FRAME_SIZE);
         DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
 #ifdef CONFIG_PPC64
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 96a908f1cd87..be5ab18b03b5 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -457,16 +457,26 @@ static struct cpu_spec __initdata cpu_specs[] = {
                 .dcache_bsize           = 128,
                 .num_pmcs               = 6,
                 .pmc_type               = PPC_PMC_IBM,
-                .cpu_setup              = __setup_cpu_power7,
-                .cpu_restore            = __restore_cpu_power7,
                 .oprofile_cpu_type      = "ppc64/power7",
                 .oprofile_type          = PPC_OPROFILE_POWER4,
-                .oprofile_mmcra_sihv    = POWER6_MMCRA_SIHV,
-                .oprofile_mmcra_sipr    = POWER6_MMCRA_SIPR,
-                .oprofile_mmcra_clear   = POWER6_MMCRA_THRM |
-                        POWER6_MMCRA_OTHER,
                 .platform               = "power7",
         },
+        {       /* Power7+ */
+                .pvr_mask               = 0xffff0000,
+                .pvr_value              = 0x004A0000,
+                .cpu_name               = "POWER7+ (raw)",
+                .cpu_features           = CPU_FTRS_POWER7,
+                .cpu_user_features      = COMMON_USER_POWER7,
+                .mmu_features           = MMU_FTR_HPTE_TABLE |
+                                          MMU_FTR_TLBIE_206,
+                .icache_bsize           = 128,
+                .dcache_bsize           = 128,
+                .num_pmcs               = 6,
+                .pmc_type               = PPC_PMC_IBM,
+                .oprofile_cpu_type      = "ppc64/power7",
+                .oprofile_type          = PPC_OPROFILE_POWER4,
+                .platform               = "power7+",
+        },
         {       /* Cell Broadband Engine */
                 .pvr_mask               = 0xffff0000,
                 .pvr_value              = 0x00700000,
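The new entry is selected the same way as every other row of cpu_specs[]: the processor version register is masked and compared, so any PVR of the form 0x004Axxxx lands on the POWER7+ entry. A one-line restatement of the match rule; the concrete PVR in the comment is hypothetical:

    /* e.g. (0x004A0201 & 0xffff0000) == 0x004A0000 -> "POWER7+ (raw)" */
    static int example_pvr_matches(unsigned int pvr, const struct cpu_spec *s)
    {
            return (pvr & s->pvr_mask) == s->pvr_value;
    }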
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 8e05c16344e4..0a2af50243cb 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -19,6 +19,7 @@
 #include <asm/prom.h>
 #include <asm/firmware.h>
 #include <asm/uaccess.h>
+#include <asm/rtas.h>
 
 #ifdef DEBUG
 #include <asm/udbg.h>
@@ -141,3 +142,35 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 
         return csize;
 }
+
+#ifdef CONFIG_PPC_RTAS
+/*
+ * The crashkernel region will almost always overlap the RTAS region, so
+ * we have to be careful when shrinking the crashkernel region.
+ */
+void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
+{
+        unsigned long addr;
+        const u32 *basep, *sizep;
+        unsigned int rtas_start = 0, rtas_end = 0;
+
+        basep = of_get_property(rtas.dev, "linux,rtas-base", NULL);
+        sizep = of_get_property(rtas.dev, "rtas-size", NULL);
+
+        if (basep && sizep) {
+                rtas_start = *basep;
+                rtas_end = *basep + *sizep;
+        }
+
+        for (addr = begin; addr < end; addr += PAGE_SIZE) {
+                /* Does this page overlap with the RTAS region? */
+                if (addr <= rtas_end && ((addr + PAGE_SIZE) > rtas_start))
+                        continue;
+
+                ClearPageReserved(pfn_to_page(addr >> PAGE_SHIFT));
+                init_page_count(pfn_to_page(addr >> PAGE_SHIFT));
+                free_page((unsigned long)__va(addr));
+                totalram_pages++;
+        }
+}
+#endif
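The skip test in the loop above is an interval-overlap check between the page [addr, addr + PAGE_SIZE) and the RTAS region, written with <= so a page that merely touches rtas_end is also preserved. Restated standalone, with names local to this sketch:

    #include <linux/types.h>

    /* Does [a, a + len) overlap [b_start, b_end)?  The loop above uses
     * the same shape, erring on the side of keeping a boundary page. */
    static bool example_ranges_overlap(unsigned long a, unsigned long len,
                                       unsigned long b_start,
                                       unsigned long b_end)
    {
            return a <= b_end && (a + len) > b_start;
    }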
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index 6e54a0fd31aa..e7554154a6de 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -19,7 +19,7 @@ static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
19 dma_addr_t *dma_handle, gfp_t flag) 19 dma_addr_t *dma_handle, gfp_t flag)
20{ 20{
21 return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size, 21 return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
22 dma_handle, device_to_mask(dev), flag, 22 dma_handle, dev->coherent_dma_mask, flag,
23 dev_to_node(dev)); 23 dev_to_node(dev));
24} 24}
25 25
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index ed4aeb96398b..c22dc1ec1c94 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -31,6 +31,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/unistd.h>
 #include <asm/ftrace.h>
+#include <asm/ptrace.h>
 
 #undef SHOW_SYSCALLS
 #undef SHOW_SYSCALLS_TASK
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 9f8b01d6466f..8a817995b4cd 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -13,6 +13,7 @@
  */
 
 #include <asm/exception-64s.h>
+#include <asm/ptrace.h>
 
 /*
  * We layout physical memory as follows:
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index e86c040ae585..de369558bf0a 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -23,6 +23,7 @@
 #include <asm/thread_info.h>
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
+#include <asm/ptrace.h>
 
 #ifdef CONFIG_VSX
 #define REST_32FPVSRS(n,c,base)                                         \
diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S
index 8278e8bad5a0..9dd21a8c4d52 100644
--- a/arch/powerpc/kernel/head_40x.S
+++ b/arch/powerpc/kernel/head_40x.S
@@ -40,6 +40,7 @@
 #include <asm/thread_info.h>
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
+#include <asm/ptrace.h>
 
 /* As with the other PowerPC ports, it is expected that when code
  * execution begins here, the following registers contain valid, yet
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index 562305b40a8e..cbb3436b592d 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -37,6 +37,7 @@
 #include <asm/thread_info.h>
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
+#include <asm/ptrace.h>
 #include <asm/synch.h>
 #include "head_booke.h"
 
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index f0dd577e4a5b..782f23df7c85 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -38,6 +38,7 @@
 #include <asm/page_64.h>
 #include <asm/irqflags.h>
 #include <asm/kvm_book3s_asm.h>
+#include <asm/ptrace.h>
 
 /* The physical memory is layed out such that the secondary processor
  * spin code sits at 0x0000...0x00ff. On server, the vectors follow
@@ -96,7 +97,7 @@ __secondary_hold_acknowledge:
         .llong hvReleaseData-KERNELBASE
 #endif /* CONFIG_PPC_ISERIES */
 
-#ifdef CONFIG_CRASH_DUMP
+#ifdef CONFIG_RELOCATABLE
         /* This flag is set to 1 by a loader if the kernel should run
          * at the loaded address instead of the linked address. This
          * is used by kexec-tools to keep the the kdump kernel in the
@@ -384,12 +385,10 @@ _STATIC(__after_prom_start)
         /* process relocations for the final address of the kernel */
         lis     r25,PAGE_OFFSET@highest /* compute virtual base of kernel */
         sldi    r25,r25,32
-#ifdef CONFIG_CRASH_DUMP
         lwz     r7,__run_at_load-_stext(r26)
-        cmplwi  cr0,r7,1        /* kdump kernel ? - stay where we are */
+        cmplwi  cr0,r7,1        /* flagged to stay where we are ? */
         bne     1f
         add     r25,r25,r26
-#endif
 1:      mr      r3,r25
         bl      .relocate
 #endif
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 1f1a04b5c2a4..1cbf64e6b416 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -29,6 +29,7 @@
 #include <asm/thread_info.h>
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
+#include <asm/ptrace.h>
 
 /* Macro to make the code more readable. */
 #ifdef CONFIG_8xx_CPU6
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 529b817f473b..3e02710d9562 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -41,6 +41,7 @@
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
 #include <asm/cache.h>
+#include <asm/ptrace.h>
 #include "head_booke.h"
 
 /* As with the other PowerPC ports, it is expected that when code
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index d5839179ec77..961bb03413f3 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -311,8 +311,9 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
311 /* Handle failure */ 311 /* Handle failure */
312 if (unlikely(entry == DMA_ERROR_CODE)) { 312 if (unlikely(entry == DMA_ERROR_CODE)) {
313 if (printk_ratelimit()) 313 if (printk_ratelimit())
314 printk(KERN_INFO "iommu_alloc failed, tbl %p vaddr %lx" 314 dev_info(dev, "iommu_alloc failed, tbl %p "
315 " npages %lx\n", tbl, vaddr, npages); 315 "vaddr %lx npages %lu\n", tbl, vaddr,
316 npages);
316 goto failure; 317 goto failure;
317 } 318 }
318 319
@@ -579,9 +580,9 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
579 attrs); 580 attrs);
580 if (dma_handle == DMA_ERROR_CODE) { 581 if (dma_handle == DMA_ERROR_CODE) {
581 if (printk_ratelimit()) { 582 if (printk_ratelimit()) {
582 printk(KERN_INFO "iommu_alloc failed, " 583 dev_info(dev, "iommu_alloc failed, tbl %p "
583 "tbl %p vaddr %p npages %d\n", 584 "vaddr %p npages %d\n", tbl, vaddr,
584 tbl, vaddr, npages); 585 npages);
585 } 586 }
586 } else 587 } else
587 dma_handle |= (uaddr & ~IOMMU_PAGE_MASK); 588 dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
@@ -627,7 +628,8 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
627 * the tce tables. 628 * the tce tables.
628 */ 629 */
629 if (order >= IOMAP_MAX_ORDER) { 630 if (order >= IOMAP_MAX_ORDER) {
630 printk("iommu_alloc_consistent size too large: 0x%lx\n", size); 631 dev_info(dev, "iommu_alloc_consistent size too large: 0x%lx\n",
632 size);
631 return NULL; 633 return NULL;
632 } 634 }
633 635
diff --git a/arch/powerpc/kernel/misc.S b/arch/powerpc/kernel/misc.S
index 2d29752cbe16..b69463ec2010 100644
--- a/arch/powerpc/kernel/misc.S
+++ b/arch/powerpc/kernel/misc.S
@@ -122,8 +122,3 @@ _GLOBAL(longjmp)
122 mtlr r0 122 mtlr r0
123 mr r3,r4 123 mr r3,r4
124 blr 124 blr
125
126_GLOBAL(__setup_cpu_power7)
127_GLOBAL(__restore_cpu_power7)
128 /* place holder */
129 blr
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index a7a570dcdd57..094bd9821ad4 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -30,6 +30,7 @@
 #include <asm/processor.h>
 #include <asm/kexec.h>
 #include <asm/bug.h>
+#include <asm/ptrace.h>
 
         .text
 
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index e5144906a56d..206a321a71d3 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -25,6 +25,7 @@
 #include <asm/cputable.h>
 #include <asm/thread_info.h>
 #include <asm/kexec.h>
+#include <asm/ptrace.h>
 
         .text
 
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index 9cf197f01e94..bb12b3248f13 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -34,15 +34,26 @@
 
 #undef DEBUG_NVRAM
 
-static struct nvram_partition * nvram_part;
-static long nvram_error_log_index = -1;
-static long nvram_error_log_size = 0;
+#define NVRAM_HEADER_LEN        sizeof(struct nvram_header)
+#define NVRAM_BLOCK_LEN         NVRAM_HEADER_LEN
+
+/* If change this size, then change the size of NVNAME_LEN */
+struct nvram_header {
+        unsigned char signature;
+        unsigned char checksum;
+        unsigned short length;
+        /* Terminating null required only for names < 12 chars. */
+        char name[12];
+};
 
-struct err_log_info {
-        int error_type;
-        unsigned int seq_num;
+struct nvram_partition {
+        struct list_head partition;
+        struct nvram_header header;
+        unsigned int index;
 };
 
+static LIST_HEAD(nvram_partitions);
+
 static loff_t dev_nvram_llseek(struct file *file, loff_t offset, int origin)
 {
         int size;
@@ -186,14 +197,12 @@ static struct miscdevice nvram_dev = {
 #ifdef DEBUG_NVRAM
 static void __init nvram_print_partitions(char * label)
 {
-        struct list_head * p;
         struct nvram_partition * tmp_part;
 
         printk(KERN_WARNING "--------%s---------\n", label);
         printk(KERN_WARNING "indx\t\tsig\tchks\tlen\tname\n");
-        list_for_each(p, &nvram_part->partition) {
-                tmp_part = list_entry(p, struct nvram_partition, partition);
-                printk(KERN_WARNING "%4d    \t%02x\t%02x\t%d\t%s\n",
+        list_for_each_entry(tmp_part, &nvram_partitions, partition) {
+                printk(KERN_WARNING "%4d    \t%02x\t%02x\t%d\t%12s\n",
                        tmp_part->index, tmp_part->header.signature,
                        tmp_part->header.checksum, tmp_part->header.length,
                        tmp_part->header.name);
@@ -228,95 +237,113 @@ static unsigned char __init nvram_checksum(struct nvram_header *p)
         return c_sum;
 }
 
-static int __init nvram_remove_os_partition(void)
+/**
+ * nvram_remove_partition - Remove one or more partitions in nvram
+ * @name: name of the partition to remove, or NULL for a
+ *        signature only match
+ * @sig: signature of the partition(s) to remove
+ */
+
+int __init nvram_remove_partition(const char *name, int sig)
 {
-        struct list_head *i;
-        struct list_head *j;
-        struct nvram_partition * part;
-        struct nvram_partition * cur_part;
+        struct nvram_partition *part, *prev, *tmp;
         int rc;
 
-        list_for_each(i, &nvram_part->partition) {
-                part = list_entry(i, struct nvram_partition, partition);
-                if (part->header.signature != NVRAM_SIG_OS)
+        list_for_each_entry(part, &nvram_partitions, partition) {
+                if (part->header.signature != sig)
                         continue;
-
-                /* Make os partition a free partition */
+                if (name && strncmp(name, part->header.name, 12))
+                        continue;
+
+                /* Make partition a free partition */
                 part->header.signature = NVRAM_SIG_FREE;
-                sprintf(part->header.name, "wwwwwwwwwwww");
+                strncpy(part->header.name, "wwwwwwwwwwww", 12);
                 part->header.checksum = nvram_checksum(&part->header);
-
-                /* Merge contiguous free partitions backwards */
-                list_for_each_prev(j, &part->partition) {
-                        cur_part = list_entry(j, struct nvram_partition, partition);
-                        if (cur_part == nvram_part || cur_part->header.signature != NVRAM_SIG_FREE) {
-                                break;
-                        }
-
-                        part->header.length += cur_part->header.length;
-                        part->header.checksum = nvram_checksum(&part->header);
-                        part->index = cur_part->index;
-
-                        list_del(&cur_part->partition);
-                        kfree(cur_part);
-                        j = &part->partition; /* fixup our loop */
-                }
-
-                /* Merge contiguous free partitions forwards */
-                list_for_each(j, &part->partition) {
-                        cur_part = list_entry(j, struct nvram_partition, partition);
-                        if (cur_part == nvram_part || cur_part->header.signature != NVRAM_SIG_FREE) {
-                                break;
-                        }
-
-                        part->header.length += cur_part->header.length;
-                        part->header.checksum = nvram_checksum(&part->header);
-
-                        list_del(&cur_part->partition);
-                        kfree(cur_part);
-                        j = &part->partition; /* fixup our loop */
-                }
-
                 rc = nvram_write_header(part);
                 if (rc <= 0) {
-                        printk(KERN_ERR "nvram_remove_os_partition: nvram_write failed (%d)\n", rc);
+                        printk(KERN_ERR "nvram_remove_partition: nvram_write failed (%d)\n", rc);
                         return rc;
                 }
+        }
 
+        /* Merge contiguous ones */
+        prev = NULL;
+        list_for_each_entry_safe(part, tmp, &nvram_partitions, partition) {
+                if (part->header.signature != NVRAM_SIG_FREE) {
+                        prev = NULL;
+                        continue;
+                }
+                if (prev) {
+                        prev->header.length += part->header.length;
+                        prev->header.checksum = nvram_checksum(&part->header);
+                        rc = nvram_write_header(part);
+                        if (rc <= 0) {
+                                printk(KERN_ERR "nvram_remove_partition: nvram_write failed (%d)\n", rc);
+                                return rc;
+                        }
+                        list_del(&part->partition);
+                        kfree(part);
+                } else
+                        prev = part;
         }
 
         return 0;
 }
 
-/* nvram_create_os_partition
+/**
+ * nvram_create_partition - Create a partition in nvram
+ * @name: name of the partition to create
+ * @sig: signature of the partition to create
+ * @req_size: size of data to allocate in bytes
+ * @min_size: minimum acceptable size (0 means req_size)
  *
- * Create a OS linux partition to buffer error logs.
- * Will create a partition starting at the first free
- * space found if space has enough room.
+ * Returns a negative error code or a positive nvram index
+ * of the beginning of the data area of the newly created
+ * partition. If you provided a min_size smaller than req_size
+ * you need to query for the actual size yourself after the
+ * call using nvram_partition_get_size().
  */
-static int __init nvram_create_os_partition(void)
+loff_t __init nvram_create_partition(const char *name, int sig,
+                                     int req_size, int min_size)
 {
         struct nvram_partition *part;
         struct nvram_partition *new_part;
         struct nvram_partition *free_part = NULL;
-        int seq_init[2] = { 0, 0 };
+        static char nv_init_vals[16];
         loff_t tmp_index;
         long size = 0;
         int rc;
 
+        /* Convert sizes from bytes to blocks */
+        req_size = _ALIGN_UP(req_size, NVRAM_BLOCK_LEN) / NVRAM_BLOCK_LEN;
+        min_size = _ALIGN_UP(min_size, NVRAM_BLOCK_LEN) / NVRAM_BLOCK_LEN;
+
+        /* If no minimum size specified, make it the same as the
+         * requested size
+         */
+        if (min_size == 0)
+                min_size = req_size;
+        if (min_size > req_size)
+                return -EINVAL;
+
+        /* Now add one block to each for the header */
+        req_size += 1;
+        min_size += 1;
+
         /* Find a free partition that will give us the maximum needed size
            If can't find one that will give us the minimum size needed */
-        list_for_each_entry(part, &nvram_part->partition, partition) {
+        list_for_each_entry(part, &nvram_partitions, partition) {
                 if (part->header.signature != NVRAM_SIG_FREE)
                         continue;
 
-                if (part->header.length >= NVRAM_MAX_REQ) {
-                        size = NVRAM_MAX_REQ;
+                if (part->header.length >= req_size) {
+                        size = req_size;
                         free_part = part;
                         break;
                 }
-                if (!size && part->header.length >= NVRAM_MIN_REQ) {
-                        size = NVRAM_MIN_REQ;
+                if (part->header.length > size &&
+                    part->header.length >= min_size) {
+                        size = part->header.length;
                         free_part = part;
                 }
         }
@@ -326,136 +353,95 @@ static int __init nvram_create_os_partition(void)
         /* Create our OS partition */
         new_part = kmalloc(sizeof(*new_part), GFP_KERNEL);
         if (!new_part) {
-                printk(KERN_ERR "nvram_create_os_partition: kmalloc failed\n");
+                pr_err("nvram_create_os_partition: kmalloc failed\n");
                 return -ENOMEM;
         }
 
         new_part->index = free_part->index;
-        new_part->header.signature = NVRAM_SIG_OS;
+        new_part->header.signature = sig;
         new_part->header.length = size;
-        strcpy(new_part->header.name, "ppc64,linux");
+        strncpy(new_part->header.name, name, 12);
         new_part->header.checksum = nvram_checksum(&new_part->header);
 
         rc = nvram_write_header(new_part);
         if (rc <= 0) {
-                printk(KERN_ERR "nvram_create_os_partition: nvram_write_header "
-                                "failed (%d)\n", rc);
-                return rc;
-        }
-
-        /* make sure and initialize to zero the sequence number and the error
-           type logged */
-        tmp_index = new_part->index + NVRAM_HEADER_LEN;
-        rc = ppc_md.nvram_write((char *)&seq_init, sizeof(seq_init), &tmp_index);
-        if (rc <= 0) {
-                printk(KERN_ERR "nvram_create_os_partition: nvram_write "
+                pr_err("nvram_create_os_partition: nvram_write_header "
                        "failed (%d)\n", rc);
                 return rc;
         }
-
-        nvram_error_log_index = new_part->index + NVRAM_HEADER_LEN;
-        nvram_error_log_size = ((part->header.length - 1) *
-                                NVRAM_BLOCK_LEN) - sizeof(struct err_log_info);
-
         list_add_tail(&new_part->partition, &free_part->partition);
 
-        if (free_part->header.length <= size) {
+        /* Adjust or remove the partition we stole the space from */
+        if (free_part->header.length > size) {
+                free_part->index += size * NVRAM_BLOCK_LEN;
+                free_part->header.length -= size;
+                free_part->header.checksum = nvram_checksum(&free_part->header);
+                rc = nvram_write_header(free_part);
+                if (rc <= 0) {
+                        pr_err("nvram_create_os_partition: nvram_write_header "
+                               "failed (%d)\n", rc);
+                        return rc;
+                }
+        } else {
                 list_del(&free_part->partition);
                 kfree(free_part);
-                return 0;
         }
 
-        /* Adjust the partition we stole the space from */
-        free_part->index += size * NVRAM_BLOCK_LEN;
-        free_part->header.length -= size;
-        free_part->header.checksum = nvram_checksum(&free_part->header);
-
-        rc = nvram_write_header(free_part);
-        if (rc <= 0) {
-                printk(KERN_ERR "nvram_create_os_partition: nvram_write_header "
-                       "failed (%d)\n", rc);
-                return rc;
+        /* Clear the new partition */
+        for (tmp_index = new_part->index + NVRAM_HEADER_LEN;
+             tmp_index < ((size - 1) * NVRAM_BLOCK_LEN);
+             tmp_index += NVRAM_BLOCK_LEN) {
+                rc = ppc_md.nvram_write(nv_init_vals, NVRAM_BLOCK_LEN, &tmp_index);
+                if (rc <= 0) {
+                        pr_err("nvram_create_partition: nvram_write failed (%d)\n", rc);
+                        return rc;
+                }
         }
 
-        return 0;
+        return new_part->index + NVRAM_HEADER_LEN;
 }
 
-
-/* nvram_setup_partition
- *
- * This will setup the partition we need for buffering the
- * error logs and cleanup partitions if needed.
- *
- * The general strategy is the following:
- * 1.) If there is ppc64,linux partition large enough then use it.
- * 2.) If there is not a ppc64,linux partition large enough, search
- * for a free partition that is large enough.
- * 3.) If there is not a free partition large enough remove
- * _all_ OS partitions and consolidate the space.
- * 4.) Will first try getting a chunk that will satisfy the maximum
- * error log size (NVRAM_MAX_REQ).
- * 5.) If the max chunk cannot be allocated then try finding a chunk
- * that will satisfy the minum needed (NVRAM_MIN_REQ).
- */
-static int __init nvram_setup_partition(void)
-{
-        struct list_head * p;
-        struct nvram_partition * part;
-        int rc;
-
-        /* For now, we don't do any of this on pmac, until I
-         * have figured out if it's worth killing some unused stuffs
-         * in our nvram, as Apple defined partitions use pretty much
-         * all of the space
-         */
-        if (machine_is(powermac))
-                return -ENOSPC;
-
-        /* see if we have an OS partition that meets our needs.
-           will try getting the max we need.  If not we'll delete
-           partitions and try again. */
-        list_for_each(p, &nvram_part->partition) {
-                part = list_entry(p, struct nvram_partition, partition);
-                if (part->header.signature != NVRAM_SIG_OS)
-                        continue;
+/**
+ * nvram_get_partition_size - Get the data size of an nvram partition
+ * @data_index: This is the offset of the start of the data of
+ *              the partition. The same value that is returned by
+ *              nvram_create_partition().
+ */
+int nvram_get_partition_size(loff_t data_index)
+{
+        struct nvram_partition *part;
+
+        list_for_each_entry(part, &nvram_partitions, partition) {
+                if (part->index + NVRAM_HEADER_LEN == data_index)
+                        return (part->header.length - 1) * NVRAM_BLOCK_LEN;
+        }
+        return -1;
+}
 
-                if (strcmp(part->header.name, "ppc64,linux"))
-                        continue;
 
-                if (part->header.length >= NVRAM_MIN_REQ) {
-                        /* found our partition */
-                        nvram_error_log_index = part->index + NVRAM_HEADER_LEN;
-                        nvram_error_log_size = ((part->header.length - 1) *
-                                                NVRAM_BLOCK_LEN) - sizeof(struct err_log_info);
-                        return 0;
+/**
+ * nvram_find_partition - Find an nvram partition by signature and name
+ * @name: Name of the partition or NULL for any name
+ * @sig: Signature to test against
+ * @out_size: if non-NULL, returns the size of the data part of the partition
+ */
+loff_t nvram_find_partition(const char *name, int sig, int *out_size)
+{
+        struct nvram_partition *p;
+
+        list_for_each_entry(p, &nvram_partitions, partition) {
+                if (p->header.signature == sig &&
+                    (!name || !strncmp(p->header.name, name, 12))) {
+                        if (out_size)
+                                *out_size = (p->header.length - 1) *
+                                        NVRAM_BLOCK_LEN;
+                        return p->index + NVRAM_HEADER_LEN;
                 }
         }
-
-        /* try creating a partition with the free space we have */
-        rc = nvram_create_os_partition();
-        if (!rc) {
-                return 0;
-        }
-
-        /* need to free up some space */
-        rc = nvram_remove_os_partition();
-        if (rc) {
-                return rc;
-        }
-
-        /* create a partition in this new space */
-        rc = nvram_create_os_partition();
-        if (rc) {
-                printk(KERN_ERR "nvram_create_os_partition: Could not find a "
-                       "NVRAM partition large enough\n");
-                return rc;
-        }
-
         return 0;
 }
 
-
-static int __init nvram_scan_partitions(void)
+int __init nvram_scan_partitions(void)
 {
         loff_t cur_index = 0;
         struct nvram_header phead;
@@ -465,7 +451,7 @@ static int __init nvram_scan_partitions(void)
         int total_size;
         int err;
 
-        if (ppc_md.nvram_size == NULL)
+        if (ppc_md.nvram_size == NULL || ppc_md.nvram_size() <= 0)
                 return -ENODEV;
         total_size = ppc_md.nvram_size();
 
@@ -512,12 +498,16 @@ static int __init nvram_scan_partitions(void)
 
                 memcpy(&tmp_part->header, &phead, NVRAM_HEADER_LEN);
                 tmp_part->index = cur_index;
-                list_add_tail(&tmp_part->partition, &nvram_part->partition);
+                list_add_tail(&tmp_part->partition, &nvram_partitions);
 
                 cur_index += phead.length * NVRAM_BLOCK_LEN;
         }
         err = 0;
 
+#ifdef DEBUG_NVRAM
+        nvram_print_partitions("NVRAM Partitions");
+#endif
+
  out:
         kfree(header);
         return err;
@@ -525,9 +515,10 @@ static int __init nvram_scan_partitions(void)
 
 static int __init nvram_init(void)
 {
-        int error;
         int rc;
 
+        BUILD_BUG_ON(NVRAM_BLOCK_LEN != 16);
+
         if (ppc_md.nvram_size == NULL || ppc_md.nvram_size() <= 0)
                 return -ENODEV;
 
@@ -537,29 +528,6 @@ static int __init nvram_init(void)
                 return rc;
         }
 
-        /* initialize our anchor for the nvram partition list */
-        nvram_part = kmalloc(sizeof(struct nvram_partition), GFP_KERNEL);
-        if (!nvram_part) {
-                printk(KERN_ERR "nvram_init: Failed kmalloc\n");
-                return -ENOMEM;
-        }
-        INIT_LIST_HEAD(&nvram_part->partition);
-
-        /* Get all the NVRAM partitions */
-        error = nvram_scan_partitions();
-        if (error) {
-                printk(KERN_ERR "nvram_init: Failed nvram_scan_partitions\n");
-                return error;
-        }
-
-        if(nvram_setup_partition())
-                printk(KERN_WARNING "nvram_init: Could not find nvram partition"
-                       " for nvram buffered error logging.\n");
-
-#ifdef DEBUG_NVRAM
-        nvram_print_partitions("NVRAM Partitions");
-#endif
-
         return rc;
 }
 
565 533
@@ -568,135 +536,6 @@ void __exit nvram_cleanup(void)
         misc_deregister( &nvram_dev );
 }
 
-
-#ifdef CONFIG_PPC_PSERIES
-
-/* nvram_write_error_log
- *
- * We need to buffer the error logs into nvram to ensure that we have
- * the failure information to decode.  If we have a severe error there
- * is no way to guarantee that the OS or the machine is in a state to
- * get back to user land and write the error to disk.  For example if
- * the SCSI device driver causes a Machine Check by writing to a bad
- * IO address, there is no way of guaranteeing that the device driver
- * is in any state that is would also be able to write the error data
- * captured to disk, thus we buffer it in NVRAM for analysis on the
- * next boot.
- *
- * In NVRAM the partition containing the error log buffer will looks like:
- * Header (in bytes):
- * +-----------+----------+--------+------------+------------------+
- * | signature | checksum | length | name       | data             |
- * |0          |1         |2      3|4         15|16        length-1|
- * +-----------+----------+--------+------------+------------------+
- *
- * The 'data' section would look like (in bytes):
- * +--------------+------------+-----------------------------------+
- * | event_logged | sequence # | error log                         |
- * |0            3|4          7|8       nvram_error_log_size-1     |
- * +--------------+------------+-----------------------------------+
- *
- * event_logged: 0 if event has not been logged to syslog, 1 if it has
- * sequence #: The unique sequence # for each event. (until it wraps)
- * error log: The error log from event_scan
- */
-int nvram_write_error_log(char * buff, int length,
-                          unsigned int err_type, unsigned int error_log_cnt)
-{
-        int rc;
-        loff_t tmp_index;
-        struct err_log_info info;
-
-        if (nvram_error_log_index == -1) {
-                return -ESPIPE;
-        }
-
-        if (length > nvram_error_log_size) {
-                length = nvram_error_log_size;
-        }
-
-        info.error_type = err_type;
-        info.seq_num = error_log_cnt;
-
-        tmp_index = nvram_error_log_index;
-
-        rc = ppc_md.nvram_write((char *)&info, sizeof(struct err_log_info), &tmp_index);
-        if (rc <= 0) {
-                printk(KERN_ERR "nvram_write_error_log: Failed nvram_write (%d)\n", rc);
-                return rc;
-        }
-
-        rc = ppc_md.nvram_write(buff, length, &tmp_index);
-        if (rc <= 0) {
-                printk(KERN_ERR "nvram_write_error_log: Failed nvram_write (%d)\n", rc);
-                return rc;
-        }
-
-        return 0;
-}
-
-/* nvram_read_error_log
- *
- * Reads nvram for error log for at most 'length'
- */
-int nvram_read_error_log(char * buff, int length,
-                         unsigned int * err_type, unsigned int * error_log_cnt)
-{
-        int rc;
-        loff_t tmp_index;
-        struct err_log_info info;
-
-        if (nvram_error_log_index == -1)
-                return -1;
-
-        if (length > nvram_error_log_size)
-                length = nvram_error_log_size;
-
-        tmp_index = nvram_error_log_index;
-
-        rc = ppc_md.nvram_read((char *)&info, sizeof(struct err_log_info), &tmp_index);
-        if (rc <= 0) {
-                printk(KERN_ERR "nvram_read_error_log: Failed nvram_read (%d)\n", rc);
-                return rc;
-        }
-
-        rc = ppc_md.nvram_read(buff, length, &tmp_index);
-        if (rc <= 0) {
-                printk(KERN_ERR "nvram_read_error_log: Failed nvram_read (%d)\n", rc);
-                return rc;
-        }
-
-        *error_log_cnt = info.seq_num;
-        *err_type = info.error_type;
-
-        return 0;
-}
-
-/* This doesn't actually zero anything, but it sets the event_logged
- * word to tell that this event is safely in syslog.
- */
-int nvram_clear_error_log(void)
-{
-        loff_t tmp_index;
-        int clear_word = ERR_FLAG_ALREADY_LOGGED;
-        int rc;
-
-        if (nvram_error_log_index == -1)
-                return -1;
-
-        tmp_index = nvram_error_log_index;
-
-        rc = ppc_md.nvram_write((char *)&clear_word, sizeof(int), &tmp_index);
-        if (rc <= 0) {
-                printk(KERN_ERR "nvram_clear_error_log: Failed nvram_write (%d)\n", rc);
-                return rc;
-        }
-
-        return 0;
-}
-
-#endif /* CONFIG_PPC_PSERIES */
-
 module_init(nvram_init);
 module_exit(nvram_cleanup);
 MODULE_LICENSE("GPL");
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index d43fc65749c1..851577608a78 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -193,8 +193,7 @@ int __devinit pcibios_map_io_space(struct pci_bus *bus)
         hose->io_resource.start += io_virt_offset;
         hose->io_resource.end += io_virt_offset;
 
-        pr_debug("  hose->io_resource=0x%016llx...0x%016llx\n",
-                 hose->io_resource.start, hose->io_resource.end);
+        pr_debug("  hose->io_resource=%pR\n", &hose->io_resource);
 
         return 0;
 }
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index ab3e392ac63c..ef3ef566235e 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -186,3 +186,10 @@ EXPORT_SYMBOL(__mtdcr);
 EXPORT_SYMBOL(__mfdcr);
 #endif
 EXPORT_SYMBOL(empty_zero_page);
+
+#ifdef CONFIG_PPC64
+EXPORT_SYMBOL(__arch_hweight8);
+EXPORT_SYMBOL(__arch_hweight16);
+EXPORT_SYMBOL(__arch_hweight32);
+EXPORT_SYMBOL(__arch_hweight64);
+#endif
diff --git a/arch/powerpc/kernel/ppc_save_regs.S b/arch/powerpc/kernel/ppc_save_regs.S
index 5113bd2285e1..e83ba3f078e4 100644
--- a/arch/powerpc/kernel/ppc_save_regs.S
+++ b/arch/powerpc/kernel/ppc_save_regs.S
@@ -11,6 +11,7 @@
 #include <asm/processor.h>
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
+#include <asm/ptrace.h>
 
 /*
  * Grab the register values as they are now.
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index a9b32967cff6..906536998291 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -1316,6 +1316,10 @@ static int set_dac_range(struct task_struct *child,
 static long ppc_set_hwdebug(struct task_struct *child,
                             struct ppc_hw_breakpoint *bp_info)
 {
+#ifndef CONFIG_PPC_ADV_DEBUG_REGS
+        unsigned long dabr;
+#endif
+
         if (bp_info->version != 1)
                 return -ENOTSUPP;
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
@@ -1353,11 +1357,10 @@ static long ppc_set_hwdebug(struct task_struct *child,
         /*
          * We only support one data breakpoint
          */
-        if (((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0) ||
-            ((bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0) ||
-            (bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_WRITE) ||
-            (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) ||
-            (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE))
+        if ((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0 ||
+            (bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0 ||
+            bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT ||
+            bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
                 return -EINVAL;
 
         if (child->thread.dabr)
@@ -1366,7 +1369,14 @@ static long ppc_set_hwdebug(struct task_struct *child,
         if ((unsigned long)bp_info->addr >= TASK_SIZE)
                 return -EIO;
 
-        child->thread.dabr = (unsigned long)bp_info->addr;
+        dabr = (unsigned long)bp_info->addr & ~7UL;
+        dabr |= DABR_TRANSLATION;
+        if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
+                dabr |= DABR_DATA_READ;
+        if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
+                dabr |= DABR_DATA_WRITE;
+
+        child->thread.dabr = dabr;
 
         return 1;
 #endif /* !CONFIG_PPC_ADV_DEBUG_DVCS */
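With this change the classic-server path composes a real DABR value instead of storing the raw address: the watch address keeps its doubleword-aligned high bits, DABR_TRANSLATION restricts matches to translated accesses, and the low bits select the trigger direction. An illustration of the resulting value, assuming the usual reg.h bit definitions (translation 0x4, write 0x2, read 0x1) and a hypothetical watch address:

    /* Sketch: watch stores to the doubleword containing 0x10001234. */
    unsigned long dabr = (0x10001234UL & ~7UL)  /* -> 0x10001230 */
                       | DABR_TRANSLATION       /* translated accesses only */
                       | DABR_DATA_WRITE;       /* trigger on writes */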
diff --git a/arch/powerpc/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace32.c
index 8a6daf4129f6..69c4be917d07 100644
--- a/arch/powerpc/kernel/ptrace32.c
+++ b/arch/powerpc/kernel/ptrace32.c
@@ -280,7 +280,11 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
280 /* We only support one DABR and no IABRS at the moment */ 280 /* We only support one DABR and no IABRS at the moment */
281 if (addr > 0) 281 if (addr > 0)
282 break; 282 break;
283#ifdef CONFIG_PPC_ADV_DEBUG_REGS
284 ret = put_user(child->thread.dac1, (u32 __user *)data);
285#else
283 ret = put_user(child->thread.dabr, (u32 __user *)data); 286 ret = put_user(child->thread.dabr, (u32 __user *)data);
287#endif
284 break; 288 break;
285 } 289 }
286 290
@@ -312,6 +316,9 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
312 case PTRACE_SET_DEBUGREG: 316 case PTRACE_SET_DEBUGREG:
313 case PTRACE_SYSCALL: 317 case PTRACE_SYSCALL:
314 case PTRACE_CONT: 318 case PTRACE_CONT:
319 case PPC_PTRACE_GETHWDBGINFO:
320 case PPC_PTRACE_SETHWDEBUG:
321 case PPC_PTRACE_DELHWDEBUG:
315 ret = arch_ptrace(child, request, addr, data); 322 ret = arch_ptrace(child, request, addr, data);
316 break; 323 break;
317 324
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 8fe8bc61c10a..2097f2b3cba8 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -41,6 +41,7 @@
41#include <asm/atomic.h> 41#include <asm/atomic.h>
42#include <asm/time.h> 42#include <asm/time.h>
43#include <asm/mmu.h> 43#include <asm/mmu.h>
44#include <asm/topology.h>
44 45
45struct rtas_t rtas = { 46struct rtas_t rtas = {
46 .lock = __ARCH_SPIN_LOCK_UNLOCKED 47 .lock = __ARCH_SPIN_LOCK_UNLOCKED
@@ -713,6 +714,7 @@ static int __rtas_suspend_last_cpu(struct rtas_suspend_me_data *data, int wake_w
713 int cpu; 714 int cpu;
714 715
715 slb_set_size(SLB_MIN_SIZE); 716 slb_set_size(SLB_MIN_SIZE);
717 stop_topology_update();
716 printk(KERN_DEBUG "calling ibm,suspend-me on cpu %i\n", smp_processor_id()); 718 printk(KERN_DEBUG "calling ibm,suspend-me on cpu %i\n", smp_processor_id());
717 719
718 while (rc == H_MULTI_THREADS_ACTIVE && !atomic_read(&data->done) && 720 while (rc == H_MULTI_THREADS_ACTIVE && !atomic_read(&data->done) &&
@@ -728,6 +730,7 @@ static int __rtas_suspend_last_cpu(struct rtas_suspend_me_data *data, int wake_w
728 rc = atomic_read(&data->error); 730 rc = atomic_read(&data->error);
729 731
730 atomic_set(&data->error, rc); 732 atomic_set(&data->error, rc);
733 start_topology_update();
731 734
732 if (wake_when_done) { 735 if (wake_when_done) {
733 atomic_set(&data->done, 1); 736 atomic_set(&data->done, 1);
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index ce6f61c6f871..5a0401fcaebd 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -437,8 +437,8 @@ static void __init irqstack_early_init(void)
         unsigned int i;
 
         /*
-         * interrupt stacks must be under 256MB, we cannot afford to take
-         * SLB misses on them.
+         * Interrupt stacks must be in the first segment since we
+         * cannot afford to take SLB misses on them.
          */
         for_each_possible_cpu(i) {
                 softirq_ctx[i] = (struct thread_info *)
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 68034bbf2e4f..981360509172 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -466,7 +466,20 @@ out:
         return id;
 }
 
-/* Must be called when no change can occur to cpu_present_mask,
+/* Helper routines for cpu to core mapping */
+int cpu_core_index_of_thread(int cpu)
+{
+        return cpu >> threads_shift;
+}
+EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);
+
+int cpu_first_thread_of_core(int core)
+{
+        return core << threads_shift;
+}
+EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
+
+/* Must be called when no change can occur to cpu_present_map,
  * i.e. during cpu online or offline.
  */
 static struct device_node *cpu_to_l2cache(int cpu)
@@ -514,7 +527,7 @@ int __devinit start_secondary(void *unused)
         notify_cpu_starting(cpu);
         set_cpu_online(cpu, true);
         /* Update sibling maps */
-        base = cpu_first_thread_in_core(cpu);
+        base = cpu_first_thread_sibling(cpu);
         for (i = 0; i < threads_per_core; i++) {
                 if (cpu_is_offline(base + i))
                         continue;
@@ -600,7 +613,7 @@ int __cpu_disable(void)
                 return err;
 
         /* Update sibling maps */
-        base = cpu_first_thread_in_core(cpu);
+        base = cpu_first_thread_sibling(cpu);
         for (i = 0; i < threads_per_core; i++) {
                 cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
                 cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
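The two new helpers are pure shifts by threads_shift, so with four SMT threads per core (threads_shift = 2) CPU 6 belongs to core 1 and core 1 starts at CPU 4. A tiny worked sketch:

    /* Assuming threads_shift = 2, i.e. 4 threads per core: */
    int core  = cpu_core_index_of_thread(6);  /* 6 >> 2 == 1 */
    int first = cpu_first_thread_of_core(1);  /* 1 << 2 == 4 */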
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 010406958d97..09e4dea4a85a 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -155,7 +155,7 @@ EXPORT_SYMBOL_GPL(rtc_lock);
155 155
156static u64 tb_to_ns_scale __read_mostly; 156static u64 tb_to_ns_scale __read_mostly;
157static unsigned tb_to_ns_shift __read_mostly; 157static unsigned tb_to_ns_shift __read_mostly;
158static unsigned long boot_tb __read_mostly; 158static u64 boot_tb __read_mostly;
159 159
160extern struct timezone sys_tz; 160extern struct timezone sys_tz;
161static long timezone_offset; 161static long timezone_offset;
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index fe460482fa68..9de6f396cf85 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -5,6 +5,7 @@
5#include <asm/cputable.h> 5#include <asm/cputable.h>
6#include <asm/thread_info.h> 6#include <asm/thread_info.h>
7#include <asm/page.h> 7#include <asm/page.h>
8#include <asm/ptrace.h>
8 9
9/* 10/*
10 * load_up_altivec(unused, unused, tsk) 11 * load_up_altivec(unused, unused, tsk)
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 441d2a722f06..1b695fdc362b 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -600,6 +600,11 @@ static void vio_dma_iommu_unmap_sg(struct device *dev,
600 vio_cmo_dealloc(viodev, alloc_size); 600 vio_cmo_dealloc(viodev, alloc_size);
601} 601}
602 602
603static int vio_dma_iommu_dma_supported(struct device *dev, u64 mask)
604{
605 return dma_iommu_ops.dma_supported(dev, mask);
606}
607
603struct dma_map_ops vio_dma_mapping_ops = { 608struct dma_map_ops vio_dma_mapping_ops = {
604 .alloc_coherent = vio_dma_iommu_alloc_coherent, 609 .alloc_coherent = vio_dma_iommu_alloc_coherent,
605 .free_coherent = vio_dma_iommu_free_coherent, 610 .free_coherent = vio_dma_iommu_free_coherent,
@@ -607,6 +612,7 @@ struct dma_map_ops vio_dma_mapping_ops = {
607 .unmap_sg = vio_dma_iommu_unmap_sg, 612 .unmap_sg = vio_dma_iommu_unmap_sg,
608 .map_page = vio_dma_iommu_map_page, 613 .map_page = vio_dma_iommu_map_page,
609 .unmap_page = vio_dma_iommu_unmap_page, 614 .unmap_page = vio_dma_iommu_unmap_page,
615 .dma_supported = vio_dma_iommu_dma_supported,
610 616
611}; 617};
612 618
@@ -858,8 +864,7 @@ static void vio_cmo_bus_remove(struct vio_dev *viodev)
858 864
859static void vio_cmo_set_dma_ops(struct vio_dev *viodev) 865static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
860{ 866{
861 vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported; 867 set_dma_ops(&viodev->dev, &vio_dma_mapping_ops);
862 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
863} 868}
864 869
865/** 870/**
@@ -1244,7 +1249,7 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
1244 if (firmware_has_feature(FW_FEATURE_CMO)) 1249 if (firmware_has_feature(FW_FEATURE_CMO))
1245 vio_cmo_set_dma_ops(viodev); 1250 vio_cmo_set_dma_ops(viodev);
1246 else 1251 else
1247 viodev->dev.archdata.dma_ops = &dma_iommu_ops; 1252 set_dma_ops(&viodev->dev, &dma_iommu_ops);
1248 set_iommu_table_base(&viodev->dev, vio_build_iommu_table(viodev)); 1253 set_iommu_table_base(&viodev->dev, vio_build_iommu_table(viodev));
1249 set_dev_node(&viodev->dev, of_node_to_nid(of_node)); 1254 set_dev_node(&viodev->dev, of_node_to_nid(of_node));
1250 1255
@@ -1252,6 +1257,10 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
1252 viodev->dev.parent = &vio_bus_device.dev; 1257 viodev->dev.parent = &vio_bus_device.dev;
1253 viodev->dev.bus = &vio_bus_type; 1258 viodev->dev.bus = &vio_bus_type;
1254 viodev->dev.release = vio_dev_release; 1259 viodev->dev.release = vio_dev_release;
1260 /* needed to ensure proper operation of coherent allocations
1261 * later, in case driver doesn't set it explicitly */
1262 dma_set_mask(&viodev->dev, DMA_BIT_MASK(64));
1263 dma_set_coherent_mask(&viodev->dev, DMA_BIT_MASK(64));
1255 1264
1256 /* register with generic device framework */ 1265 /* register with generic device framework */
1257 if (device_register(&viodev->dev)) { 1266 if (device_register(&viodev->dev)) {
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 889f2bc106dd..166a6a0ad544 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -16,7 +16,7 @@ obj-$(CONFIG_HAS_IOMEM) += devres.o
16 16
17obj-$(CONFIG_PPC64) += copypage_64.o copyuser_64.o \ 17obj-$(CONFIG_PPC64) += copypage_64.o copyuser_64.o \
18 memcpy_64.o usercopy_64.o mem_64.o string.o \ 18 memcpy_64.o usercopy_64.o mem_64.o string.o \
19 checksum_wrappers_64.o 19 checksum_wrappers_64.o hweight_64.o
20obj-$(CONFIG_XMON) += sstep.o ldstfp.o 20obj-$(CONFIG_XMON) += sstep.o ldstfp.o
21obj-$(CONFIG_KPROBES) += sstep.o ldstfp.o 21obj-$(CONFIG_KPROBES) += sstep.o ldstfp.o
22obj-$(CONFIG_HAVE_HW_BREAKPOINT) += sstep.o ldstfp.o 22obj-$(CONFIG_HAVE_HW_BREAKPOINT) += sstep.o ldstfp.o
diff --git a/arch/powerpc/lib/hweight_64.S b/arch/powerpc/lib/hweight_64.S
new file mode 100644
index 000000000000..fda27868cf8c
--- /dev/null
+++ b/arch/powerpc/lib/hweight_64.S
@@ -0,0 +1,110 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2 of the License, or
5 * (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
15 *
16 * Copyright (C) IBM Corporation, 2010
17 *
18 * Author: Anton Blanchard <anton@au.ibm.com>
19 */
20#include <asm/processor.h>
21#include <asm/ppc_asm.h>
22
23/* Note: This code relies on -mminimal-toc */
24
25_GLOBAL(__arch_hweight8)
26BEGIN_FTR_SECTION
27 b .__sw_hweight8
28 nop
29 nop
30FTR_SECTION_ELSE
31 PPC_POPCNTB(r3,r3)
32 clrldi r3,r3,64-8
33 blr
34ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB)
35
36_GLOBAL(__arch_hweight16)
37BEGIN_FTR_SECTION
38 b .__sw_hweight16
39 nop
40 nop
41 nop
42 nop
43FTR_SECTION_ELSE
44 BEGIN_FTR_SECTION_NESTED(50)
45 PPC_POPCNTB(r3,r3)
46 srdi r4,r3,8
47 add r3,r4,r3
48 clrldi r3,r3,64-8
49 blr
50 FTR_SECTION_ELSE_NESTED(50)
51 clrlwi r3,r3,16
52 PPC_POPCNTW(r3,r3)
53 clrldi r3,r3,64-8
54 blr
55 ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 50)
56ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB)
57
58_GLOBAL(__arch_hweight32)
59BEGIN_FTR_SECTION
60 b .__sw_hweight32
61 nop
62 nop
63 nop
64 nop
65 nop
66 nop
67FTR_SECTION_ELSE
68 BEGIN_FTR_SECTION_NESTED(51)
69 PPC_POPCNTB(r3,r3)
70 srdi r4,r3,16
71 add r3,r4,r3
72 srdi r4,r3,8
73 add r3,r4,r3
74 clrldi r3,r3,64-8
75 blr
76 FTR_SECTION_ELSE_NESTED(51)
77 PPC_POPCNTW(r3,r3)
78 clrldi r3,r3,64-8
79 blr
80 ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 51)
81ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB)
82
83_GLOBAL(__arch_hweight64)
84BEGIN_FTR_SECTION
85 b .__sw_hweight64
86 nop
87 nop
88 nop
89 nop
90 nop
91 nop
92 nop
93 nop
94FTR_SECTION_ELSE
95 BEGIN_FTR_SECTION_NESTED(52)
96 PPC_POPCNTB(r3,r3)
97 srdi r4,r3,32
98 add r3,r4,r3
99 srdi r4,r3,16
100 add r3,r4,r3
101 srdi r4,r3,8
102 add r3,r4,r3
103 clrldi r3,r3,64-8
104 blr
105 FTR_SECTION_ELSE_NESTED(52)
106 PPC_POPCNTD(r3,r3)
107 clrldi r3,r3,64-8
108 blr
109 ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 52)
110ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB)
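When only popcntb is available (CPU_FTR_POPCNTD clear), the routines above fold the per-byte population counts into one total with shift-and-add pairs. A C sketch of the same technique for the 64-bit case, with a hypothetical per_byte_popcount() standing in for the popcntb instruction:

	static inline unsigned int hweight64_popcntb_sketch(unsigned long x)
	{
		x = per_byte_popcount(x);  /* each byte := popcount of that byte  */
		x += x >> 32;              /* fold words: byte sums stay < 256    */
		x += x >> 16;
		x += x >> 8;               /* low byte now holds the full count   */
		return x & 0xff;           /* mirrors the final clrldi r3,r3,64-8 */
	}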
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 5e9584405c45..a5991facddce 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -1070,7 +1070,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
1070 unsigned long access, unsigned long trap) 1070 unsigned long access, unsigned long trap)
1071{ 1071{
1072 unsigned long vsid; 1072 unsigned long vsid;
1073 void *pgdir; 1073 pgd_t *pgdir;
1074 pte_t *ptep; 1074 pte_t *ptep;
1075 unsigned long flags; 1075 unsigned long flags;
1076 int rc, ssize, local = 0; 1076 int rc, ssize, local = 0;
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 5ce99848d91e..c0aab52da3a5 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -111,8 +111,8 @@ static unsigned int steal_context_smp(unsigned int id)
111 * a core map instead but this will do for now. 111 * a core map instead but this will do for now.
112 */ 112 */
113 for_each_cpu(cpu, mm_cpumask(mm)) { 113 for_each_cpu(cpu, mm_cpumask(mm)) {
114 for (i = cpu_first_thread_in_core(cpu); 114 for (i = cpu_first_thread_sibling(cpu);
115 i <= cpu_last_thread_in_core(cpu); i++) 115 i <= cpu_last_thread_sibling(cpu); i++)
116 __set_bit(id, stale_map[i]); 116 __set_bit(id, stale_map[i]);
117 cpu = i - 1; 117 cpu = i - 1;
118 } 118 }
@@ -264,14 +264,14 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
264 */ 264 */
265 if (test_bit(id, stale_map[cpu])) { 265 if (test_bit(id, stale_map[cpu])) {
266 pr_hardcont(" | stale flush %d [%d..%d]", 266 pr_hardcont(" | stale flush %d [%d..%d]",
267 id, cpu_first_thread_in_core(cpu), 267 id, cpu_first_thread_sibling(cpu),
268 cpu_last_thread_in_core(cpu)); 268 cpu_last_thread_sibling(cpu));
269 269
270 local_flush_tlb_mm(next); 270 local_flush_tlb_mm(next);
271 271
272 /* XXX This clear should ultimately be part of local_flush_tlb_mm */ 272 /* XXX This clear should ultimately be part of local_flush_tlb_mm */
273 for (i = cpu_first_thread_in_core(cpu); 273 for (i = cpu_first_thread_sibling(cpu);
274 i <= cpu_last_thread_in_core(cpu); i++) { 274 i <= cpu_last_thread_sibling(cpu); i++) {
275 __clear_bit(id, stale_map[i]); 275 __clear_bit(id, stale_map[i]);
276 } 276 }
277 } 277 }
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 74505b245374..bf5cb91f07de 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -20,10 +20,15 @@
20#include <linux/memblock.h> 20#include <linux/memblock.h>
21#include <linux/of.h> 21#include <linux/of.h>
22#include <linux/pfn.h> 22#include <linux/pfn.h>
23#include <linux/cpuset.h>
24#include <linux/node.h>
23#include <asm/sparsemem.h> 25#include <asm/sparsemem.h>
24#include <asm/prom.h> 26#include <asm/prom.h>
25#include <asm/system.h> 27#include <asm/system.h>
26#include <asm/smp.h> 28#include <asm/smp.h>
29#include <asm/firmware.h>
30#include <asm/paca.h>
31#include <asm/hvcall.h>
27 32
28static int numa_enabled = 1; 33static int numa_enabled = 1;
29 34
@@ -163,7 +168,7 @@ static void __init get_node_active_region(unsigned long start_pfn,
163 work_with_active_regions(nid, get_active_region_work_fn, node_ar); 168 work_with_active_regions(nid, get_active_region_work_fn, node_ar);
164} 169}
165 170
166static void __cpuinit map_cpu_to_node(int cpu, int node) 171static void map_cpu_to_node(int cpu, int node)
167{ 172{
168 numa_cpu_lookup_table[cpu] = node; 173 numa_cpu_lookup_table[cpu] = node;
169 174
@@ -173,7 +178,7 @@ static void __cpuinit map_cpu_to_node(int cpu, int node)
173 cpumask_set_cpu(cpu, node_to_cpumask_map[node]); 178 cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
174} 179}
175 180
176#ifdef CONFIG_HOTPLUG_CPU 181#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
177static void unmap_cpu_from_node(unsigned long cpu) 182static void unmap_cpu_from_node(unsigned long cpu)
178{ 183{
179 int node = numa_cpu_lookup_table[cpu]; 184 int node = numa_cpu_lookup_table[cpu];
@@ -187,7 +192,7 @@ static void unmap_cpu_from_node(unsigned long cpu)
187 cpu, node); 192 cpu, node);
188 } 193 }
189} 194}
190#endif /* CONFIG_HOTPLUG_CPU */ 195#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */
191 196
192/* must hold reference to node during call */ 197/* must hold reference to node during call */
193static const int *of_get_associativity(struct device_node *dev) 198static const int *of_get_associativity(struct device_node *dev)
@@ -246,32 +251,41 @@ static void initialize_distance_lookup_table(int nid,
246/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa 251/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
247 * info is found. 252 * info is found.
248 */ 253 */
249static int of_node_to_nid_single(struct device_node *device) 254static int associativity_to_nid(const unsigned int *associativity)
250{ 255{
251 int nid = -1; 256 int nid = -1;
252 const unsigned int *tmp;
253 257
254 if (min_common_depth == -1) 258 if (min_common_depth == -1)
255 goto out; 259 goto out;
256 260
257 tmp = of_get_associativity(device); 261 if (associativity[0] >= min_common_depth)
258 if (!tmp) 262 nid = associativity[min_common_depth];
259 goto out;
260
261 if (tmp[0] >= min_common_depth)
262 nid = tmp[min_common_depth];
263 263
264 /* POWER4 LPAR uses 0xffff as invalid node */ 264 /* POWER4 LPAR uses 0xffff as invalid node */
265 if (nid == 0xffff || nid >= MAX_NUMNODES) 265 if (nid == 0xffff || nid >= MAX_NUMNODES)
266 nid = -1; 266 nid = -1;
267 267
268 if (nid > 0 && tmp[0] >= distance_ref_points_depth) 268 if (nid > 0 && associativity[0] >= distance_ref_points_depth)
269 initialize_distance_lookup_table(nid, tmp); 269 initialize_distance_lookup_table(nid, associativity);
270 270
271out: 271out:
272 return nid; 272 return nid;
273} 273}
274 274
275/* Returns the nid associated with the given device tree node,
276 * or -1 if not found.
277 */
278static int of_node_to_nid_single(struct device_node *device)
279{
280 int nid = -1;
281 const unsigned int *tmp;
282
283 tmp = of_get_associativity(device);
284 if (tmp)
285 nid = associativity_to_nid(tmp);
286 return nid;
287}
288
275/* Walk the device tree upwards, looking for an associativity id */ 289/* Walk the device tree upwards, looking for an associativity id */
276int of_node_to_nid(struct device_node *device) 290int of_node_to_nid(struct device_node *device)
277{ 291{
@@ -1247,4 +1261,275 @@ int hot_add_scn_to_nid(unsigned long scn_addr)
1247 return nid; 1261 return nid;
1248} 1262}
1249 1263
1264static u64 hot_add_drconf_memory_max(void)
1265{
1266 struct device_node *memory = NULL;
1267 unsigned int drconf_cell_cnt = 0;
1268 u64 lmb_size = 0;
1269 const u32 *dm = 0;
1270
1271 memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
1272 if (memory) {
1273 drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
1274 lmb_size = of_get_lmb_size(memory);
1275 of_node_put(memory);
1276 }
1277 return lmb_size * drconf_cell_cnt;
1278}
1279
1280/*
1281 * memory_hotplug_max - return max address of memory that may be added
1282 *
1283 * This is currently only used on systems that support drconfig memory
1284 * hotplug.
1285 */
1286u64 memory_hotplug_max(void)
1287{
1288 return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
1289}
1250#endif /* CONFIG_MEMORY_HOTPLUG */ 1290#endif /* CONFIG_MEMORY_HOTPLUG */
1291
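As a worked example of the above (numbers hypothetical): with an lmb_size of 256MB and 64 drconf cells, hot_add_drconf_memory_max() returns 256MB * 64 = 16GB, and memory_hotplug_max() then yields the larger of that and memblock_end_of_DRAM().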
1292/* Virtual Processor Home Node (VPHN) support */
1293#ifdef CONFIG_PPC_SPLPAR
1294#define VPHN_NR_CHANGE_CTRS (8)
1295static u8 vphn_cpu_change_counts[NR_CPUS][VPHN_NR_CHANGE_CTRS];
1296static cpumask_t cpu_associativity_changes_mask;
1297static int vphn_enabled;
1298static void set_topology_timer(void);
1299
1300/*
1301 * Store the current values of the hypervisor's associativity change
1302 * counters.
1303 */
1304static void setup_cpu_associativity_change_counters(void)
1305{
1306 int cpu = 0;
1307
1308 for_each_possible_cpu(cpu) {
1309 int i = 0;
1310 u8 *counts = vphn_cpu_change_counts[cpu];
1311 volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;
1312
1313 for (i = 0; i < VPHN_NR_CHANGE_CTRS; i++) {
1314 counts[i] = hypervisor_counts[i];
1315 }
1316 }
1317}
1318
1319/*
1320 * The hypervisor maintains a set of 8 associativity change counters in
1321 * the VPA of each cpu that correspond to the associativity levels in the
1322 * ibm,associativity-reference-points property. When an associativity
1323 * level changes, the corresponding counter is incremented.
1324 *
1325 * Set a bit in cpu_associativity_changes_mask for each cpu whose home
1326 * node associativity levels have changed.
1327 *
1328 * Returns the number of cpus with unhandled associativity changes.
1329 */
1330static int update_cpu_associativity_changes_mask(void)
1331{
1332 int cpu = 0, nr_cpus = 0;
1333 cpumask_t *changes = &cpu_associativity_changes_mask;
1334
1335 cpumask_clear(changes);
1336
1337 for_each_possible_cpu(cpu) {
1338 int i, changed = 0;
1339 u8 *counts = vphn_cpu_change_counts[cpu];
1340 volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;
1341
1342 for (i = 0; i < VPHN_NR_CHANGE_CTRS; i++) {
1343 if (hypervisor_counts[i] > counts[i]) {
1344 counts[i] = hypervisor_counts[i];
1345 changed = 1;
1346 }
1347 }
1348 if (changed) {
1349 cpumask_set_cpu(cpu, changes);
1350 nr_cpus++;
1351 }
1352 }
1353
1354 return nr_cpus;
1355}
1356
1357/* 6 64-bit registers unpacked into 12 32-bit associativity values */
1358#define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32))
1359
1360/*
1361 * Convert the associativity domain numbers returned from the hypervisor
1362 * to the sequence they would appear in the ibm,associativity property.
1363 */
1364static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
1365{
1366 int i = 0;
1367 int nr_assoc_doms = 0;
1368 const u16 *field = (const u16*) packed;
1369
1370#define VPHN_FIELD_UNUSED (0xffff)
1371#define VPHN_FIELD_MSB (0x8000)
1372#define VPHN_FIELD_MASK (~VPHN_FIELD_MSB)
1373
1374 for (i = 0; i < VPHN_ASSOC_BUFSIZE; i++) {
1375 if (*field == VPHN_FIELD_UNUSED) {
1376 /* All significant fields processed, and remaining
1377 * fields contain the reserved value of all 1's.
1378 * Just store them.
1379 */
1380 unpacked[i] = *((u32*)field);
1381 field += 2;
1382 }
1383 else if (*field & VPHN_FIELD_MSB) {
1384 /* Data is in the lower 15 bits of this field */
1385 unpacked[i] = *field & VPHN_FIELD_MASK;
1386 field++;
1387 nr_assoc_doms++;
1388 }
1389 else {
1390 /* Data is in the lower 15 bits of this field
1391 * concatenated with the next 16 bit field
1392 */
1393 unpacked[i] = *((u32*)field);
1394 field += 2;
1395 nr_assoc_doms++;
1396 }
1397 }
1398
1399 return nr_assoc_doms;
1400}
1401
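To make the three branches above concrete, here is a hypothetical packed stream (16-bit fields) and how the loop would unpack it; all values are invented for illustration:

	0x8002          -> unpacked[i] = 0x00000002  (MSB set: one 15-bit value)
	0x0001, 0x2345  -> unpacked[i] = 0x00012345  (MSB clear: two fields concatenated)
	0xffff, 0xffff  -> unpacked[i] = 0xffffffff  (reserved tail, stored as-is)

Only the first two entries count toward nr_assoc_doms.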
1402/*
1403 * Retrieve the new associativity information for a virtual processor's
1404 * home node.
1405 */
1406static long hcall_vphn(unsigned long cpu, unsigned int *associativity)
1407{
1408 long rc = 0;
1409 long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
1410 u64 flags = 1;
1411 int hwcpu = get_hard_smp_processor_id(cpu);
1412
1413 rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu);
1414 vphn_unpack_associativity(retbuf, associativity);
1415
1416 return rc;
1417}
1418
1419static long vphn_get_associativity(unsigned long cpu,
1420 unsigned int *associativity)
1421{
1422 long rc = 0;
1423
1424 rc = hcall_vphn(cpu, associativity);
1425
1426 switch (rc) {
1427 case H_FUNCTION:
1428 printk(KERN_INFO
1429 "VPHN is not supported. Disabling polling...\n");
1430 stop_topology_update();
1431 break;
1432 case H_HARDWARE:
1433 printk(KERN_ERR
1434 "hcall_vphn() experienced a hardware fault "
1435 "preventing VPHN. Disabling polling...\n");
1436 stop_topology_update();
1437 }
1438
1439 return rc;
1440}
1441
1442/*
1443 * Update the node maps and sysfs entries for each cpu whose home node
1444 * has changed.
1445 */
1446int arch_update_cpu_topology(void)
1447{
1448 int cpu = 0, nid = 0, old_nid = 0;
1449 unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
1450 struct sys_device *sysdev = NULL;
1451
1452 for_each_cpu_mask(cpu, cpu_associativity_changes_mask) {
1453 vphn_get_associativity(cpu, associativity);
1454 nid = associativity_to_nid(associativity);
1455
1456 if (nid < 0 || !node_online(nid))
1457 nid = first_online_node;
1458
1459 old_nid = numa_cpu_lookup_table[cpu];
1460
1461 /* Disable hotplug while we update the cpu
1462 * masks and sysfs.
1463 */
1464 get_online_cpus();
1465 unregister_cpu_under_node(cpu, old_nid);
1466 unmap_cpu_from_node(cpu);
1467 map_cpu_to_node(cpu, nid);
1468 register_cpu_under_node(cpu, nid);
1469 put_online_cpus();
1470
1471 sysdev = get_cpu_sysdev(cpu);
1472 if (sysdev)
1473 kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
1474 }
1475
1476 return 1;
1477}
1478
1479static void topology_work_fn(struct work_struct *work)
1480{
1481 rebuild_sched_domains();
1482}
1483static DECLARE_WORK(topology_work, topology_work_fn);
1484
1485void topology_schedule_update(void)
1486{
1487 schedule_work(&topology_work);
1488}
1489
1490static void topology_timer_fn(unsigned long ignored)
1491{
1492 if (!vphn_enabled)
1493 return;
1494 if (update_cpu_associativity_changes_mask() > 0)
1495 topology_schedule_update();
1496 set_topology_timer();
1497}
1498static struct timer_list topology_timer =
1499 TIMER_INITIALIZER(topology_timer_fn, 0, 0);
1500
1501static void set_topology_timer(void)
1502{
1503 topology_timer.data = 0;
1504 topology_timer.expires = jiffies + 60 * HZ;
1505 add_timer(&topology_timer);
1506}
1507
1508/*
1509 * Start polling for VPHN associativity changes.
1510 */
1511int start_topology_update(void)
1512{
1513 int rc = 0;
1514
1515 if (firmware_has_feature(FW_FEATURE_VPHN)) {
1516 vphn_enabled = 1;
1517 setup_cpu_associativity_change_counters();
1518 init_timer_deferrable(&topology_timer);
1519 set_topology_timer();
1520 rc = 1;
1521 }
1522
1523 return rc;
1524}
1525__initcall(start_topology_update);
1526
1527/*
1528 * Disable polling for VPHN associativity changes.
1529 */
1530int stop_topology_update(void)
1531{
1532 vphn_enabled = 0;
1533 return del_timer_sync(&topology_timer);
1534}
1535#endif /* CONFIG_PPC_SPLPAR */
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index a87ead0138b4..8dc41c0157fe 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -78,7 +78,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
78 78
79 /* pgdir take page or two with 4K pages and a page fraction otherwise */ 79 /* pgdir take page or two with 4K pages and a page fraction otherwise */
80#ifndef CONFIG_PPC_4K_PAGES 80#ifndef CONFIG_PPC_4K_PAGES
81 ret = (pgd_t *)kzalloc(1 << PGDIR_ORDER, GFP_KERNEL); 81 ret = kzalloc(1 << PGDIR_ORDER, GFP_KERNEL);
82#else 82#else
83 ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, 83 ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
84 PGDIR_ORDER - PAGE_SHIFT); 84 PGDIR_ORDER - PAGE_SHIFT);
@@ -230,6 +230,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
230 area = get_vm_area_caller(size, VM_IOREMAP, caller); 230 area = get_vm_area_caller(size, VM_IOREMAP, caller);
231 if (area == 0) 231 if (area == 0)
232 return NULL; 232 return NULL;
233 area->phys_addr = p;
233 v = (unsigned long) area->addr; 234 v = (unsigned long) area->addr;
234 } else { 235 } else {
235 v = (ioremap_bot -= size); 236 v = (ioremap_bot -= size);
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 21d6dfab7942..88927a05cdc2 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -223,6 +223,8 @@ void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
223 caller); 223 caller);
224 if (area == NULL) 224 if (area == NULL)
225 return NULL; 225 return NULL;
226
227 area->phys_addr = paligned;
226 ret = __ioremap_at(paligned, area->addr, size, flags); 228 ret = __ioremap_at(paligned, area->addr, size, flags);
227 if (!ret) 229 if (!ret)
228 vunmap(area->addr); 230 vunmap(area->addr);
diff --git a/arch/powerpc/platforms/44x/Makefile b/arch/powerpc/platforms/44x/Makefile
index 82ff326e0795..c04d16df8488 100644
--- a/arch/powerpc/platforms/44x/Makefile
+++ b/arch/powerpc/platforms/44x/Makefile
@@ -1,4 +1,7 @@
1obj-$(CONFIG_44x) := misc_44x.o idle.o 1obj-$(CONFIG_44x) += misc_44x.o
2ifneq ($(CONFIG_PPC4xx_CPM),y)
3obj-$(CONFIG_44x) += idle.o
4endif
2obj-$(CONFIG_PPC44x_SIMPLE) += ppc44x_simple.o 5obj-$(CONFIG_PPC44x_SIMPLE) += ppc44x_simple.o
3obj-$(CONFIG_EBONY) += ebony.o 6obj-$(CONFIG_EBONY) += ebony.o
4obj-$(CONFIG_SAM440EP) += sam440ep.o 7obj-$(CONFIG_SAM440EP) += sam440ep.o
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index 956154f32cfe..20576829eca5 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -313,13 +313,14 @@ config OF_RTC
313source "arch/powerpc/sysdev/bestcomm/Kconfig" 313source "arch/powerpc/sysdev/bestcomm/Kconfig"
314 314
315config MPC8xxx_GPIO 315config MPC8xxx_GPIO
316 bool "MPC8xxx GPIO support" 316 bool "MPC512x/MPC8xxx GPIO support"
317 depends on PPC_MPC831x || PPC_MPC834x || PPC_MPC837x || FSL_SOC_BOOKE || PPC_86xx 317 depends on PPC_MPC512x || PPC_MPC831x || PPC_MPC834x || PPC_MPC837x || \
318 FSL_SOC_BOOKE || PPC_86xx
318 select GENERIC_GPIO 319 select GENERIC_GPIO
319 select ARCH_REQUIRE_GPIOLIB 320 select ARCH_REQUIRE_GPIOLIB
320 help 321 help
321 Say Y here if you're going to use hardware that connects to the 322 Say Y here if you're going to use hardware that connects to the
322 MPC831x/834x/837x/8572/8610 GPIOs. 323 MPC512x/831x/834x/837x/8572/8610 GPIOs.
323 324
324config SIMPLE_GPIO 325config SIMPLE_GPIO
325 bool "Support for simple, memory-mapped GPIO controllers" 326 bool "Support for simple, memory-mapped GPIO controllers"
diff --git a/arch/powerpc/platforms/cell/beat_iommu.c b/arch/powerpc/platforms/cell/beat_iommu.c
index beec405eb6f8..3ce685568935 100644
--- a/arch/powerpc/platforms/cell/beat_iommu.c
+++ b/arch/powerpc/platforms/cell/beat_iommu.c
@@ -76,7 +76,7 @@ static void __init celleb_init_direct_mapping(void)
76 76
77static void celleb_dma_dev_setup(struct device *dev) 77static void celleb_dma_dev_setup(struct device *dev)
78{ 78{
79 dev->archdata.dma_ops = get_pci_dma_ops(); 79 set_dma_ops(dev, &dma_direct_ops);
80 set_dma_offset(dev, celleb_dma_direct_offset); 80 set_dma_offset(dev, celleb_dma_direct_offset);
81} 81}
82 82
@@ -106,7 +106,6 @@ static struct notifier_block celleb_of_bus_notifier = {
106static int __init celleb_init_iommu(void) 106static int __init celleb_init_iommu(void)
107{ 107{
108 celleb_init_direct_mapping(); 108 celleb_init_direct_mapping();
109 set_pci_dma_ops(&dma_direct_ops);
110 ppc_md.pci_dma_dev_setup = celleb_pci_dma_dev_setup; 109 ppc_md.pci_dma_dev_setup = celleb_pci_dma_dev_setup;
111 bus_register_notifier(&platform_bus_type, &celleb_of_bus_notifier); 110 bus_register_notifier(&platform_bus_type, &celleb_of_bus_notifier);
112 111
diff --git a/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c b/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c
index a101abf17504..3b894f585280 100644
--- a/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c
+++ b/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c
@@ -36,10 +36,9 @@ static int spu_alloc_lscsa_std(struct spu_state *csa)
36 struct spu_lscsa *lscsa; 36 struct spu_lscsa *lscsa;
37 unsigned char *p; 37 unsigned char *p;
38 38
39 lscsa = vmalloc(sizeof(struct spu_lscsa)); 39 lscsa = vzalloc(sizeof(struct spu_lscsa));
40 if (!lscsa) 40 if (!lscsa)
41 return -ENOMEM; 41 return -ENOMEM;
42 memset(lscsa, 0, sizeof(struct spu_lscsa));
43 csa->lscsa = lscsa; 42 csa->lscsa = lscsa;
44 43
45 /* Set LS pages reserved to allow for user-space mapping. */ 44 /* Set LS pages reserved to allow for user-space mapping. */
diff --git a/arch/powerpc/platforms/chrp/time.c b/arch/powerpc/platforms/chrp/time.c
index 054dfe5b8e77..f803f4b8ab6f 100644
--- a/arch/powerpc/platforms/chrp/time.c
+++ b/arch/powerpc/platforms/chrp/time.c
@@ -29,6 +29,10 @@
29 29
30extern spinlock_t rtc_lock; 30extern spinlock_t rtc_lock;
31 31
32#define NVRAM_AS0 0x74
33#define NVRAM_AS1 0x75
34#define NVRAM_DATA 0x77
35
32static int nvram_as1 = NVRAM_AS1; 36static int nvram_as1 = NVRAM_AS1;
33static int nvram_as0 = NVRAM_AS0; 37static int nvram_as0 = NVRAM_AS0;
34static int nvram_data = NVRAM_DATA; 38static int nvram_data = NVRAM_DATA;
diff --git a/arch/powerpc/platforms/iseries/mf.c b/arch/powerpc/platforms/iseries/mf.c
index 42d0a886de05..b5e026bdca21 100644
--- a/arch/powerpc/platforms/iseries/mf.c
+++ b/arch/powerpc/platforms/iseries/mf.c
@@ -1045,71 +1045,9 @@ static const struct file_operations mf_side_proc_fops = {
1045 .write = mf_side_proc_write, 1045 .write = mf_side_proc_write,
1046}; 1046};
1047 1047
1048#if 0
1049static void mf_getSrcHistory(char *buffer, int size)
1050{
1051 struct IplTypeReturnStuff return_stuff;
1052 struct pending_event *ev = new_pending_event();
1053 int rc = 0;
1054 char *pages[4];
1055
1056 pages[0] = kmalloc(4096, GFP_ATOMIC);
1057 pages[1] = kmalloc(4096, GFP_ATOMIC);
1058 pages[2] = kmalloc(4096, GFP_ATOMIC);
1059 pages[3] = kmalloc(4096, GFP_ATOMIC);
1060 if ((ev == NULL) || (pages[0] == NULL) || (pages[1] == NULL)
1061 || (pages[2] == NULL) || (pages[3] == NULL))
1062 return -ENOMEM;
1063
1064 return_stuff.xType = 0;
1065 return_stuff.xRc = 0;
1066 return_stuff.xDone = 0;
1067 ev->event.hp_lp_event.xSubtype = 6;
1068 ev->event.hp_lp_event.x.xSubtypeData =
1069 subtype_data('M', 'F', 'V', 'I');
1070 ev->event.data.vsp_cmd.xEvent = &return_stuff;
1071 ev->event.data.vsp_cmd.cmd = 4;
1072 ev->event.data.vsp_cmd.lp_index = HvLpConfig_getLpIndex();
1073 ev->event.data.vsp_cmd.result_code = 0xFF;
1074 ev->event.data.vsp_cmd.reserved = 0;
1075 ev->event.data.vsp_cmd.sub_data.page[0] = iseries_hv_addr(pages[0]);
1076 ev->event.data.vsp_cmd.sub_data.page[1] = iseries_hv_addr(pages[1]);
1077 ev->event.data.vsp_cmd.sub_data.page[2] = iseries_hv_addr(pages[2]);
1078 ev->event.data.vsp_cmd.sub_data.page[3] = iseries_hv_addr(pages[3]);
1079 mb();
1080 if (signal_event(ev) != 0)
1081 return;
1082
1083 while (return_stuff.xDone != 1)
1084 udelay(10);
1085 if (return_stuff.xRc == 0)
1086 memcpy(buffer, pages[0], size);
1087 kfree(pages[0]);
1088 kfree(pages[1]);
1089 kfree(pages[2]);
1090 kfree(pages[3]);
1091}
1092#endif
1093
1094static int mf_src_proc_show(struct seq_file *m, void *v) 1048static int mf_src_proc_show(struct seq_file *m, void *v)
1095{ 1049{
1096#if 0
1097 int len;
1098
1099 mf_getSrcHistory(page, count);
1100 len = count;
1101 len -= off;
1102 if (len < count) {
1103 *eof = 1;
1104 if (len <= 0)
1105 return 0;
1106 } else
1107 len = count;
1108 *start = page + off;
1109 return len;
1110#else
1111 return 0; 1050 return 0;
1112#endif
1113} 1051}
1114 1052
1115static int mf_src_proc_open(struct inode *inode, struct file *file) 1053static int mf_src_proc_open(struct inode *inode, struct file *file)
diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c
index 1f9fb2c57761..14943ef01918 100644
--- a/arch/powerpc/platforms/pasemi/iommu.c
+++ b/arch/powerpc/platforms/pasemi/iommu.c
@@ -156,20 +156,12 @@ static void iommu_table_iobmap_setup(void)
156 156
157static void pci_dma_bus_setup_pasemi(struct pci_bus *bus) 157static void pci_dma_bus_setup_pasemi(struct pci_bus *bus)
158{ 158{
159 struct device_node *dn;
160
161 pr_debug("pci_dma_bus_setup, bus %p, bus->self %p\n", bus, bus->self); 159 pr_debug("pci_dma_bus_setup, bus %p, bus->self %p\n", bus, bus->self);
162 160
163 if (!iommu_table_iobmap_inited) { 161 if (!iommu_table_iobmap_inited) {
164 iommu_table_iobmap_inited = 1; 162 iommu_table_iobmap_inited = 1;
165 iommu_table_iobmap_setup(); 163 iommu_table_iobmap_setup();
166 } 164 }
167
168 dn = pci_bus_to_OF_node(bus);
169
170 if (dn)
171 PCI_DN(dn)->iommu_table = &iommu_table_iobmap;
172
173} 165}
174 166
175 167
@@ -192,9 +184,6 @@ static void pci_dma_dev_setup_pasemi(struct pci_dev *dev)
192 set_iommu_table_base(&dev->dev, &iommu_table_iobmap); 184 set_iommu_table_base(&dev->dev, &iommu_table_iobmap);
193} 185}
194 186
195static void pci_dma_bus_setup_null(struct pci_bus *b) { }
196static void pci_dma_dev_setup_null(struct pci_dev *d) { }
197
198int __init iob_init(struct device_node *dn) 187int __init iob_init(struct device_node *dn)
199{ 188{
200 unsigned long tmp; 189 unsigned long tmp;
@@ -251,14 +240,8 @@ void __init iommu_init_early_pasemi(void)
251 iommu_off = of_chosen && 240 iommu_off = of_chosen &&
252 of_get_property(of_chosen, "linux,iommu-off", NULL); 241 of_get_property(of_chosen, "linux,iommu-off", NULL);
253#endif 242#endif
254 if (iommu_off) { 243 if (iommu_off)
255 /* Direct I/O, IOMMU off */
256 ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_null;
257 ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_null;
258 set_pci_dma_ops(&dma_direct_ops);
259
260 return; 244 return;
261 }
262 245
263 iob_init(NULL); 246 iob_init(NULL);
264 247
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 9deb274841f1..d5aceb7fb125 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -506,6 +506,15 @@ static int __init pmac_declare_of_platform_devices(void)
506 of_platform_device_create(np, "smu", NULL); 506 of_platform_device_create(np, "smu", NULL);
507 of_node_put(np); 507 of_node_put(np);
508 } 508 }
509 np = of_find_node_by_type(NULL, "fcu");
510 if (np == NULL) {
511 /* Some machines have strangely broken device-tree */
512 np = of_find_node_by_path("/u3@0,f8000000/i2c@f8001000/fan@15e");
513 }
514 if (np) {
515 of_platform_device_create(np, "temperature", NULL);
516 of_node_put(np);
517 }
509 518
510 return 0; 519 return 0;
511} 520}
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 3139814f6439..5d1b743dbe7e 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -33,6 +33,16 @@ config PSERIES_MSI
33 depends on PCI_MSI && EEH 33 depends on PCI_MSI && EEH
34 default y 34 default y
35 35
36config PSERIES_ENERGY
37 tristate "pSeries energy management capabilities driver"
38 depends on PPC_PSERIES
39 default y
40 help
41	  Provides an interface to platform energy management capabilities
42	  on supported pSeries platforms.
43	  Provides: /sys/devices/system/cpu/pseries_(de)activate_hint_list
44	  and /sys/devices/system/cpu/cpuN/pseries_(de)activate_hint
45
36config SCANLOG 46config SCANLOG
37 tristate "Scanlog dump interface" 47 tristate "Scanlog dump interface"
38 depends on RTAS_PROC && PPC_PSERIES 48 depends on RTAS_PROC && PPC_PSERIES
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index 59eb8bdaa79d..fc5237810ece 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_EEH) += eeh.o eeh_cache.o eeh_driver.o eeh_event.o eeh_sysfs.o
11obj-$(CONFIG_KEXEC) += kexec.o 11obj-$(CONFIG_KEXEC) += kexec.o
12obj-$(CONFIG_PCI) += pci.o pci_dlpar.o 12obj-$(CONFIG_PCI) += pci.o pci_dlpar.o
13obj-$(CONFIG_PSERIES_MSI) += msi.o 13obj-$(CONFIG_PSERIES_MSI) += msi.o
14obj-$(CONFIG_PSERIES_ENERGY) += pseries_energy.o
14 15
15obj-$(CONFIG_HOTPLUG_CPU) += hotplug-cpu.o 16obj-$(CONFIG_HOTPLUG_CPU) += hotplug-cpu.o
16obj-$(CONFIG_MEMORY_HOTPLUG) += hotplug-memory.o 17obj-$(CONFIG_MEMORY_HOTPLUG) += hotplug-memory.o
diff --git a/arch/powerpc/platforms/pseries/firmware.c b/arch/powerpc/platforms/pseries/firmware.c
index 0a14d8cd314f..0b0eff0cce35 100644
--- a/arch/powerpc/platforms/pseries/firmware.c
+++ b/arch/powerpc/platforms/pseries/firmware.c
@@ -55,6 +55,7 @@ firmware_features_table[FIRMWARE_MAX_FEATURES] = {
55 {FW_FEATURE_XDABR, "hcall-xdabr"}, 55 {FW_FEATURE_XDABR, "hcall-xdabr"},
56 {FW_FEATURE_MULTITCE, "hcall-multi-tce"}, 56 {FW_FEATURE_MULTITCE, "hcall-multi-tce"},
57 {FW_FEATURE_SPLPAR, "hcall-splpar"}, 57 {FW_FEATURE_SPLPAR, "hcall-splpar"},
58 {FW_FEATURE_VPHN, "hcall-vphn"},
58}; 59};
59 60
60/* Build up the firmware features bitmask using the contents of 61/* Build up the firmware features bitmask using the contents of
diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S
index 48d20573e4de..fd05fdee576a 100644
--- a/arch/powerpc/platforms/pseries/hvCall.S
+++ b/arch/powerpc/platforms/pseries/hvCall.S
@@ -11,6 +11,7 @@
11#include <asm/processor.h> 11#include <asm/processor.h>
12#include <asm/ppc_asm.h> 12#include <asm/ppc_asm.h>
13#include <asm/asm-offsets.h> 13#include <asm/asm-offsets.h>
14#include <asm/ptrace.h>
14 15
15#define STK_PARM(i) (48 + ((i)-3)*8) 16#define STK_PARM(i) (48 + ((i)-3)*8)
16 17
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index a77bcaed80af..edea60b7ee90 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -140,7 +140,7 @@ static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
140 return ret; 140 return ret;
141} 141}
142 142
143static DEFINE_PER_CPU(u64 *, tce_page) = NULL; 143static DEFINE_PER_CPU(u64 *, tce_page);
144 144
145static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, 145static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
146 long npages, unsigned long uaddr, 146 long npages, unsigned long uaddr,
@@ -323,14 +323,13 @@ static void iommu_table_setparms(struct pci_controller *phb,
323static void iommu_table_setparms_lpar(struct pci_controller *phb, 323static void iommu_table_setparms_lpar(struct pci_controller *phb,
324 struct device_node *dn, 324 struct device_node *dn,
325 struct iommu_table *tbl, 325 struct iommu_table *tbl,
326 const void *dma_window, 326 const void *dma_window)
327 int bussubno)
328{ 327{
329 unsigned long offset, size; 328 unsigned long offset, size;
330 329
331 tbl->it_busno = bussubno;
332 of_parse_dma_window(dn, dma_window, &tbl->it_index, &offset, &size); 330 of_parse_dma_window(dn, dma_window, &tbl->it_index, &offset, &size);
333 331
332 tbl->it_busno = phb->bus->number;
334 tbl->it_base = 0; 333 tbl->it_base = 0;
335 tbl->it_blocksize = 16; 334 tbl->it_blocksize = 16;
336 tbl->it_type = TCE_PCI; 335 tbl->it_type = TCE_PCI;
@@ -450,14 +449,10 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
450 if (!ppci->iommu_table) { 449 if (!ppci->iommu_table) {
451 tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, 450 tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
452 ppci->phb->node); 451 ppci->phb->node);
453 iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window, 452 iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window);
454 bus->number);
455 ppci->iommu_table = iommu_init_table(tbl, ppci->phb->node); 453 ppci->iommu_table = iommu_init_table(tbl, ppci->phb->node);
456 pr_debug(" created table: %p\n", ppci->iommu_table); 454 pr_debug(" created table: %p\n", ppci->iommu_table);
457 } 455 }
458
459 if (pdn != dn)
460 PCI_DN(dn)->iommu_table = ppci->iommu_table;
461} 456}
462 457
463 458
@@ -533,21 +528,11 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
533 } 528 }
534 pr_debug(" parent is %s\n", pdn->full_name); 529 pr_debug(" parent is %s\n", pdn->full_name);
535 530
536 /* Check for parent == NULL so we don't try to setup the empty EADS
537 * slots on POWER4 machines.
538 */
539 if (dma_window == NULL || pdn->parent == NULL) {
540 pr_debug(" no dma window for device, linking to parent\n");
541 set_iommu_table_base(&dev->dev, PCI_DN(pdn)->iommu_table);
542 return;
543 }
544
545 pci = PCI_DN(pdn); 531 pci = PCI_DN(pdn);
546 if (!pci->iommu_table) { 532 if (!pci->iommu_table) {
547 tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, 533 tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
548 pci->phb->node); 534 pci->phb->node);
549 iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window, 535 iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window);
550 pci->phb->bus->number);
551 pci->iommu_table = iommu_init_table(tbl, pci->phb->node); 536 pci->iommu_table = iommu_init_table(tbl, pci->phb->node);
552 pr_debug(" created table: %p\n", pci->iommu_table); 537 pr_debug(" created table: %p\n", pci->iommu_table);
553 } else { 538 } else {
@@ -571,8 +556,7 @@ static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long acti
571 556
572 switch (action) { 557 switch (action) {
573 case PSERIES_RECONFIG_REMOVE: 558 case PSERIES_RECONFIG_REMOVE:
574 if (pci && pci->iommu_table && 559 if (pci && pci->iommu_table)
575 of_get_property(np, "ibm,dma-window", NULL))
576 iommu_free_table(pci->iommu_table, np->full_name); 560 iommu_free_table(pci->iommu_table, np->full_name);
577 break; 561 break;
578 default: 562 default:
@@ -589,13 +573,8 @@ static struct notifier_block iommu_reconfig_nb = {
589/* These are called very early. */ 573/* These are called very early. */
590void iommu_init_early_pSeries(void) 574void iommu_init_early_pSeries(void)
591{ 575{
592 if (of_chosen && of_get_property(of_chosen, "linux,iommu-off", NULL)) { 576 if (of_chosen && of_get_property(of_chosen, "linux,iommu-off", NULL))
593 /* Direct I/O, IOMMU off */
594 ppc_md.pci_dma_dev_setup = NULL;
595 ppc_md.pci_dma_bus_setup = NULL;
596 set_pci_dma_ops(&dma_direct_ops);
597 return; 577 return;
598 }
599 578
600 if (firmware_has_feature(FW_FEATURE_LPAR)) { 579 if (firmware_has_feature(FW_FEATURE_LPAR)) {
601 if (firmware_has_feature(FW_FEATURE_MULTITCE)) { 580 if (firmware_has_feature(FW_FEATURE_MULTITCE)) {
@@ -622,3 +601,17 @@ void iommu_init_early_pSeries(void)
622 set_pci_dma_ops(&dma_iommu_ops); 601 set_pci_dma_ops(&dma_iommu_ops);
623} 602}
624 603
604static int __init disable_multitce(char *str)
605{
606 if (strcmp(str, "off") == 0 &&
607 firmware_has_feature(FW_FEATURE_LPAR) &&
608 firmware_has_feature(FW_FEATURE_MULTITCE)) {
609 printk(KERN_INFO "Disabling MULTITCE firmware feature\n");
610 ppc_md.tce_build = tce_build_pSeriesLP;
611 ppc_md.tce_free = tce_free_pSeriesLP;
612 powerpc_firmware_features &= ~FW_FEATURE_MULTITCE;
613 }
614 return 1;
615}
616
617__setup("multitce=", disable_multitce);
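The new "multitce=" early parameter is a runtime escape hatch: booting with multitce=off reverts ppc_md.tce_build/tce_free to the single-TCE tce_build_pSeriesLP/tce_free_pSeriesLP paths and clears FW_FEATURE_MULTITCE, which can help isolate problems in the firmware's multi-TCE hcalls. (__setup() handlers return 1 to mark the option as consumed.)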
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index f129040d974c..5d3ea9f60dd7 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -627,6 +627,18 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
627 spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags); 627 spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
628} 628}
629 629
630static int __init disable_bulk_remove(char *str)
631{
632 if (strcmp(str, "off") == 0 &&
633 firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
634		printk(KERN_INFO "Disabling BULK_REMOVE firmware feature\n");
635 powerpc_firmware_features &= ~FW_FEATURE_BULK_REMOVE;
636 }
637 return 1;
638}
639
640__setup("bulk_remove=", disable_bulk_remove);
641
630void __init hpte_init_lpar(void) 642void __init hpte_init_lpar(void)
631{ 643{
632 ppc_md.hpte_invalidate = pSeries_lpar_hpte_invalidate; 644 ppc_md.hpte_invalidate = pSeries_lpar_hpte_invalidate;
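The companion "bulk_remove=" parameter above works the same way: bulk_remove=off clears FW_FEATURE_BULK_REMOVE, presumably forcing the hash flush path to fall back from batched H_BULK_REMOVE invalidations to removing HPTEs one at a time, which is useful when debugging the batching itself.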
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index bc3c7f2abd79..7e828ba29bc3 100644
--- a/arch/powerpc/platforms/pseries/nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
@@ -22,11 +22,25 @@
22#include <asm/prom.h> 22#include <asm/prom.h>
23#include <asm/machdep.h> 23#include <asm/machdep.h>
24 24
25/* Max bytes to read/write in one go */
26#define NVRW_CNT 0x20
27
25static unsigned int nvram_size; 28static unsigned int nvram_size;
26static int nvram_fetch, nvram_store; 29static int nvram_fetch, nvram_store;
27static char nvram_buf[NVRW_CNT]; /* assume this is in the first 4GB */ 30static char nvram_buf[NVRW_CNT]; /* assume this is in the first 4GB */
28static DEFINE_SPINLOCK(nvram_lock); 31static DEFINE_SPINLOCK(nvram_lock);
29 32
33static long nvram_error_log_index = -1;
34static long nvram_error_log_size = 0;
35
36struct err_log_info {
37 int error_type;
38 unsigned int seq_num;
39};
40#define NVRAM_MAX_REQ 2079
41#define NVRAM_MIN_REQ 1055
42
43#define NVRAM_LOG_PART_NAME "ibm,rtas-log"
30 44
31static ssize_t pSeries_nvram_read(char *buf, size_t count, loff_t *index) 45static ssize_t pSeries_nvram_read(char *buf, size_t count, loff_t *index)
32{ 46{
@@ -119,6 +133,197 @@ static ssize_t pSeries_nvram_get_size(void)
119 return nvram_size ? nvram_size : -ENODEV; 133 return nvram_size ? nvram_size : -ENODEV;
120} 134}
121 135
136
137/* nvram_write_error_log
138 *
139 * We need to buffer the error logs into nvram to ensure that we have
140 * the failure information to decode. If we have a severe error there
141 * is no way to guarantee that the OS or the machine is in a state to
142 * get back to user land and write the error to disk. For example if
143 * the SCSI device driver causes a Machine Check by writing to a bad
144 * IO address, there is no way of guaranteeing that the device driver
145 * is in any state where it would also be able to write the error data
146 * captured to disk, thus we buffer it in NVRAM for analysis on the
147 * next boot.
148 *
149 * In NVRAM the partition containing the error log buffer will look like:
150 * Header (in bytes):
151 * +-----------+----------+--------+------------+------------------+
152 * | signature | checksum | length | name | data |
153 * |0 |1 |2 3|4 15|16 length-1|
154 * +-----------+----------+--------+------------+------------------+
155 *
156 * The 'data' section would look like (in bytes):
157 * +--------------+------------+-----------------------------------+
158 * | event_logged | sequence # | error log |
159 * |0 3|4 7|8 nvram_error_log_size-1|
160 * +--------------+------------+-----------------------------------+
161 *
162 * event_logged: 0 if event has not been logged to syslog, 1 if it has
163 * sequence #: The unique sequence # for each event. (until it wraps)
164 * error log: The error log from event_scan
165 */
166int nvram_write_error_log(char * buff, int length,
167 unsigned int err_type, unsigned int error_log_cnt)
168{
169 int rc;
170 loff_t tmp_index;
171 struct err_log_info info;
172
173 if (nvram_error_log_index == -1) {
174 return -ESPIPE;
175 }
176
177 if (length > nvram_error_log_size) {
178 length = nvram_error_log_size;
179 }
180
181 info.error_type = err_type;
182 info.seq_num = error_log_cnt;
183
184 tmp_index = nvram_error_log_index;
185
186 rc = ppc_md.nvram_write((char *)&info, sizeof(struct err_log_info), &tmp_index);
187 if (rc <= 0) {
188 printk(KERN_ERR "nvram_write_error_log: Failed nvram_write (%d)\n", rc);
189 return rc;
190 }
191
192 rc = ppc_md.nvram_write(buff, length, &tmp_index);
193 if (rc <= 0) {
194 printk(KERN_ERR "nvram_write_error_log: Failed nvram_write (%d)\n", rc);
195 return rc;
196 }
197
198 return 0;
199}
200
201/* nvram_read_error_log
202 *
203 * Reads the error log from nvram, at most 'length' bytes
204 */
205int nvram_read_error_log(char * buff, int length,
206 unsigned int * err_type, unsigned int * error_log_cnt)
207{
208 int rc;
209 loff_t tmp_index;
210 struct err_log_info info;
211
212 if (nvram_error_log_index == -1)
213 return -1;
214
215 if (length > nvram_error_log_size)
216 length = nvram_error_log_size;
217
218 tmp_index = nvram_error_log_index;
219
220 rc = ppc_md.nvram_read((char *)&info, sizeof(struct err_log_info), &tmp_index);
221 if (rc <= 0) {
222 printk(KERN_ERR "nvram_read_error_log: Failed nvram_read (%d)\n", rc);
223 return rc;
224 }
225
226 rc = ppc_md.nvram_read(buff, length, &tmp_index);
227 if (rc <= 0) {
228 printk(KERN_ERR "nvram_read_error_log: Failed nvram_read (%d)\n", rc);
229 return rc;
230 }
231
232 *error_log_cnt = info.seq_num;
233 *err_type = info.error_type;
234
235 return 0;
236}
237
238/* This doesn't actually zero anything, but it sets the event_logged
239 * word to record that this event is safely in syslog.
240 */
241int nvram_clear_error_log(void)
242{
243 loff_t tmp_index;
244 int clear_word = ERR_FLAG_ALREADY_LOGGED;
245 int rc;
246
247 if (nvram_error_log_index == -1)
248 return -1;
249
250 tmp_index = nvram_error_log_index;
251
252 rc = ppc_md.nvram_write((char *)&clear_word, sizeof(int), &tmp_index);
253 if (rc <= 0) {
254 printk(KERN_ERR "nvram_clear_error_log: Failed nvram_write (%d)\n", rc);
255 return rc;
256 }
257
258 return 0;
259}
260
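A minimal, hypothetical round-trip through the three entry points above (the caller, its names, and the use of NVRAM_MAX_REQ as a buffer bound are all assumptions for illustration):

	static void example_rtas_log_round_trip(char *event, int len,
						unsigned int type, unsigned int seq)
	{
		char buf[NVRAM_MAX_REQ];
		unsigned int out_type, out_seq;

		/* Buffer the event so it survives a crash before reaching syslog. */
		if (nvram_write_error_log(event, len, type, seq))
			return;

		/* Typically on the next boot: recover the event... */
		if (nvram_read_error_log(buf, sizeof(buf), &out_type, &out_seq) == 0)
			nvram_clear_error_log();	/* ...and mark it as logged */
	}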
261/* pseries_nvram_init_log_partition
262 *
263 * This will set up the partition we need for buffering the
264 * error logs, and clean up partitions if needed.
265 *
266 * The general strategy is the following:
267 * 1.) If there is a log partition large enough, use it.
268 * 2.) If there is none large enough, search
269 * for a free partition that is large enough.
270 * 3.) If there is no free partition large enough, remove
271 * _all_ OS partitions and consolidate the space.
272 * 4.) First try to get a chunk that will satisfy the maximum
273 * error log size (NVRAM_MAX_REQ).
274 * 5.) If the max chunk cannot be allocated, then try finding a chunk
275 * that will satisfy the minimum needed (NVRAM_MIN_REQ).
276 */
277static int __init pseries_nvram_init_log_partition(void)
278{
279 loff_t p;
280 int size;
281
282 /* Scan nvram for partitions */
283 nvram_scan_partitions();
284
285	/* Look for ours */
286 p = nvram_find_partition(NVRAM_LOG_PART_NAME, NVRAM_SIG_OS, &size);
287
288 /* Found one but too small, remove it */
289 if (p && size < NVRAM_MIN_REQ) {
290 pr_info("nvram: Found too small "NVRAM_LOG_PART_NAME" partition"
291			", removing it...");
292 nvram_remove_partition(NVRAM_LOG_PART_NAME, NVRAM_SIG_OS);
293 p = 0;
294 }
295
296 /* Create one if we didn't find */
297 if (!p) {
298 p = nvram_create_partition(NVRAM_LOG_PART_NAME, NVRAM_SIG_OS,
299 NVRAM_MAX_REQ, NVRAM_MIN_REQ);
300 /* No room for it, try to get rid of any OS partition
301 * and try again
302 */
303 if (p == -ENOSPC) {
304 pr_info("nvram: No room to create "NVRAM_LOG_PART_NAME
305 " partition, deleting all OS partitions...");
306 nvram_remove_partition(NULL, NVRAM_SIG_OS);
307 p = nvram_create_partition(NVRAM_LOG_PART_NAME,
308 NVRAM_SIG_OS, NVRAM_MAX_REQ,
309 NVRAM_MIN_REQ);
310 }
311 }
312
313 if (p <= 0) {
314 pr_err("nvram: Failed to find or create "NVRAM_LOG_PART_NAME
315 " partition, err %d\n", (int)p);
316 return 0;
317 }
318
319 nvram_error_log_index = p;
320 nvram_error_log_size = nvram_get_partition_size(p) -
321 sizeof(struct err_log_info);
322
323 return 0;
324}
325machine_arch_initcall(pseries, pseries_nvram_init_log_partition);
326
122int __init pSeries_nvram_init(void) 327int __init pSeries_nvram_init(void)
123{ 328{
124 struct device_node *nvram; 329 struct device_node *nvram;
diff --git a/arch/powerpc/platforms/pseries/pseries_energy.c b/arch/powerpc/platforms/pseries/pseries_energy.c
new file mode 100644
index 000000000000..c8b3c69fe891
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/pseries_energy.c
@@ -0,0 +1,326 @@
1/*
2 * POWER platform energy management driver
3 * Copyright (C) 2010 IBM Corporation
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * version 2 as published by the Free Software Foundation.
8 *
9 * This pseries platform device driver provides access to
10 * platform energy management capabilities.
11 */
12
13#include <linux/module.h>
14#include <linux/types.h>
15#include <linux/errno.h>
16#include <linux/init.h>
17#include <linux/seq_file.h>
18#include <linux/sysdev.h>
19#include <linux/cpu.h>
20#include <linux/of.h>
21#include <asm/cputhreads.h>
22#include <asm/page.h>
23#include <asm/hvcall.h>
24
25
26#define MODULE_VERS "1.0"
27#define MODULE_NAME "pseries_energy"
28
29/* Driver flags */
30
31static int sysfs_entries;
32
33/* Helper routines */
34
35/*
36 * Routine to detect firmware support for the hcall:
37 * returns 1 if H_BEST_ENERGY is supported,
38 * else returns 0
39 */
40
41static int check_for_h_best_energy(void)
42{
43 struct device_node *rtas = NULL;
44 const char *hypertas, *s;
45 int length;
46 int rc = 0;
47
48 rtas = of_find_node_by_path("/rtas");
49 if (!rtas)
50 return 0;
51
52 hypertas = of_get_property(rtas, "ibm,hypertas-functions", &length);
53 if (!hypertas) {
54 of_node_put(rtas);
55 return 0;
56 }
57
58	/* hypertas will have a list of strings with hcall names */
59 for (s = hypertas; s < hypertas + length; s += strlen(s) + 1) {
60 if (!strncmp("hcall-best-energy-1", s, 19)) {
61 rc = 1; /* Found the string */
62 break;
63 }
64 }
65 of_node_put(rtas);
66 return rc;
67}
68
69/* Helper routines to convert between drc_index and cpu number */
70
71static u32 cpu_to_drc_index(int cpu)
72{
73 struct device_node *dn = NULL;
74 const int *indexes;
75 int i;
76 int rc = 1;
77 u32 ret = 0;
78
79 dn = of_find_node_by_path("/cpus");
80 if (dn == NULL)
81 goto err;
82 indexes = of_get_property(dn, "ibm,drc-indexes", NULL);
83 if (indexes == NULL)
84 goto err_of_node_put;
85 /* Convert logical cpu number to core number */
86 i = cpu_core_index_of_thread(cpu);
87 /*
88 * The first element indexes[0] is the number of drc_indexes
89 * returned in the list. Hence i+1 will get the drc_index
90 * corresponding to core number i.
91 */
92 WARN_ON(i > indexes[0]);
93 ret = indexes[i + 1];
94 rc = 0;
95
96err_of_node_put:
97 of_node_put(dn);
98err:
99 if (rc)
100		printk(KERN_WARNING "cpu_to_drc_index(%d) failed\n", cpu);
101 return ret;
102}
103
104static int drc_index_to_cpu(u32 drc_index)
105{
106 struct device_node *dn = NULL;
107 const int *indexes;
108 int i, cpu = 0;
109 int rc = 1;
110
111 dn = of_find_node_by_path("/cpus");
112 if (dn == NULL)
113 goto err;
114 indexes = of_get_property(dn, "ibm,drc-indexes", NULL);
115 if (indexes == NULL)
116 goto err_of_node_put;
117 /*
118 * First element in the array is the number of drc_indexes
119 * returned. Search through the list to find the matching
120 * drc_index and get the core number
121 */
122 for (i = 0; i < indexes[0]; i++) {
123 if (indexes[i + 1] == drc_index)
124 break;
125 }
126 /* Convert core number to logical cpu number */
127 cpu = cpu_first_thread_of_core(i);
128 rc = 0;
129
130err_of_node_put:
131 of_node_put(dn);
132err:
133 if (rc)
134		printk(KERN_WARNING "drc_index_to_cpu(%d) failed\n", drc_index);
135 return cpu;
136}
137
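For illustration, a hypothetical ibm,drc-indexes property for four cores, with four threads per core (all values invented):

	indexes[] = { 4, 0x1000, 0x1001, 0x1002, 0x1003 }

	cpu_to_drc_index(6)      -> core 1 -> indexes[2] == 0x1001
	drc_index_to_cpu(0x1001) -> i == 1 -> cpu_first_thread_of_core(1) == cpu 4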
138/*
139 * pseries hypervisor call H_BEST_ENERGY provides hints to OS on
140 * preferred logical cpus to activate or deactivate for optimized
141 * energy consumption.
142 */
143
144#define FLAGS_MODE1 0x004E200000080E01
145#define FLAGS_MODE2 0x004E200000080401
146#define FLAGS_ACTIVATE 0x100
147
148static ssize_t get_best_energy_list(char *page, int activate)
149{
150 int rc, cnt, i, cpu;
151 unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
152 unsigned long flags = 0;
153 u32 *buf_page;
154 char *s = page;
155
156 buf_page = (u32 *) get_zeroed_page(GFP_KERNEL);
157 if (!buf_page)
158 return -ENOMEM;
159
160 flags = FLAGS_MODE1;
161 if (activate)
162 flags |= FLAGS_ACTIVATE;
163
164 rc = plpar_hcall9(H_BEST_ENERGY, retbuf, flags, 0, __pa(buf_page),
165 0, 0, 0, 0, 0, 0);
166 if (rc != H_SUCCESS) {
167 free_page((unsigned long) buf_page);
168 return -EINVAL;
169 }
170
171 cnt = retbuf[0];
172 for (i = 0; i < cnt; i++) {
173 cpu = drc_index_to_cpu(buf_page[2*i+1]);
174 if ((cpu_online(cpu) && !activate) ||
175 (!cpu_online(cpu) && activate))
176 s += sprintf(s, "%d,", cpu);
177 }
178 if (s > page) { /* Something to show */
179 s--; /* Suppress last comma */
180 s += sprintf(s, "\n");
181 }
182
183 free_page((unsigned long) buf_page);
184 return s-page;
185}
186
187static ssize_t get_best_energy_data(struct sys_device *dev,
188 char *page, int activate)
189{
190 int rc;
191 unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
192 unsigned long flags = 0;
193
194 flags = FLAGS_MODE2;
195 if (activate)
196 flags |= FLAGS_ACTIVATE;
197
198 rc = plpar_hcall9(H_BEST_ENERGY, retbuf, flags,
199 cpu_to_drc_index(dev->id),
200 0, 0, 0, 0, 0, 0, 0);
201
202 if (rc != H_SUCCESS)
203 return -EINVAL;
204
205 return sprintf(page, "%lu\n", retbuf[1] >> 32);
206}
207
208/* Wrapper functions */
209
210static ssize_t cpu_activate_hint_list_show(struct sysdev_class *class,
211 struct sysdev_class_attribute *attr, char *page)
212{
213 return get_best_energy_list(page, 1);
214}
215
216static ssize_t cpu_deactivate_hint_list_show(struct sysdev_class *class,
217 struct sysdev_class_attribute *attr, char *page)
218{
219 return get_best_energy_list(page, 0);
220}
221
222static ssize_t percpu_activate_hint_show(struct sys_device *dev,
223 struct sysdev_attribute *attr, char *page)
224{
225 return get_best_energy_data(dev, page, 1);
226}
227
228static ssize_t percpu_deactivate_hint_show(struct sys_device *dev,
229 struct sysdev_attribute *attr, char *page)
230{
231 return get_best_energy_data(dev, page, 0);
232}
233
234/*
235 * Create sysfs interface:
236 * /sys/devices/system/cpu/pseries_activate_hint_list
237 * /sys/devices/system/cpu/pseries_deactivate_hint_list
238 * Comma separated list of cpus to activate or deactivate
239 * /sys/devices/system/cpu/cpuN/pseries_activate_hint
240 * /sys/devices/system/cpu/cpuN/pseries_deactivate_hint
241 * Per-cpu value of the hint
242 */
243
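Reading these files might look as follows (output values hypothetical): the hint lists hold the comma-separated cpu numbers built by get_best_energy_list(), and each per-cpu file prints the numeric hint that get_best_energy_data() extracts from retbuf[1]:

	/sys/devices/system/cpu/pseries_deactivate_hint_list  ->  6,7
	/sys/devices/system/cpu/cpu6/pseries_deactivate_hint  ->  2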
244struct sysdev_class_attribute attr_cpu_activate_hint_list =
245 _SYSDEV_CLASS_ATTR(pseries_activate_hint_list, 0444,
246 cpu_activate_hint_list_show, NULL);
247
248struct sysdev_class_attribute attr_cpu_deactivate_hint_list =
249 _SYSDEV_CLASS_ATTR(pseries_deactivate_hint_list, 0444,
250 cpu_deactivate_hint_list_show, NULL);
251
252struct sysdev_attribute attr_percpu_activate_hint =
253 _SYSDEV_ATTR(pseries_activate_hint, 0444,
254 percpu_activate_hint_show, NULL);
255
256struct sysdev_attribute attr_percpu_deactivate_hint =
257 _SYSDEV_ATTR(pseries_deactivate_hint, 0444,
258 percpu_deactivate_hint_show, NULL);
259
260static int __init pseries_energy_init(void)
261{
262 int cpu, err;
263 struct sys_device *cpu_sys_dev;
264
265 if (!check_for_h_best_energy()) {
266 printk(KERN_INFO "Hypercall H_BEST_ENERGY not supported\n");
267 return 0;
268 }
269 /* Create the sysfs files */
270 err = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
271 &attr_cpu_activate_hint_list.attr);
272 if (!err)
273 err = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
274 &attr_cpu_deactivate_hint_list.attr);
275
276 if (err)
277 return err;
278 for_each_possible_cpu(cpu) {
279 cpu_sys_dev = get_cpu_sysdev(cpu);
280 err = sysfs_create_file(&cpu_sys_dev->kobj,
281 &attr_percpu_activate_hint.attr);
282 if (err)
283 break;
284 err = sysfs_create_file(&cpu_sys_dev->kobj,
285 &attr_percpu_deactivate_hint.attr);
286 if (err)
287 break;
288 }
289
290 if (err)
291 return err;
292
293 sysfs_entries = 1; /* Removed entries on cleanup */
294 return 0;
295
296}
297
298static void __exit pseries_energy_cleanup(void)
299{
300 int cpu;
301 struct sys_device *cpu_sys_dev;
302
303 if (!sysfs_entries)
304 return;
305
306 /* Remove the sysfs files */
307 sysfs_remove_file(&cpu_sysdev_class.kset.kobj,
308 &attr_cpu_activate_hint_list.attr);
309
310 sysfs_remove_file(&cpu_sysdev_class.kset.kobj,
311 &attr_cpu_deactivate_hint_list.attr);
312
313 for_each_possible_cpu(cpu) {
314 cpu_sys_dev = get_cpu_sysdev(cpu);
315 sysfs_remove_file(&cpu_sys_dev->kobj,
316 &attr_percpu_activate_hint.attr);
317 sysfs_remove_file(&cpu_sys_dev->kobj,
318 &attr_percpu_deactivate_hint.attr);
319 }
320}
321
322module_init(pseries_energy_init);
323module_exit(pseries_energy_cleanup);
324MODULE_DESCRIPTION("Driver for pSeries platform energy management");
325MODULE_AUTHOR("Vaidyanathan Srinivasan");
326MODULE_LICENSE("GPL");
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index 0bef9dacb64e..9c2973479142 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_OF_RTC) += of_rtc.o
 ifeq ($(CONFIG_PCI),y)
 obj-$(CONFIG_4xx)		+= ppc4xx_pci.o
 endif
+obj-$(CONFIG_PPC4xx_CPM)	+= ppc4xx_cpm.o
 obj-$(CONFIG_PPC4xx_GPIO)	+= ppc4xx_gpio.o
 
 obj-$(CONFIG_CPM)		+= cpm_common.o
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index 17cf15ec38be..8e9e06a7ca59 100644
--- a/arch/powerpc/sysdev/dart_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -312,17 +312,10 @@ static void pci_dma_dev_setup_dart(struct pci_dev *dev)
 
 static void pci_dma_bus_setup_dart(struct pci_bus *bus)
 {
-	struct device_node *dn;
-
 	if (!iommu_table_dart_inited) {
 		iommu_table_dart_inited = 1;
 		iommu_table_dart_setup();
 	}
-
-	dn = pci_bus_to_OF_node(bus);
-
-	if (dn)
-		PCI_DN(dn)->iommu_table = &iommu_table_dart;
 }
 
 static bool dart_device_on_pcie(struct device *dev)
@@ -373,7 +366,7 @@ void __init iommu_init_early_dart(void)
 	if (dn == NULL) {
 		dn = of_find_compatible_node(NULL, "dart", "u4-dart");
 		if (dn == NULL)
-			goto bail;
+			return;	/* use default direct_dma_ops */
 		dart_is_u4 = 1;
 	}
 
diff --git a/arch/powerpc/sysdev/mpc8xxx_gpio.c b/arch/powerpc/sysdev/mpc8xxx_gpio.c
index c0ea05e87f1d..c48cd8178079 100644
--- a/arch/powerpc/sysdev/mpc8xxx_gpio.c
+++ b/arch/powerpc/sysdev/mpc8xxx_gpio.c
@@ -1,5 +1,5 @@
 /*
- * GPIOs on MPC8349/8572/8610 and compatible
+ * GPIOs on MPC512x/8349/8572/8610 and compatible
  *
  * Copyright (C) 2008 Peter Korsgaard <jacmet@sunsite.dk>
  *
@@ -26,6 +26,7 @@
 #define GPIO_IER		0x0c
 #define GPIO_IMR		0x10
 #define GPIO_ICR		0x14
+#define GPIO_ICR2		0x18
 
 struct mpc8xxx_gpio_chip {
 	struct of_mm_gpio_chip mm_gc;
@@ -37,6 +38,7 @@ struct mpc8xxx_gpio_chip {
 	 */
 	u32 data;
 	struct irq_host *irq;
+	void *of_dev_id_data;
 };
 
 static inline u32 mpc8xxx_gpio2mask(unsigned int gpio)
@@ -215,6 +217,51 @@ static int mpc8xxx_irq_set_type(unsigned int virq, unsigned int flow_type)
 	return 0;
 }
 
+static int mpc512x_irq_set_type(unsigned int virq, unsigned int flow_type)
+{
+	struct mpc8xxx_gpio_chip *mpc8xxx_gc = get_irq_chip_data(virq);
+	struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
+	unsigned long gpio = virq_to_hw(virq);
+	void __iomem *reg;
+	unsigned int shift;
+	unsigned long flags;
+
+	if (gpio < 16) {
+		reg = mm->regs + GPIO_ICR;
+		shift = (15 - gpio) * 2;
+	} else {
+		reg = mm->regs + GPIO_ICR2;
+		shift = (15 - (gpio % 16)) * 2;
+	}
+
+	switch (flow_type) {
+	case IRQ_TYPE_EDGE_FALLING:
+	case IRQ_TYPE_LEVEL_LOW:
+		spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+		clrsetbits_be32(reg, 3 << shift, 2 << shift);
+		spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+		break;
+
+	case IRQ_TYPE_EDGE_RISING:
+	case IRQ_TYPE_LEVEL_HIGH:
+		spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+		clrsetbits_be32(reg, 3 << shift, 1 << shift);
+		spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+		break;
+
+	case IRQ_TYPE_EDGE_BOTH:
+		spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+		clrbits32(reg, 3 << shift);
+		spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static struct irq_chip mpc8xxx_irq_chip = {
 	.name = "mpc8xxx-gpio",
 	.unmask = mpc8xxx_irq_unmask,
@@ -226,6 +273,11 @@ static struct irq_chip mpc8xxx_irq_chip = {
 static int mpc8xxx_gpio_irq_map(struct irq_host *h, unsigned int virq,
 				irq_hw_number_t hw)
 {
+	struct mpc8xxx_gpio_chip *mpc8xxx_gc = h->host_data;
+
+	if (mpc8xxx_gc->of_dev_id_data)
+		mpc8xxx_irq_chip.set_type = mpc8xxx_gc->of_dev_id_data;
+
 	set_irq_chip_data(virq, h->host_data);
 	set_irq_chip_and_handler(virq, &mpc8xxx_irq_chip, handle_level_irq);
 	set_irq_type(virq, IRQ_TYPE_NONE);
@@ -253,11 +305,20 @@ static struct irq_host_ops mpc8xxx_gpio_irq_ops = {
 	.xlate = mpc8xxx_gpio_irq_xlate,
 };
 
+static struct of_device_id mpc8xxx_gpio_ids[] __initdata = {
+	{ .compatible = "fsl,mpc8349-gpio", },
+	{ .compatible = "fsl,mpc8572-gpio", },
+	{ .compatible = "fsl,mpc8610-gpio", },
+	{ .compatible = "fsl,mpc5121-gpio", .data = mpc512x_irq_set_type, },
+	{}
+};
+
 static void __init mpc8xxx_add_controller(struct device_node *np)
 {
 	struct mpc8xxx_gpio_chip *mpc8xxx_gc;
 	struct of_mm_gpio_chip *mm_gc;
 	struct gpio_chip *gc;
+	const struct of_device_id *id;
 	unsigned hwirq;
 	int ret;
 
@@ -297,6 +358,10 @@ static void __init mpc8xxx_add_controller(struct device_node *np)
 	if (!mpc8xxx_gc->irq)
 		goto skip_irq;
 
+	id = of_match_node(mpc8xxx_gpio_ids, np);
+	if (id)
+		mpc8xxx_gc->of_dev_id_data = id->data;
+
 	mpc8xxx_gc->irq->host_data = mpc8xxx_gc;
 
 	/* ack and mask all irqs */
@@ -321,13 +386,7 @@ static int __init mpc8xxx_add_gpiochips(void)
 {
 	struct device_node *np;
 
-	for_each_compatible_node(np, NULL, "fsl,mpc8349-gpio")
-		mpc8xxx_add_controller(np);
-
-	for_each_compatible_node(np, NULL, "fsl,mpc8572-gpio")
-		mpc8xxx_add_controller(np);
-
-	for_each_compatible_node(np, NULL, "fsl,mpc8610-gpio")
+	for_each_matching_node(np, mpc8xxx_gpio_ids)
 		mpc8xxx_add_controller(np);
 
 	for_each_compatible_node(np, NULL, "fsl,qoriq-gpio")
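
Editorial aside: the probing above is table-driven, with the of_device_id .data member carrying a per-SoC callback; condensed, the dispatch amounts to (names as in the patch):

	const struct of_device_id *id = of_match_node(mpc8xxx_gpio_ids, np);

	if (id && id->data)	/* only fsl,mpc5121-gpio sets .data */
		mpc8xxx_irq_chip.set_type = id->data;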
diff --git a/arch/powerpc/sysdev/ppc4xx_cpm.c b/arch/powerpc/sysdev/ppc4xx_cpm.c
new file mode 100644
index 000000000000..73b86cc5ea74
--- /dev/null
+++ b/arch/powerpc/sysdev/ppc4xx_cpm.c
@@ -0,0 +1,346 @@
/*
 * PowerPC 4xx Clock and Power Management
 *
 * Copyright (C) 2010, Applied Micro Circuits Corporation
 * Victor Gallardo (vgallardo@apm.com)
 *
 * Based on arch/powerpc/platforms/44x/idle.c:
 * Jerone Young <jyoung5@us.ibm.com>
 * Copyright 2008 IBM Corp.
 *
 * Based on arch/powerpc/sysdev/fsl_pmc.c:
 * Anton Vorontsov <avorontsov@ru.mvista.com>
 * Copyright 2009 MontaVista Software, Inc.
 *
 * See file CREDITS for list of people who contributed to this
 * project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/of_platform.h>
#include <linux/sysfs.h>
#include <linux/cpu.h>
#include <linux/suspend.h>
#include <asm/dcr.h>
#include <asm/dcr-native.h>
#include <asm/machdep.h>

#define CPM_ER	0
#define CPM_FR	1
#define CPM_SR	2

#define CPM_IDLE_WAIT	0
#define CPM_IDLE_DOZE	1

struct cpm {
	dcr_host_t dcr_host;
	unsigned int dcr_offset[3];
	unsigned int powersave_off;
	unsigned int unused;
	unsigned int idle_doze;
	unsigned int standby;
	unsigned int suspend;
};

static struct cpm cpm;

struct cpm_idle_mode {
	unsigned int enabled;
	const char  *name;
};

static struct cpm_idle_mode idle_mode[] = {
	[CPM_IDLE_WAIT] = { 1, "wait" }, /* default */
	[CPM_IDLE_DOZE] = { 0, "doze" },
};

static unsigned int cpm_set(unsigned int cpm_reg, unsigned int mask)
{
	unsigned int value;

	/* The CPM controller supports 3 different types of sleep interface,
	 * known as class 1, 2 and 3. Class 1 units are unconditionally
	 * put to sleep when the corresponding CPM bit is set. For class 2
	 * and 3 units this is not the case; if they can be put to sleep,
	 * they will be. Here we do not verify, we just set them and
	 * expect them to eventually go off when they can.
	 */
	value = dcr_read(cpm.dcr_host, cpm.dcr_offset[cpm_reg]);
	dcr_write(cpm.dcr_host, cpm.dcr_offset[cpm_reg], value | mask);

	/* return old state, to restore later if needed */
	return value;
}
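
Editorial aside: the returned old state is intended to be written back once the low-power section ends, exactly as cpm_idle_sleep() below does; in sketch form:

	unsigned int save = cpm_set(CPM_ER, mask);	/* request sleep */
	/* ... enter and leave the low-power state ... */
	dcr_write(cpm.dcr_host, cpm.dcr_offset[CPM_ER], save);	/* restore */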

static void cpm_idle_wait(void)
{
	unsigned long msr_save;

	/* save off initial state */
	msr_save = mfmsr();
	/* sync required when CPM0_ER[CPU] is set */
	mb();
	/* set wait state MSR */
	mtmsr(msr_save|MSR_WE|MSR_EE|MSR_CE|MSR_DE);
	isync();
	/* return to initial state */
	mtmsr(msr_save);
	isync();
}

static void cpm_idle_sleep(unsigned int mask)
{
	unsigned int er_save;

	/* update CPM_ER state */
	er_save = cpm_set(CPM_ER, mask);

	/* go to wait state so that CPM0_ER[CPU] can take effect */
	cpm_idle_wait();

	/* restore CPM_ER state */
	dcr_write(cpm.dcr_host, cpm.dcr_offset[CPM_ER], er_save);
}

static void cpm_idle_doze(void)
{
	cpm_idle_sleep(cpm.idle_doze);
}

static void cpm_idle_config(int mode)
{
	int i;

	if (idle_mode[mode].enabled)
		return;

	for (i = 0; i < ARRAY_SIZE(idle_mode); i++)
		idle_mode[i].enabled = 0;

	idle_mode[mode].enabled = 1;
}

static ssize_t cpm_idle_show(struct kobject *kobj,
			     struct kobj_attribute *attr, char *buf)
{
	char *s = buf;
	int i;

	for (i = 0; i < ARRAY_SIZE(idle_mode); i++) {
		if (idle_mode[i].enabled)
			s += sprintf(s, "[%s] ", idle_mode[i].name);
		else
			s += sprintf(s, "%s ", idle_mode[i].name);
	}

	*(s-1) = '\n'; /* convert the last space to a newline */

	return s - buf;
}

static ssize_t cpm_idle_store(struct kobject *kobj,
			      struct kobj_attribute *attr,
			      const char *buf, size_t n)
{
	int i;
	char *p;
	int len;

	p = memchr(buf, '\n', n);
	len = p ? p - buf : n;

	for (i = 0; i < ARRAY_SIZE(idle_mode); i++) {
		if (strncmp(buf, idle_mode[i].name, len) == 0) {
			cpm_idle_config(i);
			return n;
		}
	}

	return -EINVAL;
}

static struct kobj_attribute cpm_idle_attr =
	__ATTR(idle, 0644, cpm_idle_show, cpm_idle_store);
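
Editorial aside: a mode is then selected by writing its name to this attribute; a minimal userspace sketch (the path follows from cpm_idle_config_sysfs() below, which attaches the file to cpu0):

	#include <stdio.h>

	static int select_doze(void)
	{
		FILE *f = fopen("/sys/devices/system/cpu/cpu0/idle", "w");

		if (!f)
			return -1;
		fputs("doze\n", f);
		return fclose(f);
	}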

static void cpm_idle_config_sysfs(void)
{
	struct sys_device *sys_dev;
	unsigned long ret;

	sys_dev = get_cpu_sysdev(0);

	ret = sysfs_create_file(&sys_dev->kobj,
				&cpm_idle_attr.attr);
	if (ret)
		printk(KERN_WARNING
		       "cpm: failed to create idle sysfs entry\n");
}

static void cpm_idle(void)
{
	if (idle_mode[CPM_IDLE_DOZE].enabled)
		cpm_idle_doze();
	else
		cpm_idle_wait();
}

static int cpm_suspend_valid(suspend_state_t state)
{
	switch (state) {
	case PM_SUSPEND_STANDBY:
		return !!cpm.standby;
	case PM_SUSPEND_MEM:
		return !!cpm.suspend;
	default:
		return 0;
	}
}

static void cpm_suspend_standby(unsigned int mask)
{
	unsigned long tcr_save;

	/* disable decrement interrupt */
	tcr_save = mfspr(SPRN_TCR);
	mtspr(SPRN_TCR, tcr_save & ~TCR_DIE);

	/* go to sleep state */
	cpm_idle_sleep(mask);

	/* restore decrement interrupt */
	mtspr(SPRN_TCR, tcr_save);
}

static int cpm_suspend_enter(suspend_state_t state)
{
	switch (state) {
	case PM_SUSPEND_STANDBY:
		cpm_suspend_standby(cpm.standby);
		break;
	case PM_SUSPEND_MEM:
		cpm_suspend_standby(cpm.suspend);
		break;
	}

	return 0;
}

static struct platform_suspend_ops cpm_suspend_ops = {
	.valid		= cpm_suspend_valid,
	.enter		= cpm_suspend_enter,
};

static int cpm_get_uint_property(struct device_node *np,
				 const char *name)
{
	int len;
	const unsigned int *prop = of_get_property(np, name, &len);

	if (prop == NULL || len < sizeof(u32))
		return 0;

	return *prop;
}

static int __init cpm_init(void)
{
	struct device_node *np;
	int dcr_base, dcr_len;
	int ret = 0;

	if (!cpm.powersave_off) {
		cpm_idle_config(CPM_IDLE_WAIT);
		ppc_md.power_save = &cpm_idle;
	}

	np = of_find_compatible_node(NULL, NULL, "ibm,cpm");
	if (!np) {
		ret = -EINVAL;
		goto out;
	}

	dcr_base = dcr_resource_start(np, 0);
	dcr_len = dcr_resource_len(np, 0);

	if (dcr_base == 0 || dcr_len == 0) {
		printk(KERN_ERR "cpm: could not parse dcr property for %s\n",
		       np->full_name);
		ret = -EINVAL;
		goto out;
	}

	cpm.dcr_host = dcr_map(np, dcr_base, dcr_len);

	if (!DCR_MAP_OK(cpm.dcr_host)) {
		printk(KERN_ERR "cpm: failed to map dcr property for %s\n",
		       np->full_name);
		ret = -EINVAL;
		goto out;
	}

	/* All 4xx SoCs with a CPM controller use one of two different
	 * orders for the CPM registers. Some have the registers in the
	 * order (ER, FR, SR); the others have them in the order
	 * (SR, ER, FR).
	 */

	if (cpm_get_uint_property(np, "er-offset") == 0) {
		cpm.dcr_offset[CPM_ER] = 0;
		cpm.dcr_offset[CPM_FR] = 1;
		cpm.dcr_offset[CPM_SR] = 2;
	} else {
		cpm.dcr_offset[CPM_ER] = 1;
		cpm.dcr_offset[CPM_FR] = 2;
		cpm.dcr_offset[CPM_SR] = 0;
	}

	/* Now let's see what IPs to turn off for the following modes */

	cpm.unused = cpm_get_uint_property(np, "unused-units");
	cpm.idle_doze = cpm_get_uint_property(np, "idle-doze");
	cpm.standby = cpm_get_uint_property(np, "standby");
	cpm.suspend = cpm_get_uint_property(np, "suspend");

	/* If some IPs are unused let's turn them off now */

	if (cpm.unused) {
		cpm_set(CPM_ER, cpm.unused);
		cpm_set(CPM_FR, cpm.unused);
	}

	/* Now let's export interfaces */

	if (!cpm.powersave_off && cpm.idle_doze)
		cpm_idle_config_sysfs();

	if (cpm.standby || cpm.suspend)
		suspend_set_ops(&cpm_suspend_ops);
out:
	if (np)
		of_node_put(np);
	return ret;
}

late_initcall(cpm_init);

static int __init cpm_powersave_off(char *arg)
{
	cpm.powersave_off = 1;
	return 0;
}
__setup("powersave=off", cpm_powersave_off);
diff --git a/arch/powerpc/sysdev/tsi108_dev.c b/arch/powerpc/sysdev/tsi108_dev.c
index c2d675b6392c..ee056807b52c 100644
--- a/arch/powerpc/sysdev/tsi108_dev.c
+++ b/arch/powerpc/sysdev/tsi108_dev.c
@@ -84,8 +84,8 @@ static int __init tsi108_eth_of_init(void)
 	memset(&tsi_eth_data, 0, sizeof(tsi_eth_data));
 
 	ret = of_address_to_resource(np, 0, &r[0]);
-	DBG("%s: name:start->end = %s:0x%lx-> 0x%lx\n",
-	    __func__,r[0].name, r[0].start, r[0].end);
+	DBG("%s: name:start->end = %s:%pR\n",
+	    __func__, r[0].name, &r[0]);
 	if (ret)
 		goto err;
 
@@ -93,8 +93,8 @@ static int __init tsi108_eth_of_init(void)
 	r[1].start = irq_of_parse_and_map(np, 0);
 	r[1].end = irq_of_parse_and_map(np, 0);
 	r[1].flags = IORESOURCE_IRQ;
-	DBG("%s: name:start->end = %s:0x%lx-> 0x%lx\n",
-	    __func__,r[1].name, r[1].start, r[1].end);
+	DBG("%s: name:start->end = %s:%pR\n",
+	    __func__, r[1].name, &r[1]);
 
 	tsi_eth_dev =
 		platform_device_register_simple("tsi-ethernet", i++, &r[0],
diff --git a/arch/sh/boards/mach-ap325rxa/setup.c b/arch/sh/boards/mach-ap325rxa/setup.c
index 07ea908c510d..3e5fc3bbf3ed 100644
--- a/arch/sh/boards/mach-ap325rxa/setup.c
+++ b/arch/sh/boards/mach-ap325rxa/setup.c
@@ -14,6 +14,8 @@
 #include <linux/device.h>
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
+#include <linux/mfd/sh_mobile_sdhi.h>
+#include <linux/mmc/host.h>
 #include <linux/mtd/physmap.h>
 #include <linux/mtd/sh_flctl.h>
 #include <linux/delay.h>
@@ -430,11 +432,18 @@ static struct resource sdhi0_cn3_resources[] = {
 	},
 };
 
+static struct sh_mobile_sdhi_info sdhi0_cn3_data = {
+	.tmio_caps      = MMC_CAP_SDIO_IRQ,
+};
+
 static struct platform_device sdhi0_cn3_device = {
 	.name		= "sh_mobile_sdhi",
 	.id             = 0, /* "sdhi0" clock */
 	.num_resources	= ARRAY_SIZE(sdhi0_cn3_resources),
 	.resource	= sdhi0_cn3_resources,
+	.dev = {
+		.platform_data = &sdhi0_cn3_data,
+	},
 	.archdata = {
 		.hwblk_id = HWBLK_SDHI0,
 	},
@@ -453,11 +462,18 @@ static struct resource sdhi1_cn7_resources[] = {
 	},
 };
 
+static struct sh_mobile_sdhi_info sdhi1_cn7_data = {
+	.tmio_caps      = MMC_CAP_SDIO_IRQ,
+};
+
 static struct platform_device sdhi1_cn7_device = {
 	.name		= "sh_mobile_sdhi",
 	.id             = 1, /* "sdhi1" clock */
 	.num_resources	= ARRAY_SIZE(sdhi1_cn7_resources),
 	.resource	= sdhi1_cn7_resources,
+	.dev = {
+		.platform_data = &sdhi1_cn7_data,
+	},
 	.archdata = {
 		.hwblk_id = HWBLK_SDHI1,
 	},
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index f48c492a68d3..33b662999fc6 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -473,6 +473,7 @@ static struct sh_mobile_sdhi_info sdhi0_info = {
 	.dma_slave_tx	= SHDMA_SLAVE_SDHI0_TX,
 	.dma_slave_rx	= SHDMA_SLAVE_SDHI0_RX,
 	.set_pwr	= sdhi0_set_pwr,
+	.tmio_caps	= MMC_CAP_SDIO_IRQ | MMC_CAP_POWER_OFF_CARD,
 };
 
 static struct resource sdhi0_resources[] = {
@@ -511,6 +512,7 @@ static void sdhi1_set_pwr(struct platform_device *pdev, int state)
 static struct sh_mobile_sdhi_info sdhi1_info = {
 	.dma_slave_tx	= SHDMA_SLAVE_SDHI1_TX,
 	.dma_slave_rx	= SHDMA_SLAVE_SDHI1_RX,
+	.tmio_caps	= MMC_CAP_SDIO_IRQ | MMC_CAP_POWER_OFF_CARD,
 	.set_pwr	= sdhi1_set_pwr,
 };
 
diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c
index 9b60eaabf8f3..7504daaa85da 100644
--- a/arch/sh/boards/mach-kfr2r09/setup.c
+++ b/arch/sh/boards/mach-kfr2r09/setup.c
@@ -11,6 +11,7 @@
 #include <linux/platform_device.h>
 #include <linux/interrupt.h>
 #include <linux/mfd/sh_mobile_sdhi.h>
+#include <linux/mmc/host.h>
 #include <linux/mfd/tmio.h>
 #include <linux/mtd/physmap.h>
 #include <linux/mtd/onenand.h>
@@ -366,6 +367,7 @@ static struct sh_mobile_sdhi_info sh7724_sdhi0_data = {
 	.dma_slave_tx	= SHDMA_SLAVE_SDHI0_TX,
 	.dma_slave_rx	= SHDMA_SLAVE_SDHI0_RX,
 	.tmio_flags	= TMIO_MMC_WRPROTECT_DISABLE,
+	.tmio_caps	= MMC_CAP_SDIO_IRQ,
 };
 
 static struct platform_device kfr2r09_sh_sdhi0_device = {
diff --git a/arch/sh/boards/mach-migor/setup.c b/arch/sh/boards/mach-migor/setup.c
index c8acfec98695..03a7ffe729d5 100644
--- a/arch/sh/boards/mach-migor/setup.c
+++ b/arch/sh/boards/mach-migor/setup.c
@@ -13,6 +13,7 @@
 #include <linux/input.h>
 #include <linux/input/sh_keysc.h>
 #include <linux/mfd/sh_mobile_sdhi.h>
+#include <linux/mmc/host.h>
 #include <linux/mtd/physmap.h>
 #include <linux/mtd/nand.h>
 #include <linux/i2c.h>
@@ -410,6 +411,7 @@ static struct resource sdhi_cn9_resources[] = {
 static struct sh_mobile_sdhi_info sh7724_sdhi_data = {
 	.dma_slave_tx	= SHDMA_SLAVE_SDHI0_TX,
 	.dma_slave_rx	= SHDMA_SLAVE_SDHI0_RX,
+	.tmio_caps	= MMC_CAP_SDIO_IRQ,
 };
 
 static struct platform_device sdhi_cn9_device = {
diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c
index 527a0cd956b5..6dc7407f2cd0 100644
--- a/arch/sh/boards/mach-se/7724/setup.c
+++ b/arch/sh/boards/mach-se/7724/setup.c
@@ -15,6 +15,7 @@
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
 #include <linux/mfd/sh_mobile_sdhi.h>
+#include <linux/mmc/host.h>
 #include <linux/mtd/physmap.h>
 #include <linux/delay.h>
 #include <linux/smc91x.h>
@@ -467,6 +468,7 @@ static struct resource sdhi0_cn7_resources[] = {
 static struct sh_mobile_sdhi_info sh7724_sdhi0_data = {
 	.dma_slave_tx	= SHDMA_SLAVE_SDHI0_TX,
 	.dma_slave_rx	= SHDMA_SLAVE_SDHI0_RX,
+	.tmio_caps	= MMC_CAP_SDIO_IRQ,
 };
 
 static struct platform_device sdhi0_cn7_device = {
@@ -498,6 +500,7 @@ static struct resource sdhi1_cn8_resources[] = {
 static struct sh_mobile_sdhi_info sh7724_sdhi1_data = {
 	.dma_slave_tx	= SHDMA_SLAVE_SDHI1_TX,
 	.dma_slave_rx	= SHDMA_SLAVE_SDHI1_RX,
+	.tmio_caps	= MMC_CAP_SDIO_IRQ,
 };
 
 static struct platform_device sdhi1_cn8_device = {
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7343.c b/arch/sh/kernel/cpu/sh4a/setup-sh7343.c
index 3681cafdb4af..b8e5bc80aa4a 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7343.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7343.c
@@ -360,6 +360,8 @@ void __init plat_early_device_setup(void)
 
 enum {
 	UNUSED = 0,
+	ENABLED,
+	DISABLED,
 
 	/* interrupt sources */
 	IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
@@ -375,15 +377,13 @@ enum {
 	I2C0_ALI, I2C0_TACKI, I2C0_WAITI, I2C0_DTEI,
 	I2C1_ALI, I2C1_TACKI, I2C1_WAITI, I2C1_DTEI,
 	SIM_TEI, SIM_TXI, SIM_RXI, SIM_ERI,
-	IRDA,
-	SDHI0, SDHI1, SDHI2, SDHI3,
-	CMT, TSIF, SIU,
+	IRDA, SDHI, CMT, TSIF, SIU,
 	TMU0, TMU1, TMU2,
 	JPU, LCDC,
 
 	/* interrupt groups */
 
-	DMAC0123, VIOVOU, MMC, DMAC45, FLCTL, I2C0, I2C1, SIM, SDHI, USB,
+	DMAC0123, VIOVOU, MMC, DMAC45, FLCTL, I2C0, I2C1, SIM, USB,
 };
 
 static struct intc_vect vectors[] __initdata = {
@@ -412,8 +412,8 @@ static struct intc_vect vectors[] __initdata = {
 	INTC_VECT(FLCTL_FLTREQ0I, 0xdc0), INTC_VECT(FLCTL_FLTREQ1I, 0xde0),
 	INTC_VECT(I2C0_ALI, 0xe00), INTC_VECT(I2C0_TACKI, 0xe20),
 	INTC_VECT(I2C0_WAITI, 0xe40), INTC_VECT(I2C0_DTEI, 0xe60),
-	INTC_VECT(SDHI0, 0xe80), INTC_VECT(SDHI1, 0xea0),
-	INTC_VECT(SDHI2, 0xec0), INTC_VECT(SDHI3, 0xee0),
+	INTC_VECT(SDHI, 0xe80), INTC_VECT(SDHI, 0xea0),
+	INTC_VECT(SDHI, 0xec0), INTC_VECT(SDHI, 0xee0),
 	INTC_VECT(CMT, 0xf00), INTC_VECT(TSIF, 0xf20),
 	INTC_VECT(SIU, 0xf80),
 	INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
@@ -431,7 +431,6 @@ static struct intc_group groups[] __initdata = {
 	INTC_GROUP(I2C0, I2C0_ALI, I2C0_TACKI, I2C0_WAITI, I2C0_DTEI),
 	INTC_GROUP(I2C1, I2C1_ALI, I2C1_TACKI, I2C1_WAITI, I2C1_DTEI),
 	INTC_GROUP(SIM, SIM_TEI, SIM_TXI, SIM_RXI, SIM_ERI),
-	INTC_GROUP(SDHI, SDHI0, SDHI1, SDHI2, SDHI3),
 	INTC_GROUP(USB, USBI0, USBI1),
 };
 
@@ -452,7 +451,7 @@ static struct intc_mask_reg mask_registers[] __initdata = {
 	{ I2C0_DTEI, I2C0_WAITI, I2C0_TACKI, I2C0_ALI,
 	  FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLENDI, FLCTL_FLSTEI } },
 	{ 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
-	  { SDHI3, SDHI2, SDHI1, SDHI0, 0, 0, 0, SIU } },
+	  { DISABLED, ENABLED, ENABLED, ENABLED, 0, 0, 0, SIU } },
 	{ 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
 	  { 0, 0, 0, CMT, 0, USBI1, USBI0 } },
 	{ 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */
@@ -488,9 +487,13 @@ static struct intc_mask_reg ack_registers[] __initdata = {
 	  { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
 };
 
-static DECLARE_INTC_DESC_ACK(intc_desc, "sh7343", vectors, groups,
-			     mask_registers, prio_registers, sense_registers,
-			     ack_registers);
+static struct intc_desc intc_desc __initdata = {
+	.name = "sh7343",
+	.force_enable = ENABLED,
+	.force_disable = DISABLED,
+	.hw = INTC_HW_DESC(vectors, groups, mask_registers,
+			   prio_registers, sense_registers, ack_registers),
+};
 
 void __init plat_irq_setup(void)
 {
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
index 8dab9e1bbd89..9b3a6aa9081c 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c
@@ -319,6 +319,8 @@ void __init plat_early_device_setup(void)
 
 enum {
 	UNUSED=0,
+	ENABLED,
+	DISABLED,
 
 	/* interrupt sources */
 	IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
@@ -332,14 +334,13 @@ enum {
 	DENC, MSIOF,
 	FLCTL_FLSTEI, FLCTL_FLENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I,
 	I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI,
-	SDHI0, SDHI1, SDHI2, SDHI3,
-	CMT, TSIF, SIU,
+	SDHI, CMT, TSIF, SIU,
 	TMU0, TMU1, TMU2,
 	VEU2, LCDC,
 
 	/* interrupt groups */
 
-	DMAC0123, VIOVOU, MMC, DMAC45, FLCTL, I2C, SDHI,
+	DMAC0123, VIOVOU, MMC, DMAC45, FLCTL, I2C,
 };
 
 static struct intc_vect vectors[] __initdata = {
@@ -364,8 +365,8 @@ static struct intc_vect vectors[] __initdata = {
 	INTC_VECT(FLCTL_FLTREQ0I, 0xdc0), INTC_VECT(FLCTL_FLTREQ1I, 0xde0),
 	INTC_VECT(I2C_ALI, 0xe00), INTC_VECT(I2C_TACKI, 0xe20),
 	INTC_VECT(I2C_WAITI, 0xe40), INTC_VECT(I2C_DTEI, 0xe60),
-	INTC_VECT(SDHI0, 0xe80), INTC_VECT(SDHI1, 0xea0),
-	INTC_VECT(SDHI2, 0xec0), INTC_VECT(SDHI3, 0xee0),
+	INTC_VECT(SDHI, 0xe80), INTC_VECT(SDHI, 0xea0),
+	INTC_VECT(SDHI, 0xec0), INTC_VECT(SDHI, 0xee0),
 	INTC_VECT(CMT, 0xf00), INTC_VECT(TSIF, 0xf20),
 	INTC_VECT(SIU, 0xf80),
 	INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
@@ -381,7 +382,6 @@ static struct intc_group groups[] __initdata = {
 	INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLENDI,
 		   FLCTL_FLTREQ0I, FLCTL_FLTREQ1I),
 	INTC_GROUP(I2C, I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI),
-	INTC_GROUP(SDHI, SDHI0, SDHI1, SDHI2, SDHI3),
 };
 
 static struct intc_mask_reg mask_registers[] __initdata = {
@@ -403,7 +403,7 @@ static struct intc_mask_reg mask_registers[] __initdata = {
 	{ I2C_DTEI, I2C_WAITI, I2C_TACKI, I2C_ALI,
 	  FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLENDI, FLCTL_FLSTEI } },
 	{ 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
-	  { SDHI3, SDHI2, SDHI1, SDHI0, 0, 0, 0, SIU } },
+	  { DISABLED, ENABLED, ENABLED, ENABLED, 0, 0, 0, SIU } },
 	{ 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
 	  { 0, 0, 0, CMT, 0, USB, } },
 	{ 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */
@@ -441,9 +441,13 @@ static struct intc_mask_reg ack_registers[] __initdata = {
 	  { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } },
 };
 
-static DECLARE_INTC_DESC_ACK(intc_desc, "sh7366", vectors, groups,
-			     mask_registers, prio_registers, sense_registers,
-			     ack_registers);
+static struct intc_desc intc_desc __initdata = {
+	.name = "sh7366",
+	.force_enable = ENABLED,
+	.force_disable = DISABLED,
+	.hw = INTC_HW_DESC(vectors, groups, mask_registers,
+			   prio_registers, sense_registers, ack_registers),
+};
 
 void __init plat_irq_setup(void)
 {
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
index d551ed8dea95..a164f8924258 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
@@ -699,7 +699,7 @@ static struct intc_mask_reg mask_registers[] __initdata = {
 	{ I2C_DTEI, I2C_WAITI, I2C_TACKI, I2C_ALI,
 	  FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLENDI, FLCTL_FLSTEI } },
 	{ 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
-	  { DISABLED, DISABLED, ENABLED, ENABLED, 0, 0, TWODG, SIU } },
+	  { DISABLED, ENABLED, ENABLED, ENABLED, 0, 0, TWODG, SIU } },
 	{ 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
 	  { 0, 0, 0, CMT, 0, USB_USBI1, USB_USBI0, } },
 	{ 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
index 0eadefdbbba1..d7641221ee42 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c
@@ -719,7 +719,7 @@ static struct intc_group groups[] __initdata = {
 static struct intc_mask_reg mask_registers[] __initdata = {
 	{ 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */
 	  { 0, TMU1_TUNI2, TMU1_TUNI1, TMU1_TUNI0,
-	    0, DISABLED, ENABLED, ENABLED } },
+	    0, ENABLED, ENABLED, ENABLED } },
 	{ 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */
 	  { VIO_VOUI, VIO_VEU2HI,VIO_BEUI,VIO_CEUI,DMAC0A_DEI3,DMAC0A_DEI2,DMAC0A_DEI1,DMAC0A_DEI0 } },
 	{ 0xa4080088, 0xa40800c8, 8, /* IMR2 / IMCR2 */
@@ -736,7 +736,7 @@ static struct intc_mask_reg mask_registers[] __initdata = {
 	{ I2C_DTEI, I2C_WAITI, I2C_TACKI, I2C_ALI,
 	  FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLTENDI, FLCTL_FLSTEI } },
 	{ 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
-	  { 0, DISABLED, ENABLED, ENABLED,
+	  { 0, ENABLED, ENABLED, ENABLED,
 	    0, 0, SCIFA_SCIFA2, SIU_SIUI } },
 	{ 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
 	  { 0, 0, 0, CMT_CMTI, 0, 0, USB_USI0,0 } },
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
index 828c9657eb52..c598a7f61b7f 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
@@ -1144,7 +1144,7 @@ static struct intc_group groups[] __initdata = {
 static struct intc_mask_reg mask_registers[] __initdata = {
 	{ 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */
 	  { 0, TMU1_TUNI2, TMU1_TUNI1, TMU1_TUNI0,
-	    0, DISABLED, ENABLED, ENABLED } },
+	    0, ENABLED, ENABLED, ENABLED } },
 	{ 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */
 	  { VIO_VOU, VIO_VEU1, VIO_BEU0, VIO_CEU0,
 	    DMAC0A_DEI3, DMAC0A_DEI2, DMAC0A_DEI1, DMAC0A_DEI0 } },
@@ -1166,7 +1166,7 @@ static struct intc_mask_reg mask_registers[] __initdata = {
 	{ I2C0_DTEI, I2C0_WAITI, I2C0_TACKI, I2C0_ALI,
 	  I2C1_DTEI, I2C1_WAITI, I2C1_TACKI, I2C1_ALI } },
 	{ 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */
-	  { DISABLED, DISABLED, ENABLED, ENABLED,
+	  { DISABLED, ENABLED, ENABLED, ENABLED,
 	    0, 0, SCIFA5, FSI } },
 	{ 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */
 	  { 0, 0, 0, CMT, 0, USB1, USB0, 0 } },
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c
index e447938d39cf..0dc714fa23d8 100644
--- a/arch/sparc/kernel/cpu.c
+++ b/arch/sparc/kernel/cpu.c
@@ -375,5 +375,5 @@ static int __init cpu_type_probe(void)
 	return 0;
 }
 
-arch_initcall(cpu_type_probe);
+early_initcall(cpu_type_probe);
 #endif
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
index b87873c0e8ea..ae96cf52a955 100644
--- a/arch/sparc/kernel/pcr.c
+++ b/arch/sparc/kernel/pcr.c
@@ -168,4 +168,4 @@ out_unregister:
 	return err;
 }
 
-arch_initcall(pcr_arch_init);
+early_initcall(pcr_arch_init);
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 55d106b5e31b..211ca3f7fd16 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -185,17 +185,16 @@ struct bootnode;
 
 #ifdef CONFIG_ACPI_NUMA
 extern int acpi_numa;
-extern int acpi_get_nodes(struct bootnode *physnodes);
+extern void acpi_get_nodes(struct bootnode *physnodes, unsigned long start,
+				unsigned long end);
 extern int acpi_scan_nodes(unsigned long start, unsigned long end);
 #define NR_NODE_MEMBLKS (MAX_NUMNODES*2)
+
+#ifdef CONFIG_NUMA_EMU
 extern void acpi_fake_nodes(const struct bootnode *fake_nodes,
 				   int num_nodes);
-#else
-static inline void acpi_fake_nodes(const struct bootnode *fake_nodes,
-				   int num_nodes)
-{
-}
 #endif
+#endif /* CONFIG_ACPI_NUMA */
 
 #define acpi_unlazy_tlb(x)	leave_mm(x)
 
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
index 6aee50d655d1..64dc82ee19f0 100644
--- a/arch/x86/include/asm/amd_nb.h
+++ b/arch/x86/include/asm/amd_nb.h
@@ -3,16 +3,27 @@
 
 #include <linux/pci.h>
 
+struct amd_nb_bus_dev_range {
+	u8 bus;
+	u8 dev_base;
+	u8 dev_limit;
+};
+
 extern struct pci_device_id amd_nb_misc_ids[];
+extern const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[];
 struct bootnode;
 
 extern int early_is_amd_nb(u32 value);
 extern int amd_cache_northbridges(void);
 extern void amd_flush_garts(void);
-extern int amd_get_nodes(struct bootnode *nodes);
 extern int amd_numa_init(unsigned long start_pfn, unsigned long end_pfn);
 extern int amd_scan_nodes(void);
 
+#ifdef CONFIG_NUMA_EMU
+extern void amd_fake_nodes(const struct bootnode *nodes, int nr_nodes);
+extern void amd_get_nodes(struct bootnode *nodes);
+#endif
+
 struct amd_northbridge {
 	struct pci_dev *misc;
 };
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 0141b234406f..4729b2b63117 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -116,11 +116,11 @@ enum fixed_addresses {
 #endif
 	FIX_TEXT_POKE1,	/* reserve 2 pages for text_poke() */
 	FIX_TEXT_POKE0, /* first page is last, because allocation is backward */
-	__end_of_permanent_fixed_addresses,
-
 #ifdef CONFIG_X86_MRST
 	FIX_LNW_VRTC,
 #endif
+	__end_of_permanent_fixed_addresses,
+
 	/*
 	 * 256 temporary boot-time mappings, used by early_ioremap(),
 	 * before ioremap() is functional.
diff --git a/arch/x86/include/asm/gpio.h b/arch/x86/include/asm/gpio.h
index 49dbfdfa50f9..91d915a65259 100644
--- a/arch/x86/include/asm/gpio.h
+++ b/arch/x86/include/asm/gpio.h
@@ -38,12 +38,9 @@ static inline int gpio_cansleep(unsigned int gpio)
 	return __gpio_cansleep(gpio);
 }
 
-/*
- * Not implemented, yet.
- */
 static inline int gpio_to_irq(unsigned int gpio)
 {
-	return -ENOSYS;
+	return __gpio_to_irq(gpio);
 }
 
 static inline int irq_to_gpio(unsigned int irq)
diff --git a/arch/x86/include/asm/kdebug.h b/arch/x86/include/asm/kdebug.h
index f23eb2528464..ca242d35e873 100644
--- a/arch/x86/include/asm/kdebug.h
+++ b/arch/x86/include/asm/kdebug.h
@@ -18,7 +18,6 @@ enum die_val {
 	DIE_TRAP,
 	DIE_GPF,
 	DIE_CALL,
-	DIE_NMI_IPI,
 	DIE_PAGE_FAULT,
 	DIE_NMIUNKNOWN,
 };
diff --git a/arch/x86/include/asm/mach_traps.h b/arch/x86/include/asm/mach_traps.h
index f7920601e472..72a8b52e7dfd 100644
--- a/arch/x86/include/asm/mach_traps.h
+++ b/arch/x86/include/asm/mach_traps.h
@@ -7,9 +7,19 @@
 
 #include <asm/mc146818rtc.h>
 
+#define NMI_REASON_PORT		0x61
+
+#define NMI_REASON_SERR		0x80
+#define NMI_REASON_IOCHK	0x40
+#define NMI_REASON_MASK		(NMI_REASON_SERR | NMI_REASON_IOCHK)
+
+#define NMI_REASON_CLEAR_SERR	0x04
+#define NMI_REASON_CLEAR_IOCHK	0x08
+#define NMI_REASON_CLEAR_MASK	0x0f
+
 static inline unsigned char get_nmi_reason(void)
 {
-	return inb(0x61);
+	return inb(NMI_REASON_PORT);
 }
 
 static inline void reassert_nmi(void)
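
Editorial aside: with these constants, handlers can decode the port 0x61 status symbolically rather than with magic masks; a hedged sketch of typical use:

	unsigned char reason = get_nmi_reason();

	if (reason & NMI_REASON_SERR)
		pr_emerg("NMI: PCI SERR (system error)\n");
	if (reason & NMI_REASON_IOCHK)
		pr_emerg("NMI: IOCHK error\n");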
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
index c4021b953510..c76f5b92b840 100644
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -23,6 +23,26 @@ void arch_trigger_all_cpu_backtrace(void);
 #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
 #endif
 
+/*
+ * Define some priorities for the nmi notifier call chain.
+ *
+ * Create a local nmi bit that has a higher priority than
+ * external nmis, because the local ones are more frequent.
+ *
+ * Also set up some default high/normal/low settings for
+ * subsystems to register with.  We use 4 bits to separate
+ * the priorities; this can go a lot higher if need be.
+ */
+
+#define NMI_LOCAL_SHIFT		16	/* randomly picked */
+#define NMI_LOCAL_BIT		(1ULL << NMI_LOCAL_SHIFT)
+#define NMI_HIGH_PRIOR		(1ULL << 8)
+#define NMI_NORMAL_PRIOR	(1ULL << 4)
+#define NMI_LOW_PRIOR		(1ULL << 0)
+#define NMI_LOCAL_HIGH_PRIOR	(NMI_LOCAL_BIT | NMI_HIGH_PRIOR)
+#define NMI_LOCAL_NORMAL_PRIOR	(NMI_LOCAL_BIT | NMI_NORMAL_PRIOR)
+#define NMI_LOCAL_LOW_PRIOR	(NMI_LOCAL_BIT | NMI_LOW_PRIOR)
+
 void stop_nmi(void);
 void restart_nmi(void);
 
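
Editorial aside: these priorities are consumed by die-notifier registrations, as the hw_nmi.c, mce-inject.c and perf_event.c hunks below show; in sketch form (handler name hypothetical):

	static struct notifier_block my_nmi_nb = {
		.notifier_call	= my_nmi_handler,
		.priority	= NMI_LOCAL_LOW_PRIOR,	/* local NMI, low prio */
	};

	register_die_notifier(&my_nmi_nb);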
diff --git a/arch/x86/include/asm/numa_64.h b/arch/x86/include/asm/numa_64.h
index 823e070e7c26..5ae87285a502 100644
--- a/arch/x86/include/asm/numa_64.h
+++ b/arch/x86/include/asm/numa_64.h
@@ -38,7 +38,7 @@ extern void __cpuinit numa_add_cpu(int cpu);
 extern void __cpuinit numa_remove_cpu(int cpu);
 
 #ifdef CONFIG_NUMA_EMU
-#define FAKE_NODE_MIN_SIZE	((u64)64 << 20)
+#define FAKE_NODE_MIN_SIZE	((u64)32 << 20)
 #define FAKE_NODE_MIN_HASH_MASK	(~(FAKE_NODE_MIN_SIZE - 1UL))
 #endif /* CONFIG_NUMA_EMU */
 #else
diff --git a/arch/x86/include/asm/perf_event_p4.h b/arch/x86/include/asm/perf_event_p4.h
index 295e2ff18a6a..e2f6a99f14ab 100644
--- a/arch/x86/include/asm/perf_event_p4.h
+++ b/arch/x86/include/asm/perf_event_p4.h
@@ -20,6 +20,9 @@
 #define ARCH_P4_MAX_ESCR	(ARCH_P4_TOTAL_ESCR - ARCH_P4_RESERVED_ESCR)
 #define ARCH_P4_MAX_CCCR	(18)
 
+#define ARCH_P4_CNTRVAL_BITS	(40)
+#define ARCH_P4_CNTRVAL_MASK	((1ULL << ARCH_P4_CNTRVAL_BITS) - 1)
+
 #define P4_ESCR_EVENT_MASK	0x7e000000U
 #define P4_ESCR_EVENT_SHIFT	25
 #define P4_ESCR_EVENTMASK_MASK	0x01fffe00U
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index affacb5e0065..0a99f7198bc3 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -20,6 +20,13 @@ struct pci_device_id amd_nb_misc_ids[] = {
 };
 EXPORT_SYMBOL(amd_nb_misc_ids);
 
+const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
+	{ 0x00, 0x18, 0x20 },
+	{ 0xff, 0x00, 0x20 },
+	{ 0xfe, 0x00, 0x20 },
+	{ }
+};
+
 struct amd_northbridge_info amd_northbridges;
 EXPORT_SYMBOL(amd_northbridges);
 
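Editorial aside: the empty terminator lets callers walk the table without ARRAY_SIZE(), which is how aperture_64.c below consumes it; in sketch form:

	int i, slot;

	for (i = 0; amd_nb_bus_dev_ranges[i].dev_limit; i++)
		for (slot = amd_nb_bus_dev_ranges[i].dev_base;
		     slot < amd_nb_bus_dev_ranges[i].dev_limit; slot++)
			/* probe (bus, slot), e.g. with early_is_amd_nb() */;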
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index dcd7c83e1659..5955a7800a96 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -39,18 +39,6 @@ int fallback_aper_force __initdata;
 
 int fix_aperture __initdata = 1;
 
-struct bus_dev_range {
-	int bus;
-	int dev_base;
-	int dev_limit;
-};
-
-static struct bus_dev_range bus_dev_ranges[] __initdata = {
-	{ 0x00, 0x18, 0x20},
-	{ 0xff, 0x00, 0x20},
-	{ 0xfe, 0x00, 0x20}
-};
-
 static struct resource gart_resource = {
 	.name	= "GART",
 	.flags	= IORESOURCE_MEM,
@@ -294,13 +282,13 @@ void __init early_gart_iommu_check(void)
 	search_agp_bridge(&agp_aper_order, &valid_agp);
 
 	fix = 0;
-	for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
+	for (i = 0; amd_nb_bus_dev_ranges[i].dev_limit; i++) {
 		int bus;
 		int dev_base, dev_limit;
 
-		bus = bus_dev_ranges[i].bus;
-		dev_base = bus_dev_ranges[i].dev_base;
-		dev_limit = bus_dev_ranges[i].dev_limit;
+		bus = amd_nb_bus_dev_ranges[i].bus;
+		dev_base = amd_nb_bus_dev_ranges[i].dev_base;
+		dev_limit = amd_nb_bus_dev_ranges[i].dev_limit;
 
 		for (slot = dev_base; slot < dev_limit; slot++) {
 			if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
@@ -349,13 +337,13 @@ void __init early_gart_iommu_check(void)
 		return;
 
 	/* disable them all at first */
-	for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
+	for (i = 0; i < amd_nb_bus_dev_ranges[i].dev_limit; i++) {
 		int bus;
 		int dev_base, dev_limit;
 
-		bus = bus_dev_ranges[i].bus;
-		dev_base = bus_dev_ranges[i].dev_base;
-		dev_limit = bus_dev_ranges[i].dev_limit;
+		bus = amd_nb_bus_dev_ranges[i].bus;
+		dev_base = amd_nb_bus_dev_ranges[i].dev_base;
+		dev_limit = amd_nb_bus_dev_ranges[i].dev_limit;
 
 		for (slot = dev_base; slot < dev_limit; slot++) {
 			if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
@@ -390,14 +378,14 @@ int __init gart_iommu_hole_init(void)
 
 	fix = 0;
 	node = 0;
-	for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
+	for (i = 0; i < amd_nb_bus_dev_ranges[i].dev_limit; i++) {
 		int bus;
 		int dev_base, dev_limit;
 		u32 ctl;
 
-		bus = bus_dev_ranges[i].bus;
-		dev_base = bus_dev_ranges[i].dev_base;
-		dev_limit = bus_dev_ranges[i].dev_limit;
+		bus = amd_nb_bus_dev_ranges[i].bus;
+		dev_base = amd_nb_bus_dev_ranges[i].dev_base;
+		dev_limit = amd_nb_bus_dev_ranges[i].dev_limit;
 
 		for (slot = dev_base; slot < dev_limit; slot++) {
 			if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
@@ -505,7 +493,7 @@ out:
 	}
 
 	/* Fix up the north bridges */
-	for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
+	for (i = 0; i < amd_nb_bus_dev_ranges[i].dev_limit; i++) {
 		int bus, dev_base, dev_limit;
 
 		/*
@@ -514,9 +502,9 @@ out:
 		 */
 		u32 ctl = DISTLBWALKPRB | aper_order << 1;
 
-		bus = bus_dev_ranges[i].bus;
-		dev_base = bus_dev_ranges[i].dev_base;
-		dev_limit = bus_dev_ranges[i].dev_limit;
+		bus = amd_nb_bus_dev_ranges[i].bus;
+		dev_base = amd_nb_bus_dev_ranges[i].dev_base;
+		dev_limit = amd_nb_bus_dev_ranges[i].dev_limit;
 		for (slot = dev_base; slot < dev_limit; slot++) {
 			if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
 				continue;
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index a51345ba449e..06c196d7e59c 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -684,7 +684,7 @@ static int __init calibrate_APIC_clock(void)
 	lapic_clockevent.mult = div_sc(delta, TICK_NSEC * LAPIC_CAL_LOOPS,
 				       lapic_clockevent.shift);
 	lapic_clockevent.max_delta_ns =
-		clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
+		clockevent_delta2ns(0x7FFFFFFF, &lapic_clockevent);
 	lapic_clockevent.min_delta_ns =
 		clockevent_delta2ns(0xF, &lapic_clockevent);
 
diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c
index 72ec29e1ae06..79fd43ca6f96 100644
--- a/arch/x86/kernel/apic/hw_nmi.c
+++ b/arch/x86/kernel/apic/hw_nmi.c
@@ -68,7 +68,6 @@ arch_trigger_all_cpu_backtrace_handler(struct notifier_block *self,
 
 	switch (cmd) {
 	case DIE_NMI:
-	case DIE_NMI_IPI:
 		break;
 
 	default:
@@ -96,7 +95,7 @@ arch_trigger_all_cpu_backtrace_handler(struct notifier_block *self,
 static __read_mostly struct notifier_block backtrace_notifier = {
 	.notifier_call          = arch_trigger_all_cpu_backtrace_handler,
 	.next                   = NULL,
-	.priority               = 1
+	.priority               = NMI_LOCAL_LOW_PRIOR,
 };
 
 static int __init register_trigger_all_cpu_backtrace(void)
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index ecca5f41ad2c..bd16b58b8850 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -378,7 +378,7 @@ struct apic __refdata apic_x2apic_uv_x = {
 
 static __cpuinit void set_x2apic_extra_bits(int pnode)
 {
-	__this_cpu_write(x2apic_extra_bits, (pnode << 6));
+	__this_cpu_write(x2apic_extra_bits, pnode << uvh_apicid.s.pnode_shift);
 }
 
 /*
@@ -641,7 +641,7 @@ void __cpuinit uv_cpu_init(void)
  */
 int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data)
 {
-	if (reason != DIE_NMI_IPI)
+	if (reason != DIE_NMIUNKNOWN)
 		return NOTIFY_OK;
 
 	if (in_crash_kexec)
diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
index e7dbde7bfedb..a77971979564 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
@@ -25,6 +25,7 @@
25#include <linux/gfp.h> 25#include <linux/gfp.h>
26#include <asm/mce.h> 26#include <asm/mce.h>
27#include <asm/apic.h> 27#include <asm/apic.h>
28#include <asm/nmi.h>
28 29
29/* Update fake mce registers on current CPU. */ 30/* Update fake mce registers on current CPU. */
30static void inject_mce(struct mce *m) 31static void inject_mce(struct mce *m)
@@ -83,7 +84,7 @@ static int mce_raise_notify(struct notifier_block *self,
83 struct die_args *args = (struct die_args *)data; 84 struct die_args *args = (struct die_args *)data;
84 int cpu = smp_processor_id(); 85 int cpu = smp_processor_id();
85 struct mce *m = &__get_cpu_var(injectm); 86 struct mce *m = &__get_cpu_var(injectm);
86 if (val != DIE_NMI_IPI || !cpumask_test_cpu(cpu, mce_inject_cpumask)) 87 if (val != DIE_NMI || !cpumask_test_cpu(cpu, mce_inject_cpumask))
87 return NOTIFY_DONE; 88 return NOTIFY_DONE;
88 cpumask_clear_cpu(cpu, mce_inject_cpumask); 89 cpumask_clear_cpu(cpu, mce_inject_cpumask);
89 if (m->inject_flags & MCJ_EXCEPTION) 90 if (m->inject_flags & MCJ_EXCEPTION)
@@ -95,7 +96,7 @@ static int mce_raise_notify(struct notifier_block *self,
95 96
96static struct notifier_block mce_raise_nb = { 97static struct notifier_block mce_raise_nb = {
97 .notifier_call = mce_raise_notify, 98 .notifier_call = mce_raise_notify,
98 .priority = 1000, 99 .priority = NMI_LOCAL_NORMAL_PRIOR,
99}; 100};
100 101
101/* Inject mce on current CPU */ 102/* Inject mce on current CPU */
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 04921017abe0..9d977a2ea693 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1267,7 +1267,6 @@ perf_event_nmi_handler(struct notifier_block *self,
1267 1267
1268 switch (cmd) { 1268 switch (cmd) {
1269 case DIE_NMI: 1269 case DIE_NMI:
1270 case DIE_NMI_IPI:
1271 break; 1270 break;
1272 case DIE_NMIUNKNOWN: 1271 case DIE_NMIUNKNOWN:
1273 this_nmi = percpu_read(irq_stat.__nmi_count); 1272 this_nmi = percpu_read(irq_stat.__nmi_count);
@@ -1317,7 +1316,7 @@ perf_event_nmi_handler(struct notifier_block *self,
1317static __read_mostly struct notifier_block perf_event_nmi_notifier = { 1316static __read_mostly struct notifier_block perf_event_nmi_notifier = {
1318 .notifier_call = perf_event_nmi_handler, 1317 .notifier_call = perf_event_nmi_handler,
1319 .next = NULL, 1318 .next = NULL,
1320 .priority = 1 1319 .priority = NMI_LOCAL_LOW_PRIOR,
1321}; 1320};
1322 1321
1323static struct event_constraint unconstrained; 1322static struct event_constraint unconstrained;
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index 81400b93e694..e56b9bfbabd1 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -753,19 +753,21 @@ out:
753 753
754static inline int p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc) 754static inline int p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc)
755{ 755{
756 int overflow = 0; 756 u64 v;
757 u32 low, high;
758 757
759	rdmsr(hwc->config_base + hwc->idx, low, high); 758	/* the official way to indicate overflow */
760 759 rdmsrl(hwc->config_base + hwc->idx, v);
761 /* we need to check high bit for unflagged overflows */ 760 if (v & P4_CCCR_OVF) {
762 if ((low & P4_CCCR_OVF) || !(high & (1 << 31))) { 761 wrmsrl(hwc->config_base + hwc->idx, v & ~P4_CCCR_OVF);
763 overflow = 1; 762 return 1;
764 (void)checking_wrmsrl(hwc->config_base + hwc->idx,
765 ((u64)low) & ~P4_CCCR_OVF);
766 } 763 }
767 764
768 return overflow; 765 /* it might be unflagged overflow */
766 rdmsrl(hwc->event_base + hwc->idx, v);
767 if (!(v & ARCH_P4_CNTRVAL_MASK))
768 return 1;
769
770 return 0;
769} 771}
770 772
771static void p4_pmu_disable_pebs(void) 773static void p4_pmu_disable_pebs(void)
@@ -1152,9 +1154,9 @@ static __initconst const struct x86_pmu p4_pmu = {
1152 */ 1154 */
1153 .num_counters = ARCH_P4_MAX_CCCR, 1155 .num_counters = ARCH_P4_MAX_CCCR,
1154 .apic = 1, 1156 .apic = 1,
1155 .cntval_bits = 40, 1157 .cntval_bits = ARCH_P4_CNTRVAL_BITS,
1156 .cntval_mask = (1ULL << 40) - 1, 1158 .cntval_mask = ARCH_P4_CNTRVAL_MASK,
1157 .max_period = (1ULL << 39) - 1, 1159 .max_period = (1ULL << (ARCH_P4_CNTRVAL_BITS - 1)) - 1,
1158 .hw_config = p4_hw_config, 1160 .hw_config = p4_hw_config,
1159 .schedule_events = p4_pmu_schedule_events, 1161 .schedule_events = p4_pmu_schedule_events,
1160 /* 1162 /*
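
The rewritten p4_pmu_clear_cccr_ovf() first trusts the CCCR's OVF bit and clears it with a plain wrmsrl(); only when that flag is unset does it inspect the counter itself, since perf programs counters with a negative offset, so a counter that wrapped past zero shows cleared in-range bits. The same hunk replaces the literal 40-bit width with ARCH_P4_CNTRVAL_BITS/MASK. A compact userspace sketch of the two-step test; the bit positions are taken from the kernel headers of the era and should be read as assumptions here:

    #include <stdint.h>

    #define ARCH_P4_CNTRVAL_BITS    40
    #define ARCH_P4_CNTRVAL_MASK    ((1ULL << ARCH_P4_CNTRVAL_BITS) - 1)
    #define P4_CCCR_OVF             (1ULL << 31)

    /* Mirrors the logic above: flagged overflow via the CCCR, then
     * the unflagged fallback on the raw counter value. */
    static int p4_counter_overflowed(uint64_t cccr, uint64_t cntr)
    {
            if (cccr & P4_CCCR_OVF)
                    return 1;       /* flagged: caller also clears the bit */
            return !(cntr & ARCH_P4_CNTRVAL_MASK);  /* unflagged wrap */
    }
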
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 8474c998cbd4..d6fb146c0d8b 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -197,14 +197,8 @@ void show_stack(struct task_struct *task, unsigned long *sp)
197 */ 197 */
198void dump_stack(void) 198void dump_stack(void)
199{ 199{
200 unsigned long bp = 0;
201 unsigned long stack; 200 unsigned long stack;
202 201
203#ifdef CONFIG_FRAME_POINTER
204 if (!bp)
205 get_bp(bp);
206#endif
207
208 printk("Pid: %d, comm: %.20s %s %s %.*s\n", 202 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
209 current->pid, current->comm, print_tainted(), 203 current->pid, current->comm, print_tainted(),
210 init_utsname()->release, 204 init_utsname()->release,
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index e3ba417e8697..d3b895f375d3 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -299,17 +299,21 @@ ENDPROC(native_usergs_sysret64)
299ENTRY(save_args) 299ENTRY(save_args)
300 XCPT_FRAME 300 XCPT_FRAME
301 cld 301 cld
302 movq_cfi rdi, RDI+16-ARGOFFSET 302 /*
303 movq_cfi rsi, RSI+16-ARGOFFSET 303 * start from rbp in pt_regs and jump over
304 movq_cfi rdx, RDX+16-ARGOFFSET 304 * return address.
305 movq_cfi rcx, RCX+16-ARGOFFSET 305 */
306 movq_cfi rax, RAX+16-ARGOFFSET 306 movq_cfi rdi, RDI+8-RBP
307 movq_cfi r8, R8+16-ARGOFFSET 307 movq_cfi rsi, RSI+8-RBP
308 movq_cfi r9, R9+16-ARGOFFSET 308 movq_cfi rdx, RDX+8-RBP
309 movq_cfi r10, R10+16-ARGOFFSET 309 movq_cfi rcx, RCX+8-RBP
310 movq_cfi r11, R11+16-ARGOFFSET 310 movq_cfi rax, RAX+8-RBP
311 311 movq_cfi r8, R8+8-RBP
312 leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */ 312 movq_cfi r9, R9+8-RBP
313 movq_cfi r10, R10+8-RBP
314 movq_cfi r11, R11+8-RBP
315
316 leaq -RBP+8(%rsp),%rdi /* arg1 for handler */
313 movq_cfi rbp, 8 /* push %rbp */ 317 movq_cfi rbp, 8 /* push %rbp */
314	leaq 8(%rsp), %rbp		/* mov %rsp, %rbp */ 318	leaq 8(%rsp), %rbp		/* mov %rsp, %rbp */
315 testl $3, CS(%rdi) 319 testl $3, CS(%rdi)
@@ -782,8 +786,9 @@ END(interrupt)
782 786
783/* 0(%rsp): ~(interrupt number) */ 787/* 0(%rsp): ~(interrupt number) */
784 .macro interrupt func 788 .macro interrupt func
785 subq $ORIG_RAX-ARGOFFSET+8, %rsp 789 /* reserve pt_regs for scratch regs and rbp */
786 CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+8 790 subq $ORIG_RAX-RBP, %rsp
791 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
787 call save_args 792 call save_args
788 PARTIAL_FRAME 0 793 PARTIAL_FRAME 0
789 call \func 794 call \func
@@ -808,9 +813,14 @@ ret_from_intr:
808 TRACE_IRQS_OFF 813 TRACE_IRQS_OFF
809 decl PER_CPU_VAR(irq_count) 814 decl PER_CPU_VAR(irq_count)
810 leaveq 815 leaveq
816
811 CFI_RESTORE rbp 817 CFI_RESTORE rbp
812 CFI_DEF_CFA_REGISTER rsp 818 CFI_DEF_CFA_REGISTER rsp
813 CFI_ADJUST_CFA_OFFSET -8 819 CFI_ADJUST_CFA_OFFSET -8
820
821 /* we did not save rbx, restore only from ARGOFFSET */
822 addq $8, %rsp
823 CFI_ADJUST_CFA_OFFSET -8
814exit_intr: 824exit_intr:
815 GET_THREAD_INFO(%rcx) 825 GET_THREAD_INFO(%rcx)
816 testl $3,CS-ARGOFFSET(%rsp) 826 testl $3,CS-ARGOFFSET(%rsp)
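
The new save_args offsets all have the shape REG+8-RBP: the interrupt macro now reserves only the pt_regs slice from the rbp slot upward (subq $ORIG_RAX-RBP), and the +8 skips the return address that `call save_args` pushed on top of it. A userspace C illustration of the arithmetic; the struct mimics the x86_64 pt_regs slot order and is only a model, not the kernel's definition:

    #include <stddef.h>
    #include <stdio.h>

    /* Model of the x86_64 pt_regs slot order, low to high address. */
    struct regs {
            unsigned long r15, r14, r13, r12, rbp, rbx;
            unsigned long r11, r10, r9, r8, rax, rcx, rdx, rsi, rdi;
            unsigned long orig_rax, rip, cs, eflags, rsp, ss;
    };

    int main(void)
    {
            size_t RBP = offsetof(struct regs, rbp);
            size_t RDI = offsetof(struct regs, rdi);
            size_t ORIG_RAX = offsetof(struct regs, orig_rax);

            /* subq $ORIG_RAX-RBP, %rsp: space for the rbp..rdi slots */
            printf("reserved frame: %zu bytes\n", ORIG_RAX - RBP);
            /* the call pushes 8 bytes, so rdi's slot sits at
             * RDI+8-RBP(%rsp), matching "movq_cfi rdi, RDI+8-RBP" */
            printf("rdi slot at rsp + %zu\n", RDI + 8 - RBP);
            return 0;
    }
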
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index cd21b654dec6..a4130005028a 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -48,6 +48,7 @@
48#include <asm/apicdef.h> 48#include <asm/apicdef.h>
49#include <asm/system.h> 49#include <asm/system.h>
50#include <asm/apic.h> 50#include <asm/apic.h>
51#include <asm/nmi.h>
51 52
52struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = 53struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
53{ 54{
@@ -525,10 +526,6 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
525 } 526 }
526 return NOTIFY_DONE; 527 return NOTIFY_DONE;
527 528
528 case DIE_NMI_IPI:
529 /* Just ignore, we will handle the roundup on DIE_NMI. */
530 return NOTIFY_DONE;
531
532 case DIE_NMIUNKNOWN: 529 case DIE_NMIUNKNOWN:
533 if (was_in_debug_nmi[raw_smp_processor_id()]) { 530 if (was_in_debug_nmi[raw_smp_processor_id()]) {
534 was_in_debug_nmi[raw_smp_processor_id()] = 0; 531 was_in_debug_nmi[raw_smp_processor_id()] = 0;
@@ -606,7 +603,7 @@ static struct notifier_block kgdb_notifier = {
606 /* 603 /*
607 * Lowest-prio notifier priority, we want to be notified last: 604 * Lowest-prio notifier priority, we want to be notified last:
608 */ 605 */
609 .priority = -INT_MAX, 606 .priority = NMI_LOCAL_LOW_PRIOR,
610}; 607};
611 608
612/** 609/**
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index c495aa8d4815..fc7aae1e2bc7 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -18,6 +18,7 @@
18#include <asm/pci_x86.h> 18#include <asm/pci_x86.h>
19#include <asm/virtext.h> 19#include <asm/virtext.h>
20#include <asm/cpu.h> 20#include <asm/cpu.h>
21#include <asm/nmi.h>
21 22
22#ifdef CONFIG_X86_32 23#ifdef CONFIG_X86_32
23# include <linux/ctype.h> 24# include <linux/ctype.h>
@@ -747,7 +748,7 @@ static int crash_nmi_callback(struct notifier_block *self,
747{ 748{
748 int cpu; 749 int cpu;
749 750
750 if (val != DIE_NMI_IPI) 751 if (val != DIE_NMI)
751 return NOTIFY_OK; 752 return NOTIFY_OK;
752 753
753 cpu = raw_smp_processor_id(); 754 cpu = raw_smp_processor_id();
@@ -778,6 +779,8 @@ static void smp_send_nmi_allbutself(void)
778 779
779static struct notifier_block crash_nmi_nb = { 780static struct notifier_block crash_nmi_nb = {
780 .notifier_call = crash_nmi_callback, 781 .notifier_call = crash_nmi_callback,
782 /* we want to be the first one called */
783 .priority = NMI_LOCAL_HIGH_PRIOR+1,
781}; 784};
782 785
783/* Halt all other CPUs, calling the specified function on each of them 786/* Halt all other CPUs, calling the specified function on each of them
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index c7149c96d079..763df77343dd 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -97,12 +97,12 @@ static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
97 */ 97 */
98static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex); 98static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
99 99
100void cpu_hotplug_driver_lock() 100void cpu_hotplug_driver_lock(void)
101{ 101{
102 mutex_lock(&x86_cpu_hotplug_driver_mutex); 102 mutex_lock(&x86_cpu_hotplug_driver_mutex);
103} 103}
104 104
105void cpu_hotplug_driver_unlock() 105void cpu_hotplug_driver_unlock(void)
106{ 106{
107 mutex_unlock(&x86_cpu_hotplug_driver_mutex); 107 mutex_unlock(&x86_cpu_hotplug_driver_mutex);
108} 108}
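
The smpboot.c change is purely a prototype fix: in C, `void f()` leaves the parameter list unspecified, while `void f(void)` states that the function takes no arguments, so only the latter lets the compiler reject a call site that passes junk. In brief:

    #include <stdio.h>

    static void lock_old() { puts("old"); }     /* unspecified parameters */
    static void lock_new(void) { puts("new"); } /* explicitly none */

    int main(void)
    {
            lock_old(42);           /* accepted silently -- latent bug */
            /* lock_new(42);           error: too many arguments */
            lock_new();
            return 0;
    }
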
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index c76aaca5694d..b9b67166f9de 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -84,6 +84,11 @@ EXPORT_SYMBOL_GPL(used_vectors);
84static int ignore_nmis; 84static int ignore_nmis;
85 85
86int unknown_nmi_panic; 86int unknown_nmi_panic;
87/*
88 * Prevent the NMI reason port (0x61) from being accessed simultaneously;
89 * it can only be used from the NMI handler.
90 */
91static DEFINE_RAW_SPINLOCK(nmi_reason_lock);
87 92
88static inline void conditional_sti(struct pt_regs *regs) 93static inline void conditional_sti(struct pt_regs *regs)
89{ 94{
@@ -310,15 +315,15 @@ static int __init setup_unknown_nmi_panic(char *str)
310__setup("unknown_nmi_panic", setup_unknown_nmi_panic); 315__setup("unknown_nmi_panic", setup_unknown_nmi_panic);
311 316
312static notrace __kprobes void 317static notrace __kprobes void
313mem_parity_error(unsigned char reason, struct pt_regs *regs) 318pci_serr_error(unsigned char reason, struct pt_regs *regs)
314{ 319{
315 printk(KERN_EMERG 320 pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n",
316 "Uhhuh. NMI received for unknown reason %02x on CPU %d.\n", 321 reason, smp_processor_id());
317 reason, smp_processor_id());
318
319 printk(KERN_EMERG
320 "You have some hardware problem, likely on the PCI bus.\n");
321 322
323 /*
324 * On some machines, PCI SERR line is used to report memory
325 * errors. EDAC makes use of it.
326 */
322#if defined(CONFIG_EDAC) 327#if defined(CONFIG_EDAC)
323 if (edac_handler_set()) { 328 if (edac_handler_set()) {
324 edac_atomic_assert_error(); 329 edac_atomic_assert_error();
@@ -329,11 +334,11 @@ mem_parity_error(unsigned char reason, struct pt_regs *regs)
329 if (panic_on_unrecovered_nmi) 334 if (panic_on_unrecovered_nmi)
330 panic("NMI: Not continuing"); 335 panic("NMI: Not continuing");
331 336
332 printk(KERN_EMERG "Dazed and confused, but trying to continue\n"); 337 pr_emerg("Dazed and confused, but trying to continue\n");
333 338
334 /* Clear and disable the memory parity error line. */ 339 /* Clear and disable the PCI SERR error line. */
335 reason = (reason & 0xf) | 4; 340 reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR;
336 outb(reason, 0x61); 341 outb(reason, NMI_REASON_PORT);
337} 342}
338 343
339static notrace __kprobes void 344static notrace __kprobes void
@@ -341,15 +346,17 @@ io_check_error(unsigned char reason, struct pt_regs *regs)
341{ 346{
342 unsigned long i; 347 unsigned long i;
343 348
344 printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n"); 349 pr_emerg(
350 "NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n",
351 reason, smp_processor_id());
345 show_registers(regs); 352 show_registers(regs);
346 353
347 if (panic_on_io_nmi) 354 if (panic_on_io_nmi)
348 panic("NMI IOCK error: Not continuing"); 355 panic("NMI IOCK error: Not continuing");
349 356
350 /* Re-enable the IOCK line, wait for a few seconds */ 357 /* Re-enable the IOCK line, wait for a few seconds */
351 reason = (reason & 0xf) | 8; 358 reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK;
352 outb(reason, 0x61); 359 outb(reason, NMI_REASON_PORT);
353 360
354 i = 20000; 361 i = 20000;
355 while (--i) { 362 while (--i) {
@@ -357,8 +364,8 @@ io_check_error(unsigned char reason, struct pt_regs *regs)
357 udelay(100); 364 udelay(100);
358 } 365 }
359 366
360 reason &= ~8; 367 reason &= ~NMI_REASON_CLEAR_IOCHK;
361 outb(reason, 0x61); 368 outb(reason, NMI_REASON_PORT);
362} 369}
363 370
364static notrace __kprobes void 371static notrace __kprobes void
@@ -377,57 +384,50 @@ unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
377 return; 384 return;
378 } 385 }
379#endif 386#endif
380 printk(KERN_EMERG 387 pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
381 "Uhhuh. NMI received for unknown reason %02x on CPU %d.\n", 388 reason, smp_processor_id());
382 reason, smp_processor_id());
383 389
384 printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n"); 390 pr_emerg("Do you have a strange power saving mode enabled?\n");
385 if (unknown_nmi_panic || panic_on_unrecovered_nmi) 391 if (unknown_nmi_panic || panic_on_unrecovered_nmi)
386 panic("NMI: Not continuing"); 392 panic("NMI: Not continuing");
387 393
388 printk(KERN_EMERG "Dazed and confused, but trying to continue\n"); 394 pr_emerg("Dazed and confused, but trying to continue\n");
389} 395}
390 396
391static notrace __kprobes void default_do_nmi(struct pt_regs *regs) 397static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
392{ 398{
393 unsigned char reason = 0; 399 unsigned char reason = 0;
394 int cpu;
395 400
396 cpu = smp_processor_id(); 401 /*
397 402 * CPU-specific NMI must be processed before non-CPU-specific
398 /* Only the BSP gets external NMIs from the system. */ 403 * NMI, otherwise we may lose it, because the CPU-specific
399 if (!cpu) 404 * NMI can not be detected/processed on other CPUs.
400 reason = get_nmi_reason(); 405 */
406 if (notify_die(DIE_NMI, "nmi", regs, 0, 2, SIGINT) == NOTIFY_STOP)
407 return;
401 408
402 if (!(reason & 0xc0)) { 409 /* Non-CPU-specific NMI: NMI sources can be processed on any CPU */
403 if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT) 410 raw_spin_lock(&nmi_reason_lock);
404 == NOTIFY_STOP) 411 reason = get_nmi_reason();
405 return;
406 412
407#ifdef CONFIG_X86_LOCAL_APIC 413 if (reason & NMI_REASON_MASK) {
408 if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) 414 if (reason & NMI_REASON_SERR)
409 == NOTIFY_STOP) 415 pci_serr_error(reason, regs);
410 return; 416 else if (reason & NMI_REASON_IOCHK)
417 io_check_error(reason, regs);
418#ifdef CONFIG_X86_32
419 /*
420 * Reassert NMI in case it became active
421 * meanwhile as it's edge-triggered:
422 */
423 reassert_nmi();
411#endif 424#endif
412 unknown_nmi_error(reason, regs); 425 raw_spin_unlock(&nmi_reason_lock);
413
414 return; 426 return;
415 } 427 }
416 if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP) 428 raw_spin_unlock(&nmi_reason_lock);
417 return;
418 429
419 /* AK: following checks seem to be broken on modern chipsets. FIXME */ 430 unknown_nmi_error(reason, regs);
420 if (reason & 0x80)
421 mem_parity_error(reason, regs);
422 if (reason & 0x40)
423 io_check_error(reason, regs);
424#ifdef CONFIG_X86_32
425 /*
426 * Reassert NMI in case it became active meanwhile
427 * as it's edge-triggered:
428 */
429 reassert_nmi();
430#endif
431} 431}
432 432
433dotraplinkage notrace __kprobes void 433dotraplinkage notrace __kprobes void
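
The reworked default_do_nmi() drops the old BSP-only assumption: CPU-local sources (the DIE_NMI die chain) are polled first on every CPU, and only then is the shared reason port read, under nmi_reason_lock so two CPUs cannot both clear the edge-triggered SERR/IOCHK bits. A condensed userspace model of the control flow; the helpers are stubs for notify_die()/get_nmi_reason(), and the 0x80/0x40 bit values carry over from the old code:

    #include <stdio.h>

    #define NMI_REASON_SERR         0x80
    #define NMI_REASON_IOCHK        0x40
    #define NMI_REASON_MASK         (NMI_REASON_SERR | NMI_REASON_IOCHK)

    static int cpu_specific_nmi_handled(void) { return 0; }    /* stub */
    static unsigned char get_reason(void)     { return 0x80; } /* stub */

    static void do_nmi(void)
    {
            unsigned char reason;

            /* 1: per-CPU sources first -- invisible on other CPUs */
            if (cpu_specific_nmi_handled())
                    return;

            /* 2: shared sources, serialized so only one CPU clears
             * port 0x61 (raw_spin_lock(&nmi_reason_lock) in the kernel) */
            reason = get_reason();
            if (reason & NMI_REASON_MASK) {
                    if (reason & NMI_REASON_SERR)
                            puts("pci_serr_error");
                    else if (reason & NMI_REASON_IOCHK)
                            puts("io_check_error");
                    return;         /* unlock in the kernel */
            }

            /* 3: nothing claimed it */
            puts("unknown_nmi_error");
    }

    int main(void) { do_nmi(); return 0; }
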
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 03d2ea82f35a..823f79a17ad1 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -965,7 +965,7 @@ out:
965 965
966static int __init init_tsc_clocksource(void) 966static int __init init_tsc_clocksource(void)
967{ 967{
968 if (!cpu_has_tsc || tsc_disabled > 0) 968 if (!cpu_has_tsc || tsc_disabled > 0 || !tsc_khz)
969 return 0; 969 return 0;
970 970
971 if (tsc_clocksource_reliable) 971 if (tsc_clocksource_reliable)
diff --git a/arch/x86/mm/amdtopology_64.c b/arch/x86/mm/amdtopology_64.c
index 08a0069b87a5..f21962c435ed 100644
--- a/arch/x86/mm/amdtopology_64.c
+++ b/arch/x86/mm/amdtopology_64.c
@@ -27,6 +27,7 @@
27#include <asm/amd_nb.h> 27#include <asm/amd_nb.h>
28 28
29static struct bootnode __initdata nodes[8]; 29static struct bootnode __initdata nodes[8];
30static unsigned char __initdata nodeids[8];
30static nodemask_t __initdata nodes_parsed = NODE_MASK_NONE; 31static nodemask_t __initdata nodes_parsed = NODE_MASK_NONE;
31 32
32static __init int find_northbridge(void) 33static __init int find_northbridge(void)
@@ -68,19 +69,6 @@ static __init void early_get_boot_cpu_id(void)
68#endif 69#endif
69} 70}
70 71
71int __init amd_get_nodes(struct bootnode *physnodes)
72{
73 int i;
74 int ret = 0;
75
76 for_each_node_mask(i, nodes_parsed) {
77 physnodes[ret].start = nodes[i].start;
78 physnodes[ret].end = nodes[i].end;
79 ret++;
80 }
81 return ret;
82}
83
84int __init amd_numa_init(unsigned long start_pfn, unsigned long end_pfn) 72int __init amd_numa_init(unsigned long start_pfn, unsigned long end_pfn)
85{ 73{
86 unsigned long start = PFN_PHYS(start_pfn); 74 unsigned long start = PFN_PHYS(start_pfn);
@@ -113,7 +101,7 @@ int __init amd_numa_init(unsigned long start_pfn, unsigned long end_pfn)
113 base = read_pci_config(0, nb, 1, 0x40 + i*8); 101 base = read_pci_config(0, nb, 1, 0x40 + i*8);
114 limit = read_pci_config(0, nb, 1, 0x44 + i*8); 102 limit = read_pci_config(0, nb, 1, 0x44 + i*8);
115 103
116 nodeid = limit & 7; 104 nodeids[i] = nodeid = limit & 7;
117 if ((base & 3) == 0) { 105 if ((base & 3) == 0) {
118 if (i < numnodes) 106 if (i < numnodes)
119 pr_info("Skipping disabled node %d\n", i); 107 pr_info("Skipping disabled node %d\n", i);
@@ -193,6 +181,76 @@ int __init amd_numa_init(unsigned long start_pfn, unsigned long end_pfn)
193 return 0; 181 return 0;
194} 182}
195 183
184#ifdef CONFIG_NUMA_EMU
185static s16 fake_apicid_to_node[MAX_LOCAL_APIC] __initdata = {
186 [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
187};
188
189void __init amd_get_nodes(struct bootnode *physnodes)
190{
191 int i;
192
193 for_each_node_mask(i, nodes_parsed) {
194 physnodes[i].start = nodes[i].start;
195 physnodes[i].end = nodes[i].end;
196 }
197}
198
199static int __init find_node_by_addr(unsigned long addr)
200{
201 int ret = NUMA_NO_NODE;
202 int i;
203
204 for (i = 0; i < 8; i++)
205 if (addr >= nodes[i].start && addr < nodes[i].end) {
206 ret = i;
207 break;
208 }
209 return ret;
210}
211
212/*
213 * For NUMA emulation, fake proximity domain (_PXM) to node id mappings must be
214 * set up to represent the physical topology but reflect the emulated
215 * environment. For each emulated node, the real node which it appears on is
216 * found and a fake pxm to nid mapping is created which mirrors the actual
217 * locality. node_distance() then represents the correct distances between
218 * emulated nodes by using the fake acpi mappings to pxms.
219 */
220void __init amd_fake_nodes(const struct bootnode *nodes, int nr_nodes)
221{
222 unsigned int bits;
223 unsigned int cores;
224 unsigned int apicid_base = 0;
225 int i;
226
227 bits = boot_cpu_data.x86_coreid_bits;
228 cores = 1 << bits;
229 early_get_boot_cpu_id();
230 if (boot_cpu_physical_apicid > 0)
231 apicid_base = boot_cpu_physical_apicid;
232
233 for (i = 0; i < nr_nodes; i++) {
234 int index;
235 int nid;
236 int j;
237
238 nid = find_node_by_addr(nodes[i].start);
239 if (nid == NUMA_NO_NODE)
240 continue;
241
242 index = nodeids[nid] << bits;
243 if (fake_apicid_to_node[index + apicid_base] == NUMA_NO_NODE)
244 for (j = apicid_base; j < cores + apicid_base; j++)
245 fake_apicid_to_node[index + j] = i;
246#ifdef CONFIG_ACPI_NUMA
247 __acpi_map_pxm_to_node(nid, i);
248#endif
249 }
250 memcpy(apicid_to_node, fake_apicid_to_node, sizeof(apicid_to_node));
251}
252#endif /* CONFIG_NUMA_EMU */
253
196int __init amd_scan_nodes(void) 254int __init amd_scan_nodes(void)
197{ 255{
198 unsigned int bits; 256 unsigned int bits;
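
amd_fake_nodes() is the AMD counterpart of acpi_fake_nodes(): each emulated node is traced back (via find_node_by_addr) to the physical node holding its start address, and every core apicid on that physical node is pointed at the emulated node, so node_distance() keeps reporting real locality under emulation. The index math in miniature; the array size and sentinel handling are illustrative assumptions:

    /* Sketch of the apicid -> emulated-node fill in amd_fake_nodes(). */
    #define MAX_APIC        64

    int fake_apicid_to_node[MAX_APIC];

    void map_emulated_node(int phys_nodeid, int emu_nid,
                           unsigned int coreid_bits, int apicid_base)
    {
            unsigned int cores = 1u << coreid_bits; /* cores per node */
            int index = phys_nodeid << coreid_bits; /* first apicid on it */
            int j;

            /* every core apicid of the physical node now points at the
             * emulated node sitting on top of it */
            for (j = apicid_base; j < (int)(cores + apicid_base); j++)
                    fake_apicid_to_node[index + j] = emu_nid;
    }
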
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 7762a517d69d..1e72102e80c9 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -260,30 +260,30 @@ void __init numa_init_array(void)
260#ifdef CONFIG_NUMA_EMU 260#ifdef CONFIG_NUMA_EMU
261/* Numa emulation */ 261/* Numa emulation */
262static struct bootnode nodes[MAX_NUMNODES] __initdata; 262static struct bootnode nodes[MAX_NUMNODES] __initdata;
263static struct bootnode physnodes[MAX_NUMNODES] __initdata; 263static struct bootnode physnodes[MAX_NUMNODES] __cpuinitdata;
264static char *cmdline __initdata; 264static char *cmdline __initdata;
265 265
266static int __init setup_physnodes(unsigned long start, unsigned long end, 266static int __init setup_physnodes(unsigned long start, unsigned long end,
267 int acpi, int amd) 267 int acpi, int amd)
268{ 268{
269 int nr_nodes = 0;
270 int ret = 0; 269 int ret = 0;
271 int i; 270 int i;
272 271
272 memset(physnodes, 0, sizeof(physnodes));
273#ifdef CONFIG_ACPI_NUMA 273#ifdef CONFIG_ACPI_NUMA
274 if (acpi) 274 if (acpi)
275 nr_nodes = acpi_get_nodes(physnodes); 275 acpi_get_nodes(physnodes, start, end);
276#endif 276#endif
277#ifdef CONFIG_AMD_NUMA 277#ifdef CONFIG_AMD_NUMA
278 if (amd) 278 if (amd)
279 nr_nodes = amd_get_nodes(physnodes); 279 amd_get_nodes(physnodes);
280#endif 280#endif
281 /* 281 /*
282 * Basic sanity checking on the physical node map: there may be errors 282 * Basic sanity checking on the physical node map: there may be errors
283 * if the SRAT or AMD code incorrectly reported the topology or the mem= 283 * if the SRAT or AMD code incorrectly reported the topology or the mem=
284 * kernel parameter is used. 284 * kernel parameter is used.
285 */ 285 */
286 for (i = 0; i < nr_nodes; i++) { 286 for (i = 0; i < MAX_NUMNODES; i++) {
287 if (physnodes[i].start == physnodes[i].end) 287 if (physnodes[i].start == physnodes[i].end)
288 continue; 288 continue;
289 if (physnodes[i].start > end) { 289 if (physnodes[i].start > end) {
@@ -298,17 +298,6 @@ static int __init setup_physnodes(unsigned long start, unsigned long end,
298 physnodes[i].start = start; 298 physnodes[i].start = start;
299 if (physnodes[i].end > end) 299 if (physnodes[i].end > end)
300 physnodes[i].end = end; 300 physnodes[i].end = end;
301 }
302
303 /*
304 * Remove all nodes that have no memory or were truncated because of the
305 * limited address range.
306 */
307 for (i = 0; i < nr_nodes; i++) {
308 if (physnodes[i].start == physnodes[i].end)
309 continue;
310 physnodes[ret].start = physnodes[i].start;
311 physnodes[ret].end = physnodes[i].end;
312 ret++; 301 ret++;
313 } 302 }
314 303
@@ -324,6 +313,24 @@ static int __init setup_physnodes(unsigned long start, unsigned long end,
324 return ret; 313 return ret;
325} 314}
326 315
316static void __init fake_physnodes(int acpi, int amd, int nr_nodes)
317{
318 int i;
319
320 BUG_ON(acpi && amd);
321#ifdef CONFIG_ACPI_NUMA
322 if (acpi)
323 acpi_fake_nodes(nodes, nr_nodes);
324#endif
325#ifdef CONFIG_AMD_NUMA
326 if (amd)
327 amd_fake_nodes(nodes, nr_nodes);
328#endif
329 if (!acpi && !amd)
330 for (i = 0; i < nr_cpu_ids; i++)
331 numa_set_node(i, 0);
332}
333
327/* 334/*
328 * Sets up nid to range from addr to addr + size. If the end 335 * Sets up nid to range from addr to addr + size. If the end
329 * boundary is greater than max_addr, then max_addr is used instead. 336 * boundary is greater than max_addr, then max_addr is used instead.
@@ -352,8 +359,7 @@ static int __init setup_node_range(int nid, u64 *addr, u64 size, u64 max_addr)
352 * Sets up nr_nodes fake nodes interleaved over physical nodes ranging from addr 359 * Sets up nr_nodes fake nodes interleaved over physical nodes ranging from addr
353 * to max_addr. The return value is the number of nodes allocated. 360 * to max_addr. The return value is the number of nodes allocated.
354 */ 361 */
355static int __init split_nodes_interleave(u64 addr, u64 max_addr, 362static int __init split_nodes_interleave(u64 addr, u64 max_addr, int nr_nodes)
356 int nr_phys_nodes, int nr_nodes)
357{ 363{
358 nodemask_t physnode_mask = NODE_MASK_NONE; 364 nodemask_t physnode_mask = NODE_MASK_NONE;
359 u64 size; 365 u64 size;
@@ -384,7 +390,7 @@ static int __init split_nodes_interleave(u64 addr, u64 max_addr,
384 return -1; 390 return -1;
385 } 391 }
386 392
387 for (i = 0; i < nr_phys_nodes; i++) 393 for (i = 0; i < MAX_NUMNODES; i++)
388 if (physnodes[i].start != physnodes[i].end) 394 if (physnodes[i].start != physnodes[i].end)
389 node_set(i, physnode_mask); 395 node_set(i, physnode_mask);
390 396
@@ -553,11 +559,9 @@ static int __init numa_emulation(unsigned long start_pfn,
553{ 559{
554 u64 addr = start_pfn << PAGE_SHIFT; 560 u64 addr = start_pfn << PAGE_SHIFT;
555 u64 max_addr = last_pfn << PAGE_SHIFT; 561 u64 max_addr = last_pfn << PAGE_SHIFT;
556 int num_phys_nodes;
557 int num_nodes; 562 int num_nodes;
558 int i; 563 int i;
559 564
560 num_phys_nodes = setup_physnodes(addr, max_addr, acpi, amd);
561 /* 565 /*
562 * If the numa=fake command-line contains a 'M' or 'G', it represents 566 * If the numa=fake command-line contains a 'M' or 'G', it represents
563 * the fixed node size. Otherwise, if it is just a single number N, 567 * the fixed node size. Otherwise, if it is just a single number N,
@@ -572,7 +576,7 @@ static int __init numa_emulation(unsigned long start_pfn,
572 unsigned long n; 576 unsigned long n;
573 577
574 n = simple_strtoul(cmdline, NULL, 0); 578 n = simple_strtoul(cmdline, NULL, 0);
575 num_nodes = split_nodes_interleave(addr, max_addr, num_phys_nodes, n); 579 num_nodes = split_nodes_interleave(addr, max_addr, n);
576 } 580 }
577 581
578 if (num_nodes < 0) 582 if (num_nodes < 0)
@@ -595,7 +599,8 @@ static int __init numa_emulation(unsigned long start_pfn,
595 nodes[i].end >> PAGE_SHIFT); 599 nodes[i].end >> PAGE_SHIFT);
596 setup_node_bootmem(i, nodes[i].start, nodes[i].end); 600 setup_node_bootmem(i, nodes[i].start, nodes[i].end);
597 } 601 }
598 acpi_fake_nodes(nodes, num_nodes); 602 setup_physnodes(addr, max_addr, acpi, amd);
603 fake_physnodes(acpi, amd, num_nodes);
599 numa_init_array(); 604 numa_init_array();
600 return 0; 605 return 0;
601} 606}
@@ -610,8 +615,12 @@ void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn,
610 nodes_clear(node_online_map); 615 nodes_clear(node_online_map);
611 616
612#ifdef CONFIG_NUMA_EMU 617#ifdef CONFIG_NUMA_EMU
618 setup_physnodes(start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT,
619 acpi, amd);
613 if (cmdline && !numa_emulation(start_pfn, last_pfn, acpi, amd)) 620 if (cmdline && !numa_emulation(start_pfn, last_pfn, acpi, amd))
614 return; 621 return;
622 setup_physnodes(start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT,
623 acpi, amd);
615 nodes_clear(node_possible_map); 624 nodes_clear(node_possible_map);
616 nodes_clear(node_online_map); 625 nodes_clear(node_online_map);
617#endif 626#endif
@@ -767,6 +776,7 @@ void __cpuinit numa_clear_node(int cpu)
767 776
768#ifndef CONFIG_DEBUG_PER_CPU_MAPS 777#ifndef CONFIG_DEBUG_PER_CPU_MAPS
769 778
779#ifndef CONFIG_NUMA_EMU
770void __cpuinit numa_add_cpu(int cpu) 780void __cpuinit numa_add_cpu(int cpu)
771{ 781{
772 cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); 782 cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
@@ -776,34 +786,115 @@ void __cpuinit numa_remove_cpu(int cpu)
776{ 786{
777 cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); 787 cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
778} 788}
789#else
790void __cpuinit numa_add_cpu(int cpu)
791{
792 unsigned long addr;
793 u16 apicid;
794 int physnid;
795 int nid = NUMA_NO_NODE;
796
797 apicid = early_per_cpu(x86_cpu_to_apicid, cpu);
798 if (apicid != BAD_APICID)
799 nid = apicid_to_node[apicid];
800 if (nid == NUMA_NO_NODE)
801 nid = early_cpu_to_node(cpu);
802 BUG_ON(nid == NUMA_NO_NODE || !node_online(nid));
803
804 /*
805 * Use the starting address of the emulated node to find which physical
806 * node it is allocated on.
807 */
808 addr = node_start_pfn(nid) << PAGE_SHIFT;
809 for (physnid = 0; physnid < MAX_NUMNODES; physnid++)
810 if (addr >= physnodes[physnid].start &&
811 addr < physnodes[physnid].end)
812 break;
813
814 /*
815 * Map the cpu to each emulated node that is allocated on the physical
816 * node of the cpu's apic id.
817 */
818 for_each_online_node(nid) {
819 addr = node_start_pfn(nid) << PAGE_SHIFT;
820 if (addr >= physnodes[physnid].start &&
821 addr < physnodes[physnid].end)
822 cpumask_set_cpu(cpu, node_to_cpumask_map[nid]);
823 }
824}
825
826void __cpuinit numa_remove_cpu(int cpu)
827{
828 int i;
829
830 for_each_online_node(i)
831 cpumask_clear_cpu(cpu, node_to_cpumask_map[i]);
832}
833#endif /* !CONFIG_NUMA_EMU */
779 834
780#else /* CONFIG_DEBUG_PER_CPU_MAPS */ 835#else /* CONFIG_DEBUG_PER_CPU_MAPS */
836static struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable)
837{
838 int node = early_cpu_to_node(cpu);
839 struct cpumask *mask;
840 char buf[64];
841
842 mask = node_to_cpumask_map[node];
843 if (!mask) {
844 pr_err("node_to_cpumask_map[%i] NULL\n", node);
845 dump_stack();
846 return NULL;
847 }
848
849 cpulist_scnprintf(buf, sizeof(buf), mask);
850 printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
851 enable ? "numa_add_cpu" : "numa_remove_cpu",
852 cpu, node, buf);
853 return mask;
854}
781 855
782/* 856/*
783 * --------- debug versions of the numa functions --------- 857 * --------- debug versions of the numa functions ---------
784 */ 858 */
859#ifndef CONFIG_NUMA_EMU
785static void __cpuinit numa_set_cpumask(int cpu, int enable) 860static void __cpuinit numa_set_cpumask(int cpu, int enable)
786{ 861{
787 int node = early_cpu_to_node(cpu);
788 struct cpumask *mask; 862 struct cpumask *mask;
789 char buf[64];
790 863
791 mask = node_to_cpumask_map[node]; 864 mask = debug_cpumask_set_cpu(cpu, enable);
792 if (mask == NULL) { 865 if (!mask)
793 printk(KERN_ERR "node_to_cpumask_map[%i] NULL\n", node);
794 dump_stack();
795 return; 866 return;
796 }
797 867
798 if (enable) 868 if (enable)
799 cpumask_set_cpu(cpu, mask); 869 cpumask_set_cpu(cpu, mask);
800 else 870 else
801 cpumask_clear_cpu(cpu, mask); 871 cpumask_clear_cpu(cpu, mask);
872}
873#else
874static void __cpuinit numa_set_cpumask(int cpu, int enable)
875{
876 int node = early_cpu_to_node(cpu);
877 struct cpumask *mask;
878 int i;
802 879
803 cpulist_scnprintf(buf, sizeof(buf), mask); 880 for_each_online_node(i) {
804 printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n", 881 unsigned long addr;
805 enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf); 882
883 addr = node_start_pfn(i) << PAGE_SHIFT;
884 if (addr < physnodes[node].start ||
885 addr >= physnodes[node].end)
886 continue;
887 mask = debug_cpumask_set_cpu(cpu, enable);
888 if (!mask)
889 return;
890
891 if (enable)
892 cpumask_set_cpu(cpu, mask);
893 else
894 cpumask_clear_cpu(cpu, mask);
895 }
806} 896}
897#endif /* CONFIG_NUMA_EMU */
807 898
808void __cpuinit numa_add_cpu(int cpu) 899void __cpuinit numa_add_cpu(int cpu)
809{ 900{
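
Under emulation a cpu no longer belongs to exactly one node, so the emulated numa_add_cpu() above first locates the physical node backing the cpu's home node, then sets the cpu's bit in every emulated node whose memory lives on that physical node (numa_remove_cpu() simply clears it everywhere). A simplified model with plain arrays standing in for the kernel's node ranges and cpumasks:

    #define MAX_NODES 8

    struct range { unsigned long start, end; };

    struct range physnodes[MAX_NODES];      /* physical node extents */
    struct range emunodes[MAX_NODES];       /* emulated node extents */
    unsigned long node_cpumask[MAX_NODES];  /* one bit per cpu */

    void emu_numa_add_cpu(int cpu, unsigned long home_addr)
    {
            int physnid, nid;

            /* which physical node backs the cpu's home (emulated) node? */
            for (physnid = 0; physnid < MAX_NODES; physnid++)
                    if (home_addr >= physnodes[physnid].start &&
                        home_addr <  physnodes[physnid].end)
                            break;

            /* map the cpu into every emulated node on that physical node */
            for (nid = 0; nid < MAX_NODES; nid++) {
                    unsigned long addr = emunodes[nid].start;

                    if (physnid < MAX_NODES &&
                        addr >= physnodes[physnid].start &&
                        addr <  physnodes[physnid].end)
                            node_cpumask[nid] |= 1UL << cpu;
            }
    }
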
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index 171a0aacb99a..603d285d1daa 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -349,18 +349,19 @@ static int __init nodes_cover_memory(const struct bootnode *nodes)
349 349
350void __init acpi_numa_arch_fixup(void) {} 350void __init acpi_numa_arch_fixup(void) {}
351 351
352int __init acpi_get_nodes(struct bootnode *physnodes) 352#ifdef CONFIG_NUMA_EMU
353void __init acpi_get_nodes(struct bootnode *physnodes, unsigned long start,
354 unsigned long end)
353{ 355{
354 int i; 356 int i;
355 int ret = 0;
356 357
357 for_each_node_mask(i, nodes_parsed) { 358 for_each_node_mask(i, nodes_parsed) {
358 physnodes[ret].start = nodes[i].start; 359 cutoff_node(i, start, end);
359 physnodes[ret].end = nodes[i].end; 360 physnodes[i].start = nodes[i].start;
360 ret++; 361 physnodes[i].end = nodes[i].end;
361 } 362 }
362 return ret;
363} 363}
364#endif /* CONFIG_NUMA_EMU */
364 365
365/* Use the information discovered above to actually set up the nodes. */ 366/* Use the information discovered above to actually set up the nodes. */
366int __init acpi_scan_nodes(unsigned long start, unsigned long end) 367int __init acpi_scan_nodes(unsigned long start, unsigned long end)
@@ -505,8 +506,6 @@ void __init acpi_fake_nodes(const struct bootnode *fake_nodes, int num_nodes)
505{ 506{
506 int i, j; 507 int i, j;
507 508
508 printk(KERN_INFO "Faking PXM affinity for fake nodes on real "
509 "topology.\n");
510 for (i = 0; i < num_nodes; i++) { 509 for (i = 0; i < num_nodes; i++) {
511 int nid, pxm; 510 int nid, pxm;
512 511
@@ -526,6 +525,17 @@ void __init acpi_fake_nodes(const struct bootnode *fake_nodes, int num_nodes)
526 fake_apicid_to_node[j] == NUMA_NO_NODE) 525 fake_apicid_to_node[j] == NUMA_NO_NODE)
527 fake_apicid_to_node[j] = i; 526 fake_apicid_to_node[j] = i;
528 } 527 }
528
529 /*
530 * If there are apicid-to-node mappings for physical nodes that do not
531 * have a corresponding emulated node, it should default to a guaranteed
532 * value.
533 */
534 for (i = 0; i < MAX_LOCAL_APIC; i++)
535 if (apicid_to_node[i] != NUMA_NO_NODE &&
536 fake_apicid_to_node[i] == NUMA_NO_NODE)
537 fake_apicid_to_node[i] = 0;
538
529 for (i = 0; i < num_nodes; i++) 539 for (i = 0; i < num_nodes; i++)
530 __acpi_map_pxm_to_node(fake_node_to_pxm_map[i], i); 540 __acpi_map_pxm_to_node(fake_node_to_pxm_map[i], i);
531 memcpy(apicid_to_node, fake_apicid_to_node, sizeof(apicid_to_node)); 541 memcpy(apicid_to_node, fake_apicid_to_node, sizeof(apicid_to_node));
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index f24a8533bcdf..e2b7b0c06cdf 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -65,7 +65,6 @@ static int profile_exceptions_notify(struct notifier_block *self,
65 65
66 switch (val) { 66 switch (val) {
67 case DIE_NMI: 67 case DIE_NMI:
68 case DIE_NMI_IPI:
69 if (ctr_running) 68 if (ctr_running)
70 model->check_ctrs(args->regs, &__get_cpu_var(cpu_msrs)); 69 model->check_ctrs(args->regs, &__get_cpu_var(cpu_msrs));
71 else if (!nmi_enabled) 70 else if (!nmi_enabled)
@@ -361,7 +360,7 @@ static void nmi_cpu_setup(void *dummy)
361static struct notifier_block profile_exceptions_nb = { 360static struct notifier_block profile_exceptions_nb = {
362 .notifier_call = profile_exceptions_notify, 361 .notifier_call = profile_exceptions_notify,
363 .next = NULL, 362 .next = NULL,
364 .priority = 2 363 .priority = NMI_LOCAL_LOW_PRIOR,
365}; 364};
366 365
367static void nmi_cpu_restore_registers(struct op_msrs *msrs) 366static void nmi_cpu_restore_registers(struct op_msrs *msrs)
diff --git a/arch/x86/oprofile/nmi_timer_int.c b/arch/x86/oprofile/nmi_timer_int.c
index 0636dd93cef8..720bf5a53c51 100644
--- a/arch/x86/oprofile/nmi_timer_int.c
+++ b/arch/x86/oprofile/nmi_timer_int.c
@@ -38,7 +38,7 @@ static int profile_timer_exceptions_notify(struct notifier_block *self,
38static struct notifier_block profile_timer_exceptions_nb = { 38static struct notifier_block profile_timer_exceptions_nb = {
39 .notifier_call = profile_timer_exceptions_notify, 39 .notifier_call = profile_timer_exceptions_notify,
40 .next = NULL, 40 .next = NULL,
41 .priority = 0 41 .priority = NMI_LOW_PRIOR,
42}; 42};
43 43
44static int timer_start(void) 44static int timer_start(void)
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
index fc1e8fe07e5c..e27dffbbb1a7 100644
--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -4,6 +4,7 @@
4#include <linux/cpu.h> 4#include <linux/cpu.h>
5#include <linux/range.h> 5#include <linux/range.h>
6 6
7#include <asm/amd_nb.h>
7#include <asm/pci_x86.h> 8#include <asm/pci_x86.h>
8 9
9#include <asm/pci-direct.h> 10#include <asm/pci-direct.h>
@@ -378,6 +379,34 @@ static struct notifier_block __cpuinitdata amd_cpu_notifier = {
378 .notifier_call = amd_cpu_notify, 379 .notifier_call = amd_cpu_notify,
379}; 380};
380 381
382static void __init pci_enable_pci_io_ecs(void)
383{
384#ifdef CONFIG_AMD_NB
385 unsigned int i, n;
386
387 for (n = i = 0; !n && amd_nb_bus_dev_ranges[i].dev_limit; ++i) {
388 u8 bus = amd_nb_bus_dev_ranges[i].bus;
389 u8 slot = amd_nb_bus_dev_ranges[i].dev_base;
390 u8 limit = amd_nb_bus_dev_ranges[i].dev_limit;
391
392 for (; slot < limit; ++slot) {
393 u32 val = read_pci_config(bus, slot, 3, 0);
394
395 if (!early_is_amd_nb(val))
396 continue;
397
398 val = read_pci_config(bus, slot, 3, 0x8c);
399 if (!(val & (ENABLE_CF8_EXT_CFG >> 32))) {
400 val |= ENABLE_CF8_EXT_CFG >> 32;
401 write_pci_config(bus, slot, 3, 0x8c, val);
402 }
403 ++n;
404 }
405 }
406 pr_info("Extended Config Space enabled on %u nodes\n", n);
407#endif
408}
409
381static int __init pci_io_ecs_init(void) 410static int __init pci_io_ecs_init(void)
382{ 411{
383 int cpu; 412 int cpu;
@@ -386,6 +415,10 @@ static int __init pci_io_ecs_init(void)
386 if (boot_cpu_data.x86 < 0x10) 415 if (boot_cpu_data.x86 < 0x10)
387 return 0; 416 return 0;
388 417
418 /* Try the PCI method first. */
419 if (early_pci_allowed())
420 pci_enable_pci_io_ecs();
421
389 register_cpu_notifier(&amd_cpu_notifier); 422 register_cpu_notifier(&amd_cpu_notifier);
390 for_each_online_cpu(cpu) 423 for_each_online_cpu(cpu)
391 amd_cpu_notify(&amd_cpu_notifier, (unsigned long)CPU_ONLINE, 424 amd_cpu_notify(&amd_cpu_notifier, (unsigned long)CPU_ONLINE,
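
The new pci_enable_pci_io_ecs() adds a PCI fallback for enabling extended configuration space before the per-CPU MSR path runs: for every northbridge in amd_nb_bus_dev_ranges it read-modify-writes the high NB_CFG dword at function 3, register 0x8c. The core of that RMW, with the early config accessors stubbed; the bit value is quoted from the kernel headers of the era and should be treated as an assumption here:

    #include <stdint.h>

    #define ENABLE_CF8_EXT_CFG      (1ULL << 46)    /* NB_CFG MSR bit */

    /* Stubs standing in for the early port-0xCF8 config accessors. */
    extern uint32_t read_pci_config(int bus, int slot, int func, int off);
    extern void write_pci_config(int bus, int slot, int func, int off,
                                 uint32_t v);

    /* F3x8C mirrors the MSR's upper dword, hence the >> 32. */
    void enable_ecs_on(int bus, int slot)
    {
            uint32_t val = read_pci_config(bus, slot, 3, 0x8c);

            if (!(val & (uint32_t)(ENABLE_CF8_EXT_CFG >> 32))) {
                    val |= (uint32_t)(ENABLE_CF8_EXT_CFG >> 32);
                    write_pci_config(bus, slot, 3, 0x8c, val);
            }
    }
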
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index ffe9b655292e..9f47e8625266 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -1926,8 +1926,9 @@ static int __devinit ucode_init (loader_block * lb, amb_dev * dev) {
1926 const struct firmware *fw; 1926 const struct firmware *fw;
1927 unsigned long start_address; 1927 unsigned long start_address;
1928 const struct ihex_binrec *rec; 1928 const struct ihex_binrec *rec;
1929 const char *errmsg = 0;
1929 int res; 1930 int res;
1930 1931
1931 res = request_ihex_firmware(&fw, "atmsar11.fw", &dev->pci_dev->dev); 1932 res = request_ihex_firmware(&fw, "atmsar11.fw", &dev->pci_dev->dev);
1932 if (res) { 1933 if (res) {
1933 PRINTK (KERN_ERR, "Cannot load microcode data"); 1934 PRINTK (KERN_ERR, "Cannot load microcode data");
@@ -1937,8 +1938,8 @@ static int __devinit ucode_init (loader_block * lb, amb_dev * dev) {
1937 /* First record contains just the start address */ 1938 /* First record contains just the start address */
1938 rec = (const struct ihex_binrec *)fw->data; 1939 rec = (const struct ihex_binrec *)fw->data;
1939 if (be16_to_cpu(rec->len) != sizeof(__be32) || be32_to_cpu(rec->addr)) { 1940 if (be16_to_cpu(rec->len) != sizeof(__be32) || be32_to_cpu(rec->addr)) {
1940 PRINTK (KERN_ERR, "Bad microcode data (no start record)"); 1941 errmsg = "no start record";
1941 return -EINVAL; 1942 goto fail;
1942 } 1943 }
1943 start_address = be32_to_cpup((__be32 *)rec->data); 1944 start_address = be32_to_cpup((__be32 *)rec->data);
1944 1945
@@ -1950,12 +1951,12 @@ static int __devinit ucode_init (loader_block * lb, amb_dev * dev) {
1950 PRINTD (DBG_LOAD, "starting region (%x, %u)", be32_to_cpu(rec->addr), 1951 PRINTD (DBG_LOAD, "starting region (%x, %u)", be32_to_cpu(rec->addr),
1951 be16_to_cpu(rec->len)); 1952 be16_to_cpu(rec->len));
1952 if (be16_to_cpu(rec->len) > 4 * MAX_TRANSFER_DATA) { 1953 if (be16_to_cpu(rec->len) > 4 * MAX_TRANSFER_DATA) {
1953 PRINTK (KERN_ERR, "Bad microcode data (record too long)"); 1954 errmsg = "record too long";
1954 return -EINVAL; 1955 goto fail;
1955 } 1956 }
1956 if (be16_to_cpu(rec->len) & 3) { 1957 if (be16_to_cpu(rec->len) & 3) {
1957 PRINTK (KERN_ERR, "Bad microcode data (odd number of bytes)"); 1958 errmsg = "odd number of bytes";
1958 return -EINVAL; 1959 goto fail;
1959 } 1960 }
1960 res = loader_write(lb, dev, rec); 1961 res = loader_write(lb, dev, rec);
1961 if (res) 1962 if (res)
@@ -1970,6 +1971,10 @@ static int __devinit ucode_init (loader_block * lb, amb_dev * dev) {
1970 res = loader_start(lb, dev, start_address); 1971 res = loader_start(lb, dev, start_address);
1971 1972
1972 return res; 1973 return res;
1974fail:
1975 release_firmware(fw);
1976 PRINTK(KERN_ERR, "Bad microcode data (%s)", errmsg);
1977 return -EINVAL;
1973} 1978}
1974 1979
1975/********** give adapter parameters **********/ 1980/********** give adapter parameters **********/
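
Besides shortening the messages, the ucode_init() change fixes a leak: the old early `return -EINVAL` paths never called release_firmware(). Routing every validation failure through one `fail:` label frees the blob exactly once and prints a single message parameterized by errmsg. The shape of the pattern in a standalone model, with malloc/free standing in for the firmware calls:

    #include <stdio.h>
    #include <stdlib.h>

    static int load_ucode(void)
    {
            void *fw = malloc(64);  /* stands in for request_ihex_firmware() */
            const char *errmsg;

            if (!fw)
                    return -1;

            if (0 /* bad start record */) {
                    errmsg = "no start record";
                    goto fail;
            }
            if (0 /* record too long */) {
                    errmsg = "record too long";
                    goto fail;
            }

            free(fw);
            return 0;
    fail:
            free(fw);       /* the release the old early returns skipped */
            fprintf(stderr, "Bad microcode data (%s)\n", errmsg);
            return -1;
    }

    int main(void) { return load_ucode(); }
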
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index 5259065f3c79..3e67ddde9e16 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -120,7 +120,6 @@ struct agp_bridge_driver {
120 void (*agp_destroy_page)(struct page *, int flags); 120 void (*agp_destroy_page)(struct page *, int flags);
121 void (*agp_destroy_pages)(struct agp_memory *); 121 void (*agp_destroy_pages)(struct agp_memory *);
122 int (*agp_type_to_mask_type) (struct agp_bridge_data *, int); 122 int (*agp_type_to_mask_type) (struct agp_bridge_data *, int);
123 void (*chipset_flush)(struct agp_bridge_data *);
124}; 123};
125 124
126struct agp_bridge_data { 125struct agp_bridge_data {
diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
index 9d2c97a69cdd..a48e05b31593 100644
--- a/drivers/char/agp/compat_ioctl.c
+++ b/drivers/char/agp/compat_ioctl.c
@@ -276,7 +276,6 @@ long compat_agp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
276 break; 276 break;
277 277
278 case AGPIOC_CHIPSET_FLUSH32: 278 case AGPIOC_CHIPSET_FLUSH32:
279 ret_val = agpioc_chipset_flush_wrap(curr_priv);
280 break; 279 break;
281 } 280 }
282 281
diff --git a/drivers/char/agp/compat_ioctl.h b/drivers/char/agp/compat_ioctl.h
index 0c9678ac0371..f30e0fd97963 100644
--- a/drivers/char/agp/compat_ioctl.h
+++ b/drivers/char/agp/compat_ioctl.h
@@ -102,6 +102,5 @@ void agp_free_memory_wrap(struct agp_memory *memory);
102struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type); 102struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type);
103struct agp_memory *agp_find_mem_by_key(int key); 103struct agp_memory *agp_find_mem_by_key(int key);
104struct agp_client *agp_find_client_by_pid(pid_t id); 104struct agp_client *agp_find_client_by_pid(pid_t id);
105int agpioc_chipset_flush_wrap(struct agp_file_private *priv);
106 105
107#endif /* _AGP_COMPAT_H */ 106#endif /* _AGP_COMPAT_H */
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
index 3cb4539a98b2..2e044338753c 100644
--- a/drivers/char/agp/frontend.c
+++ b/drivers/char/agp/frontend.c
@@ -957,13 +957,6 @@ static int agpioc_unbind_wrap(struct agp_file_private *priv, void __user *arg)
957 return agp_unbind_memory(memory); 957 return agp_unbind_memory(memory);
958} 958}
959 959
960int agpioc_chipset_flush_wrap(struct agp_file_private *priv)
961{
962 DBG("");
963 agp_flush_chipset(agp_bridge);
964 return 0;
965}
966
967static long agp_ioctl(struct file *file, 960static long agp_ioctl(struct file *file,
968 unsigned int cmd, unsigned long arg) 961 unsigned int cmd, unsigned long arg)
969{ 962{
@@ -1039,7 +1032,6 @@ static long agp_ioctl(struct file *file,
1039 break; 1032 break;
1040 1033
1041 case AGPIOC_CHIPSET_FLUSH: 1034 case AGPIOC_CHIPSET_FLUSH:
1042 ret_val = agpioc_chipset_flush_wrap(curr_priv);
1043 break; 1035 break;
1044 } 1036 }
1045 1037
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index 4956f1c8f9d5..012cba0d6d96 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -81,13 +81,6 @@ static int agp_get_key(void)
81 return -1; 81 return -1;
82} 82}
83 83
84void agp_flush_chipset(struct agp_bridge_data *bridge)
85{
86 if (bridge->driver->chipset_flush)
87 bridge->driver->chipset_flush(bridge);
88}
89EXPORT_SYMBOL(agp_flush_chipset);
90
91/* 84/*
92 * Use kmalloc if possible for the page list. Otherwise fall back to 85 * Use kmalloc if possible for the page list. Otherwise fall back to
93 * vmalloc. This speeds things up and also saves memory for small AGP 86 * vmalloc. This speeds things up and also saves memory for small AGP
@@ -487,26 +480,6 @@ int agp_unbind_memory(struct agp_memory *curr)
487} 480}
488EXPORT_SYMBOL(agp_unbind_memory); 481EXPORT_SYMBOL(agp_unbind_memory);
489 482
490/**
491 * agp_rebind_memory - Rewrite the entire GATT, useful on resume
492 */
493int agp_rebind_memory(void)
494{
495 struct agp_memory *curr;
496 int ret_val = 0;
497
498 spin_lock(&agp_bridge->mapped_lock);
499 list_for_each_entry(curr, &agp_bridge->mapped_list, mapped_list) {
500 ret_val = curr->bridge->driver->insert_memory(curr,
501 curr->pg_start,
502 curr->type);
503 if (ret_val != 0)
504 break;
505 }
506 spin_unlock(&agp_bridge->mapped_lock);
507 return ret_val;
508}
509EXPORT_SYMBOL(agp_rebind_memory);
510 483
511/* End - Routines for handling swapping of agp_memory into the GATT */ 484/* End - Routines for handling swapping of agp_memory into the GATT */
512 485
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index e72f49d52202..07e9796fead7 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -828,14 +828,9 @@ static void __devexit agp_intel_remove(struct pci_dev *pdev)
828static int agp_intel_resume(struct pci_dev *pdev) 828static int agp_intel_resume(struct pci_dev *pdev)
829{ 829{
830 struct agp_bridge_data *bridge = pci_get_drvdata(pdev); 830 struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
831 int ret_val;
832 831
833 bridge->driver->configure(); 832 bridge->driver->configure();
834 833
835 ret_val = agp_rebind_memory();
836 if (ret_val != 0)
837 return ret_val;
838
839 return 0; 834 return 0;
840} 835}
841#endif 836#endif
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 90539df02504..010e3defd6c3 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -75,6 +75,8 @@
75#define I810_GMS_DISABLE 0x00000000 75#define I810_GMS_DISABLE 0x00000000
76#define I810_PGETBL_CTL 0x2020 76#define I810_PGETBL_CTL 0x2020
77#define I810_PGETBL_ENABLED 0x00000001 77#define I810_PGETBL_ENABLED 0x00000001
78/* Note: PGETBL_CTL2 has a different offset on G33. */
79#define I965_PGETBL_CTL2 0x20c4
78#define I965_PGETBL_SIZE_MASK 0x0000000e 80#define I965_PGETBL_SIZE_MASK 0x0000000e
79#define I965_PGETBL_SIZE_512KB (0 << 1) 81#define I965_PGETBL_SIZE_512KB (0 << 1)
80#define I965_PGETBL_SIZE_256KB (1 << 1) 82#define I965_PGETBL_SIZE_256KB (1 << 1)
@@ -82,9 +84,15 @@
82#define I965_PGETBL_SIZE_1MB (3 << 1) 84#define I965_PGETBL_SIZE_1MB (3 << 1)
83#define I965_PGETBL_SIZE_2MB (4 << 1) 85#define I965_PGETBL_SIZE_2MB (4 << 1)
84#define I965_PGETBL_SIZE_1_5MB (5 << 1) 86#define I965_PGETBL_SIZE_1_5MB (5 << 1)
85#define G33_PGETBL_SIZE_MASK (3 << 8) 87#define G33_GMCH_SIZE_MASK (3 << 8)
86#define G33_PGETBL_SIZE_1M (1 << 8) 88#define G33_GMCH_SIZE_1M (1 << 8)
87#define G33_PGETBL_SIZE_2M (2 << 8) 89#define G33_GMCH_SIZE_2M (2 << 8)
90#define G4x_GMCH_SIZE_MASK (0xf << 8)
91#define G4x_GMCH_SIZE_1M (0x1 << 8)
92#define G4x_GMCH_SIZE_2M (0x3 << 8)
93#define G4x_GMCH_SIZE_VT_1M (0x9 << 8)
94#define G4x_GMCH_SIZE_VT_1_5M (0xa << 8)
95#define G4x_GMCH_SIZE_VT_2M (0xc << 8)
88 96
89#define I810_DRAM_CTL 0x3000 97#define I810_DRAM_CTL 0x3000
90#define I810_DRAM_ROW_0 0x00000001 98#define I810_DRAM_ROW_0 0x00000001
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 29ac6d499fa6..356f73e0d17e 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -24,7 +24,6 @@
24#include <asm/smp.h> 24#include <asm/smp.h>
25#include "agp.h" 25#include "agp.h"
26#include "intel-agp.h" 26#include "intel-agp.h"
27#include <linux/intel-gtt.h>
28#include <drm/intel-gtt.h> 27#include <drm/intel-gtt.h>
29 28
30/* 29/*
@@ -39,40 +38,12 @@
39#define USE_PCI_DMA_API 0 38#define USE_PCI_DMA_API 0
40#endif 39#endif
41 40
42/* Max amount of stolen space, anything above will be returned to Linux */
43int intel_max_stolen = 32 * 1024 * 1024;
44
45static const struct aper_size_info_fixed intel_i810_sizes[] =
46{
47 {64, 16384, 4},
48 /* The 32M mode still requires a 64k gatt */
49 {32, 8192, 4}
50};
51
52#define AGP_DCACHE_MEMORY 1
53#define AGP_PHYS_MEMORY 2
54#define INTEL_AGP_CACHED_MEMORY 3
55
56static struct gatt_mask intel_i810_masks[] =
57{
58 {.mask = I810_PTE_VALID, .type = 0},
59 {.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY},
60 {.mask = I810_PTE_VALID, .type = 0},
61 {.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED,
62 .type = INTEL_AGP_CACHED_MEMORY}
63};
64
65#define INTEL_AGP_UNCACHED_MEMORY 0
66#define INTEL_AGP_CACHED_MEMORY_LLC 1
67#define INTEL_AGP_CACHED_MEMORY_LLC_GFDT 2
68#define INTEL_AGP_CACHED_MEMORY_LLC_MLC 3
69#define INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT 4
70
71struct intel_gtt_driver { 41struct intel_gtt_driver {
72 unsigned int gen : 8; 42 unsigned int gen : 8;
73 unsigned int is_g33 : 1; 43 unsigned int is_g33 : 1;
74 unsigned int is_pineview : 1; 44 unsigned int is_pineview : 1;
75 unsigned int is_ironlake : 1; 45 unsigned int is_ironlake : 1;
46 unsigned int has_pgtbl_enable : 1;
76 unsigned int dma_mask_size : 8; 47 unsigned int dma_mask_size : 8;
77 /* Chipset specific GTT setup */ 48 /* Chipset specific GTT setup */
78 int (*setup)(void); 49 int (*setup)(void);
@@ -95,13 +66,14 @@ static struct _intel_private {
95 u8 __iomem *registers; 66 u8 __iomem *registers;
96 phys_addr_t gtt_bus_addr; 67 phys_addr_t gtt_bus_addr;
97 phys_addr_t gma_bus_addr; 68 phys_addr_t gma_bus_addr;
98 phys_addr_t pte_bus_addr; 69 u32 PGETBL_save;
99 u32 __iomem *gtt; /* I915G */ 70 u32 __iomem *gtt; /* I915G */
100 int num_dcache_entries; 71 int num_dcache_entries;
101 union { 72 union {
102 void __iomem *i9xx_flush_page; 73 void __iomem *i9xx_flush_page;
103 void *i8xx_flush_page; 74 void *i8xx_flush_page;
104 }; 75 };
76 char *i81x_gtt_table;
105 struct page *i8xx_page; 77 struct page *i8xx_page;
106 struct resource ifp_resource; 78 struct resource ifp_resource;
107 int resource_valid; 79 int resource_valid;
@@ -113,42 +85,31 @@ static struct _intel_private {
113#define IS_G33 intel_private.driver->is_g33 85#define IS_G33 intel_private.driver->is_g33
114#define IS_PINEVIEW intel_private.driver->is_pineview 86#define IS_PINEVIEW intel_private.driver->is_pineview
115#define IS_IRONLAKE intel_private.driver->is_ironlake 87#define IS_IRONLAKE intel_private.driver->is_ironlake
88#define HAS_PGTBL_EN intel_private.driver->has_pgtbl_enable
116 89
117static void intel_agp_free_sglist(struct agp_memory *mem) 90int intel_gtt_map_memory(struct page **pages, unsigned int num_entries,
118{ 91 struct scatterlist **sg_list, int *num_sg)
119 struct sg_table st;
120
121 st.sgl = mem->sg_list;
122 st.orig_nents = st.nents = mem->page_count;
123
124 sg_free_table(&st);
125
126 mem->sg_list = NULL;
127 mem->num_sg = 0;
128}
129
130static int intel_agp_map_memory(struct agp_memory *mem)
131{ 92{
132 struct sg_table st; 93 struct sg_table st;
133 struct scatterlist *sg; 94 struct scatterlist *sg;
134 int i; 95 int i;
135 96
136 if (mem->sg_list) 97 if (*sg_list)
137		return 0; /* already mapped (e.g. for resume) */ 98		return 0; /* already mapped (e.g. for resume) */
138 99
139 DBG("try mapping %lu pages\n", (unsigned long)mem->page_count); 100 DBG("try mapping %lu pages\n", (unsigned long)num_entries);
140 101
141 if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL)) 102 if (sg_alloc_table(&st, num_entries, GFP_KERNEL))
142 goto err; 103 goto err;
143 104
144 mem->sg_list = sg = st.sgl; 105 *sg_list = sg = st.sgl;
145 106
146 for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg)) 107 for (i = 0 ; i < num_entries; i++, sg = sg_next(sg))
147 sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0); 108 sg_set_page(sg, pages[i], PAGE_SIZE, 0);
148 109
149 mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list, 110 *num_sg = pci_map_sg(intel_private.pcidev, *sg_list,
150 mem->page_count, PCI_DMA_BIDIRECTIONAL); 111 num_entries, PCI_DMA_BIDIRECTIONAL);
151 if (unlikely(!mem->num_sg)) 112 if (unlikely(!*num_sg))
152 goto err; 113 goto err;
153 114
154 return 0; 115 return 0;
@@ -157,90 +118,22 @@ err:
157 sg_free_table(&st); 118 sg_free_table(&st);
158 return -ENOMEM; 119 return -ENOMEM;
159} 120}
121EXPORT_SYMBOL(intel_gtt_map_memory);
160 122
161static void intel_agp_unmap_memory(struct agp_memory *mem) 123void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
162{ 124{
125 struct sg_table st;
163	DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count); 126	DBG("try unmapping %lu pages\n", (unsigned long)num_sg);
164 127
165 pci_unmap_sg(intel_private.pcidev, mem->sg_list, 128 pci_unmap_sg(intel_private.pcidev, sg_list,
166 mem->page_count, PCI_DMA_BIDIRECTIONAL); 129 num_sg, PCI_DMA_BIDIRECTIONAL);
167 intel_agp_free_sglist(mem);
168}
169
170static int intel_i810_fetch_size(void)
171{
172 u32 smram_miscc;
173 struct aper_size_info_fixed *values;
174
175 pci_read_config_dword(intel_private.bridge_dev,
176 I810_SMRAM_MISCC, &smram_miscc);
177 values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
178
179 if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
180 dev_warn(&intel_private.bridge_dev->dev, "i810 is disabled\n");
181 return 0;
182 }
183 if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
184 agp_bridge->current_size = (void *) (values + 1);
185 agp_bridge->aperture_size_idx = 1;
186 return values[1].size;
187 } else {
188 agp_bridge->current_size = (void *) (values);
189 agp_bridge->aperture_size_idx = 0;
190 return values[0].size;
191 }
192
193 return 0;
194}
195
196static int intel_i810_configure(void)
197{
198 struct aper_size_info_fixed *current_size;
199 u32 temp;
200 int i;
201
202 current_size = A_SIZE_FIX(agp_bridge->current_size);
203 130
204 if (!intel_private.registers) { 131 st.sgl = sg_list;
205 pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp); 132 st.orig_nents = st.nents = num_sg;
206 temp &= 0xfff80000;
207 133
208 intel_private.registers = ioremap(temp, 128 * 4096); 134 sg_free_table(&st);
209 if (!intel_private.registers) {
210 dev_err(&intel_private.pcidev->dev,
211 "can't remap memory\n");
212 return -ENOMEM;
213 }
214 }
215
216 if ((readl(intel_private.registers+I810_DRAM_CTL)
217 & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
218 /* This will need to be dynamically assigned */
219 dev_info(&intel_private.pcidev->dev,
220 "detected 4MB dedicated video ram\n");
221 intel_private.num_dcache_entries = 1024;
222 }
223 pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
224 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
225 writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
226 readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
227
228 if (agp_bridge->driver->needs_scratch_page) {
229 for (i = 0; i < current_size->num_entries; i++) {
230 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
231 }
232 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI posting. */
233 }
234 global_cache_flush();
235 return 0;
236}
237
238static void intel_i810_cleanup(void)
239{
240 writel(0, intel_private.registers+I810_PGETBL_CTL);
241 readl(intel_private.registers); /* PCI Posting. */
242 iounmap(intel_private.registers);
243} 135}
136EXPORT_SYMBOL(intel_gtt_unmap_memory);
244 137
245static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode) 138static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
246{ 139{
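
Note: intel_gtt_map_memory()/intel_gtt_unmap_memory() now operate on a bare page array and scatterlist rather than on struct agp_memory, so a non-AGP client can drive the same DMA-mapping path (both symbols are exported in the hunks above). A minimal caller sketch, assuming kernel context and the signatures shown in this patch (intel_gtt_insert_sg_entries() is exported further down); the example_* names are hypothetical:

static int example_bind_pages(struct page **pages, unsigned int count,
			      unsigned int first_entry, unsigned int flags,
			      struct scatterlist **sg, int *num_sg)
{
	int ret;

	/* build an sg table over the pages and pci_map_sg() it */
	ret = intel_gtt_map_memory(pages, count, sg, num_sg);
	if (ret)
		return ret;

	/* write one PTE per mapped page, starting at first_entry */
	intel_gtt_insert_sg_entries(*sg, *num_sg, first_entry, flags);
	return 0;
}

static void example_unbind_pages(struct scatterlist *sg, int num_sg)
{
	intel_gtt_unmap_memory(sg, num_sg);	/* pci_unmap_sg() + sg table free */
}
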
@@ -277,80 +170,64 @@ static void i8xx_destroy_pages(struct page *page)
277 atomic_dec(&agp_bridge->current_memory_agp); 170 atomic_dec(&agp_bridge->current_memory_agp);
278} 171}
279 172
280static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start, 173#define I810_GTT_ORDER 4
281 int type) 174static int i810_setup(void)
282{ 175{
283 int i, j, num_entries; 176 u32 reg_addr;
284 void *temp; 177 char *gtt_table;
285 int ret = -EINVAL;
286 int mask_type;
287
288 if (mem->page_count == 0)
289 goto out;
290
291 temp = agp_bridge->current_size;
292 num_entries = A_SIZE_FIX(temp)->num_entries;
293 178
294 if ((pg_start + mem->page_count) > num_entries) 179 /* i81x does not preallocate the gtt. It's always 64kb in size. */
295 goto out_err; 180 gtt_table = alloc_gatt_pages(I810_GTT_ORDER);
181 if (gtt_table == NULL)
182 return -ENOMEM;
183 intel_private.i81x_gtt_table = gtt_table;
296 184
185 pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &reg_addr);
186 reg_addr &= 0xfff80000;
297 187
298 for (j = pg_start; j < (pg_start + mem->page_count); j++) { 188 intel_private.registers = ioremap(reg_addr, KB(64));
299 if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) { 189 if (!intel_private.registers)
300 ret = -EBUSY; 190 return -ENOMEM;
301 goto out_err;
302 }
303 }
304 191
305 if (type != mem->type) 192 writel(virt_to_phys(gtt_table) | I810_PGETBL_ENABLED,
306 goto out_err; 193 intel_private.registers+I810_PGETBL_CTL);
307 194
308 mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); 195 intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
309 196
310 switch (mask_type) { 197 if ((readl(intel_private.registers+I810_DRAM_CTL)
311 case AGP_DCACHE_MEMORY: 198 & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
312 if (!mem->is_flushed) 199 dev_info(&intel_private.pcidev->dev,
313 global_cache_flush(); 200 "detected 4MB dedicated video ram\n");
314 for (i = pg_start; i < (pg_start + mem->page_count); i++) { 201 intel_private.num_dcache_entries = 1024;
315 writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
316 intel_private.registers+I810_PTE_BASE+(i*4));
317 }
318 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
319 break;
320 case AGP_PHYS_MEMORY:
321 case AGP_NORMAL_MEMORY:
322 if (!mem->is_flushed)
323 global_cache_flush();
324 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
325 writel(agp_bridge->driver->mask_memory(agp_bridge,
326 page_to_phys(mem->pages[i]), mask_type),
327 intel_private.registers+I810_PTE_BASE+(j*4));
328 }
329 readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
330 break;
331 default:
332 goto out_err;
333 } 202 }
334 203
335out: 204 return 0;
336 ret = 0;
337out_err:
338 mem->is_flushed = true;
339 return ret;
340} 205}
341 206
342static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start, 207static void i810_cleanup(void)
343 int type) 208{
209 writel(0, intel_private.registers+I810_PGETBL_CTL);
210 free_gatt_pages(intel_private.i81x_gtt_table, I810_GTT_ORDER);
211}
212
213static int i810_insert_dcache_entries(struct agp_memory *mem, off_t pg_start,
214 int type)
344{ 215{
345 int i; 216 int i;
346 217
347 if (mem->page_count == 0) 218 if ((pg_start + mem->page_count)
348 return 0; 219 > intel_private.num_dcache_entries)
220 return -EINVAL;
221
222 if (!mem->is_flushed)
223 global_cache_flush();
349 224
350 for (i = pg_start; i < (mem->page_count + pg_start); i++) { 225 for (i = pg_start; i < (pg_start + mem->page_count); i++) {
351 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4)); 226 dma_addr_t addr = i << PAGE_SHIFT;
227 intel_private.driver->write_entry(addr,
228 i, type);
352 } 229 }
353 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); 230 readl(intel_private.gtt+i-1);
354 231
355 return 0; 232 return 0;
356} 233}
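
Note: the i81x GTT now lives in memory the driver allocates itself instead of going through the generic AGP gatt code. The constants in this hunk line up as follows (worked numbers):

	(1 << I810_GTT_ORDER) = 16 pages x 4 KiB = 64 KiB of PTEs,
	matching the "always 64kb in size" comment above;
	64 KiB / 4 bytes per PTE = 16384 entries;
	16384 entries x 4 KiB per page = 64 MiB, the largest i81x aperture
	window read back later in intel_gtt_mappable_entries().
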
@@ -397,29 +274,6 @@ static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
397 return new; 274 return new;
398} 275}
399 276
400static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
401{
402 struct agp_memory *new;
403
404 if (type == AGP_DCACHE_MEMORY) {
405 if (pg_count != intel_private.num_dcache_entries)
406 return NULL;
407
408 new = agp_create_memory(1);
409 if (new == NULL)
410 return NULL;
411
412 new->type = AGP_DCACHE_MEMORY;
413 new->page_count = pg_count;
414 new->num_scratch_pages = 0;
415 agp_free_page_array(new);
416 return new;
417 }
418 if (type == AGP_PHYS_MEMORY)
419 return alloc_agpphysmem_i8xx(pg_count, type);
420 return NULL;
421}
422
423static void intel_i810_free_by_type(struct agp_memory *curr) 277static void intel_i810_free_by_type(struct agp_memory *curr)
424{ 278{
425 agp_free_key(curr->key); 279 agp_free_key(curr->key);
@@ -437,13 +291,6 @@ static void intel_i810_free_by_type(struct agp_memory *curr)
437 kfree(curr); 291 kfree(curr);
438} 292}
439 293
440static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
441 dma_addr_t addr, int type)
442{
443 /* Type checking must be done elsewhere */
444 return addr | bridge->driver->masks[type].mask;
445}
446
447static int intel_gtt_setup_scratch_page(void) 294static int intel_gtt_setup_scratch_page(void)
448{ 295{
449 struct page *page; 296 struct page *page;
@@ -455,7 +302,7 @@ static int intel_gtt_setup_scratch_page(void)
455 get_page(page); 302 get_page(page);
456 set_pages_uc(page, 1); 303 set_pages_uc(page, 1);
457 304
458 if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) { 305 if (intel_private.base.needs_dmar) {
459 dma_addr = pci_map_page(intel_private.pcidev, page, 0, 306 dma_addr = pci_map_page(intel_private.pcidev, page, 0,
460 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 307 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
461 if (pci_dma_mapping_error(intel_private.pcidev, dma_addr)) 308 if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
@@ -470,34 +317,45 @@ static int intel_gtt_setup_scratch_page(void)
470 return 0; 317 return 0;
471} 318}
472 319
473static const struct aper_size_info_fixed const intel_fake_agp_sizes[] = { 320static void i810_write_entry(dma_addr_t addr, unsigned int entry,
321 unsigned int flags)
322{
323 u32 pte_flags = I810_PTE_VALID;
324
325 switch (flags) {
326 case AGP_DCACHE_MEMORY:
327 pte_flags |= I810_PTE_LOCAL;
328 break;
329 case AGP_USER_CACHED_MEMORY:
330 pte_flags |= I830_PTE_SYSTEM_CACHED;
331 break;
332 }
333
334 writel(addr | pte_flags, intel_private.gtt + entry);
335}
336
337static const struct aper_size_info_fixed intel_fake_agp_sizes[] = {
338 {32, 8192, 3},
339 {64, 16384, 4},
474 {128, 32768, 5}, 340 {128, 32768, 5},
475 /* The 64M mode still requires a 128k gatt */
476 {64, 16384, 5},
477 {256, 65536, 6}, 341 {256, 65536, 6},
478 {512, 131072, 7}, 342 {512, 131072, 7},
479}; 343};
480 344
481static unsigned int intel_gtt_stolen_entries(void) 345static unsigned int intel_gtt_stolen_size(void)
482{ 346{
483 u16 gmch_ctrl; 347 u16 gmch_ctrl;
484 u8 rdct; 348 u8 rdct;
485 int local = 0; 349 int local = 0;
486 static const int ddt[4] = { 0, 16, 32, 64 }; 350 static const int ddt[4] = { 0, 16, 32, 64 };
487 unsigned int overhead_entries, stolen_entries;
488 unsigned int stolen_size = 0; 351 unsigned int stolen_size = 0;
489 352
353 if (INTEL_GTT_GEN == 1)
354 return 0; /* no stolen mem on i81x */
355
490 pci_read_config_word(intel_private.bridge_dev, 356 pci_read_config_word(intel_private.bridge_dev,
491 I830_GMCH_CTRL, &gmch_ctrl); 357 I830_GMCH_CTRL, &gmch_ctrl);
492 358
493 if (INTEL_GTT_GEN > 4 || IS_PINEVIEW)
494 overhead_entries = 0;
495 else
496 overhead_entries = intel_private.base.gtt_mappable_entries
497 / 1024;
498
499 overhead_entries += 1; /* BIOS popup */
500
501 if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB || 359 if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
502 intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) { 360 intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
503 switch (gmch_ctrl & I830_GMCH_GMS_MASK) { 361 switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
@@ -623,12 +481,7 @@ static unsigned int intel_gtt_stolen_entries(void)
623 } 481 }
624 } 482 }
625 483
626 if (!local && stolen_size > intel_max_stolen) { 484 if (stolen_size > 0) {
627 dev_info(&intel_private.bridge_dev->dev,
628 "detected %dK stolen memory, trimming to %dK\n",
629 stolen_size / KB(1), intel_max_stolen / KB(1));
630 stolen_size = intel_max_stolen;
631 } else if (stolen_size > 0) {
632 dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n", 485 dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n",
633 stolen_size / KB(1), local ? "local" : "stolen"); 486 stolen_size / KB(1), local ? "local" : "stolen");
634 } else { 487 } else {
@@ -637,46 +490,88 @@ static unsigned int intel_gtt_stolen_entries(void)
637 stolen_size = 0; 490 stolen_size = 0;
638 } 491 }
639 492
640 stolen_entries = stolen_size/KB(4) - overhead_entries; 493 return stolen_size;
494}
641 495
642 return stolen_entries; 496static void i965_adjust_pgetbl_size(unsigned int size_flag)
497{
498 u32 pgetbl_ctl, pgetbl_ctl2;
499
500 /* ensure that ppgtt is disabled */
501 pgetbl_ctl2 = readl(intel_private.registers+I965_PGETBL_CTL2);
502 pgetbl_ctl2 &= ~I810_PGETBL_ENABLED;
503 writel(pgetbl_ctl2, intel_private.registers+I965_PGETBL_CTL2);
504
505 /* write the new ggtt size */
506 pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
507 pgetbl_ctl &= ~I965_PGETBL_SIZE_MASK;
508 pgetbl_ctl |= size_flag;
509 writel(pgetbl_ctl, intel_private.registers+I810_PGETBL_CTL);
643} 510}
644 511
645static unsigned int intel_gtt_total_entries(void) 512static unsigned int i965_gtt_total_entries(void)
646{ 513{
647 int size; 514 int size;
515 u32 pgetbl_ctl;
516 u16 gmch_ctl;
648 517
649 if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5) { 518 pci_read_config_word(intel_private.bridge_dev,
650 u32 pgetbl_ctl; 519 I830_GMCH_CTRL, &gmch_ctl);
651 pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
652 520
653 switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) { 521 if (INTEL_GTT_GEN == 5) {
654 case I965_PGETBL_SIZE_128KB: 522 switch (gmch_ctl & G4x_GMCH_SIZE_MASK) {
655 size = KB(128); 523 case G4x_GMCH_SIZE_1M:
656 break; 524 case G4x_GMCH_SIZE_VT_1M:
657 case I965_PGETBL_SIZE_256KB: 525 i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1MB);
658 size = KB(256);
659 break;
660 case I965_PGETBL_SIZE_512KB:
661 size = KB(512);
662 break;
663 case I965_PGETBL_SIZE_1MB:
664 size = KB(1024);
665 break; 526 break;
666 case I965_PGETBL_SIZE_2MB: 527 case G4x_GMCH_SIZE_VT_1_5M:
667 size = KB(2048); 528 i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1_5MB);
668 break; 529 break;
669 case I965_PGETBL_SIZE_1_5MB: 530 case G4x_GMCH_SIZE_2M:
670 size = KB(1024 + 512); 531 case G4x_GMCH_SIZE_VT_2M:
532 i965_adjust_pgetbl_size(I965_PGETBL_SIZE_2MB);
671 break; 533 break;
672 default:
673 dev_info(&intel_private.pcidev->dev,
674 "unknown page table size, assuming 512KB\n");
675 size = KB(512);
676 } 534 }
535 }
677 536
678 return size/4; 537 pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
679 } else if (INTEL_GTT_GEN == 6) { 538
539 switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
540 case I965_PGETBL_SIZE_128KB:
541 size = KB(128);
542 break;
543 case I965_PGETBL_SIZE_256KB:
544 size = KB(256);
545 break;
546 case I965_PGETBL_SIZE_512KB:
547 size = KB(512);
548 break;
549 /* GTT pagetable sizes bigger than 512KB are not possible on G33! */
550 case I965_PGETBL_SIZE_1MB:
551 size = KB(1024);
552 break;
553 case I965_PGETBL_SIZE_2MB:
554 size = KB(2048);
555 break;
556 case I965_PGETBL_SIZE_1_5MB:
557 size = KB(1024 + 512);
558 break;
559 default:
560 dev_info(&intel_private.pcidev->dev,
561 "unknown page table size, assuming 512KB\n");
562 size = KB(512);
563 }
564
565 return size/4;
566}
567
568static unsigned int intel_gtt_total_entries(void)
569{
570 int size;
571
572 if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
573 return i965_gtt_total_entries();
574 else if (INTEL_GTT_GEN == 6) {
680 u16 snb_gmch_ctl; 575 u16 snb_gmch_ctl;
681 576
682 pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl); 577 pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
@@ -706,7 +601,18 @@ static unsigned int intel_gtt_mappable_entries(void)
706{ 601{
707 unsigned int aperture_size; 602 unsigned int aperture_size;
708 603
709 if (INTEL_GTT_GEN == 2) { 604 if (INTEL_GTT_GEN == 1) {
605 u32 smram_miscc;
606
607 pci_read_config_dword(intel_private.bridge_dev,
608 I810_SMRAM_MISCC, &smram_miscc);
609
610 if ((smram_miscc & I810_GFX_MEM_WIN_SIZE)
611 == I810_GFX_MEM_WIN_32M)
612 aperture_size = MB(32);
613 else
614 aperture_size = MB(64);
615 } else if (INTEL_GTT_GEN == 2) {
710 u16 gmch_ctrl; 616 u16 gmch_ctrl;
711 617
712 pci_read_config_word(intel_private.bridge_dev, 618 pci_read_config_word(intel_private.bridge_dev,
@@ -739,7 +645,7 @@ static void intel_gtt_cleanup(void)
739 645
740 iounmap(intel_private.gtt); 646 iounmap(intel_private.gtt);
741 iounmap(intel_private.registers); 647 iounmap(intel_private.registers);
742 648
743 intel_gtt_teardown_scratch_page(); 649 intel_gtt_teardown_scratch_page();
744} 650}
745 651
@@ -755,6 +661,14 @@ static int intel_gtt_init(void)
755 intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries(); 661 intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
756 intel_private.base.gtt_total_entries = intel_gtt_total_entries(); 662 intel_private.base.gtt_total_entries = intel_gtt_total_entries();
757 663
664 /* save the PGETBL reg for resume */
665 intel_private.PGETBL_save =
666 readl(intel_private.registers+I810_PGETBL_CTL)
667 & ~I810_PGETBL_ENABLED;
668 /* we only ever restore the register when enabling the PGTBL... */
669 if (HAS_PGTBL_EN)
670 intel_private.PGETBL_save |= I810_PGETBL_ENABLED;
671
758 dev_info(&intel_private.bridge_dev->dev, 672 dev_info(&intel_private.bridge_dev->dev,
759 "detected gtt size: %dK total, %dK mappable\n", 673 "detected gtt size: %dK total, %dK mappable\n",
760 intel_private.base.gtt_total_entries * 4, 674 intel_private.base.gtt_total_entries * 4,
@@ -772,14 +686,7 @@ static int intel_gtt_init(void)
772 686
773 global_cache_flush(); /* FIXME: ? */ 687 global_cache_flush(); /* FIXME: ? */
774 688
775 /* we have to call this as early as possible after the MMIO base address is known */ 689 intel_private.base.stolen_size = intel_gtt_stolen_size();
776 intel_private.base.gtt_stolen_entries = intel_gtt_stolen_entries();
777 if (intel_private.base.gtt_stolen_entries == 0) {
778 intel_private.driver->cleanup();
779 iounmap(intel_private.registers);
780 iounmap(intel_private.gtt);
781 return -ENOMEM;
782 }
783 690
784 ret = intel_gtt_setup_scratch_page(); 691 ret = intel_gtt_setup_scratch_page();
785 if (ret != 0) { 692 if (ret != 0) {
@@ -787,6 +694,8 @@ static int intel_gtt_init(void)
787 return ret; 694 return ret;
788 } 695 }
789 696
697 intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
698
790 return 0; 699 return 0;
791} 700}
792 701
@@ -862,25 +771,19 @@ static void i830_write_entry(dma_addr_t addr, unsigned int entry,
862 unsigned int flags) 771 unsigned int flags)
863{ 772{
864 u32 pte_flags = I810_PTE_VALID; 773 u32 pte_flags = I810_PTE_VALID;
865 774
866 switch (flags) { 775 if (flags == AGP_USER_CACHED_MEMORY)
867 case AGP_DCACHE_MEMORY:
868 pte_flags |= I810_PTE_LOCAL;
869 break;
870 case AGP_USER_CACHED_MEMORY:
871 pte_flags |= I830_PTE_SYSTEM_CACHED; 776 pte_flags |= I830_PTE_SYSTEM_CACHED;
872 break;
873 }
874 777
875 writel(addr | pte_flags, intel_private.gtt + entry); 778 writel(addr | pte_flags, intel_private.gtt + entry);
876} 779}
877 780
878static void intel_enable_gtt(void) 781static bool intel_enable_gtt(void)
879{ 782{
880 u32 gma_addr; 783 u32 gma_addr;
881 u16 gmch_ctrl; 784 u8 __iomem *reg;
882 785
883 if (INTEL_GTT_GEN == 2) 786 if (INTEL_GTT_GEN <= 2)
884 pci_read_config_dword(intel_private.pcidev, I810_GMADDR, 787 pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
885 &gma_addr); 788 &gma_addr);
886 else 789 else
@@ -889,13 +792,38 @@ static void intel_enable_gtt(void)
889 792
890 intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK); 793 intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
891 794
892 pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl); 795 if (INTEL_GTT_GEN >= 6)
893 gmch_ctrl |= I830_GMCH_ENABLED; 796 return true;
894 pci_write_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, gmch_ctrl);
895 797
896 writel(intel_private.pte_bus_addr|I810_PGETBL_ENABLED, 798 if (INTEL_GTT_GEN == 2) {
897 intel_private.registers+I810_PGETBL_CTL); 799 u16 gmch_ctrl;
898 readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */ 800
801 pci_read_config_word(intel_private.bridge_dev,
802 I830_GMCH_CTRL, &gmch_ctrl);
803 gmch_ctrl |= I830_GMCH_ENABLED;
804 pci_write_config_word(intel_private.bridge_dev,
805 I830_GMCH_CTRL, gmch_ctrl);
806
807 pci_read_config_word(intel_private.bridge_dev,
808 I830_GMCH_CTRL, &gmch_ctrl);
809 if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) {
810 dev_err(&intel_private.pcidev->dev,
811 "failed to enable the GTT: GMCH_CTRL=%x\n",
812 gmch_ctrl);
813 return false;
814 }
815 }
816
817 reg = intel_private.registers+I810_PGETBL_CTL;
818 writel(intel_private.PGETBL_save, reg);
819 if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) {
820 dev_err(&intel_private.pcidev->dev,
821 "failed to enable the GTT: PGETBL=%x [expected %x]\n",
822 readl(reg), intel_private.PGETBL_save);
823 return false;
824 }
825
826 return true;
899} 827}
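
Note: intel_enable_gtt() now returns bool and reads each enable bit back instead of assuming the register writes took effect. The defensive pattern in isolation, as a sketch (enable_and_verify() is hypothetical):

static bool enable_and_verify(void __iomem *reg, u32 val, u32 enable_bit)
{
	writel(val | enable_bit, reg);		/* request the enable */
	return (readl(reg) & enable_bit) != 0;	/* read back: did it stick? */
}
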
900 828
901static int i830_setup(void) 829static int i830_setup(void)
@@ -910,8 +838,6 @@ static int i830_setup(void)
910 return -ENOMEM; 838 return -ENOMEM;
911 839
912 intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE; 840 intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
913 intel_private.pte_bus_addr =
914 readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
915 841
916 intel_i830_setup_flush(); 842 intel_i830_setup_flush();
917 843
@@ -936,12 +862,12 @@ static int intel_fake_agp_configure(void)
936{ 862{
937 int i; 863 int i;
938 864
939 intel_enable_gtt(); 865 if (!intel_enable_gtt())
866 return -EIO;
940 867
941 agp_bridge->gart_bus_addr = intel_private.gma_bus_addr; 868 agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
942 869
943 for (i = intel_private.base.gtt_stolen_entries; 870 for (i = 0; i < intel_private.base.gtt_total_entries; i++) {
944 i < intel_private.base.gtt_total_entries; i++) {
945 intel_private.driver->write_entry(intel_private.scratch_page_dma, 871 intel_private.driver->write_entry(intel_private.scratch_page_dma,
946 i, 0); 872 i, 0);
947 } 873 }
@@ -965,10 +891,10 @@ static bool i830_check_flags(unsigned int flags)
965 return false; 891 return false;
966} 892}
967 893
968static void intel_gtt_insert_sg_entries(struct scatterlist *sg_list, 894void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
969 unsigned int sg_len, 895 unsigned int sg_len,
970 unsigned int pg_start, 896 unsigned int pg_start,
971 unsigned int flags) 897 unsigned int flags)
972{ 898{
973 struct scatterlist *sg; 899 struct scatterlist *sg;
974 unsigned int len, m; 900 unsigned int len, m;
@@ -989,27 +915,34 @@ static void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
989 } 915 }
990 readl(intel_private.gtt+j-1); 916 readl(intel_private.gtt+j-1);
991} 917}
918EXPORT_SYMBOL(intel_gtt_insert_sg_entries);
919
920void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries,
921 struct page **pages, unsigned int flags)
922{
923 int i, j;
924
925 for (i = 0, j = first_entry; i < num_entries; i++, j++) {
926 dma_addr_t addr = page_to_phys(pages[i]);
927 intel_private.driver->write_entry(addr,
928 j, flags);
929 }
930 readl(intel_private.gtt+j-1);
931}
932EXPORT_SYMBOL(intel_gtt_insert_pages);
992 933
993static int intel_fake_agp_insert_entries(struct agp_memory *mem, 934static int intel_fake_agp_insert_entries(struct agp_memory *mem,
994 off_t pg_start, int type) 935 off_t pg_start, int type)
995{ 936{
996 int i, j;
997 int ret = -EINVAL; 937 int ret = -EINVAL;
998 938
939 if (INTEL_GTT_GEN == 1 && type == AGP_DCACHE_MEMORY)
940 return i810_insert_dcache_entries(mem, pg_start, type);
941
999 if (mem->page_count == 0) 942 if (mem->page_count == 0)
1000 goto out; 943 goto out;
1001 944
1002 if (pg_start < intel_private.base.gtt_stolen_entries) { 945 if (pg_start + mem->page_count > intel_private.base.gtt_total_entries)
1003 dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
1004 "pg_start == 0x%.8lx, gtt_stolen_entries == 0x%.8x\n",
1005 pg_start, intel_private.base.gtt_stolen_entries);
1006
1007 dev_info(&intel_private.pcidev->dev,
1008 "trying to insert into local/stolen memory\n");
1009 goto out_err;
1010 }
1011
1012 if ((pg_start + mem->page_count) > intel_private.base.gtt_total_entries)
1013 goto out_err; 946 goto out_err;
1014 947
1015 if (type != mem->type) 948 if (type != mem->type)
@@ -1021,21 +954,17 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
1021 if (!mem->is_flushed) 954 if (!mem->is_flushed)
1022 global_cache_flush(); 955 global_cache_flush();
1023 956
1024 if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) { 957 if (intel_private.base.needs_dmar) {
1025 ret = intel_agp_map_memory(mem); 958 ret = intel_gtt_map_memory(mem->pages, mem->page_count,
959 &mem->sg_list, &mem->num_sg);
1026 if (ret != 0) 960 if (ret != 0)
1027 return ret; 961 return ret;
1028 962
1029 intel_gtt_insert_sg_entries(mem->sg_list, mem->num_sg, 963 intel_gtt_insert_sg_entries(mem->sg_list, mem->num_sg,
1030 pg_start, type); 964 pg_start, type);
1031 } else { 965 } else
1032 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { 966 intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages,
1033 dma_addr_t addr = page_to_phys(mem->pages[i]); 967 type);
1034 intel_private.driver->write_entry(addr,
1035 j, type);
1036 }
1037 readl(intel_private.gtt+j-1);
1038 }
1039 968
1040out: 969out:
1041 ret = 0; 970 ret = 0;
@@ -1044,40 +973,54 @@ out_err:
1044 return ret; 973 return ret;
1045} 974}
1046 975
976void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
977{
978 unsigned int i;
979
980 for (i = first_entry; i < (first_entry + num_entries); i++) {
981 intel_private.driver->write_entry(intel_private.scratch_page_dma,
982 i, 0);
983 }
984 readl(intel_private.gtt+i-1);
985}
986EXPORT_SYMBOL(intel_gtt_clear_range);
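
Note: together with intel_gtt_insert_pages() above and intel_gtt_chipset_flush() further down, intel_gtt_clear_range() completes a bind/unbind cycle that needs no struct agp_memory at all. A hypothetical client on hardware without needs_dmar, as a sketch (AGP_USER_MEMORY is the standard agp_backend.h flag; the example_* names are invented):

static void example_bind(unsigned int first_entry, unsigned int count,
			 struct page **pages)
{
	intel_gtt_insert_pages(first_entry, count, pages, AGP_USER_MEMORY);
	intel_gtt_chipset_flush();
}

static void example_unbind(unsigned int first_entry, unsigned int count)
{
	/* every cleared entry is pointed back at the scratch page */
	intel_gtt_clear_range(first_entry, count);
}
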
987
1047static int intel_fake_agp_remove_entries(struct agp_memory *mem, 988static int intel_fake_agp_remove_entries(struct agp_memory *mem,
1048 off_t pg_start, int type) 989 off_t pg_start, int type)
1049{ 990{
1050 int i;
1051
1052 if (mem->page_count == 0) 991 if (mem->page_count == 0)
1053 return 0; 992 return 0;
1054 993
1055 if (pg_start < intel_private.base.gtt_stolen_entries) { 994 if (intel_private.base.needs_dmar) {
1056 dev_info(&intel_private.pcidev->dev, 995 intel_gtt_unmap_memory(mem->sg_list, mem->num_sg);
1057 "trying to disable local/stolen memory\n"); 996 mem->sg_list = NULL;
1058 return -EINVAL; 997 mem->num_sg = 0;
1059 } 998 }
1060 999
1061 if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) 1000 intel_gtt_clear_range(pg_start, mem->page_count);
1062 intel_agp_unmap_memory(mem);
1063
1064 for (i = pg_start; i < (mem->page_count + pg_start); i++) {
1065 intel_private.driver->write_entry(intel_private.scratch_page_dma,
1066 i, 0);
1067 }
1068 readl(intel_private.gtt+i-1);
1069 1001
1070 return 0; 1002 return 0;
1071} 1003}
1072 1004
1073static void intel_fake_agp_chipset_flush(struct agp_bridge_data *bridge)
1074{
1075 intel_private.driver->chipset_flush();
1076}
1077
1078static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count, 1005static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count,
1079 int type) 1006 int type)
1080{ 1007{
1008 struct agp_memory *new;
1009
1010 if (type == AGP_DCACHE_MEMORY && INTEL_GTT_GEN == 1) {
1011 if (pg_count != intel_private.num_dcache_entries)
1012 return NULL;
1013
1014 new = agp_create_memory(1);
1015 if (new == NULL)
1016 return NULL;
1017
1018 new->type = AGP_DCACHE_MEMORY;
1019 new->page_count = pg_count;
1020 new->num_scratch_pages = 0;
1021 agp_free_page_array(new);
1022 return new;
1023 }
1081 if (type == AGP_PHYS_MEMORY) 1024 if (type == AGP_PHYS_MEMORY)
1082 return alloc_agpphysmem_i8xx(pg_count, type); 1025 return alloc_agpphysmem_i8xx(pg_count, type);
1083 /* always return NULL for other allocation types for now */ 1026 /* always return NULL for other allocation types for now */
@@ -1274,40 +1217,11 @@ static int i9xx_setup(void)
1274 intel_private.gtt_bus_addr = reg_addr + gtt_offset; 1217 intel_private.gtt_bus_addr = reg_addr + gtt_offset;
1275 } 1218 }
1276 1219
1277 intel_private.pte_bus_addr =
1278 readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
1279
1280 intel_i9xx_setup_flush(); 1220 intel_i9xx_setup_flush();
1281 1221
1282 return 0; 1222 return 0;
1283} 1223}
1284 1224
1285static const struct agp_bridge_driver intel_810_driver = {
1286 .owner = THIS_MODULE,
1287 .aperture_sizes = intel_i810_sizes,
1288 .size_type = FIXED_APER_SIZE,
1289 .num_aperture_sizes = 2,
1290 .needs_scratch_page = true,
1291 .configure = intel_i810_configure,
1292 .fetch_size = intel_i810_fetch_size,
1293 .cleanup = intel_i810_cleanup,
1294 .mask_memory = intel_i810_mask_memory,
1295 .masks = intel_i810_masks,
1296 .agp_enable = intel_fake_agp_enable,
1297 .cache_flush = global_cache_flush,
1298 .create_gatt_table = agp_generic_create_gatt_table,
1299 .free_gatt_table = agp_generic_free_gatt_table,
1300 .insert_memory = intel_i810_insert_entries,
1301 .remove_memory = intel_i810_remove_entries,
1302 .alloc_by_type = intel_i810_alloc_by_type,
1303 .free_by_type = intel_i810_free_by_type,
1304 .agp_alloc_page = agp_generic_alloc_page,
1305 .agp_alloc_pages = agp_generic_alloc_pages,
1306 .agp_destroy_page = agp_generic_destroy_page,
1307 .agp_destroy_pages = agp_generic_destroy_pages,
1308 .agp_type_to_mask_type = agp_generic_type_to_mask_type,
1309};
1310
1311static const struct agp_bridge_driver intel_fake_agp_driver = { 1225static const struct agp_bridge_driver intel_fake_agp_driver = {
1312 .owner = THIS_MODULE, 1226 .owner = THIS_MODULE,
1313 .size_type = FIXED_APER_SIZE, 1227 .size_type = FIXED_APER_SIZE,
@@ -1328,15 +1242,20 @@ static const struct agp_bridge_driver intel_fake_agp_driver = {
1328 .agp_alloc_pages = agp_generic_alloc_pages, 1242 .agp_alloc_pages = agp_generic_alloc_pages,
1329 .agp_destroy_page = agp_generic_destroy_page, 1243 .agp_destroy_page = agp_generic_destroy_page,
1330 .agp_destroy_pages = agp_generic_destroy_pages, 1244 .agp_destroy_pages = agp_generic_destroy_pages,
1331 .chipset_flush = intel_fake_agp_chipset_flush,
1332}; 1245};
1333 1246
1334static const struct intel_gtt_driver i81x_gtt_driver = { 1247static const struct intel_gtt_driver i81x_gtt_driver = {
1335 .gen = 1, 1248 .gen = 1,
1249 .has_pgtbl_enable = 1,
1336 .dma_mask_size = 32, 1250 .dma_mask_size = 32,
1251 .setup = i810_setup,
1252 .cleanup = i810_cleanup,
1253 .check_flags = i830_check_flags,
1254 .write_entry = i810_write_entry,
1337}; 1255};
1338static const struct intel_gtt_driver i8xx_gtt_driver = { 1256static const struct intel_gtt_driver i8xx_gtt_driver = {
1339 .gen = 2, 1257 .gen = 2,
1258 .has_pgtbl_enable = 1,
1340 .setup = i830_setup, 1259 .setup = i830_setup,
1341 .cleanup = i830_cleanup, 1260 .cleanup = i830_cleanup,
1342 .write_entry = i830_write_entry, 1261 .write_entry = i830_write_entry,
@@ -1346,10 +1265,11 @@ static const struct intel_gtt_driver i8xx_gtt_driver = {
1346}; 1265};
1347static const struct intel_gtt_driver i915_gtt_driver = { 1266static const struct intel_gtt_driver i915_gtt_driver = {
1348 .gen = 3, 1267 .gen = 3,
1268 .has_pgtbl_enable = 1,
1349 .setup = i9xx_setup, 1269 .setup = i9xx_setup,
1350 .cleanup = i9xx_cleanup, 1270 .cleanup = i9xx_cleanup,
1351 /* i945 is the last gpu to need phys mem (for overlay and cursors). */ 1271 /* i945 is the last gpu to need phys mem (for overlay and cursors). */
1352 .write_entry = i830_write_entry, 1272 .write_entry = i830_write_entry,
1353 .dma_mask_size = 32, 1273 .dma_mask_size = 32,
1354 .check_flags = i830_check_flags, 1274 .check_flags = i830_check_flags,
1355 .chipset_flush = i9xx_chipset_flush, 1275 .chipset_flush = i9xx_chipset_flush,
@@ -1376,6 +1296,7 @@ static const struct intel_gtt_driver pineview_gtt_driver = {
1376}; 1296};
1377static const struct intel_gtt_driver i965_gtt_driver = { 1297static const struct intel_gtt_driver i965_gtt_driver = {
1378 .gen = 4, 1298 .gen = 4,
1299 .has_pgtbl_enable = 1,
1379 .setup = i9xx_setup, 1300 .setup = i9xx_setup,
1380 .cleanup = i9xx_cleanup, 1301 .cleanup = i9xx_cleanup,
1381 .write_entry = i965_write_entry, 1302 .write_entry = i965_write_entry,
@@ -1419,93 +1340,92 @@ static const struct intel_gtt_driver sandybridge_gtt_driver = {
1419static const struct intel_gtt_driver_description { 1340static const struct intel_gtt_driver_description {
1420 unsigned int gmch_chip_id; 1341 unsigned int gmch_chip_id;
1421 char *name; 1342 char *name;
1422 const struct agp_bridge_driver *gmch_driver;
1423 const struct intel_gtt_driver *gtt_driver; 1343 const struct intel_gtt_driver *gtt_driver;
1424} intel_gtt_chipsets[] = { 1344} intel_gtt_chipsets[] = {
1425 { PCI_DEVICE_ID_INTEL_82810_IG1, "i810", &intel_810_driver, 1345 { PCI_DEVICE_ID_INTEL_82810_IG1, "i810",
1426 &i81x_gtt_driver}, 1346 &i81x_gtt_driver},
1427 { PCI_DEVICE_ID_INTEL_82810_IG3, "i810", &intel_810_driver, 1347 { PCI_DEVICE_ID_INTEL_82810_IG3, "i810",
1428 &i81x_gtt_driver}, 1348 &i81x_gtt_driver},
1429 { PCI_DEVICE_ID_INTEL_82810E_IG, "i810", &intel_810_driver, 1349 { PCI_DEVICE_ID_INTEL_82810E_IG, "i810",
1430 &i81x_gtt_driver}, 1350 &i81x_gtt_driver},
1431 { PCI_DEVICE_ID_INTEL_82815_CGC, "i815", &intel_810_driver, 1351 { PCI_DEVICE_ID_INTEL_82815_CGC, "i815",
1432 &i81x_gtt_driver}, 1352 &i81x_gtt_driver},
1433 { PCI_DEVICE_ID_INTEL_82830_CGC, "830M", 1353 { PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
1434 &intel_fake_agp_driver, &i8xx_gtt_driver}, 1354 &i8xx_gtt_driver},
1435 { PCI_DEVICE_ID_INTEL_82845G_IG, "830M", 1355 { PCI_DEVICE_ID_INTEL_82845G_IG, "830M",
1436 &intel_fake_agp_driver, &i8xx_gtt_driver}, 1356 &i8xx_gtt_driver},
1437 { PCI_DEVICE_ID_INTEL_82854_IG, "854", 1357 { PCI_DEVICE_ID_INTEL_82854_IG, "854",
1438 &intel_fake_agp_driver, &i8xx_gtt_driver}, 1358 &i8xx_gtt_driver},
1439 { PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM", 1359 { PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
1440 &intel_fake_agp_driver, &i8xx_gtt_driver}, 1360 &i8xx_gtt_driver},
1441 { PCI_DEVICE_ID_INTEL_82865_IG, "865", 1361 { PCI_DEVICE_ID_INTEL_82865_IG, "865",
1442 &intel_fake_agp_driver, &i8xx_gtt_driver}, 1362 &i8xx_gtt_driver},
1443 { PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)", 1363 { PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
1444 &intel_fake_agp_driver, &i915_gtt_driver }, 1364 &i915_gtt_driver },
1445 { PCI_DEVICE_ID_INTEL_82915G_IG, "915G", 1365 { PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
1446 &intel_fake_agp_driver, &i915_gtt_driver }, 1366 &i915_gtt_driver },
1447 { PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM", 1367 { PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
1448 &intel_fake_agp_driver, &i915_gtt_driver }, 1368 &i915_gtt_driver },
1449 { PCI_DEVICE_ID_INTEL_82945G_IG, "945G", 1369 { PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
1450 &intel_fake_agp_driver, &i915_gtt_driver }, 1370 &i915_gtt_driver },
1451 { PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM", 1371 { PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
1452 &intel_fake_agp_driver, &i915_gtt_driver }, 1372 &i915_gtt_driver },
1453 { PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME", 1373 { PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
1454 &intel_fake_agp_driver, &i915_gtt_driver }, 1374 &i915_gtt_driver },
1455 { PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ", 1375 { PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
1456 &intel_fake_agp_driver, &i965_gtt_driver }, 1376 &i965_gtt_driver },
1457 { PCI_DEVICE_ID_INTEL_82G35_IG, "G35", 1377 { PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
1458 &intel_fake_agp_driver, &i965_gtt_driver }, 1378 &i965_gtt_driver },
1459 { PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q", 1379 { PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
1460 &intel_fake_agp_driver, &i965_gtt_driver }, 1380 &i965_gtt_driver },
1461 { PCI_DEVICE_ID_INTEL_82965G_IG, "965G", 1381 { PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
1462 &intel_fake_agp_driver, &i965_gtt_driver }, 1382 &i965_gtt_driver },
1463 { PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM", 1383 { PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
1464 &intel_fake_agp_driver, &i965_gtt_driver }, 1384 &i965_gtt_driver },
1465 { PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE", 1385 { PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
1466 &intel_fake_agp_driver, &i965_gtt_driver }, 1386 &i965_gtt_driver },
1467 { PCI_DEVICE_ID_INTEL_G33_IG, "G33", 1387 { PCI_DEVICE_ID_INTEL_G33_IG, "G33",
1468 &intel_fake_agp_driver, &g33_gtt_driver }, 1388 &g33_gtt_driver },
1469 { PCI_DEVICE_ID_INTEL_Q35_IG, "Q35", 1389 { PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
1470 &intel_fake_agp_driver, &g33_gtt_driver }, 1390 &g33_gtt_driver },
1471 { PCI_DEVICE_ID_INTEL_Q33_IG, "Q33", 1391 { PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
1472 &intel_fake_agp_driver, &g33_gtt_driver }, 1392 &g33_gtt_driver },
1473 { PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150", 1393 { PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
1474 &intel_fake_agp_driver, &pineview_gtt_driver }, 1394 &pineview_gtt_driver },
1475 { PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150", 1395 { PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
1476 &intel_fake_agp_driver, &pineview_gtt_driver }, 1396 &pineview_gtt_driver },
1477 { PCI_DEVICE_ID_INTEL_GM45_IG, "GM45", 1397 { PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
1478 &intel_fake_agp_driver, &g4x_gtt_driver }, 1398 &g4x_gtt_driver },
1479 { PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake", 1399 { PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
1480 &intel_fake_agp_driver, &g4x_gtt_driver }, 1400 &g4x_gtt_driver },
1481 { PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43", 1401 { PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
1482 &intel_fake_agp_driver, &g4x_gtt_driver }, 1402 &g4x_gtt_driver },
1483 { PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43", 1403 { PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
1484 &intel_fake_agp_driver, &g4x_gtt_driver }, 1404 &g4x_gtt_driver },
1485 { PCI_DEVICE_ID_INTEL_B43_IG, "B43", 1405 { PCI_DEVICE_ID_INTEL_B43_IG, "B43",
1486 &intel_fake_agp_driver, &g4x_gtt_driver }, 1406 &g4x_gtt_driver },
1487 { PCI_DEVICE_ID_INTEL_B43_1_IG, "B43", 1407 { PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
1488 &intel_fake_agp_driver, &g4x_gtt_driver }, 1408 &g4x_gtt_driver },
1489 { PCI_DEVICE_ID_INTEL_G41_IG, "G41", 1409 { PCI_DEVICE_ID_INTEL_G41_IG, "G41",
1490 &intel_fake_agp_driver, &g4x_gtt_driver }, 1410 &g4x_gtt_driver },
1491 { PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, 1411 { PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
1492 "HD Graphics", &intel_fake_agp_driver, &ironlake_gtt_driver }, 1412 "HD Graphics", &ironlake_gtt_driver },
1493 { PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 1413 { PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
1494 "HD Graphics", &intel_fake_agp_driver, &ironlake_gtt_driver }, 1414 "HD Graphics", &ironlake_gtt_driver },
1495 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG, 1415 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
1496 "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver }, 1416 "Sandybridge", &sandybridge_gtt_driver },
1497 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG, 1417 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
1498 "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver }, 1418 "Sandybridge", &sandybridge_gtt_driver },
1499 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG, 1419 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
1500 "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver }, 1420 "Sandybridge", &sandybridge_gtt_driver },
1501 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG, 1421 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
1502 "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver }, 1422 "Sandybridge", &sandybridge_gtt_driver },
1503 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG, 1423 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
1504 "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver }, 1424 "Sandybridge", &sandybridge_gtt_driver },
1505 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG, 1425 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
1506 "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver }, 1426 "Sandybridge", &sandybridge_gtt_driver },
1507 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG, 1427 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
1508 "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver }, 1428 "Sandybridge", &sandybridge_gtt_driver },
1509 { 0, NULL, NULL } 1429 { 0, NULL, NULL }
1510}; 1430};
1511 1431
@@ -1530,21 +1450,20 @@ int intel_gmch_probe(struct pci_dev *pdev,
1530 struct agp_bridge_data *bridge) 1450 struct agp_bridge_data *bridge)
1531{ 1451{
1532 int i, mask; 1452 int i, mask;
1533 bridge->driver = NULL; 1453 intel_private.driver = NULL;
1534 1454
1535 for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) { 1455 for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
1536 if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) { 1456 if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
1537 bridge->driver = 1457 intel_private.driver =
1538 intel_gtt_chipsets[i].gmch_driver;
1539 intel_private.driver =
1540 intel_gtt_chipsets[i].gtt_driver; 1458 intel_gtt_chipsets[i].gtt_driver;
1541 break; 1459 break;
1542 } 1460 }
1543 } 1461 }
1544 1462
1545 if (!bridge->driver) 1463 if (!intel_private.driver)
1546 return 0; 1464 return 0;
1547 1465
1466 bridge->driver = &intel_fake_agp_driver;
1548 bridge->dev_private_data = &intel_private; 1467 bridge->dev_private_data = &intel_private;
1549 bridge->dev = pdev; 1468 bridge->dev = pdev;
1550 1469
@@ -1560,8 +1479,8 @@ int intel_gmch_probe(struct pci_dev *pdev,
1560 pci_set_consistent_dma_mask(intel_private.pcidev, 1479 pci_set_consistent_dma_mask(intel_private.pcidev,
1561 DMA_BIT_MASK(mask)); 1480 DMA_BIT_MASK(mask));
1562 1481
1563 if (bridge->driver == &intel_810_driver) 1482 /*if (bridge->driver == &intel_810_driver)
1564 return 1; 1483 return 1;*/
1565 1484
1566 if (intel_gtt_init() != 0) 1485 if (intel_gtt_init() != 0)
1567 return 0; 1486 return 0;
@@ -1570,12 +1489,19 @@ int intel_gmch_probe(struct pci_dev *pdev,
1570} 1489}
1571EXPORT_SYMBOL(intel_gmch_probe); 1490EXPORT_SYMBOL(intel_gmch_probe);
1572 1491
1573struct intel_gtt *intel_gtt_get(void) 1492const struct intel_gtt *intel_gtt_get(void)
1574{ 1493{
1575 return &intel_private.base; 1494 return &intel_private.base;
1576} 1495}
1577EXPORT_SYMBOL(intel_gtt_get); 1496EXPORT_SYMBOL(intel_gtt_get);
1578 1497
1498void intel_gtt_chipset_flush(void)
1499{
1500 if (intel_private.driver->chipset_flush)
1501 intel_private.driver->chipset_flush();
1502}
1503EXPORT_SYMBOL(intel_gtt_chipset_flush);
1504
1579void intel_gmch_remove(struct pci_dev *pdev) 1505void intel_gmch_remove(struct pci_dev *pdev)
1580{ 1506{
1581 if (intel_private.pcidev) 1507 if (intel_private.pcidev)
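
Note: with the gmch_driver column removed from intel_gtt_chipsets[], every supported chipset binds to intel_fake_agp_driver and only the gtt_driver ops differ. The probe loop above then reduces to a plain table lookup, sketched standalone here (find_gtt_driver() is hypothetical):

static const struct intel_gtt_driver *find_gtt_driver(unsigned int chip_id)
{
	int i;

	for (i = 0; intel_gtt_chipsets[i].name != NULL; i++)
		if (intel_gtt_chipsets[i].gmch_chip_id == chip_id)
			return intel_gtt_chipsets[i].gtt_driver;

	return NULL;	/* unknown GMCH: intel_gmch_probe() returns 0 */
}
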
diff --git a/drivers/char/hvc_vio.c b/drivers/char/hvc_vio.c
index 27370e99c66f..5e2f52b33327 100644
--- a/drivers/char/hvc_vio.c
+++ b/drivers/char/hvc_vio.c
@@ -39,7 +39,7 @@
39 39
40#include "hvc_console.h" 40#include "hvc_console.h"
41 41
42char hvc_driver_name[] = "hvc_console"; 42static const char hvc_driver_name[] = "hvc_console";
43 43
44static struct vio_device_id hvc_driver_table[] __devinitdata = { 44static struct vio_device_id hvc_driver_table[] __devinitdata = {
45 {"serial", "hvterm1"}, 45 {"serial", "hvterm1"},
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index f4d334f2536e..320668f4c3aa 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -1081,7 +1081,7 @@ ipmi_nmi(struct notifier_block *self, unsigned long val, void *data)
1081{ 1081{
1082 struct die_args *args = data; 1082 struct die_args *args = data;
1083 1083
1084 if (val != DIE_NMI) 1084 if (val != DIE_NMIUNKNOWN)
1085 return NOTIFY_OK; 1085 return NOTIFY_OK;
1086 1086
1087 /* Hack, if it's a memory or I/O error, ignore it. */ 1087 /* Hack, if it's a memory or I/O error, ignore it. */
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6ee23592700a..ef138731c0ea 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -109,7 +109,7 @@ config FSL_DMA
109 109
110config MPC512X_DMA 110config MPC512X_DMA
111 tristate "Freescale MPC512x built-in DMA engine support" 111 tristate "Freescale MPC512x built-in DMA engine support"
112 depends on PPC_MPC512x 112 depends on PPC_MPC512x || PPC_MPC831x
113 select DMA_ENGINE 113 select DMA_ENGINE
114 ---help--- 114 ---help---
115 Enable support for the Freescale MPC512x built-in DMA engine. 115 Enable support for the Freescale MPC512x built-in DMA engine.
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 4e9cbf300594..59c270192ccc 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -1,6 +1,7 @@
1/* 1/*
2 * Copyright (C) Freescale Semicondutor, Inc. 2007, 2008. 2 * Copyright (C) Freescale Semicondutor, Inc. 2007, 2008.
3 * Copyright (C) Semihalf 2009 3 * Copyright (C) Semihalf 2009
4 * Copyright (C) Ilya Yanok, Emcraft Systems 2010
4 * 5 *
5 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description 6 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
6 * (defines, structures and comments) was taken from MPC5121 DMA driver 7 * (defines, structures and comments) was taken from MPC5121 DMA driver
@@ -70,6 +71,8 @@
70#define MPC_DMA_DMAES_SBE (1 << 1) 71#define MPC_DMA_DMAES_SBE (1 << 1)
71#define MPC_DMA_DMAES_DBE (1 << 0) 72#define MPC_DMA_DMAES_DBE (1 << 0)
72 73
74#define MPC_DMA_DMAGPOR_SNOOP_ENABLE (1 << 6)
75
73#define MPC_DMA_TSIZE_1 0x00 76#define MPC_DMA_TSIZE_1 0x00
74#define MPC_DMA_TSIZE_2 0x01 77#define MPC_DMA_TSIZE_2 0x01
75#define MPC_DMA_TSIZE_4 0x02 78#define MPC_DMA_TSIZE_4 0x02
@@ -104,7 +107,10 @@ struct __attribute__ ((__packed__)) mpc_dma_regs {
104 /* 0x30 */ 107 /* 0x30 */
105 u32 dmahrsh; /* DMA hw request status high(ch63~32) */ 108 u32 dmahrsh; /* DMA hw request status high(ch63~32) */
106 u32 dmahrsl; /* DMA hardware request status low(ch31~0) */ 109 u32 dmahrsl; /* DMA hardware request status low(ch31~0) */
107 u32 dmaihsa; /* DMA interrupt high select AXE(ch63~32) */ 110 union {
111 u32 dmaihsa; /* DMA interrupt high select AXE(ch63~32) */
112 u32 dmagpor; /* (General purpose register on MPC8308) */
113 };
108 u32 dmailsa; /* DMA interrupt low select AXE(ch31~0) */ 114 u32 dmailsa; /* DMA interrupt low select AXE(ch31~0) */
109 /* 0x40 ~ 0xff */ 115 /* 0x40 ~ 0xff */
110 u32 reserve0[48]; /* Reserved */ 116 u32 reserve0[48]; /* Reserved */
@@ -195,7 +201,9 @@ struct mpc_dma {
195 struct mpc_dma_regs __iomem *regs; 201 struct mpc_dma_regs __iomem *regs;
196 struct mpc_dma_tcd __iomem *tcd; 202 struct mpc_dma_tcd __iomem *tcd;
197 int irq; 203 int irq;
204 int irq2;
198 uint error_status; 205 uint error_status;
206 int is_mpc8308;
199 207
200 /* Lock for error_status field in this structure */ 208 /* Lock for error_status field in this structure */
201 spinlock_t error_status_lock; 209 spinlock_t error_status_lock;
@@ -252,11 +260,13 @@ static void mpc_dma_execute(struct mpc_dma_chan *mchan)
252 prev = mdesc; 260 prev = mdesc;
253 } 261 }
254 262
255 prev->tcd->start = 0;
256 prev->tcd->int_maj = 1; 263 prev->tcd->int_maj = 1;
257 264
258 /* Send first descriptor in chain into hardware */ 265 /* Send first descriptor in chain into hardware */
259 memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd)); 266 memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd));
267
268 if (first != prev)
269 mdma->tcd[cid].e_sg = 1;
260 out_8(&mdma->regs->dmassrt, cid); 270 out_8(&mdma->regs->dmassrt, cid);
261} 271}
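
Note: mpc_dma_execute() now sets e_sg on the TCD copied into the hardware slot, and only when the chain holds more than one descriptor; clearing tcd->start beforehand is no longer needed. How two descriptors link up (the dlast_sga assignment itself happens in the loop above this hunk), as a sketch using this driver's mpc_dma_desc fields, with example_chain() hypothetical:

static void example_chain(struct mpc_dma_desc *first,
			  struct mpc_dma_desc *second)
{
	/* hardware follows dlast_sga to the next TCD while e_sg is set */
	first->tcd->dlast_sga = second->tcd_paddr;
	first->tcd->e_sg = 1;
	second->tcd->int_maj = 1;	/* one interrupt, after the last TCD */
}
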
262 272
@@ -274,6 +284,9 @@ static void mpc_dma_irq_process(struct mpc_dma *mdma, u32 is, u32 es, int off)
274 284
275 spin_lock(&mchan->lock); 285 spin_lock(&mchan->lock);
276 286
287 out_8(&mdma->regs->dmacint, ch + off);
288 out_8(&mdma->regs->dmacerr, ch + off);
289
277 /* Check error status */ 290 /* Check error status */
278 if (es & (1 << ch)) 291 if (es & (1 << ch))
279 list_for_each_entry(mdesc, &mchan->active, node) 292 list_for_each_entry(mdesc, &mchan->active, node)
@@ -302,36 +315,68 @@ static irqreturn_t mpc_dma_irq(int irq, void *data)
302 spin_unlock(&mdma->error_status_lock); 315 spin_unlock(&mdma->error_status_lock);
303 316
304 /* Handle interrupt on each channel */ 317 /* Handle interrupt on each channel */
305 mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth), 318 if (mdma->dma.chancnt > 32) {
319 mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
306 in_be32(&mdma->regs->dmaerrh), 32); 320 in_be32(&mdma->regs->dmaerrh), 32);
321 }
307 mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl), 322 mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl),
308 in_be32(&mdma->regs->dmaerrl), 0); 323 in_be32(&mdma->regs->dmaerrl), 0);
309 324
310 /* Ack interrupt on all channels */
311 out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
312 out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
313 out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
314 out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);
315
316 /* Schedule tasklet */ 325 /* Schedule tasklet */
317 tasklet_schedule(&mdma->tasklet); 326 tasklet_schedule(&mdma->tasklet);
318 327
319 return IRQ_HANDLED; 328 return IRQ_HANDLED;
320} 329}
321 330
322/* DMA Tasklet */ 331/* process completed descriptors */
323static void mpc_dma_tasklet(unsigned long data) 332static void mpc_dma_process_completed(struct mpc_dma *mdma)
324{ 333{
325 struct mpc_dma *mdma = (void *)data;
326 dma_cookie_t last_cookie = 0; 334 dma_cookie_t last_cookie = 0;
327 struct mpc_dma_chan *mchan; 335 struct mpc_dma_chan *mchan;
328 struct mpc_dma_desc *mdesc; 336 struct mpc_dma_desc *mdesc;
329 struct dma_async_tx_descriptor *desc; 337 struct dma_async_tx_descriptor *desc;
330 unsigned long flags; 338 unsigned long flags;
331 LIST_HEAD(list); 339 LIST_HEAD(list);
332 uint es;
333 int i; 340 int i;
334 341
342 for (i = 0; i < mdma->dma.chancnt; i++) {
343 mchan = &mdma->channels[i];
344
345 /* Get all completed descriptors */
346 spin_lock_irqsave(&mchan->lock, flags);
347 if (!list_empty(&mchan->completed))
348 list_splice_tail_init(&mchan->completed, &list);
349 spin_unlock_irqrestore(&mchan->lock, flags);
350
351 if (list_empty(&list))
352 continue;
353
354 /* Execute callbacks and run dependencies */
355 list_for_each_entry(mdesc, &list, node) {
356 desc = &mdesc->desc;
357
358 if (desc->callback)
359 desc->callback(desc->callback_param);
360
361 last_cookie = desc->cookie;
362 dma_run_dependencies(desc);
363 }
364
365 /* Free descriptors */
366 spin_lock_irqsave(&mchan->lock, flags);
367 list_splice_tail_init(&list, &mchan->free);
368 mchan->completed_cookie = last_cookie;
369 spin_unlock_irqrestore(&mchan->lock, flags);
370 }
371}
372
373/* DMA Tasklet */
374static void mpc_dma_tasklet(unsigned long data)
375{
376 struct mpc_dma *mdma = (void *)data;
377 unsigned long flags;
378 uint es;
379
335 spin_lock_irqsave(&mdma->error_status_lock, flags); 380 spin_lock_irqsave(&mdma->error_status_lock, flags);
336 es = mdma->error_status; 381 es = mdma->error_status;
337 mdma->error_status = 0; 382 mdma->error_status = 0;
@@ -370,35 +415,7 @@ static void mpc_dma_tasklet(unsigned long data)
370 dev_err(mdma->dma.dev, "- Destination Bus Error\n"); 415 dev_err(mdma->dma.dev, "- Destination Bus Error\n");
371 } 416 }
372 417
373 for (i = 0; i < mdma->dma.chancnt; i++) { 418 mpc_dma_process_completed(mdma);
374 mchan = &mdma->channels[i];
375
376 /* Get all completed descriptors */
377 spin_lock_irqsave(&mchan->lock, flags);
378 if (!list_empty(&mchan->completed))
379 list_splice_tail_init(&mchan->completed, &list);
380 spin_unlock_irqrestore(&mchan->lock, flags);
381
382 if (list_empty(&list))
383 continue;
384
385 /* Execute callbacks and run dependencies */
386 list_for_each_entry(mdesc, &list, node) {
387 desc = &mdesc->desc;
388
389 if (desc->callback)
390 desc->callback(desc->callback_param);
391
392 last_cookie = desc->cookie;
393 dma_run_dependencies(desc);
394 }
395
396 /* Free descriptors */
397 spin_lock_irqsave(&mchan->lock, flags);
398 list_splice_tail_init(&list, &mchan->free);
399 mchan->completed_cookie = last_cookie;
400 spin_unlock_irqrestore(&mchan->lock, flags);
401 }
402} 419}
403 420
404/* Submit descriptor to hardware */ 421/* Submit descriptor to hardware */
@@ -563,6 +580,7 @@ static struct dma_async_tx_descriptor *
563mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, 580mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
564 size_t len, unsigned long flags) 581 size_t len, unsigned long flags)
565{ 582{
583 struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
566 struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan); 584 struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
567 struct mpc_dma_desc *mdesc = NULL; 585 struct mpc_dma_desc *mdesc = NULL;
568 struct mpc_dma_tcd *tcd; 586 struct mpc_dma_tcd *tcd;
@@ -577,8 +595,11 @@ mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
577 } 595 }
578 spin_unlock_irqrestore(&mchan->lock, iflags); 596 spin_unlock_irqrestore(&mchan->lock, iflags);
579 597
580 if (!mdesc) 598 if (!mdesc) {
599 /* try to free completed descriptors */
600 mpc_dma_process_completed(mdma);
581 return NULL; 601 return NULL;
602 }
582 603
583 mdesc->error = 0; 604 mdesc->error = 0;
584 tcd = mdesc->tcd; 605 tcd = mdesc->tcd;
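
Note: a failed prep now reclaims completed descriptors as a side effect, so a caller that gets NULL can retry once before treating the channel as exhausted. A sketch against the standard dmaengine device_prep_dma_memcpy callback; the retry idiom is the only invented part:

static struct dma_async_tx_descriptor *
example_prep_retry(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
		   size_t len)
{
	struct dma_async_tx_descriptor *tx;

	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, 0);
	if (!tx)	/* the failed call freed completed descriptors */
		tx = chan->device->device_prep_dma_memcpy(chan, dst, src,
							  len, 0);
	return tx;
}
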
@@ -591,7 +612,8 @@ mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
591 tcd->dsize = MPC_DMA_TSIZE_32; 612 tcd->dsize = MPC_DMA_TSIZE_32;
592 tcd->soff = 32; 613 tcd->soff = 32;
593 tcd->doff = 32; 614 tcd->doff = 32;
594 } else if (IS_ALIGNED(src | dst | len, 16)) { 615 } else if (!mdma->is_mpc8308 && IS_ALIGNED(src | dst | len, 16)) {
616 /* MPC8308 doesn't support 16 byte transfers */
595 tcd->ssize = MPC_DMA_TSIZE_16; 617 tcd->ssize = MPC_DMA_TSIZE_16;
596 tcd->dsize = MPC_DMA_TSIZE_16; 618 tcd->dsize = MPC_DMA_TSIZE_16;
597 tcd->soff = 16; 619 tcd->soff = 16;
@@ -651,6 +673,15 @@ static int __devinit mpc_dma_probe(struct platform_device *op,
651 return -EINVAL; 673 return -EINVAL;
652 } 674 }
653 675
676 if (of_device_is_compatible(dn, "fsl,mpc8308-dma")) {
677 mdma->is_mpc8308 = 1;
678 mdma->irq2 = irq_of_parse_and_map(dn, 1);
679 if (mdma->irq2 == NO_IRQ) {
680 dev_err(dev, "Error mapping IRQ!\n");
681 return -EINVAL;
682 }
683 }
684
654 retval = of_address_to_resource(dn, 0, &res); 685 retval = of_address_to_resource(dn, 0, &res);
655 if (retval) { 686 if (retval) {
656 dev_err(dev, "Error parsing memory region!\n"); 687 dev_err(dev, "Error parsing memory region!\n");
@@ -681,11 +712,23 @@ static int __devinit mpc_dma_probe(struct platform_device *op,
681 return -EINVAL; 712 return -EINVAL;
682 } 713 }
683 714
715 if (mdma->is_mpc8308) {
716 retval = devm_request_irq(dev, mdma->irq2, &mpc_dma_irq, 0,
717 DRV_NAME, mdma);
718 if (retval) {
719 dev_err(dev, "Error requesting IRQ2!\n");
720 return -EINVAL;
721 }
722 }
723
684 spin_lock_init(&mdma->error_status_lock); 724 spin_lock_init(&mdma->error_status_lock);
685 725
686 dma = &mdma->dma; 726 dma = &mdma->dma;
687 dma->dev = dev; 727 dma->dev = dev;
688 dma->chancnt = MPC_DMA_CHANNELS; 728 if (!mdma->is_mpc8308)
729 dma->chancnt = MPC_DMA_CHANNELS;
730 else
731 dma->chancnt = 16; /* MPC8308 DMA has only 16 channels */
689 dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources; 732 dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
690 dma->device_free_chan_resources = mpc_dma_free_chan_resources; 733 dma->device_free_chan_resources = mpc_dma_free_chan_resources;
691 dma->device_issue_pending = mpc_dma_issue_pending; 734 dma->device_issue_pending = mpc_dma_issue_pending;
@@ -721,26 +764,40 @@ static int __devinit mpc_dma_probe(struct platform_device *op,
721 * - Round-robin group arbitration, 764 * - Round-robin group arbitration,
722 * - Round-robin channel arbitration. 765 * - Round-robin channel arbitration.
723 */ 766 */
724 out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG | 767 if (!mdma->is_mpc8308) {
725 MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA); 768 out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
726 769 MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA);
727 /* Disable hardware DMA requests */ 770
728 out_be32(&mdma->regs->dmaerqh, 0); 771 /* Disable hardware DMA requests */
729 out_be32(&mdma->regs->dmaerql, 0); 772 out_be32(&mdma->regs->dmaerqh, 0);
730 773 out_be32(&mdma->regs->dmaerql, 0);
731 /* Disable error interrupts */ 774
732 out_be32(&mdma->regs->dmaeeih, 0); 775 /* Disable error interrupts */
733 out_be32(&mdma->regs->dmaeeil, 0); 776 out_be32(&mdma->regs->dmaeeih, 0);
734 777 out_be32(&mdma->regs->dmaeeil, 0);
735 /* Clear interrupts status */ 778
736 out_be32(&mdma->regs->dmainth, 0xFFFFFFFF); 779 /* Clear interrupts status */
737 out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF); 780 out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
738 out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF); 781 out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
739 out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF); 782 out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
740 783 out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);
741 /* Route interrupts to IPIC */ 784
742 out_be32(&mdma->regs->dmaihsa, 0); 785 /* Route interrupts to IPIC */
743 out_be32(&mdma->regs->dmailsa, 0); 786 out_be32(&mdma->regs->dmaihsa, 0);
787 out_be32(&mdma->regs->dmailsa, 0);
788 } else {
789 /* MPC8308 has 16 channels and lacks some registers */
790 out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_ERCA);
791
792 /* enable snooping */
793 out_be32(&mdma->regs->dmagpor, MPC_DMA_DMAGPOR_SNOOP_ENABLE);
794 /* Disable error interrupts */
795 out_be32(&mdma->regs->dmaeeil, 0);
796
797 /* Clear interrupts status */
798 out_be32(&mdma->regs->dmaintl, 0xFFFF);
799 out_be32(&mdma->regs->dmaerrl, 0xFFFF);
800 }
744 801
745 /* Register DMA engine */ 802 /* Register DMA engine */
746 dev_set_drvdata(dev, mdma); 803 dev_set_drvdata(dev, mdma);
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index 252fdb98b73a..0cb2ba50af53 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -466,10 +466,4 @@ drm_agp_bind_pages(struct drm_device *dev,
466} 466}
467EXPORT_SYMBOL(drm_agp_bind_pages); 467EXPORT_SYMBOL(drm_agp_bind_pages);
468 468
469void drm_agp_chipset_flush(struct drm_device *dev)
470{
471 agp_flush_chipset(dev->agp->bridge);
472}
473EXPORT_SYMBOL(drm_agp_chipset_flush);
474
475#endif /* __OS_HAS_AGP */ 469#endif /* __OS_HAS_AGP */
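
Note: drm_agp_chipset_flush() disappears because the intel-gtt rework above exports intel_gtt_chipset_flush() directly, so an i915-style caller no longer needs the AGP bridge for this. A replacement sketch (example_flush_gtt_writes() is hypothetical):

static void example_flush_gtt_writes(void)
{
	wmb();				/* order the PTE writes first */
	intel_gtt_chipset_flush();	/* was drm_agp_chipset_flush(bridge) */
}
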
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 2d4e17a004db..952b3d4fb2a6 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -336,7 +336,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
336 struct drm_framebuffer *old_fb) 336 struct drm_framebuffer *old_fb)
337{ 337{
338 struct drm_device *dev = crtc->dev; 338 struct drm_device *dev = crtc->dev;
339 struct drm_display_mode *adjusted_mode, saved_mode; 339 struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
340 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; 340 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
341 struct drm_encoder_helper_funcs *encoder_funcs; 341 struct drm_encoder_helper_funcs *encoder_funcs;
342 int saved_x, saved_y; 342 int saved_x, saved_y;
@@ -350,6 +350,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
350 if (!crtc->enabled) 350 if (!crtc->enabled)
351 return true; 351 return true;
352 352
353 saved_hwmode = crtc->hwmode;
353 saved_mode = crtc->mode; 354 saved_mode = crtc->mode;
354 saved_x = crtc->x; 355 saved_x = crtc->x;
355 saved_y = crtc->y; 356 saved_y = crtc->y;
@@ -427,11 +428,21 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
427 428
428 } 429 }
429 430
431 /* Store real post-adjustment hardware mode. */
432 crtc->hwmode = *adjusted_mode;
433
434 /* Calculate and store various constants which
435 * are later needed by vblank and swap-completion
436 * timestamping. They are derived from true hwmode.
437 */
438 drm_calc_timestamping_constants(crtc);
439
430 /* XXX free adjustedmode */ 440 /* XXX free adjustedmode */
431 drm_mode_destroy(dev, adjusted_mode); 441 drm_mode_destroy(dev, adjusted_mode);
432 /* FIXME: add subpixel order */ 442 /* FIXME: add subpixel order */
433done: 443done:
434 if (!ret) { 444 if (!ret) {
445 crtc->hwmode = saved_hwmode;
435 crtc->mode = saved_mode; 446 crtc->mode = saved_mode;
436 crtc->x = saved_x; 447 crtc->x = saved_x;
437 crtc->y = saved_y; 448 crtc->y = saved_y;
@@ -650,6 +661,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
650 old_fb)) { 661 old_fb)) {
651 DRM_ERROR("failed to set mode on [CRTC:%d]\n", 662 DRM_ERROR("failed to set mode on [CRTC:%d]\n",
652 set->crtc->base.id); 663 set->crtc->base.id);
664 set->crtc->fb = old_fb;
653 ret = -EINVAL; 665 ret = -EINVAL;
654 goto fail; 666 goto fail;
655 } 667 }
@@ -664,8 +676,10 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
664 set->crtc->fb = set->fb; 676 set->crtc->fb = set->fb;
665 ret = crtc_funcs->mode_set_base(set->crtc, 677 ret = crtc_funcs->mode_set_base(set->crtc,
666 set->x, set->y, old_fb); 678 set->x, set->y, old_fb);
667 if (ret != 0) 679 if (ret != 0) {
680 set->crtc->fb = old_fb;
668 goto fail; 681 goto fail;
682 }
669 } 683 }
670 DRM_DEBUG_KMS("Setting connector DPMS state to on\n"); 684 DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
671 for (i = 0; i < set->num_connectors; i++) { 685 for (i = 0; i < set->num_connectors; i++) {
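
The hunks above all follow the same transactional pattern: snapshot the pieces of CRTC state the operation may clobber (mode, hwmode, x/y offsets, scanout fb), attempt the change, and restore the snapshot on any failure path so userspace never observes a half-applied configuration. A small standalone sketch of that pattern (struct names and fields here are illustrative, not the DRM ones):

#include <stdbool.h>
#include <stdio.h>

struct crtc_state {
	int mode;
	int x, y;
	int fb_id;
};

static bool hw_apply(const struct crtc_state *s)
{
	return s->mode != 0;	/* pretend mode 0 is rejected by hardware */
}

static bool set_mode(struct crtc_state *crtc, const struct crtc_state *req)
{
	struct crtc_state saved = *crtc;	/* snapshot before touching anything */

	*crtc = *req;
	if (!hw_apply(crtc)) {
		*crtc = saved;			/* roll back on failure */
		return false;
	}
	return true;
}

int main(void)
{
	struct crtc_state crtc = { .mode = 1, .x = 0, .y = 0, .fb_id = 7 };
	struct crtc_state bad = { .mode = 0, .x = 10, .y = 10, .fb_id = 9 };

	if (!set_mode(&crtc, &bad))
		printf("failed; fb restored to %d\n", crtc.fb_id);	/* 7 */
	return 0;
}
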
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index d2849e4ea4d0..0307d601f5e5 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -607,6 +607,25 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
607} 607}
608EXPORT_SYMBOL(drm_fb_helper_fini); 608EXPORT_SYMBOL(drm_fb_helper_fini);
609 609
610void drm_fb_helper_fill_fix(struct fb_info *info, struct drm_framebuffer *fb)
611{
612 info->fix.type = FB_TYPE_PACKED_PIXELS;
613 info->fix.visual = fb->depth == 8 ? FB_VISUAL_PSEUDOCOLOR :
614 FB_VISUAL_TRUECOLOR;
615 info->fix.mmio_start = 0;
616 info->fix.mmio_len = 0;
617 info->fix.type_aux = 0;
618 info->fix.xpanstep = 1; /* doing it in hw */
619 info->fix.ypanstep = 1; /* doing it in hw */
620 info->fix.ywrapstep = 0;
621 info->fix.accel = FB_ACCEL_NONE;
622 info->fix.type_aux = 0;
623
624 info->fix.line_length = fb->pitch;
625 return;
626}
627EXPORT_SYMBOL(drm_fb_helper_fill_fix);
628
610static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green, 629static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
611 u16 blue, u16 regno, struct fb_info *info) 630 u16 blue, u16 regno, struct fb_info *info)
612{ 631{
@@ -816,6 +835,7 @@ int drm_fb_helper_set_par(struct fb_info *info)
816 mutex_unlock(&dev->mode_config.mutex); 835 mutex_unlock(&dev->mode_config.mutex);
817 return ret; 836 return ret;
818 } 837 }
838 drm_fb_helper_fill_fix(info, fb_helper->fb);
819 } 839 }
820 mutex_unlock(&dev->mode_config.mutex); 840 mutex_unlock(&dev->mode_config.mutex);
821 841
@@ -953,6 +973,7 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
953 973
954 if (new_fb) { 974 if (new_fb) {
955 info->var.pixclock = 0; 975 info->var.pixclock = 0;
976 drm_fb_helper_fill_fix(info, fb_helper->fb);
956 if (register_framebuffer(info) < 0) { 977 if (register_framebuffer(info) < 0) {
957 return -EINVAL; 978 return -EINVAL;
958 } 979 }
@@ -979,24 +1000,6 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
979} 1000}
980EXPORT_SYMBOL(drm_fb_helper_single_fb_probe); 1001EXPORT_SYMBOL(drm_fb_helper_single_fb_probe);
981 1002
982void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
983 uint32_t depth)
984{
985 info->fix.type = FB_TYPE_PACKED_PIXELS;
986 info->fix.visual = depth == 8 ? FB_VISUAL_PSEUDOCOLOR :
987 FB_VISUAL_TRUECOLOR;
988 info->fix.type_aux = 0;
989 info->fix.xpanstep = 1; /* doing it in hw */
990 info->fix.ypanstep = 1; /* doing it in hw */
991 info->fix.ywrapstep = 0;
992 info->fix.accel = FB_ACCEL_NONE;
993 info->fix.type_aux = 0;
994
995 info->fix.line_length = pitch;
996 return;
997}
998EXPORT_SYMBOL(drm_fb_helper_fill_fix);
999
1000void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper, 1003void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
1001 uint32_t fb_width, uint32_t fb_height) 1004 uint32_t fb_width, uint32_t fb_height)
1002{ 1005{
@@ -1005,6 +1008,7 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helpe
1005 info->var.xres_virtual = fb->width; 1008 info->var.xres_virtual = fb->width;
1006 info->var.yres_virtual = fb->height; 1009 info->var.yres_virtual = fb->height;
1007 info->var.bits_per_pixel = fb->bits_per_pixel; 1010 info->var.bits_per_pixel = fb->bits_per_pixel;
1011 info->var.accel_flags = FB_ACCELF_TEXT;
1008 info->var.xoffset = 0; 1012 info->var.xoffset = 0;
1009 info->var.yoffset = 0; 1013 info->var.yoffset = 0;
1010 info->var.activate = FB_ACTIVATE_NOW; 1014 info->var.activate = FB_ACTIVATE_NOW;
@@ -1530,3 +1534,24 @@ bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
1530} 1534}
1531EXPORT_SYMBOL(drm_fb_helper_hotplug_event); 1535EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
1532 1536
1537/* The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EMBEDDED)
1538 * but the module doesn't depend on any fb console symbols. At least
1539 * attempt to load fbcon to avoid leaving the system without a usable console.
1540 */
1541#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EMBEDDED)
1542static int __init drm_fb_helper_modinit(void)
1543{
1544 const char *name = "fbcon";
1545 struct module *fbcon;
1546
1547 mutex_lock(&module_mutex);
1548 fbcon = find_module(name);
1549 mutex_unlock(&module_mutex);
1550
1551 if (!fbcon)
1552 request_module_nowait(name);
1553 return 0;
1554}
1555
1556module_init(drm_fb_helper_modinit);
1557#endif
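
The modinit above probes for an already-loaded fbcon via find_module() before requesting it, so the request is a no-op where the console is built in or already present. A userspace analogue of the same load-only-if-absent check, scanning /proc/modules (this is a sketch of the idea, not how the kernel-side lookup works internally):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Return 1 if "name" appears as a loaded module in /proc/modules. */
static int module_loaded(const char *name)
{
	char line[256];
	size_t len = strlen(name);
	FILE *f = fopen("/proc/modules", "r");

	if (!f)
		return 0;
	while (fgets(line, sizeof(line), f)) {
		if (!strncmp(line, name, len) && line[len] == ' ') {
			fclose(f);
			return 1;
		}
	}
	fclose(f);
	return 0;
}

int main(void)
{
	if (!module_loaded("fbcon"))
		return system("modprobe fbcon");	/* fire and forget, like request_module_nowait() */
	return 0;
}
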
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index a39794bac04b..2ec7d48fc4a8 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -236,6 +236,8 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
236 return -EBUSY; /* No exclusive opens */ 236 return -EBUSY; /* No exclusive opens */
237 if (!drm_cpu_valid()) 237 if (!drm_cpu_valid())
238 return -EINVAL; 238 return -EINVAL;
239 if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
240 return -EINVAL;
239 241
240 DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id); 242 DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id);
241 243
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 16d5155edad1..0054e957203f 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -40,6 +40,22 @@
40#include <linux/slab.h> 40#include <linux/slab.h>
41 41
42#include <linux/vgaarb.h> 42#include <linux/vgaarb.h>
43
44/* Access macro for slots in vblank timestamp ringbuffer. */
45#define vblanktimestamp(dev, crtc, count) ( \
46 (dev)->_vblank_time[(crtc) * DRM_VBLANKTIME_RBSIZE + \
47 ((count) % DRM_VBLANKTIME_RBSIZE)])
48
49/* Retry timestamp calculation up to 3 times to satisfy
50 * drm_timestamp_precision before giving up.
51 */
52#define DRM_TIMESTAMP_MAXRETRIES 3
53
54/* Threshold in nanoseconds for detection of redundant
55 * vblank irq in drm_handle_vblank(). 1 msec should be ok.
56 */
57#define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000
58
43/** 59/**
44 * Get interrupt from bus id. 60 * Get interrupt from bus id.
45 * 61 *
@@ -77,6 +93,87 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
77 return 0; 93 return 0;
78} 94}
79 95
96/*
97 * Clear vblank timestamp buffer for a crtc.
98 */
99static void clear_vblank_timestamps(struct drm_device *dev, int crtc)
100{
101 memset(&dev->_vblank_time[crtc * DRM_VBLANKTIME_RBSIZE], 0,
102 DRM_VBLANKTIME_RBSIZE * sizeof(struct timeval));
103}
104
105/*
106 * Disable vblank irq's on crtc, make sure that last vblank count
107 * of hardware and corresponding consistent software vblank counter
108 * are preserved, even if there are any spurious vblank irq's after
109 * disable.
110 */
111static void vblank_disable_and_save(struct drm_device *dev, int crtc)
112{
113 unsigned long irqflags;
114 u32 vblcount;
115 s64 diff_ns;
116 int vblrc;
117 struct timeval tvblank;
118
119 /* Prevent vblank irq processing while disabling vblank irqs,
120 * so no updates of timestamps or count can happen after we've
121 * disabled. Needed to prevent races in case of delayed irq's.
122 * Disable preemption, so vblank_time_lock is held as short as
123 * possible, even under a kernel with PREEMPT_RT patches.
124 */
125 preempt_disable();
126 spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
127
128 dev->driver->disable_vblank(dev, crtc);
129 dev->vblank_enabled[crtc] = 0;
130
131 /* No further vblank irq's will be processed after
132 * this point. Get current hardware vblank count and
133 * vblank timestamp, repeat until they are consistent.
134 *
135 * FIXME: There is still a race condition here and in
136 * drm_update_vblank_count() which can cause off-by-one
137 * reinitialization of software vblank counter. If gpu
138 * vblank counter doesn't increment exactly at the leading
139 * edge of a vblank interval, then we can lose 1 count if
140 * we happen to execute between start of vblank and the
141 * delayed gpu counter increment.
142 */
143 do {
144 dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
145 vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
146 } while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc));
147
148 /* Compute time difference to stored timestamp of last vblank
149 * as updated by last invocation of drm_handle_vblank() in vblank irq.
150 */
151 vblcount = atomic_read(&dev->_vblank_count[crtc]);
152 diff_ns = timeval_to_ns(&tvblank) -
153 timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
154
155 /* If there is at least 1 msec difference between the last stored
156 * timestamp and tvblank, then we are currently executing our
157 * disable inside a new vblank interval, the tvblank timestamp
158 * corresponds to this new vblank interval and the irq handler
159 * for this vblank didn't run yet and won't run due to our disable.
160 * Therefore we need to do the job of drm_handle_vblank() and
161 * increment the vblank counter by one to account for this vblank.
162 *
163 * Skip this step if there isn't any high precision timestamp
164 * available. In that case we can't account for this and just
165 * hope for the best.
166 */
167 if ((vblrc > 0) && (abs(diff_ns) > 1000000))
168 atomic_inc(&dev->_vblank_count[crtc]);
169
170 /* Invalidate all timestamps while vblank irq's are off. */
171 clear_vblank_timestamps(dev, crtc);
172
173 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
174 preempt_enable();
175}
176
80static void vblank_disable_fn(unsigned long arg) 177static void vblank_disable_fn(unsigned long arg)
81{ 178{
82 struct drm_device *dev = (struct drm_device *)arg; 179 struct drm_device *dev = (struct drm_device *)arg;
@@ -91,10 +188,7 @@ static void vblank_disable_fn(unsigned long arg)
91 if (atomic_read(&dev->vblank_refcount[i]) == 0 && 188 if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
92 dev->vblank_enabled[i]) { 189 dev->vblank_enabled[i]) {
93 DRM_DEBUG("disabling vblank on crtc %d\n", i); 190 DRM_DEBUG("disabling vblank on crtc %d\n", i);
94 dev->last_vblank[i] = 191 vblank_disable_and_save(dev, i);
95 dev->driver->get_vblank_counter(dev, i);
96 dev->driver->disable_vblank(dev, i);
97 dev->vblank_enabled[i] = 0;
98 } 192 }
99 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 193 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
100 } 194 }
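
vblank_disable_and_save() re-reads the hardware counter around the timestamp query until both reads agree, which guarantees the saved counter and timestamp describe the same vblank interval even if an increment lands mid-query. A standalone sketch of that consistency loop, with a fake counter standing in for the GPU register (all names here are assumptions):

#include <stdint.h>
#include <stdio.h>

static volatile uint32_t hw_counter;	/* stands in for the GPU vblank register */

static uint32_t read_hw_counter(void)
{
	return hw_counter;
}

static uint64_t read_timestamp(void)
{
	/* In the driver this is drm_get_last_vbltimestamp(); dummy here. */
	return 123456789ull + hw_counter;
}

int main(void)
{
	uint32_t count;
	uint64_t ts;

	/* Retry until the counter is unchanged across the timestamp query,
	 * i.e. no vblank fired while we were reading. */
	do {
		count = read_hw_counter();
		ts = read_timestamp();
	} while (count != read_hw_counter());

	printf("consistent pair: count=%u ts=%llu\n", count,
	       (unsigned long long)ts);
	return 0;
}
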
@@ -117,6 +211,7 @@ void drm_vblank_cleanup(struct drm_device *dev)
117 kfree(dev->last_vblank); 211 kfree(dev->last_vblank);
118 kfree(dev->last_vblank_wait); 212 kfree(dev->last_vblank_wait);
119 kfree(dev->vblank_inmodeset); 213 kfree(dev->vblank_inmodeset);
214 kfree(dev->_vblank_time);
120 215
121 dev->num_crtcs = 0; 216 dev->num_crtcs = 0;
122} 217}
@@ -129,6 +224,8 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
129 setup_timer(&dev->vblank_disable_timer, vblank_disable_fn, 224 setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
130 (unsigned long)dev); 225 (unsigned long)dev);
131 spin_lock_init(&dev->vbl_lock); 226 spin_lock_init(&dev->vbl_lock);
227 spin_lock_init(&dev->vblank_time_lock);
228
132 dev->num_crtcs = num_crtcs; 229 dev->num_crtcs = num_crtcs;
133 230
134 dev->vbl_queue = kmalloc(sizeof(wait_queue_head_t) * num_crtcs, 231 dev->vbl_queue = kmalloc(sizeof(wait_queue_head_t) * num_crtcs,
@@ -161,6 +258,19 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
161 if (!dev->vblank_inmodeset) 258 if (!dev->vblank_inmodeset)
162 goto err; 259 goto err;
163 260
261 dev->_vblank_time = kcalloc(num_crtcs * DRM_VBLANKTIME_RBSIZE,
262 sizeof(struct timeval), GFP_KERNEL);
263 if (!dev->_vblank_time)
264 goto err;
265
266 DRM_INFO("Supports vblank timestamp caching Rev 1 (10.10.2010).\n");
267
268 /* Driver specific high-precision vblank timestamping supported? */
269 if (dev->driver->get_vblank_timestamp)
270 DRM_INFO("Driver supports precise vblank timestamp query.\n");
271 else
272 DRM_INFO("No driver support for vblank timestamp query.\n");
273
164 /* Zero per-crtc vblank stuff */ 274 /* Zero per-crtc vblank stuff */
165 for (i = 0; i < num_crtcs; i++) { 275 for (i = 0; i < num_crtcs; i++) {
166 init_waitqueue_head(&dev->vbl_queue[i]); 276 init_waitqueue_head(&dev->vbl_queue[i]);
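
The vblanktimestamp() macro near the top of this file maps a (crtc, cooked count) pair onto a slot in the flat _vblank_time array allocated above: each crtc owns DRM_VBLANKTIME_RBSIZE consecutive slots and the count is reduced modulo the ring size. A tiny standalone sketch of that indexing (RBSIZE 16 is an assumed value, not necessarily what DRM_VBLANKTIME_RBSIZE is):

#include <stdio.h>

#define RBSIZE 16	/* assumed ring size per crtc */

static int slot(int crtc, unsigned int count)
{
	return crtc * RBSIZE + (count % RBSIZE);
}

int main(void)
{
	/* Successive counts on crtc 1 walk its 16-slot window and wrap. */
	printf("crtc 1, count 5  -> slot %d\n", slot(1, 5));	/* 21 */
	printf("crtc 1, count 21 -> slot %d\n", slot(1, 21));	/* 21 again */
	printf("crtc 0, count 5  -> slot %d\n", slot(0, 5));	/* 5 */
	return 0;
}
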
@@ -279,7 +389,7 @@ EXPORT_SYMBOL(drm_irq_install);
279 * 389 *
280 * Calls the driver's \c drm_driver_irq_uninstall() function, and stops the irq. 390 * Calls the driver's \c drm_driver_irq_uninstall() function, and stops the irq.
281 */ 391 */
282int drm_irq_uninstall(struct drm_device * dev) 392int drm_irq_uninstall(struct drm_device *dev)
283{ 393{
284 unsigned long irqflags; 394 unsigned long irqflags;
285 int irq_enabled, i; 395 int irq_enabled, i;
@@ -335,7 +445,9 @@ int drm_control(struct drm_device *dev, void *data,
335{ 445{
336 struct drm_control *ctl = data; 446 struct drm_control *ctl = data;
337 447
338 /* if we haven't irq we fallback for compatibility reasons - this used to be a separate function in drm_dma.h */ 448 /* if we don't have an irq we fall back for compatibility reasons -
449 * this used to be a separate function in drm_dma.h
450 */
339 451
340 452
341 switch (ctl->func) { 453 switch (ctl->func) {
@@ -360,6 +472,287 @@ int drm_control(struct drm_device *dev, void *data,
360} 472}
361 473
362/** 474/**
475 * drm_calc_timestamping_constants - Calculate and
476 * store various constants which are later needed by
 477 * vblank and swap-completion timestamping, e.g., by
478 * drm_calc_vbltimestamp_from_scanoutpos().
479 * They are derived from crtc's true scanout timing,
480 * so they take things like panel scaling or other
481 * adjustments into account.
482 *
 483 * @crtc: drm_crtc whose timestamp constants should be updated.
484 *
485 */
486void drm_calc_timestamping_constants(struct drm_crtc *crtc)
487{
488 s64 linedur_ns = 0, pixeldur_ns = 0, framedur_ns = 0;
489 u64 dotclock;
490
491 /* Dot clock in Hz: */
492 dotclock = (u64) crtc->hwmode.clock * 1000;
493
494 /* Valid dotclock? */
495 if (dotclock > 0) {
496 /* Convert scanline length in pixels and video dot clock to
497 * line duration, frame duration and pixel duration in
498 * nanoseconds:
499 */
500 pixeldur_ns = (s64) div64_u64(1000000000, dotclock);
501 linedur_ns = (s64) div64_u64(((u64) crtc->hwmode.crtc_htotal *
502 1000000000), dotclock);
503 framedur_ns = (s64) crtc->hwmode.crtc_vtotal * linedur_ns;
504 } else
505 DRM_ERROR("crtc %d: Can't calculate constants, dotclock = 0!\n",
506 crtc->base.id);
507
508 crtc->pixeldur_ns = pixeldur_ns;
509 crtc->linedur_ns = linedur_ns;
510 crtc->framedur_ns = framedur_ns;
511
512 DRM_DEBUG("crtc %d: hwmode: htotal %d, vtotal %d, vdisplay %d\n",
513 crtc->base.id, crtc->hwmode.crtc_htotal,
514 crtc->hwmode.crtc_vtotal, crtc->hwmode.crtc_vdisplay);
515 DRM_DEBUG("crtc %d: clock %d kHz framedur %d linedur %d, pixeldur %d\n",
516 crtc->base.id, (int) dotclock/1000, (int) framedur_ns,
517 (int) linedur_ns, (int) pixeldur_ns);
518}
519EXPORT_SYMBOL(drm_calc_timestamping_constants);
520
521/**
522 * drm_calc_vbltimestamp_from_scanoutpos - helper routine for kms
523 * drivers. Implements calculation of exact vblank timestamps from
524 * given drm_display_mode timings and current video scanout position
525 * of a crtc. This can be called from within get_vblank_timestamp()
526 * implementation of a kms driver to implement the actual timestamping.
527 *
528 * Should return timestamps conforming to the OML_sync_control OpenML
529 * extension specification. The timestamp corresponds to the end of
530 * the vblank interval, aka start of scanout of topmost-leftmost display
531 * pixel in the following video frame.
532 *
533 * Requires support for optional dev->driver->get_scanout_position()
534 * in kms driver, plus a bit of setup code to provide a drm_display_mode
535 * that corresponds to the true scanout timing.
536 *
537 * The current implementation only handles standard video modes. It
538 * returns as no operation if a doublescan or interlaced video mode is
539 * active. Higher level code is expected to handle this.
540 *
541 * @dev: DRM device.
542 * @crtc: Which crtc's vblank timestamp to retrieve.
543 * @max_error: Desired maximum allowable error in timestamps (nanosecs).
544 * On return contains true maximum error of timestamp.
545 * @vblank_time: Pointer to struct timeval which should receive the timestamp.
546 * @flags: Flags to pass to driver:
547 * 0 = Default.
548 * DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler.
549 * @refcrtc: drm_crtc* of crtc which defines scanout timing.
550 *
551 * Returns negative value on error, failure or if not supported in current
552 * video mode:
553 *
554 * -EINVAL - Invalid crtc.
555 * -EAGAIN - Temporary unavailable, e.g., called before initial modeset.
556 * -ENOTSUPP - Function not supported in current display mode.
557 * -EIO - Failed, e.g., due to failed scanout position query.
558 *
559 * Returns or'ed positive status flags on success:
560 *
561 * DRM_VBLANKTIME_SCANOUTPOS_METHOD - Signal this method used for timestamping.
562 * DRM_VBLANKTIME_INVBL - Timestamp taken while scanout was in vblank interval.
563 *
564 */
565int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
566 int *max_error,
567 struct timeval *vblank_time,
568 unsigned flags,
569 struct drm_crtc *refcrtc)
570{
571 struct timeval stime, raw_time;
572 struct drm_display_mode *mode;
573 int vbl_status, vtotal, vdisplay;
574 int vpos, hpos, i;
575 s64 framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns;
576 bool invbl;
577
578 if (crtc < 0 || crtc >= dev->num_crtcs) {
579 DRM_ERROR("Invalid crtc %d\n", crtc);
580 return -EINVAL;
581 }
582
583 /* Scanout position query not supported? Should not happen. */
584 if (!dev->driver->get_scanout_position) {
585 DRM_ERROR("Called from driver w/o get_scanout_position()!?\n");
586 return -EIO;
587 }
588
589 mode = &refcrtc->hwmode;
590 vtotal = mode->crtc_vtotal;
591 vdisplay = mode->crtc_vdisplay;
592
593 /* Durations of frames, lines, pixels in nanoseconds. */
594 framedur_ns = refcrtc->framedur_ns;
595 linedur_ns = refcrtc->linedur_ns;
596 pixeldur_ns = refcrtc->pixeldur_ns;
597
598 /* If mode timing undefined, just return as no-op:
599 * Happens during initial modesetting of a crtc.
600 */
601 if (vtotal <= 0 || vdisplay <= 0 || framedur_ns == 0) {
602 DRM_DEBUG("crtc %d: Noop due to uninitialized mode.\n", crtc);
603 return -EAGAIN;
604 }
605
606 /* Don't know yet how to handle interlaced or
607 * double scan modes. Just no-op for now.
608 */
609 if (mode->flags & (DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN)) {
610 DRM_DEBUG("crtc %d: Noop due to unsupported mode.\n", crtc);
611 return -ENOTSUPP;
612 }
613
614 /* Get current scanout position with system timestamp.
615 * Repeat query up to DRM_TIMESTAMP_MAXRETRIES times
616 * if single query takes longer than max_error nanoseconds.
617 *
618 * This guarantees a tight bound on maximum error if
619 * code gets preempted or delayed for some reason.
620 */
621 for (i = 0; i < DRM_TIMESTAMP_MAXRETRIES; i++) {
622 /* Disable preemption to make it very likely to
623 * succeed in the first iteration even on PREEMPT_RT kernel.
624 */
625 preempt_disable();
626
627 /* Get system timestamp before query. */
628 do_gettimeofday(&stime);
629
630 /* Get vertical and horizontal scanout pos. vpos, hpos. */
631 vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos, &hpos);
632
633 /* Get system timestamp after query. */
634 do_gettimeofday(&raw_time);
635
636 preempt_enable();
637
638 /* Return as no-op if scanout query unsupported or failed. */
639 if (!(vbl_status & DRM_SCANOUTPOS_VALID)) {
640 DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n",
641 crtc, vbl_status);
642 return -EIO;
643 }
644
645 duration_ns = timeval_to_ns(&raw_time) - timeval_to_ns(&stime);
646
647 /* Accept result with < max_error nsecs timing uncertainty. */
648 if (duration_ns <= (s64) *max_error)
649 break;
650 }
651
652 /* Noisy system timing? */
653 if (i == DRM_TIMESTAMP_MAXRETRIES) {
654 DRM_DEBUG("crtc %d: Noisy timestamp %d us > %d us [%d reps].\n",
655 crtc, (int) duration_ns/1000, *max_error/1000, i);
656 }
657
658 /* Return upper bound of timestamp precision error. */
659 *max_error = (int) duration_ns;
660
661 /* Check if in vblank area:
662 * vpos is >=0 in video scanout area, but negative
663 * within vblank area, counting down the number of lines until
664 * start of scanout.
665 */
666 invbl = vbl_status & DRM_SCANOUTPOS_INVBL;
667
668 /* Convert scanout position into elapsed time at raw_time query
669 * since start of scanout at first display scanline. delta_ns
670 * can be negative if start of scanout hasn't happened yet.
671 */
672 delta_ns = (s64) vpos * linedur_ns + (s64) hpos * pixeldur_ns;
673
674 /* Is vpos outside nominal vblank area, but less than
675 * 1/100 of a frame height away from start of vblank?
676 * If so, assume this isn't a massively delayed vblank
677 * interrupt, but a vblank interrupt that fired a few
678 * microseconds before true start of vblank. Compensate
679 * by adding a full frame duration to the final timestamp.
680 * Happens, e.g., on ATI R500, R600.
681 *
682 * We only do this if DRM_CALLED_FROM_VBLIRQ.
683 */
684 if ((flags & DRM_CALLED_FROM_VBLIRQ) && !invbl &&
685 ((vdisplay - vpos) < vtotal / 100)) {
686 delta_ns = delta_ns - framedur_ns;
687
688 /* Signal this correction as "applied". */
689 vbl_status |= 0x8;
690 }
691
692 /* Subtract time delta from raw timestamp to get final
693 * vblank_time timestamp for end of vblank.
694 */
695 *vblank_time = ns_to_timeval(timeval_to_ns(&raw_time) - delta_ns);
696
697 DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %d.%d -> %d.%d [e %d us, %d rep]\n",
698 crtc, (int) vbl_status, hpos, vpos, raw_time.tv_sec,
699 raw_time.tv_usec, vblank_time->tv_sec, vblank_time->tv_usec,
700 (int) duration_ns/1000, i);
701
702 vbl_status = DRM_VBLANKTIME_SCANOUTPOS_METHOD;
703 if (invbl)
704 vbl_status |= DRM_VBLANKTIME_INVBL;
705
706 return vbl_status;
707}
708EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos);
709
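
The core trick in drm_calc_vbltimestamp_from_scanoutpos() is bounding its own measurement error: it takes a system timestamp before and after the scanout-position query and retries until the two are closer together than the caller's max_error, so preemption in the middle of a query can never silently widen the error. A standalone sketch of that bounded-uncertainty loop using clock_gettime (the retry count matches DRM_TIMESTAMP_MAXRETRIES; the query function and error budget are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define MAXRETRIES 3

static int64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000000 + ts.tv_nsec;
}

static int query_position(void)
{
	return 42;	/* stands in for the driver's scanout-position read */
}

int main(void)
{
	int64_t before, after, duration = 0;
	int64_t max_error = 20000;	/* 20 usec, like the drm default */
	int pos = 0, i, tries;

	for (i = 0; i < MAXRETRIES; i++) {
		before = now_ns();
		pos = query_position();
		after = now_ns();

		duration = after - before;
		/* Accept only if our own overhead fits the error budget. */
		if (duration <= max_error)
			break;
	}
	tries = (i == MAXRETRIES) ? MAXRETRIES : i + 1;

	printf("pos=%d, uncertainty=%lld ns after %d quer%s\n",
	       pos, (long long)duration, tries, tries == 1 ? "y" : "ies");
	return 0;
}
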
710/**
711 * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
712 * vblank interval.
713 *
714 * @dev: DRM device
715 * @crtc: which crtc's vblank timestamp to retrieve
716 * @tvblank: Pointer to target struct timeval which should receive the timestamp
717 * @flags: Flags to pass to driver:
718 * 0 = Default.
719 * DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler.
720 *
721 * Fetches the system timestamp corresponding to the time of the most recent
722 * vblank interval on specified crtc. May call into kms-driver to
723 * compute the timestamp with a high-precision GPU specific method.
724 *
725 * Returns zero if timestamp originates from uncorrected do_gettimeofday()
726 * call, i.e., it isn't very precisely locked to the true vblank.
727 *
728 * Returns non-zero if timestamp is considered to be very precise.
729 */
730u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
731 struct timeval *tvblank, unsigned flags)
732{
733 int ret = 0;
734
735 /* Define requested maximum error on timestamps (nanoseconds). */
736 int max_error = (int) drm_timestamp_precision * 1000;
737
738 /* Query driver if possible and precision timestamping enabled. */
739 if (dev->driver->get_vblank_timestamp && (max_error > 0)) {
740 ret = dev->driver->get_vblank_timestamp(dev, crtc, &max_error,
741 tvblank, flags);
742 if (ret > 0)
743 return (u32) ret;
744 }
745
746 /* GPU high precision timestamp query unsupported or failed.
747 * Return gettimeofday timestamp as best estimate.
748 */
749 do_gettimeofday(tvblank);
750
751 return 0;
752}
753EXPORT_SYMBOL(drm_get_last_vbltimestamp);
754
755/**
363 * drm_vblank_count - retrieve "cooked" vblank counter value 756 * drm_vblank_count - retrieve "cooked" vblank counter value
364 * @dev: DRM device 757 * @dev: DRM device
365 * @crtc: which counter to retrieve 758 * @crtc: which counter to retrieve
@@ -375,6 +768,40 @@ u32 drm_vblank_count(struct drm_device *dev, int crtc)
375EXPORT_SYMBOL(drm_vblank_count); 768EXPORT_SYMBOL(drm_vblank_count);
376 769
377/** 770/**
771 * drm_vblank_count_and_time - retrieve "cooked" vblank counter value
772 * and the system timestamp corresponding to that vblank counter value.
773 *
774 * @dev: DRM device
775 * @crtc: which counter to retrieve
776 * @vblanktime: Pointer to struct timeval to receive the vblank timestamp.
777 *
778 * Fetches the "cooked" vblank count value that represents the number of
779 * vblank events since the system was booted, including lost events due to
780 * modesetting activity. Returns corresponding system timestamp of the time
 781 * of the vblank interval that corresponds to the current vblank
 782 * counter value.
783 */
784u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
785 struct timeval *vblanktime)
786{
787 u32 cur_vblank;
788
789 /* Read timestamp from slot of _vblank_time ringbuffer
790 * that corresponds to current vblank count. Retry if
791 * count has incremented during readout. This works like
792 * a seqlock.
793 */
794 do {
795 cur_vblank = atomic_read(&dev->_vblank_count[crtc]);
796 *vblanktime = vblanktimestamp(dev, crtc, cur_vblank);
797 smp_rmb();
798 } while (cur_vblank != atomic_read(&dev->_vblank_count[crtc]));
799
800 return cur_vblank;
801}
802EXPORT_SYMBOL(drm_vblank_count_and_time);
803
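
drm_vblank_count_and_time() pairs the counter with its timestamp lock-free: read the count, read the slot, then re-read the count and retry if it moved, with a read barrier keeping the loads ordered. A standalone C11 sketch of that seqlock-style reader (the writer function here exists only to make the demo self-contained):

#include <stdatomic.h>
#include <stdio.h>

#define RBSIZE 16

static atomic_uint vbl_count;
static unsigned long long vbl_time[RBSIZE];

/* Writer side: publish the timestamp first, then bump the count. */
static void handle_vblank(unsigned long long ts)
{
	unsigned int c = atomic_load(&vbl_count);

	vbl_time[(c + 1) % RBSIZE] = ts;
	atomic_fetch_add(&vbl_count, 1);	/* commits the timestamp */
}

/* Reader side: retry until the count is stable across the slot read. */
static unsigned int count_and_time(unsigned long long *ts)
{
	unsigned int c;

	do {
		c = atomic_load(&vbl_count);
		*ts = vbl_time[c % RBSIZE];
		atomic_thread_fence(memory_order_acquire);	/* like smp_rmb() */
	} while (c != atomic_load(&vbl_count));

	return c;
}

int main(void)
{
	unsigned long long ts;

	handle_vblank(1000);
	handle_vblank(1016);
	printf("count=%u ts=%llu\n", count_and_time(&ts), ts);	/* count=2 ts=1016 */
	return 0;
}
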
804/**
378 * drm_update_vblank_count - update the master vblank counter 805 * drm_update_vblank_count - update the master vblank counter
379 * @dev: DRM device 806 * @dev: DRM device
380 * @crtc: counter to update 807 * @crtc: counter to update
@@ -392,7 +819,8 @@ EXPORT_SYMBOL(drm_vblank_count);
392 */ 819 */
393static void drm_update_vblank_count(struct drm_device *dev, int crtc) 820static void drm_update_vblank_count(struct drm_device *dev, int crtc)
394{ 821{
395 u32 cur_vblank, diff; 822 u32 cur_vblank, diff, tslot, rc;
823 struct timeval t_vblank;
396 824
397 /* 825 /*
398 * Interrupts were disabled prior to this call, so deal with counter 826 * Interrupts were disabled prior to this call, so deal with counter
@@ -400,8 +828,18 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
400 * NOTE! It's possible we lost a full dev->max_vblank_count events 828 * NOTE! It's possible we lost a full dev->max_vblank_count events
401 * here if the register is small or we had vblank interrupts off for 829 * here if the register is small or we had vblank interrupts off for
402 * a long time. 830 * a long time.
831 *
832 * We repeat the hardware vblank counter & timestamp query until
833 * we get consistent results. This to prevent races between gpu
834 * updating its hardware counter while we are retrieving the
835 * corresponding vblank timestamp.
403 */ 836 */
404 cur_vblank = dev->driver->get_vblank_counter(dev, crtc); 837 do {
838 cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
839 rc = drm_get_last_vbltimestamp(dev, crtc, &t_vblank, 0);
840 } while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc));
841
842 /* Deal with counter wrap */
405 diff = cur_vblank - dev->last_vblank[crtc]; 843 diff = cur_vblank - dev->last_vblank[crtc];
406 if (cur_vblank < dev->last_vblank[crtc]) { 844 if (cur_vblank < dev->last_vblank[crtc]) {
407 diff += dev->max_vblank_count; 845 diff += dev->max_vblank_count;
@@ -413,6 +851,16 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
413 DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n", 851 DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
414 crtc, diff); 852 crtc, diff);
415 853
854 /* Reinitialize corresponding vblank timestamp if high-precision query
855 * available. Skip this step if query unsupported or failed. Will
856 * reinitialize delayed at next vblank interrupt in that case.
857 */
858 if (rc) {
859 tslot = atomic_read(&dev->_vblank_count[crtc]) + diff;
860 vblanktimestamp(dev, crtc, tslot) = t_vblank;
861 smp_wmb();
862 }
863
416 atomic_add(diff, &dev->_vblank_count[crtc]); 864 atomic_add(diff, &dev->_vblank_count[crtc]);
417} 865}
418 866
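
The wrap handling above relies on unsigned arithmetic: cur - last is computed modulo 2^32, and when the hardware register is narrower than 32 bits, max_vblank_count is added back to land in the right range. A worked standalone example (the 24-bit maximum is an assumption for the demo, not a statement about any particular GPU):

#include <stdint.h>
#include <stdio.h>

/* Cooked difference between two hardware counter reads, where the
 * hardware counter wraps at max_count (a power of two). */
static uint32_t vblank_diff(uint32_t cur, uint32_t last, uint32_t max_count)
{
	uint32_t diff = cur - last;	/* modulo-2^32 subtraction */

	if (cur < last)
		diff += max_count;	/* compensate for the narrow wrap */
	return diff;
}

int main(void)
{
	uint32_t max_count = 1u << 24;	/* e.g. a 24-bit frame counter */

	/* Counter wrapped from 0x00FFFFFE to 0x00000002: 4 vblanks passed. */
	printf("diff = %u\n", vblank_diff(2, 0x00FFFFFE, max_count));
	return 0;
}
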
@@ -429,15 +877,27 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
429 */ 877 */
430int drm_vblank_get(struct drm_device *dev, int crtc) 878int drm_vblank_get(struct drm_device *dev, int crtc)
431{ 879{
432 unsigned long irqflags; 880 unsigned long irqflags, irqflags2;
433 int ret = 0; 881 int ret = 0;
434 882
435 spin_lock_irqsave(&dev->vbl_lock, irqflags); 883 spin_lock_irqsave(&dev->vbl_lock, irqflags);
436 /* Going from 0->1 means we have to enable interrupts again */ 884 /* Going from 0->1 means we have to enable interrupts again */
437 if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) { 885 if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
886 /* Disable preemption while holding vblank_time_lock. Do
 887 * it explicitly to guard against a PREEMPT_RT kernel.
888 */
889 preempt_disable();
890 spin_lock_irqsave(&dev->vblank_time_lock, irqflags2);
438 if (!dev->vblank_enabled[crtc]) { 891 if (!dev->vblank_enabled[crtc]) {
892 /* Enable vblank irqs under vblank_time_lock protection.
893 * All vblank count & timestamp updates are held off
894 * until we are done reinitializing master counter and
 895 * timestamps. Filter code in drm_handle_vblank() will
 896 * prevent double-accounting of the same vblank interval.
897 */
439 ret = dev->driver->enable_vblank(dev, crtc); 898 ret = dev->driver->enable_vblank(dev, crtc);
440 DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret); 899 DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n",
900 crtc, ret);
441 if (ret) 901 if (ret)
442 atomic_dec(&dev->vblank_refcount[crtc]); 902 atomic_dec(&dev->vblank_refcount[crtc]);
443 else { 903 else {
@@ -445,6 +905,8 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
445 drm_update_vblank_count(dev, crtc); 905 drm_update_vblank_count(dev, crtc);
446 } 906 }
447 } 907 }
908 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2);
909 preempt_enable();
448 } else { 910 } else {
449 if (!dev->vblank_enabled[crtc]) { 911 if (!dev->vblank_enabled[crtc]) {
450 atomic_dec(&dev->vblank_refcount[crtc]); 912 atomic_dec(&dev->vblank_refcount[crtc]);
@@ -463,15 +925,17 @@ EXPORT_SYMBOL(drm_vblank_get);
463 * @crtc: which counter to give up 925 * @crtc: which counter to give up
464 * 926 *
465 * Release ownership of a given vblank counter, turning off interrupts 927 * Release ownership of a given vblank counter, turning off interrupts
466 * if possible. 928 * if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
467 */ 929 */
468void drm_vblank_put(struct drm_device *dev, int crtc) 930void drm_vblank_put(struct drm_device *dev, int crtc)
469{ 931{
470 BUG_ON (atomic_read (&dev->vblank_refcount[crtc]) == 0); 932 BUG_ON(atomic_read(&dev->vblank_refcount[crtc]) == 0);
471 933
472 /* Last user schedules interrupt disable */ 934 /* Last user schedules interrupt disable */
473 if (atomic_dec_and_test(&dev->vblank_refcount[crtc])) 935 if (atomic_dec_and_test(&dev->vblank_refcount[crtc]) &&
474 mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ); 936 (drm_vblank_offdelay > 0))
937 mod_timer(&dev->vblank_disable_timer,
938 jiffies + ((drm_vblank_offdelay * DRM_HZ)/1000));
475} 939}
476EXPORT_SYMBOL(drm_vblank_put); 940EXPORT_SYMBOL(drm_vblank_put);
477 941
@@ -480,10 +944,8 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
480 unsigned long irqflags; 944 unsigned long irqflags;
481 945
482 spin_lock_irqsave(&dev->vbl_lock, irqflags); 946 spin_lock_irqsave(&dev->vbl_lock, irqflags);
483 dev->driver->disable_vblank(dev, crtc); 947 vblank_disable_and_save(dev, crtc);
484 DRM_WAKEUP(&dev->vbl_queue[crtc]); 948 DRM_WAKEUP(&dev->vbl_queue[crtc]);
485 dev->vblank_enabled[crtc] = 0;
486 dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
487 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 949 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
488} 950}
489EXPORT_SYMBOL(drm_vblank_off); 951EXPORT_SYMBOL(drm_vblank_off);
@@ -602,7 +1064,6 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
602 e->base.file_priv = file_priv; 1064 e->base.file_priv = file_priv;
603 e->base.destroy = (void (*) (struct drm_pending_event *)) kfree; 1065 e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
604 1066
605 do_gettimeofday(&now);
606 spin_lock_irqsave(&dev->event_lock, flags); 1067 spin_lock_irqsave(&dev->event_lock, flags);
607 1068
608 if (file_priv->event_space < sizeof e->event) { 1069 if (file_priv->event_space < sizeof e->event) {
@@ -611,7 +1072,8 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
611 } 1072 }
612 1073
613 file_priv->event_space -= sizeof e->event; 1074 file_priv->event_space -= sizeof e->event;
614 seq = drm_vblank_count(dev, pipe); 1075 seq = drm_vblank_count_and_time(dev, pipe, &now);
1076
615 if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) && 1077 if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&
616 (seq - vblwait->request.sequence) <= (1 << 23)) { 1078 (seq - vblwait->request.sequence) <= (1 << 23)) {
617 vblwait->request.sequence = seq + 1; 1079 vblwait->request.sequence = seq + 1;
@@ -626,15 +1088,18 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
626 1088
627 e->event.sequence = vblwait->request.sequence; 1089 e->event.sequence = vblwait->request.sequence;
628 if ((seq - vblwait->request.sequence) <= (1 << 23)) { 1090 if ((seq - vblwait->request.sequence) <= (1 << 23)) {
1091 e->event.sequence = seq;
629 e->event.tv_sec = now.tv_sec; 1092 e->event.tv_sec = now.tv_sec;
630 e->event.tv_usec = now.tv_usec; 1093 e->event.tv_usec = now.tv_usec;
631 drm_vblank_put(dev, pipe); 1094 drm_vblank_put(dev, pipe);
632 list_add_tail(&e->base.link, &e->base.file_priv->event_list); 1095 list_add_tail(&e->base.link, &e->base.file_priv->event_list);
633 wake_up_interruptible(&e->base.file_priv->event_wait); 1096 wake_up_interruptible(&e->base.file_priv->event_wait);
1097 vblwait->reply.sequence = seq;
634 trace_drm_vblank_event_delivered(current->pid, pipe, 1098 trace_drm_vblank_event_delivered(current->pid, pipe,
635 vblwait->request.sequence); 1099 vblwait->request.sequence);
636 } else { 1100 } else {
637 list_add_tail(&e->base.link, &dev->vblank_event_list); 1101 list_add_tail(&e->base.link, &dev->vblank_event_list);
1102 vblwait->reply.sequence = vblwait->request.sequence;
638 } 1103 }
639 1104
640 spin_unlock_irqrestore(&dev->event_lock, flags); 1105 spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -727,11 +1192,10 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
727 if (ret != -EINTR) { 1192 if (ret != -EINTR) {
728 struct timeval now; 1193 struct timeval now;
729 1194
730 do_gettimeofday(&now); 1195 vblwait->reply.sequence = drm_vblank_count_and_time(dev, crtc, &now);
731
732 vblwait->reply.tval_sec = now.tv_sec; 1196 vblwait->reply.tval_sec = now.tv_sec;
733 vblwait->reply.tval_usec = now.tv_usec; 1197 vblwait->reply.tval_usec = now.tv_usec;
734 vblwait->reply.sequence = drm_vblank_count(dev, crtc); 1198
735 DRM_DEBUG("returning %d to client\n", 1199 DRM_DEBUG("returning %d to client\n",
736 vblwait->reply.sequence); 1200 vblwait->reply.sequence);
737 } else { 1201 } else {
@@ -750,8 +1214,7 @@ void drm_handle_vblank_events(struct drm_device *dev, int crtc)
750 unsigned long flags; 1214 unsigned long flags;
751 unsigned int seq; 1215 unsigned int seq;
752 1216
753 do_gettimeofday(&now); 1217 seq = drm_vblank_count_and_time(dev, crtc, &now);
754 seq = drm_vblank_count(dev, crtc);
755 1218
756 spin_lock_irqsave(&dev->event_lock, flags); 1219 spin_lock_irqsave(&dev->event_lock, flags);
757 1220
@@ -789,11 +1252,64 @@ void drm_handle_vblank_events(struct drm_device *dev, int crtc)
789 */ 1252 */
790void drm_handle_vblank(struct drm_device *dev, int crtc) 1253void drm_handle_vblank(struct drm_device *dev, int crtc)
791{ 1254{
1255 u32 vblcount;
1256 s64 diff_ns;
1257 struct timeval tvblank;
1258 unsigned long irqflags;
1259
792 if (!dev->num_crtcs) 1260 if (!dev->num_crtcs)
793 return; 1261 return;
794 1262
795 atomic_inc(&dev->_vblank_count[crtc]); 1263 /* Need timestamp lock to prevent concurrent execution with
1264 * vblank enable/disable, as this would cause inconsistent
1265 * or corrupted timestamps and vblank counts.
1266 */
1267 spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
1268
1269 /* Vblank irq handling disabled. Nothing to do. */
1270 if (!dev->vblank_enabled[crtc]) {
1271 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
1272 return;
1273 }
1274
1275 /* Fetch corresponding timestamp for this vblank interval from
1276 * driver and store it in proper slot of timestamp ringbuffer.
1277 */
1278
1279 /* Get current timestamp and count. */
1280 vblcount = atomic_read(&dev->_vblank_count[crtc]);
1281 drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ);
1282
1283 /* Compute time difference to timestamp of last vblank */
1284 diff_ns = timeval_to_ns(&tvblank) -
1285 timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
1286
1287 /* Update vblank timestamp and count if at least
1288 * DRM_REDUNDANT_VBLIRQ_THRESH_NS nanoseconds
1289 * difference between last stored timestamp and current
1290 * timestamp. A smaller difference means basically
1291 * identical timestamps. Happens if this vblank has
 1292 * already been processed and this is a redundant call,
1293 * e.g., due to spurious vblank interrupts. We need to
1294 * ignore those for accounting.
1295 */
1296 if (abs(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) {
1297 /* Store new timestamp in ringbuffer. */
1298 vblanktimestamp(dev, crtc, vblcount + 1) = tvblank;
1299 smp_wmb();
1300
1301 /* Increment cooked vblank count. This also atomically commits
1302 * the timestamp computed above.
1303 */
1304 atomic_inc(&dev->_vblank_count[crtc]);
1305 } else {
1306 DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
1307 crtc, (int) diff_ns);
1308 }
1309
796 DRM_WAKEUP(&dev->vbl_queue[crtc]); 1310 DRM_WAKEUP(&dev->vbl_queue[crtc]);
797 drm_handle_vblank_events(dev, crtc); 1311 drm_handle_vblank_events(dev, crtc);
1312
1313 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
798} 1314}
799EXPORT_SYMBOL(drm_handle_vblank); 1315EXPORT_SYMBOL(drm_handle_vblank);
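
drm_handle_vblank() now accepts an interrupt as a new vblank only if its timestamp lies more than DRM_REDUNDANT_VBLIRQ_THRESH_NS away from the last stored one; anything closer is treated as a spurious re-fire of the same interval. A standalone sketch of that filter (the timestamps in the demo are made up, the 1 msec threshold matches the driver):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define REDUNDANT_THRESH_NS 1000000	/* 1 msec, as in the driver */

static int64_t last_ts_ns;
static unsigned int cooked_count;

static void handle_vblank(int64_t ts_ns)
{
	int64_t diff_ns = ts_ns - last_ts_ns;

	if (llabs(diff_ns) > REDUNDANT_THRESH_NS) {
		last_ts_ns = ts_ns;	/* store timestamp, then commit count */
		cooked_count++;
	} else {
		printf("redundant irq ignored, diff_ns=%lld\n",
		       (long long)diff_ns);
	}
}

int main(void)
{
	handle_vblank(16666667);	/* first vblank, ~16.6 ms in */
	handle_vblank(16666900);	/* spurious re-fire 233 ns later */
	handle_vblank(33333334);	/* genuine next frame */
	printf("cooked count = %u\n", cooked_count);	/* 2 */
	return 0;
}
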
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index a6bfc302ed90..c59515ba7e69 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -392,10 +392,36 @@ void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
392 mm->scanned_blocks = 0; 392 mm->scanned_blocks = 0;
393 mm->scan_hit_start = 0; 393 mm->scan_hit_start = 0;
394 mm->scan_hit_size = 0; 394 mm->scan_hit_size = 0;
395 mm->scan_check_range = 0;
395} 396}
396EXPORT_SYMBOL(drm_mm_init_scan); 397EXPORT_SYMBOL(drm_mm_init_scan);
397 398
398/** 399/**
 400 * Initialize lru scanning.
401 *
402 * This simply sets up the scanning routines with the parameters for the desired
403 * hole. This version is for range-restricted scans.
404 *
405 * Warning: As long as the scan list is non-empty, no other operations than
406 * adding/removing nodes to/from the scan list are allowed.
407 */
408void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
409 unsigned alignment,
410 unsigned long start,
411 unsigned long end)
412{
413 mm->scan_alignment = alignment;
414 mm->scan_size = size;
415 mm->scanned_blocks = 0;
416 mm->scan_hit_start = 0;
417 mm->scan_hit_size = 0;
418 mm->scan_start = start;
419 mm->scan_end = end;
420 mm->scan_check_range = 1;
421}
422EXPORT_SYMBOL(drm_mm_init_scan_with_range);
423
424/**
399 * Add a node to the scan list that might be freed to make space for the desired 425 * Add a node to the scan list that might be freed to make space for the desired
400 * hole. 426 * hole.
401 * 427 *
@@ -406,6 +432,8 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
406 struct drm_mm *mm = node->mm; 432 struct drm_mm *mm = node->mm;
407 struct list_head *prev_free, *next_free; 433 struct list_head *prev_free, *next_free;
408 struct drm_mm_node *prev_node, *next_node; 434 struct drm_mm_node *prev_node, *next_node;
435 unsigned long adj_start;
436 unsigned long adj_end;
409 437
410 mm->scanned_blocks++; 438 mm->scanned_blocks++;
411 439
@@ -452,7 +480,17 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
452 node->free_stack.prev = prev_free; 480 node->free_stack.prev = prev_free;
453 node->free_stack.next = next_free; 481 node->free_stack.next = next_free;
454 482
455 if (check_free_hole(node->start, node->start + node->size, 483 if (mm->scan_check_range) {
484 adj_start = node->start < mm->scan_start ?
485 mm->scan_start : node->start;
486 adj_end = node->start + node->size > mm->scan_end ?
487 mm->scan_end : node->start + node->size;
488 } else {
489 adj_start = node->start;
490 adj_end = node->start + node->size;
491 }
492
 493 if (check_free_hole(adj_start, adj_end,
456 mm->scan_size, mm->scan_alignment)) { 494 mm->scan_size, mm->scan_alignment)) {
457 mm->scan_hit_start = node->start; 495 mm->scan_hit_start = node->start;
458 mm->scan_hit_size = node->size; 496 mm->scan_hit_size = node->size;
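
The range-restricted scan clamps each candidate hole to the [scan_start, scan_end) window before testing it, so a node that only partially overlaps the window contributes only the overlapping part. A standalone sketch of that clamping, with check_free_hole() reduced to a plain size test (types and names are illustrative):

#include <stdbool.h>
#include <stdio.h>

struct range { unsigned long start, end; };

static bool hole_fits(struct range node, struct range scan,
		      unsigned long want)
{
	/* Clamp the node to the scan window, as drm_mm_scan_add_block()
	 * does when scan_check_range is set. */
	unsigned long adj_start = node.start < scan.start ?
				  scan.start : node.start;
	unsigned long adj_end = node.end > scan.end ? scan.end : node.end;

	if (adj_start >= adj_end)
		return false;	/* no overlap with the window at all */
	return adj_end - adj_start >= want;
}

int main(void)
{
	struct range scan = { 4096, 8192 };
	struct range node = { 0, 6144 };	/* only 2048 bytes overlap */

	printf("fits 2048: %d\n", hole_fits(node, scan, 2048));	/* 1 */
	printf("fits 4096: %d\n", hole_fits(node, scan, 4096));	/* 0 */
	return 0;
}
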
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index cdc89ee042cc..d59edc18301f 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -40,12 +40,22 @@
40unsigned int drm_debug = 0; /* 1 to enable debug output */ 40unsigned int drm_debug = 0; /* 1 to enable debug output */
41EXPORT_SYMBOL(drm_debug); 41EXPORT_SYMBOL(drm_debug);
42 42
43unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
44EXPORT_SYMBOL(drm_vblank_offdelay);
45
46unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
47EXPORT_SYMBOL(drm_timestamp_precision);
48
43MODULE_AUTHOR(CORE_AUTHOR); 49MODULE_AUTHOR(CORE_AUTHOR);
44MODULE_DESCRIPTION(CORE_DESC); 50MODULE_DESCRIPTION(CORE_DESC);
45MODULE_LICENSE("GPL and additional rights"); 51MODULE_LICENSE("GPL and additional rights");
46MODULE_PARM_DESC(debug, "Enable debug output"); 52MODULE_PARM_DESC(debug, "Enable debug output");
53MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
54MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
47 55
48module_param_named(debug, drm_debug, int, 0600); 56module_param_named(debug, drm_debug, int, 0600);
57module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
58module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
49 59
50struct idr drm_minors_idr; 60struct idr drm_minors_idr;
51 61
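
The two parameters above follow the standard pattern for a tunable exposed under /sys/module/drm/parameters/: a plain global variable, module_param_named() binding it to a parameter name, and MODULE_PARM_DESC() for modinfo. A minimal module skeleton showing that pattern (the module and parameter names are made up for illustration):

#include <linux/init.h>
#include <linux/module.h>

static int demo_offdelay = 5000;	/* default, in milliseconds */
module_param_named(offdelay, demo_offdelay, int, 0600);
MODULE_PARM_DESC(offdelay, "Demo delay in msecs");

static int __init demo_init(void)
{
	pr_info("demo: offdelay=%d\n", demo_offdelay);
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
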
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index fdc833d5cc7b..0ae6a7c5020f 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -9,6 +9,8 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
9 i915_gem.o \ 9 i915_gem.o \
10 i915_gem_debug.o \ 10 i915_gem_debug.o \
11 i915_gem_evict.o \ 11 i915_gem_evict.o \
12 i915_gem_execbuffer.o \
13 i915_gem_gtt.o \
12 i915_gem_tiling.o \ 14 i915_gem_tiling.o \
13 i915_trace_points.o \ 15 i915_trace_points.o \
14 intel_display.o \ 16 intel_display.o \
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 1f4f3ceb63c7..92f75782c332 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -32,6 +32,7 @@
32#include "drmP.h" 32#include "drmP.h"
33#include "drm.h" 33#include "drm.h"
34#include "intel_drv.h" 34#include "intel_drv.h"
35#include "intel_ringbuffer.h"
35#include "i915_drm.h" 36#include "i915_drm.h"
36#include "i915_drv.h" 37#include "i915_drv.h"
37 38
@@ -72,7 +73,6 @@ static int i915_capabilities(struct seq_file *m, void *data)
72 B(is_broadwater); 73 B(is_broadwater);
73 B(is_crestline); 74 B(is_crestline);
74 B(has_fbc); 75 B(has_fbc);
75 B(has_rc6);
76 B(has_pipe_cxsr); 76 B(has_pipe_cxsr);
77 B(has_hotplug); 77 B(has_hotplug);
78 B(cursor_needs_physical); 78 B(cursor_needs_physical);
@@ -86,19 +86,19 @@ static int i915_capabilities(struct seq_file *m, void *data)
86 return 0; 86 return 0;
87} 87}
88 88
89static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv) 89static const char *get_pin_flag(struct drm_i915_gem_object *obj)
90{ 90{
91 if (obj_priv->user_pin_count > 0) 91 if (obj->user_pin_count > 0)
92 return "P"; 92 return "P";
93 else if (obj_priv->pin_count > 0) 93 else if (obj->pin_count > 0)
94 return "p"; 94 return "p";
95 else 95 else
96 return " "; 96 return " ";
97} 97}
98 98
99static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv) 99static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
100{ 100{
101 switch (obj_priv->tiling_mode) { 101 switch (obj->tiling_mode) {
102 default: 102 default:
103 case I915_TILING_NONE: return " "; 103 case I915_TILING_NONE: return " ";
104 case I915_TILING_X: return "X"; 104 case I915_TILING_X: return "X";
@@ -109,7 +109,7 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
109static void 109static void
110describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) 110describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
111{ 111{
112 seq_printf(m, "%p: %s%s %8zd %08x %08x %d%s%s", 112 seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s",
113 &obj->base, 113 &obj->base,
114 get_pin_flag(obj), 114 get_pin_flag(obj),
115 get_tiling_flag(obj), 115 get_tiling_flag(obj),
@@ -117,6 +117,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
117 obj->base.read_domains, 117 obj->base.read_domains,
118 obj->base.write_domain, 118 obj->base.write_domain,
119 obj->last_rendering_seqno, 119 obj->last_rendering_seqno,
120 obj->last_fenced_seqno,
120 obj->dirty ? " dirty" : "", 121 obj->dirty ? " dirty" : "",
121 obj->madv == I915_MADV_DONTNEED ? " purgeable" : ""); 122 obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
122 if (obj->base.name) 123 if (obj->base.name)
@@ -124,7 +125,17 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
124 if (obj->fence_reg != I915_FENCE_REG_NONE) 125 if (obj->fence_reg != I915_FENCE_REG_NONE)
125 seq_printf(m, " (fence: %d)", obj->fence_reg); 126 seq_printf(m, " (fence: %d)", obj->fence_reg);
126 if (obj->gtt_space != NULL) 127 if (obj->gtt_space != NULL)
127 seq_printf(m, " (gtt_offset: %08x)", obj->gtt_offset); 128 seq_printf(m, " (gtt offset: %08x, size: %08x)",
129 obj->gtt_offset, (unsigned int)obj->gtt_space->size);
130 if (obj->pin_mappable || obj->fault_mappable) {
131 char s[3], *t = s;
132 if (obj->pin_mappable)
133 *t++ = 'p';
134 if (obj->fault_mappable)
135 *t++ = 'f';
136 *t = '\0';
137 seq_printf(m, " (%s mappable)", s);
138 }
128 if (obj->ring != NULL) 139 if (obj->ring != NULL)
129 seq_printf(m, " (%s)", obj->ring->name); 140 seq_printf(m, " (%s)", obj->ring->name);
130} 141}
@@ -136,7 +147,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
136 struct list_head *head; 147 struct list_head *head;
137 struct drm_device *dev = node->minor->dev; 148 struct drm_device *dev = node->minor->dev;
138 drm_i915_private_t *dev_priv = dev->dev_private; 149 drm_i915_private_t *dev_priv = dev->dev_private;
139 struct drm_i915_gem_object *obj_priv; 150 struct drm_i915_gem_object *obj;
140 size_t total_obj_size, total_gtt_size; 151 size_t total_obj_size, total_gtt_size;
141 int count, ret; 152 int count, ret;
142 153
@@ -171,12 +182,12 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
171 } 182 }
172 183
173 total_obj_size = total_gtt_size = count = 0; 184 total_obj_size = total_gtt_size = count = 0;
174 list_for_each_entry(obj_priv, head, mm_list) { 185 list_for_each_entry(obj, head, mm_list) {
175 seq_printf(m, " "); 186 seq_printf(m, " ");
176 describe_obj(m, obj_priv); 187 describe_obj(m, obj);
177 seq_printf(m, "\n"); 188 seq_printf(m, "\n");
178 total_obj_size += obj_priv->base.size; 189 total_obj_size += obj->base.size;
179 total_gtt_size += obj_priv->gtt_space->size; 190 total_gtt_size += obj->gtt_space->size;
180 count++; 191 count++;
181 } 192 }
182 mutex_unlock(&dev->struct_mutex); 193 mutex_unlock(&dev->struct_mutex);
@@ -186,24 +197,79 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
186 return 0; 197 return 0;
187} 198}
188 199
200#define count_objects(list, member) do { \
201 list_for_each_entry(obj, list, member) { \
202 size += obj->gtt_space->size; \
203 ++count; \
204 if (obj->map_and_fenceable) { \
205 mappable_size += obj->gtt_space->size; \
206 ++mappable_count; \
207 } \
208 } \
209} while(0)
210
189static int i915_gem_object_info(struct seq_file *m, void* data) 211static int i915_gem_object_info(struct seq_file *m, void* data)
190{ 212{
191 struct drm_info_node *node = (struct drm_info_node *) m->private; 213 struct drm_info_node *node = (struct drm_info_node *) m->private;
192 struct drm_device *dev = node->minor->dev; 214 struct drm_device *dev = node->minor->dev;
193 struct drm_i915_private *dev_priv = dev->dev_private; 215 struct drm_i915_private *dev_priv = dev->dev_private;
216 u32 count, mappable_count;
217 size_t size, mappable_size;
218 struct drm_i915_gem_object *obj;
194 int ret; 219 int ret;
195 220
196 ret = mutex_lock_interruptible(&dev->struct_mutex); 221 ret = mutex_lock_interruptible(&dev->struct_mutex);
197 if (ret) 222 if (ret)
198 return ret; 223 return ret;
199 224
200 seq_printf(m, "%u objects\n", dev_priv->mm.object_count); 225 seq_printf(m, "%u objects, %zu bytes\n",
201 seq_printf(m, "%zu object bytes\n", dev_priv->mm.object_memory); 226 dev_priv->mm.object_count,
202 seq_printf(m, "%u pinned\n", dev_priv->mm.pin_count); 227 dev_priv->mm.object_memory);
203 seq_printf(m, "%zu pin bytes\n", dev_priv->mm.pin_memory); 228
204 seq_printf(m, "%u objects in gtt\n", dev_priv->mm.gtt_count); 229 size = count = mappable_size = mappable_count = 0;
205 seq_printf(m, "%zu gtt bytes\n", dev_priv->mm.gtt_memory); 230 count_objects(&dev_priv->mm.gtt_list, gtt_list);
206 seq_printf(m, "%zu gtt total\n", dev_priv->mm.gtt_total); 231 seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
232 count, mappable_count, size, mappable_size);
233
234 size = count = mappable_size = mappable_count = 0;
235 count_objects(&dev_priv->mm.active_list, mm_list);
236 count_objects(&dev_priv->mm.flushing_list, mm_list);
237 seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
238 count, mappable_count, size, mappable_size);
239
240 size = count = mappable_size = mappable_count = 0;
241 count_objects(&dev_priv->mm.pinned_list, mm_list);
242 seq_printf(m, " %u [%u] pinned objects, %zu [%zu] bytes\n",
243 count, mappable_count, size, mappable_size);
244
245 size = count = mappable_size = mappable_count = 0;
246 count_objects(&dev_priv->mm.inactive_list, mm_list);
247 seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
248 count, mappable_count, size, mappable_size);
249
250 size = count = mappable_size = mappable_count = 0;
251 count_objects(&dev_priv->mm.deferred_free_list, mm_list);
252 seq_printf(m, " %u [%u] freed objects, %zu [%zu] bytes\n",
253 count, mappable_count, size, mappable_size);
254
255 size = count = mappable_size = mappable_count = 0;
256 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
257 if (obj->fault_mappable) {
258 size += obj->gtt_space->size;
259 ++count;
260 }
261 if (obj->pin_mappable) {
262 mappable_size += obj->gtt_space->size;
263 ++mappable_count;
264 }
265 }
266 seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
267 mappable_count, mappable_size);
268 seq_printf(m, "%u fault mappable objects, %zu bytes\n",
269 count, size);
270
271 seq_printf(m, "%zu [%zu] gtt total\n",
272 dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total);
207 273
208 mutex_unlock(&dev->struct_mutex); 274 mutex_unlock(&dev->struct_mutex);
209 275
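
count_objects() deliberately reads and writes the caller's local size/count/mappable_size/mappable_count variables, so one macro body can be reused across the five object lists above without repeating the loop. A standalone sketch of that non-hygienic-by-design convention, array-based instead of list_for_each_entry (struct and macro names are illustrative):

#include <stddef.h>
#include <stdio.h>

struct obj {
	size_t size;
	int mappable;
};

/* Accumulates into size/count/mappable_size/mappable_count, which must
 * exist in the caller's scope, same convention as count_objects(). */
#define count_objs(arr, n) do {					\
	for (size_t _i = 0; _i < (n); _i++) {			\
		size += (arr)[_i].size;				\
		++count;					\
		if ((arr)[_i].mappable) {			\
			mappable_size += (arr)[_i].size;	\
			++mappable_count;			\
		}						\
	}							\
} while (0)

int main(void)
{
	struct obj active[] = { { 4096, 1 }, { 8192, 0 } };
	struct obj pinned[] = { { 4096, 1 } };
	size_t size = 0, mappable_size = 0;
	unsigned int count = 0, mappable_count = 0;

	count_objs(active, 2);
	count_objs(pinned, 1);
	printf("%u [%u] objects, %zu [%zu] bytes\n",
	       count, mappable_count, size, mappable_size);
	return 0;
}
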
@@ -243,14 +309,14 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
243 seq_printf(m, "%d prepares\n", work->pending); 309 seq_printf(m, "%d prepares\n", work->pending);
244 310
245 if (work->old_fb_obj) { 311 if (work->old_fb_obj) {
246 struct drm_i915_gem_object *obj_priv = to_intel_bo(work->old_fb_obj); 312 struct drm_i915_gem_object *obj = work->old_fb_obj;
247 if(obj_priv) 313 if (obj)
248 seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset ); 314 seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
249 } 315 }
250 if (work->pending_flip_obj) { 316 if (work->pending_flip_obj) {
251 struct drm_i915_gem_object *obj_priv = to_intel_bo(work->pending_flip_obj); 317 struct drm_i915_gem_object *obj = work->pending_flip_obj;
252 if(obj_priv) 318 if (obj)
253 seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset ); 319 seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
254 } 320 }
255 } 321 }
256 spin_unlock_irqrestore(&dev->event_lock, flags); 322 spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -265,44 +331,80 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
265 struct drm_device *dev = node->minor->dev; 331 struct drm_device *dev = node->minor->dev;
266 drm_i915_private_t *dev_priv = dev->dev_private; 332 drm_i915_private_t *dev_priv = dev->dev_private;
267 struct drm_i915_gem_request *gem_request; 333 struct drm_i915_gem_request *gem_request;
268 int ret; 334 int ret, count;
269 335
270 ret = mutex_lock_interruptible(&dev->struct_mutex); 336 ret = mutex_lock_interruptible(&dev->struct_mutex);
271 if (ret) 337 if (ret)
272 return ret; 338 return ret;
273 339
274 seq_printf(m, "Request:\n"); 340 count = 0;
275 list_for_each_entry(gem_request, &dev_priv->render_ring.request_list, 341 if (!list_empty(&dev_priv->ring[RCS].request_list)) {
276 list) { 342 seq_printf(m, "Render requests:\n");
277 seq_printf(m, " %d @ %d\n", 343 list_for_each_entry(gem_request,
278 gem_request->seqno, 344 &dev_priv->ring[RCS].request_list,
279 (int) (jiffies - gem_request->emitted_jiffies)); 345 list) {
346 seq_printf(m, " %d @ %d\n",
347 gem_request->seqno,
348 (int) (jiffies - gem_request->emitted_jiffies));
349 }
350 count++;
351 }
352 if (!list_empty(&dev_priv->ring[VCS].request_list)) {
353 seq_printf(m, "BSD requests:\n");
354 list_for_each_entry(gem_request,
355 &dev_priv->ring[VCS].request_list,
356 list) {
357 seq_printf(m, " %d @ %d\n",
358 gem_request->seqno,
359 (int) (jiffies - gem_request->emitted_jiffies));
360 }
361 count++;
362 }
363 if (!list_empty(&dev_priv->ring[BCS].request_list)) {
364 seq_printf(m, "BLT requests:\n");
365 list_for_each_entry(gem_request,
366 &dev_priv->ring[BCS].request_list,
367 list) {
368 seq_printf(m, " %d @ %d\n",
369 gem_request->seqno,
370 (int) (jiffies - gem_request->emitted_jiffies));
371 }
372 count++;
280 } 373 }
281 mutex_unlock(&dev->struct_mutex); 374 mutex_unlock(&dev->struct_mutex);
282 375
376 if (count == 0)
377 seq_printf(m, "No requests\n");
378
283 return 0; 379 return 0;
284} 380}
285 381
382static void i915_ring_seqno_info(struct seq_file *m,
383 struct intel_ring_buffer *ring)
384{
385 if (ring->get_seqno) {
386 seq_printf(m, "Current sequence (%s): %d\n",
387 ring->name, ring->get_seqno(ring));
388 seq_printf(m, "Waiter sequence (%s): %d\n",
389 ring->name, ring->waiting_seqno);
390 seq_printf(m, "IRQ sequence (%s): %d\n",
391 ring->name, ring->irq_seqno);
392 }
393}
394
286static int i915_gem_seqno_info(struct seq_file *m, void *data) 395static int i915_gem_seqno_info(struct seq_file *m, void *data)
287{ 396{
288 struct drm_info_node *node = (struct drm_info_node *) m->private; 397 struct drm_info_node *node = (struct drm_info_node *) m->private;
289 struct drm_device *dev = node->minor->dev; 398 struct drm_device *dev = node->minor->dev;
290 drm_i915_private_t *dev_priv = dev->dev_private; 399 drm_i915_private_t *dev_priv = dev->dev_private;
291 int ret; 400 int ret, i;
292 401
293 ret = mutex_lock_interruptible(&dev->struct_mutex); 402 ret = mutex_lock_interruptible(&dev->struct_mutex);
294 if (ret) 403 if (ret)
295 return ret; 404 return ret;
296 405
297 if (dev_priv->render_ring.status_page.page_addr != NULL) { 406 for (i = 0; i < I915_NUM_RINGS; i++)
298 seq_printf(m, "Current sequence: %d\n", 407 i915_ring_seqno_info(m, &dev_priv->ring[i]);
299 dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
300 } else {
301 seq_printf(m, "Current sequence: hws uninitialized\n");
302 }
303 seq_printf(m, "Waiter sequence: %d\n",
304 dev_priv->mm.waiting_gem_seqno);
305 seq_printf(m, "IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
306 408
307 mutex_unlock(&dev->struct_mutex); 409 mutex_unlock(&dev->struct_mutex);
308 410
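
The change above folds per-ring seqno reporting into one helper, i915_ring_seqno_info(), called in a loop over the ring array instead of special-casing the render ring. A rough standalone sketch of that shape, with invented types and sample values rather than the driver's real ones:

#include <stdio.h>

#define NUM_RINGS 3

struct ring {
	const char *name;
	int current_seqno;   /* would come from ring->get_seqno(ring) */
	int waiting_seqno;
	int irq_seqno;
};

static void ring_seqno_info(const struct ring *ring)
{
	printf("Current sequence (%s): %d\n", ring->name, ring->current_seqno);
	printf("Waiter sequence (%s): %d\n", ring->name, ring->waiting_seqno);
	printf("IRQ sequence (%s): %d\n", ring->name, ring->irq_seqno);
}

int main(void)
{
	struct ring rings[NUM_RINGS] = {
		{ "render", 102, 100, 101 },
		{ "bsd",     40,  40,  40 },
		{ "blt",      7,   6,   7 },
	};

	for (int i = 0; i < NUM_RINGS; i++)
		ring_seqno_info(&rings[i]);
	return 0;
}
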
@@ -315,7 +417,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
315 struct drm_info_node *node = (struct drm_info_node *) m->private; 417 struct drm_info_node *node = (struct drm_info_node *) m->private;
316 struct drm_device *dev = node->minor->dev; 418 struct drm_device *dev = node->minor->dev;
317 drm_i915_private_t *dev_priv = dev->dev_private; 419 drm_i915_private_t *dev_priv = dev->dev_private;
318 int ret; 420 int ret, i;
319 421
320 ret = mutex_lock_interruptible(&dev->struct_mutex); 422 ret = mutex_lock_interruptible(&dev->struct_mutex);
321 if (ret) 423 if (ret)
@@ -354,16 +456,8 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
354 } 456 }
355 seq_printf(m, "Interrupts received: %d\n", 457 seq_printf(m, "Interrupts received: %d\n",
356 atomic_read(&dev_priv->irq_received)); 458 atomic_read(&dev_priv->irq_received));
357 if (dev_priv->render_ring.status_page.page_addr != NULL) { 459 for (i = 0; i < I915_NUM_RINGS; i++)
358 seq_printf(m, "Current sequence: %d\n", 460 i915_ring_seqno_info(m, &dev_priv->ring[i]);
359 dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
360 } else {
361 seq_printf(m, "Current sequence: hws uninitialized\n");
362 }
363 seq_printf(m, "Waiter sequence: %d\n",
364 dev_priv->mm.waiting_gem_seqno);
365 seq_printf(m, "IRQ sequence: %d\n",
366 dev_priv->mm.irq_gem_seqno);
367 mutex_unlock(&dev->struct_mutex); 461 mutex_unlock(&dev->struct_mutex);
368 462
369 return 0; 463 return 0;
@@ -383,29 +477,17 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
383 seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start); 477 seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
384 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs); 478 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
385 for (i = 0; i < dev_priv->num_fence_regs; i++) { 479 for (i = 0; i < dev_priv->num_fence_regs; i++) {
386 struct drm_gem_object *obj = dev_priv->fence_regs[i].obj; 480 struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
387 481
388 if (obj == NULL) { 482 seq_printf(m, "Fenced object[%2d] = ", i);
389 seq_printf(m, "Fenced object[%2d] = unused\n", i); 483 if (obj == NULL)
390 } else { 484 seq_printf(m, "unused");
391 struct drm_i915_gem_object *obj_priv; 485 else
392 486 describe_obj(m, obj);
393 obj_priv = to_intel_bo(obj); 487 seq_printf(m, "\n");
394 seq_printf(m, "Fenced object[%2d] = %p: %s "
395 "%08x %08zx %08x %s %08x %08x %d",
396 i, obj, get_pin_flag(obj_priv),
397 obj_priv->gtt_offset,
398 obj->size, obj_priv->stride,
399 get_tiling_flag(obj_priv),
400 obj->read_domains, obj->write_domain,
401 obj_priv->last_rendering_seqno);
402 if (obj->name)
403 seq_printf(m, " (name: %d)", obj->name);
404 seq_printf(m, "\n");
405 }
406 } 488 }
407 mutex_unlock(&dev->struct_mutex);
408 489
490 mutex_unlock(&dev->struct_mutex);
409 return 0; 491 return 0;
410} 492}
411 493
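
The fence-register dump now prints the slot label unconditionally and delegates the per-object line to the shared describe_obj() helper (defined earlier in the file, outside this excerpt). A standalone sketch of the NULL-tolerant loop, with simplified types:

#include <stdio.h>

struct obj { unsigned int gtt_offset; size_t size; };

static void describe(const struct obj *o)
{
	/* stand-in for the driver's describe_obj() one-line summary */
	printf("%08x %8zu", o->gtt_offset, o->size);
}

int main(void)
{
	struct obj a = { 0x00010000, 4096 };
	const struct obj *fence_regs[4] = { &a, NULL, NULL, NULL };

	for (int i = 0; i < 4; i++) {
		printf("Fenced object[%2d] = ", i);
		if (fence_regs[i] == NULL)
			printf("unused");
		else
			describe(fence_regs[i]);
		printf("\n");
	}
	return 0;
}
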
@@ -414,10 +496,12 @@ static int i915_hws_info(struct seq_file *m, void *data)
414 struct drm_info_node *node = (struct drm_info_node *) m->private; 496 struct drm_info_node *node = (struct drm_info_node *) m->private;
415 struct drm_device *dev = node->minor->dev; 497 struct drm_device *dev = node->minor->dev;
416 drm_i915_private_t *dev_priv = dev->dev_private; 498 drm_i915_private_t *dev_priv = dev->dev_private;
417 int i; 499 struct intel_ring_buffer *ring;
418 volatile u32 *hws; 500 volatile u32 *hws;
501 int i;
419 502
420 hws = (volatile u32 *)dev_priv->render_ring.status_page.page_addr; 503 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
504 hws = (volatile u32 *)ring->status_page.page_addr;
421 if (hws == NULL) 505 if (hws == NULL)
422 return 0; 506 return 0;
423 507
@@ -431,14 +515,14 @@ static int i915_hws_info(struct seq_file *m, void *data)
431 515
432static void i915_dump_object(struct seq_file *m, 516static void i915_dump_object(struct seq_file *m,
433 struct io_mapping *mapping, 517 struct io_mapping *mapping,
434 struct drm_i915_gem_object *obj_priv) 518 struct drm_i915_gem_object *obj)
435{ 519{
436 int page, page_count, i; 520 int page, page_count, i;
437 521
438 page_count = obj_priv->base.size / PAGE_SIZE; 522 page_count = obj->base.size / PAGE_SIZE;
439 for (page = 0; page < page_count; page++) { 523 for (page = 0; page < page_count; page++) {
440 u32 *mem = io_mapping_map_wc(mapping, 524 u32 *mem = io_mapping_map_wc(mapping,
441 obj_priv->gtt_offset + page * PAGE_SIZE); 525 obj->gtt_offset + page * PAGE_SIZE);
442 for (i = 0; i < PAGE_SIZE; i += 4) 526 for (i = 0; i < PAGE_SIZE; i += 4)
443 seq_printf(m, "%08x : %08x\n", i, mem[i / 4]); 527 seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
444 io_mapping_unmap(mem); 528 io_mapping_unmap(mem);
@@ -450,25 +534,21 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
450 struct drm_info_node *node = (struct drm_info_node *) m->private; 534 struct drm_info_node *node = (struct drm_info_node *) m->private;
451 struct drm_device *dev = node->minor->dev; 535 struct drm_device *dev = node->minor->dev;
452 drm_i915_private_t *dev_priv = dev->dev_private; 536 drm_i915_private_t *dev_priv = dev->dev_private;
453 struct drm_gem_object *obj; 537 struct drm_i915_gem_object *obj;
454 struct drm_i915_gem_object *obj_priv;
455 int ret; 538 int ret;
456 539
457 ret = mutex_lock_interruptible(&dev->struct_mutex); 540 ret = mutex_lock_interruptible(&dev->struct_mutex);
458 if (ret) 541 if (ret)
459 return ret; 542 return ret;
460 543
461 list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { 544 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
462 obj = &obj_priv->base; 545 if (obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) {
463 if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) { 546 seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
464 seq_printf(m, "--- gtt_offset = 0x%08x\n", 547 i915_dump_object(m, dev_priv->mm.gtt_mapping, obj);
465 obj_priv->gtt_offset);
466 i915_dump_object(m, dev_priv->mm.gtt_mapping, obj_priv);
467 } 548 }
468 } 549 }
469 550
470 mutex_unlock(&dev->struct_mutex); 551 mutex_unlock(&dev->struct_mutex);
471
472 return 0; 552 return 0;
473} 553}
474 554
@@ -477,19 +557,21 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data)
477 struct drm_info_node *node = (struct drm_info_node *) m->private; 557 struct drm_info_node *node = (struct drm_info_node *) m->private;
478 struct drm_device *dev = node->minor->dev; 558 struct drm_device *dev = node->minor->dev;
479 drm_i915_private_t *dev_priv = dev->dev_private; 559 drm_i915_private_t *dev_priv = dev->dev_private;
560 struct intel_ring_buffer *ring;
480 int ret; 561 int ret;
481 562
482 ret = mutex_lock_interruptible(&dev->struct_mutex); 563 ret = mutex_lock_interruptible(&dev->struct_mutex);
483 if (ret) 564 if (ret)
484 return ret; 565 return ret;
485 566
486 if (!dev_priv->render_ring.gem_object) { 567 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
568 if (!ring->obj) {
487 seq_printf(m, "No ringbuffer setup\n"); 569 seq_printf(m, "No ringbuffer setup\n");
488 } else { 570 } else {
489 u8 *virt = dev_priv->render_ring.virtual_start; 571 u8 *virt = ring->virtual_start;
490 uint32_t off; 572 uint32_t off;
491 573
492 for (off = 0; off < dev_priv->render_ring.size; off += 4) { 574 for (off = 0; off < ring->size; off += 4) {
493 uint32_t *ptr = (uint32_t *)(virt + off); 575 uint32_t *ptr = (uint32_t *)(virt + off);
494 seq_printf(m, "%08x : %08x\n", off, *ptr); 576 seq_printf(m, "%08x : %08x\n", off, *ptr);
495 } 577 }
@@ -504,19 +586,38 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
504 struct drm_info_node *node = (struct drm_info_node *) m->private; 586 struct drm_info_node *node = (struct drm_info_node *) m->private;
505 struct drm_device *dev = node->minor->dev; 587 struct drm_device *dev = node->minor->dev;
506 drm_i915_private_t *dev_priv = dev->dev_private; 588 drm_i915_private_t *dev_priv = dev->dev_private;
507 unsigned int head, tail; 589 struct intel_ring_buffer *ring;
508 590
509 head = I915_READ(PRB0_HEAD) & HEAD_ADDR; 591 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
510 tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; 592 if (ring->size == 0)
593 return 0;
511 594
512 seq_printf(m, "RingHead : %08x\n", head); 595 seq_printf(m, "Ring %s:\n", ring->name);
513 seq_printf(m, "RingTail : %08x\n", tail); 596 seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
514 seq_printf(m, "RingSize : %08lx\n", dev_priv->render_ring.size); 597 seq_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR);
515 seq_printf(m, "Acthd : %08x\n", I915_READ(INTEL_INFO(dev)->gen >= 4 ? ACTHD_I965 : ACTHD)); 598 seq_printf(m, " Size : %08x\n", ring->size);
599 seq_printf(m, " Active : %08x\n", intel_ring_get_active_head(ring));
600 seq_printf(m, " NOPID : %08x\n", I915_READ_NOPID(ring));
601 if (IS_GEN6(dev)) {
602 seq_printf(m, " Sync 0 : %08x\n", I915_READ_SYNC_0(ring));
603 seq_printf(m, " Sync 1 : %08x\n", I915_READ_SYNC_1(ring));
604 }
605 seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring));
606 seq_printf(m, " Start : %08x\n", I915_READ_START(ring));
516 607
517 return 0; 608 return 0;
518} 609}
519 610
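
The rewritten i915_ringbuffer_info() stops hardcoding the PRB0 registers and reads head/tail/control through per-ring accessors such as I915_READ_HEAD(ring). Presumably these expand to a common register layout offset by the ring's MMIO base; the sketch below illustrates that idea with placeholder offsets and a faked register read, not the real hardware map:

#include <stdint.h>
#include <stdio.h>

#define RING_HEAD_OFF 0x04   /* placeholder offsets, not the real layout */
#define RING_TAIL_OFF 0x00

struct ring { uint32_t mmio_base; };

static uint32_t mmio_read(uint32_t reg)
{
	/* stand-in for readl(regs + reg) */
	return reg ^ 0xdeadbeef;
}

#define READ_HEAD(r) mmio_read((r)->mmio_base + RING_HEAD_OFF)
#define READ_TAIL(r) mmio_read((r)->mmio_base + RING_TAIL_OFF)

int main(void)
{
	struct ring render = { 0x02000 }, blt = { 0x22000 }; /* invented bases */
	printf("render head %08x tail %08x\n", READ_HEAD(&render), READ_TAIL(&render));
	printf("blt    head %08x tail %08x\n", READ_HEAD(&blt), READ_TAIL(&blt));
	return 0;
}
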
611static const char *ring_str(int ring)
612{
613 switch (ring) {
614 case RING_RENDER: return " render";
615 case RING_BSD: return " bsd";
616 case RING_BLT: return " blt";
617 default: return "";
618 }
619}
620
520static const char *pin_flag(int pinned) 621static const char *pin_flag(int pinned)
521{ 622{
522 if (pinned > 0) 623 if (pinned > 0)
@@ -547,6 +648,36 @@ static const char *purgeable_flag(int purgeable)
547 return purgeable ? " purgeable" : ""; 648 return purgeable ? " purgeable" : "";
548} 649}
549 650
651static void print_error_buffers(struct seq_file *m,
652 const char *name,
653 struct drm_i915_error_buffer *err,
654 int count)
655{
656 seq_printf(m, "%s [%d]:\n", name, count);
657
658 while (count--) {
659 seq_printf(m, " %08x %8zd %04x %04x %08x%s%s%s%s%s",
660 err->gtt_offset,
661 err->size,
662 err->read_domains,
663 err->write_domain,
664 err->seqno,
665 pin_flag(err->pinned),
666 tiling_flag(err->tiling),
667 dirty_flag(err->dirty),
668 purgeable_flag(err->purgeable),
669 ring_str(err->ring));
670
671 if (err->name)
672 seq_printf(m, " (name: %d)", err->name);
673 if (err->fence_reg != I915_FENCE_REG_NONE)
674 seq_printf(m, " (fence: %d)", err->fence_reg);
675
676 seq_printf(m, "\n");
677 err++;
678 }
679}
680
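
print_error_buffers() above gives the active and pinned BO lists one formatter: walk the array by pointer and append the optional annotations only when present. A standalone reduction of that loop, with illustrative fields:

#include <stdio.h>

struct err_buf { unsigned gtt_offset; int name; int fence_reg; };
#define FENCE_NONE (-1)

static void print_bufs(const char *label, const struct err_buf *err, int count)
{
	printf("%s [%d]:\n", label, count);
	while (count--) {
		printf("  %08x", err->gtt_offset);
		if (err->name)
			printf(" (name: %d)", err->name);
		if (err->fence_reg != FENCE_NONE)
			printf(" (fence: %d)", err->fence_reg);
		printf("\n");
		err++;
	}
}

int main(void)
{
	struct err_buf active[] = { { 0x1000, 3, 0 }, { 0x2000, 0, FENCE_NONE } };
	print_bufs("Active", active, 2);
	return 0;
}
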
550static int i915_error_state(struct seq_file *m, void *unused) 681static int i915_error_state(struct seq_file *m, void *unused)
551{ 682{
552 struct drm_info_node *node = (struct drm_info_node *) m->private; 683 struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -568,41 +699,46 @@ static int i915_error_state(struct seq_file *m, void *unused)
568 error->time.tv_usec); 699 error->time.tv_usec);
569 seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device); 700 seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
570 seq_printf(m, "EIR: 0x%08x\n", error->eir); 701 seq_printf(m, "EIR: 0x%08x\n", error->eir);
571 seq_printf(m, " PGTBL_ER: 0x%08x\n", error->pgtbl_er); 702 seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
572 seq_printf(m, " INSTPM: 0x%08x\n", error->instpm); 703 if (INTEL_INFO(dev)->gen >= 6) {
704 seq_printf(m, "ERROR: 0x%08x\n", error->error);
705 seq_printf(m, "Blitter command stream:\n");
706 seq_printf(m, " ACTHD: 0x%08x\n", error->bcs_acthd);
707 seq_printf(m, " IPEIR: 0x%08x\n", error->bcs_ipeir);
708 seq_printf(m, " IPEHR: 0x%08x\n", error->bcs_ipehr);
709 seq_printf(m, " INSTDONE: 0x%08x\n", error->bcs_instdone);
710 seq_printf(m, " seqno: 0x%08x\n", error->bcs_seqno);
711 seq_printf(m, "Video (BSD) command stream:\n");
712 seq_printf(m, " ACTHD: 0x%08x\n", error->vcs_acthd);
713 seq_printf(m, " IPEIR: 0x%08x\n", error->vcs_ipeir);
714 seq_printf(m, " IPEHR: 0x%08x\n", error->vcs_ipehr);
715 seq_printf(m, " INSTDONE: 0x%08x\n", error->vcs_instdone);
716 seq_printf(m, " seqno: 0x%08x\n", error->vcs_seqno);
717 }
718 seq_printf(m, "Render command stream:\n");
719 seq_printf(m, " ACTHD: 0x%08x\n", error->acthd);
573 seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir); 720 seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir);
574 seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr); 721 seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr);
575 seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone); 722 seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone);
576 seq_printf(m, " ACTHD: 0x%08x\n", error->acthd);
577 if (INTEL_INFO(dev)->gen >= 4) { 723 if (INTEL_INFO(dev)->gen >= 4) {
578 seq_printf(m, " INSTPS: 0x%08x\n", error->instps);
579 seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1); 724 seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
725 seq_printf(m, " INSTPS: 0x%08x\n", error->instps);
580 } 726 }
581 seq_printf(m, "seqno: 0x%08x\n", error->seqno); 727 seq_printf(m, " INSTPM: 0x%08x\n", error->instpm);
582 728 seq_printf(m, " seqno: 0x%08x\n", error->seqno);
583 if (error->active_bo_count) { 729
584 seq_printf(m, "Buffers [%d]:\n", error->active_bo_count); 730 for (i = 0; i < 16; i++)
585 731 seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
586 for (i = 0; i < error->active_bo_count; i++) { 732
587 seq_printf(m, " %08x %8zd %08x %08x %08x%s%s%s%s", 733 if (error->active_bo)
588 error->active_bo[i].gtt_offset, 734 print_error_buffers(m, "Active",
589 error->active_bo[i].size, 735 error->active_bo,
590 error->active_bo[i].read_domains, 736 error->active_bo_count);
591 error->active_bo[i].write_domain, 737
592 error->active_bo[i].seqno, 738 if (error->pinned_bo)
593 pin_flag(error->active_bo[i].pinned), 739 print_error_buffers(m, "Pinned",
594 tiling_flag(error->active_bo[i].tiling), 740 error->pinned_bo,
595 dirty_flag(error->active_bo[i].dirty), 741 error->pinned_bo_count);
596 purgeable_flag(error->active_bo[i].purgeable));
597
598 if (error->active_bo[i].name)
599 seq_printf(m, " (name: %d)", error->active_bo[i].name);
600 if (error->active_bo[i].fence_reg != I915_FENCE_REG_NONE)
601 seq_printf(m, " (fence: %d)", error->active_bo[i].fence_reg);
602
603 seq_printf(m, "\n");
604 }
605 }
606 742
607 for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) { 743 for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) {
608 if (error->batchbuffer[i]) { 744 if (error->batchbuffer[i]) {
@@ -635,6 +771,9 @@ static int i915_error_state(struct seq_file *m, void *unused)
635 if (error->overlay) 771 if (error->overlay)
636 intel_overlay_print_error_state(m, error->overlay); 772 intel_overlay_print_error_state(m, error->overlay);
637 773
774 if (error->display)
775 intel_display_print_error_state(m, dev, error->display);
776
638out: 777out:
639 spin_unlock_irqrestore(&dev_priv->error_lock, flags); 778 spin_unlock_irqrestore(&dev_priv->error_lock, flags);
640 779
@@ -658,15 +797,51 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
658 struct drm_info_node *node = (struct drm_info_node *) m->private; 797 struct drm_info_node *node = (struct drm_info_node *) m->private;
659 struct drm_device *dev = node->minor->dev; 798 struct drm_device *dev = node->minor->dev;
660 drm_i915_private_t *dev_priv = dev->dev_private; 799 drm_i915_private_t *dev_priv = dev->dev_private;
661 u16 rgvswctl = I915_READ16(MEMSWCTL);
662 u16 rgvstat = I915_READ16(MEMSTAT_ILK);
663 800
664 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf); 801 if (IS_GEN5(dev)) {
665 seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f); 802 u16 rgvswctl = I915_READ16(MEMSWCTL);
666 seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >> 803 u16 rgvstat = I915_READ16(MEMSTAT_ILK);
667 MEMSTAT_VID_SHIFT); 804
668 seq_printf(m, "Current P-state: %d\n", 805 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
669 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); 806 seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
807 seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
808 MEMSTAT_VID_SHIFT);
809 seq_printf(m, "Current P-state: %d\n",
810 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
811 } else if (IS_GEN6(dev)) {
812 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
813 u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
814 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
815 int max_freq;
816
817 /* RPSTAT1 is in the GT power well */
818 __gen6_force_wake_get(dev_priv);
819
820 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
821 seq_printf(m, "RPSTAT1: 0x%08x\n", I915_READ(GEN6_RPSTAT1));
822 seq_printf(m, "Render p-state ratio: %d\n",
823 (gt_perf_status & 0xff00) >> 8);
824 seq_printf(m, "Render p-state VID: %d\n",
825 gt_perf_status & 0xff);
826 seq_printf(m, "Render p-state limit: %d\n",
827 rp_state_limits & 0xff);
828
829 max_freq = (rp_state_cap & 0xff0000) >> 16;
830 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
831 max_freq * 100);
832
833 max_freq = (rp_state_cap & 0xff00) >> 8;
834 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
835 max_freq * 100);
836
837 max_freq = rp_state_cap & 0xff;
838 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
839 max_freq * 100);
840
841 __gen6_force_wake_put(dev_priv);
842 } else {
843 seq_printf(m, "no P-state info available\n");
844 }
670 845
671 return 0; 846 return 0;
672} 847}
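
The gen6 branch decodes GEN6_RP_STATE_CAP as three byte-wide ratio fields, RPN in bits 23:16, RP1 in bits 15:8 and RP0 in bits 7:0, each scaled by 100 MHz in this patch, and brackets the RPSTAT1 read with a forcewake get/put because that register sits in the GT power well. The decode restated as a standalone check (register value invented):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t rp_state_cap = 0x000a0c11; /* example: RPN=0x0a RP1=0x0c RP0=0x11 */

	unsigned rpn = (rp_state_cap >> 16) & 0xff;
	unsigned rp1 = (rp_state_cap >> 8) & 0xff;
	unsigned rp0 = rp_state_cap & 0xff;

	printf("Lowest (RPN) frequency: %uMHz\n", rpn * 100);
	printf("Nominal (RP1) frequency: %uMHz\n", rp1 * 100);
	printf("Max non-overclocked (RP0) frequency: %uMHz\n", rp0 * 100);
	return 0;
}
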
@@ -794,7 +969,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
794 drm_i915_private_t *dev_priv = dev->dev_private; 969 drm_i915_private_t *dev_priv = dev->dev_private;
795 bool sr_enabled = false; 970 bool sr_enabled = false;
796 971
797 if (IS_GEN5(dev)) 972 if (HAS_PCH_SPLIT(dev))
798 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN; 973 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
799 else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev)) 974 else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
800 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; 975 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
@@ -886,7 +1061,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
886 fb->base.height, 1061 fb->base.height,
887 fb->base.depth, 1062 fb->base.depth,
888 fb->base.bits_per_pixel); 1063 fb->base.bits_per_pixel);
889 describe_obj(m, to_intel_bo(fb->obj)); 1064 describe_obj(m, fb->obj);
890 seq_printf(m, "\n"); 1065 seq_printf(m, "\n");
891 1066
892 list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) { 1067 list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
@@ -898,7 +1073,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
898 fb->base.height, 1073 fb->base.height,
899 fb->base.depth, 1074 fb->base.depth,
900 fb->base.bits_per_pixel); 1075 fb->base.bits_per_pixel);
901 describe_obj(m, to_intel_bo(fb->obj)); 1076 describe_obj(m, fb->obj);
902 seq_printf(m, "\n"); 1077 seq_printf(m, "\n");
903 } 1078 }
904 1079
@@ -943,7 +1118,6 @@ i915_wedged_write(struct file *filp,
943 loff_t *ppos) 1118 loff_t *ppos)
944{ 1119{
945 struct drm_device *dev = filp->private_data; 1120 struct drm_device *dev = filp->private_data;
946 drm_i915_private_t *dev_priv = dev->dev_private;
947 char buf[20]; 1121 char buf[20];
948 int val = 1; 1122 int val = 1;
949 1123
@@ -959,12 +1133,7 @@ i915_wedged_write(struct file *filp,
959 } 1133 }
960 1134
961 DRM_INFO("Manually setting wedged to %d\n", val); 1135 DRM_INFO("Manually setting wedged to %d\n", val);
962 1136 i915_handle_error(dev, val);
963 atomic_set(&dev_priv->mm.wedged, val);
964 if (val) {
965 wake_up_all(&dev_priv->irq_queue);
966 queue_work(dev_priv->wq, &dev_priv->error_work);
967 }
968 1137
969 return cnt; 1138 return cnt;
970} 1139}
@@ -1028,9 +1197,15 @@ static struct drm_info_list i915_debugfs_list[] = {
1028 {"i915_gem_seqno", i915_gem_seqno_info, 0}, 1197 {"i915_gem_seqno", i915_gem_seqno_info, 0},
1029 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, 1198 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
1030 {"i915_gem_interrupt", i915_interrupt_info, 0}, 1199 {"i915_gem_interrupt", i915_interrupt_info, 0},
1031 {"i915_gem_hws", i915_hws_info, 0}, 1200 {"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
1032 {"i915_ringbuffer_data", i915_ringbuffer_data, 0}, 1201 {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
1033 {"i915_ringbuffer_info", i915_ringbuffer_info, 0}, 1202 {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
1203 {"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RCS},
1204 {"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RCS},
1205 {"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)VCS},
1206 {"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS},
1207 {"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS},
1208 {"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS},
1034 {"i915_batchbuffers", i915_batchbuffer_info, 0}, 1209 {"i915_batchbuffers", i915_batchbuffer_info, 0},
1035 {"i915_error_state", i915_error_state, 0}, 1210 {"i915_error_state", i915_error_state, 0},
1036 {"i915_rstdby_delays", i915_rstdby_delays, 0}, 1211 {"i915_rstdby_delays", i915_rstdby_delays, 0},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index cb900dc83d95..0568dbdc10ef 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -50,6 +50,8 @@
50static int i915_init_phys_hws(struct drm_device *dev) 50static int i915_init_phys_hws(struct drm_device *dev)
51{ 51{
52 drm_i915_private_t *dev_priv = dev->dev_private; 52 drm_i915_private_t *dev_priv = dev->dev_private;
53 struct intel_ring_buffer *ring = LP_RING(dev_priv);
54
53 /* Program Hardware Status Page */ 55 /* Program Hardware Status Page */
54 dev_priv->status_page_dmah = 56 dev_priv->status_page_dmah =
55 drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE); 57 drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
@@ -58,11 +60,10 @@ static int i915_init_phys_hws(struct drm_device *dev)
58 DRM_ERROR("Can not allocate hardware status page\n"); 60 DRM_ERROR("Can not allocate hardware status page\n");
59 return -ENOMEM; 61 return -ENOMEM;
60 } 62 }
61 dev_priv->render_ring.status_page.page_addr 63 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
62 = dev_priv->status_page_dmah->vaddr;
63 dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr; 64 dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
64 65
65 memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE); 66 memset(ring->status_page.page_addr, 0, PAGE_SIZE);
66 67
67 if (INTEL_INFO(dev)->gen >= 4) 68 if (INTEL_INFO(dev)->gen >= 4)
68 dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) & 69 dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
@@ -80,13 +81,15 @@ static int i915_init_phys_hws(struct drm_device *dev)
80static void i915_free_hws(struct drm_device *dev) 81static void i915_free_hws(struct drm_device *dev)
81{ 82{
82 drm_i915_private_t *dev_priv = dev->dev_private; 83 drm_i915_private_t *dev_priv = dev->dev_private;
84 struct intel_ring_buffer *ring = LP_RING(dev_priv);
85
83 if (dev_priv->status_page_dmah) { 86 if (dev_priv->status_page_dmah) {
84 drm_pci_free(dev, dev_priv->status_page_dmah); 87 drm_pci_free(dev, dev_priv->status_page_dmah);
85 dev_priv->status_page_dmah = NULL; 88 dev_priv->status_page_dmah = NULL;
86 } 89 }
87 90
88 if (dev_priv->render_ring.status_page.gfx_addr) { 91 if (ring->status_page.gfx_addr) {
89 dev_priv->render_ring.status_page.gfx_addr = 0; 92 ring->status_page.gfx_addr = 0;
90 drm_core_ioremapfree(&dev_priv->hws_map, dev); 93 drm_core_ioremapfree(&dev_priv->hws_map, dev);
91 } 94 }
92 95
@@ -98,7 +101,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
98{ 101{
99 drm_i915_private_t *dev_priv = dev->dev_private; 102 drm_i915_private_t *dev_priv = dev->dev_private;
100 struct drm_i915_master_private *master_priv; 103 struct drm_i915_master_private *master_priv;
101 struct intel_ring_buffer *ring = &dev_priv->render_ring; 104 struct intel_ring_buffer *ring = LP_RING(dev_priv);
102 105
103 /* 106 /*
104 * We should never lose context on the ring with modesetting 107 * We should never lose context on the ring with modesetting
@@ -107,8 +110,8 @@ void i915_kernel_lost_context(struct drm_device * dev)
107 if (drm_core_check_feature(dev, DRIVER_MODESET)) 110 if (drm_core_check_feature(dev, DRIVER_MODESET))
108 return; 111 return;
109 112
110 ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; 113 ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
111 ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; 114 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
112 ring->space = ring->head - (ring->tail + 8); 115 ring->space = ring->head - (ring->tail + 8);
113 if (ring->space < 0) 116 if (ring->space < 0)
114 ring->space += ring->size; 117 ring->space += ring->size;
@@ -124,6 +127,8 @@ void i915_kernel_lost_context(struct drm_device * dev)
124static int i915_dma_cleanup(struct drm_device * dev) 127static int i915_dma_cleanup(struct drm_device * dev)
125{ 128{
126 drm_i915_private_t *dev_priv = dev->dev_private; 129 drm_i915_private_t *dev_priv = dev->dev_private;
130 int i;
131
127 /* Make sure interrupts are disabled here because the uninstall ioctl 132 /* Make sure interrupts are disabled here because the uninstall ioctl
128 * may not have been called from userspace and after dev_private 133 * may not have been called from userspace and after dev_private
129 * is freed, it's too late. 134 * is freed, it's too late.
@@ -132,9 +137,8 @@ static int i915_dma_cleanup(struct drm_device * dev)
132 drm_irq_uninstall(dev); 137 drm_irq_uninstall(dev);
133 138
134 mutex_lock(&dev->struct_mutex); 139 mutex_lock(&dev->struct_mutex);
135 intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); 140 for (i = 0; i < I915_NUM_RINGS; i++)
136 intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); 141 intel_cleanup_ring_buffer(&dev_priv->ring[i]);
137 intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring);
138 mutex_unlock(&dev->struct_mutex); 142 mutex_unlock(&dev->struct_mutex);
139 143
140 /* Clear the HWS virtual address at teardown */ 144 /* Clear the HWS virtual address at teardown */
@@ -148,6 +152,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
148{ 152{
149 drm_i915_private_t *dev_priv = dev->dev_private; 153 drm_i915_private_t *dev_priv = dev->dev_private;
150 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 154 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
155 struct intel_ring_buffer *ring = LP_RING(dev_priv);
151 156
152 master_priv->sarea = drm_getsarea(dev); 157 master_priv->sarea = drm_getsarea(dev);
153 if (master_priv->sarea) { 158 if (master_priv->sarea) {
@@ -158,24 +163,24 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
158 } 163 }
159 164
160 if (init->ring_size != 0) { 165 if (init->ring_size != 0) {
161 if (dev_priv->render_ring.gem_object != NULL) { 166 if (ring->obj != NULL) {
162 i915_dma_cleanup(dev); 167 i915_dma_cleanup(dev);
163 DRM_ERROR("Client tried to initialize ringbuffer in " 168 DRM_ERROR("Client tried to initialize ringbuffer in "
164 "GEM mode\n"); 169 "GEM mode\n");
165 return -EINVAL; 170 return -EINVAL;
166 } 171 }
167 172
168 dev_priv->render_ring.size = init->ring_size; 173 ring->size = init->ring_size;
169 174
170 dev_priv->render_ring.map.offset = init->ring_start; 175 ring->map.offset = init->ring_start;
171 dev_priv->render_ring.map.size = init->ring_size; 176 ring->map.size = init->ring_size;
172 dev_priv->render_ring.map.type = 0; 177 ring->map.type = 0;
173 dev_priv->render_ring.map.flags = 0; 178 ring->map.flags = 0;
174 dev_priv->render_ring.map.mtrr = 0; 179 ring->map.mtrr = 0;
175 180
176 drm_core_ioremap_wc(&dev_priv->render_ring.map, dev); 181 drm_core_ioremap_wc(&ring->map, dev);
177 182
178 if (dev_priv->render_ring.map.handle == NULL) { 183 if (ring->map.handle == NULL) {
179 i915_dma_cleanup(dev); 184 i915_dma_cleanup(dev);
180 DRM_ERROR("can not ioremap virtual address for" 185 DRM_ERROR("can not ioremap virtual address for"
181 " ring buffer\n"); 186 " ring buffer\n");
@@ -183,7 +188,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
183 } 188 }
184 } 189 }
185 190
186 dev_priv->render_ring.virtual_start = dev_priv->render_ring.map.handle; 191 ring->virtual_start = ring->map.handle;
187 192
188 dev_priv->cpp = init->cpp; 193 dev_priv->cpp = init->cpp;
189 dev_priv->back_offset = init->back_offset; 194 dev_priv->back_offset = init->back_offset;
@@ -202,12 +207,10 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
202static int i915_dma_resume(struct drm_device * dev) 207static int i915_dma_resume(struct drm_device * dev)
203{ 208{
204 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 209 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
210 struct intel_ring_buffer *ring = LP_RING(dev_priv);
205 211
206 struct intel_ring_buffer *ring;
207 DRM_DEBUG_DRIVER("%s\n", __func__); 212 DRM_DEBUG_DRIVER("%s\n", __func__);
208 213
209 ring = &dev_priv->render_ring;
210
211 if (ring->map.handle == NULL) { 214 if (ring->map.handle == NULL) {
212 DRM_ERROR("can not ioremap virtual address for" 215 DRM_ERROR("can not ioremap virtual address for"
213 " ring buffer\n"); 216 " ring buffer\n");
@@ -222,7 +225,7 @@ static int i915_dma_resume(struct drm_device * dev)
222 DRM_DEBUG_DRIVER("hw status page @ %p\n", 225 DRM_DEBUG_DRIVER("hw status page @ %p\n",
223 ring->status_page.page_addr); 226 ring->status_page.page_addr);
224 if (ring->status_page.gfx_addr != 0) 227 if (ring->status_page.gfx_addr != 0)
225 intel_ring_setup_status_page(dev, ring); 228 intel_ring_setup_status_page(ring);
226 else 229 else
227 I915_WRITE(HWS_PGA, dev_priv->dma_status_page); 230 I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
228 231
@@ -264,7 +267,7 @@ static int i915_dma_init(struct drm_device *dev, void *data,
264 * instruction detected will be given a size of zero, which is a 267 * instruction detected will be given a size of zero, which is a
265 * signal to abort the rest of the buffer. 268 * signal to abort the rest of the buffer.
266 */ 269 */
267static int do_validate_cmd(int cmd) 270static int validate_cmd(int cmd)
268{ 271{
269 switch (((cmd >> 29) & 0x7)) { 272 switch (((cmd >> 29) & 0x7)) {
270 case 0x0: 273 case 0x0:
@@ -322,40 +325,27 @@ static int do_validate_cmd(int cmd)
322 return 0; 325 return 0;
323} 326}
324 327
325static int validate_cmd(int cmd)
326{
327 int ret = do_validate_cmd(cmd);
328
329/* printk("validate_cmd( %x ): %d\n", cmd, ret); */
330
331 return ret;
332}
333
334static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords) 328static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
335{ 329{
336 drm_i915_private_t *dev_priv = dev->dev_private; 330 drm_i915_private_t *dev_priv = dev->dev_private;
337 int i; 331 int i, ret;
338 332
339 if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.size - 8) 333 if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
340 return -EINVAL; 334 return -EINVAL;
341 335
342 BEGIN_LP_RING((dwords+1)&~1);
343
344 for (i = 0; i < dwords;) { 336 for (i = 0; i < dwords;) {
345 int cmd, sz; 337 int sz = validate_cmd(buffer[i]);
346 338 if (sz == 0 || i + sz > dwords)
347 cmd = buffer[i];
348
349 if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
350 return -EINVAL; 339 return -EINVAL;
351 340 i += sz;
352 OUT_RING(cmd);
353
354 while (++i, --sz) {
355 OUT_RING(buffer[i]);
356 }
357 } 341 }
358 342
343 ret = BEGIN_LP_RING((dwords+1)&~1);
344 if (ret)
345 return ret;
346
347 for (i = 0; i < dwords; i++)
348 OUT_RING(buffer[i]);
359 if (dwords & 1) 349 if (dwords & 1)
360 OUT_RING(0); 350 OUT_RING(0);
361 351
@@ -366,34 +356,41 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
366 356
367int 357int
368i915_emit_box(struct drm_device *dev, 358i915_emit_box(struct drm_device *dev,
369 struct drm_clip_rect *boxes, 359 struct drm_clip_rect *box,
370 int i, int DR1, int DR4) 360 int DR1, int DR4)
371{ 361{
372 struct drm_clip_rect box = boxes[i]; 362 struct drm_i915_private *dev_priv = dev->dev_private;
363 int ret;
373 364
374 if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) { 365 if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
366 box->y2 <= 0 || box->x2 <= 0) {
375 DRM_ERROR("Bad box %d,%d..%d,%d\n", 367 DRM_ERROR("Bad box %d,%d..%d,%d\n",
376 box.x1, box.y1, box.x2, box.y2); 368 box->x1, box->y1, box->x2, box->y2);
377 return -EINVAL; 369 return -EINVAL;
378 } 370 }
379 371
380 if (INTEL_INFO(dev)->gen >= 4) { 372 if (INTEL_INFO(dev)->gen >= 4) {
381 BEGIN_LP_RING(4); 373 ret = BEGIN_LP_RING(4);
374 if (ret)
375 return ret;
376
382 OUT_RING(GFX_OP_DRAWRECT_INFO_I965); 377 OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
383 OUT_RING((box.x1 & 0xffff) | (box.y1 << 16)); 378 OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
384 OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16)); 379 OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
385 OUT_RING(DR4); 380 OUT_RING(DR4);
386 ADVANCE_LP_RING();
387 } else { 381 } else {
388 BEGIN_LP_RING(6); 382 ret = BEGIN_LP_RING(6);
383 if (ret)
384 return ret;
385
389 OUT_RING(GFX_OP_DRAWRECT_INFO); 386 OUT_RING(GFX_OP_DRAWRECT_INFO);
390 OUT_RING(DR1); 387 OUT_RING(DR1);
391 OUT_RING((box.x1 & 0xffff) | (box.y1 << 16)); 388 OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
392 OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16)); 389 OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
393 OUT_RING(DR4); 390 OUT_RING(DR4);
394 OUT_RING(0); 391 OUT_RING(0);
395 ADVANCE_LP_RING();
396 } 392 }
393 ADVANCE_LP_RING();
397 394
398 return 0; 395 return 0;
399} 396}
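
i915_emit_box() now takes a pointer to one clip rect instead of an array plus index, so callers pass &cliprects[i] and the struct copy disappears. The signature change in standalone miniature (types simplified):

#include <stdio.h>

struct rect { int x1, y1, x2, y2; };

static int emit_box(const struct rect *box)
{
	if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
	    box->y2 <= 0 || box->x2 <= 0) {
		fprintf(stderr, "Bad box %d,%d..%d,%d\n",
			box->x1, box->y1, box->x2, box->y2);
		return -1;
	}
	printf("box %d,%d..%d,%d\n", box->x1, box->y1, box->x2, box->y2);
	return 0;
}

int main(void)
{
	struct rect clip[2] = { { 0, 0, 640, 480 }, { 10, 10, 5, 5 } };
	for (int i = 0; i < 2; i++)
		emit_box(&clip[i]);    /* was: emit_box(clip, i) */
	return 0;
}
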
@@ -413,12 +410,13 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
413 if (master_priv->sarea_priv) 410 if (master_priv->sarea_priv)
414 master_priv->sarea_priv->last_enqueue = dev_priv->counter; 411 master_priv->sarea_priv->last_enqueue = dev_priv->counter;
415 412
416 BEGIN_LP_RING(4); 413 if (BEGIN_LP_RING(4) == 0) {
417 OUT_RING(MI_STORE_DWORD_INDEX); 414 OUT_RING(MI_STORE_DWORD_INDEX);
418 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 415 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
419 OUT_RING(dev_priv->counter); 416 OUT_RING(dev_priv->counter);
420 OUT_RING(0); 417 OUT_RING(0);
421 ADVANCE_LP_RING(); 418 ADVANCE_LP_RING();
419 }
422} 420}
423 421
424static int i915_dispatch_cmdbuffer(struct drm_device * dev, 422static int i915_dispatch_cmdbuffer(struct drm_device * dev,
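
Throughout this file, BEGIN_LP_RING() now returns an error code (ring-space waits can fail), so mandatory emission sites propagate the failure while best-effort ones like the breadcrumb above emit only on success. A standalone sketch of that calling convention, with an invented failure condition:

#include <stdio.h>

static int ring_space = 4;

static int begin_ring(int dwords)
{
	if (dwords > ring_space)
		return -11;     /* stand-in for -EAGAIN from a failed wait */
	ring_space -= dwords;
	return 0;
}

static void out_ring(unsigned v) { printf("emit %08x\n", v); }

static int emit_mandatory(void)
{
	int ret = begin_ring(2);
	if (ret)
		return ret;     /* caller must see the failure */
	out_ring(0x1);
	out_ring(0x2);
	return 0;
}

static void emit_breadcrumb(void)
{
	if (begin_ring(4) == 0) {   /* best effort: skip silently on failure */
		out_ring(0x10); out_ring(0x11); out_ring(0x12); out_ring(0x13);
	}
}

int main(void)
{
	printf("mandatory: %d\n", emit_mandatory());
	emit_breadcrumb();  /* ring now too full; silently skipped */
	return 0;
}
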
@@ -440,7 +438,7 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
440 438
441 for (i = 0; i < count; i++) { 439 for (i = 0; i < count; i++) {
442 if (i < nbox) { 440 if (i < nbox) {
443 ret = i915_emit_box(dev, cliprects, i, 441 ret = i915_emit_box(dev, &cliprects[i],
444 cmd->DR1, cmd->DR4); 442 cmd->DR1, cmd->DR4);
445 if (ret) 443 if (ret)
446 return ret; 444 return ret;
@@ -459,8 +457,9 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
459 drm_i915_batchbuffer_t * batch, 457 drm_i915_batchbuffer_t * batch,
460 struct drm_clip_rect *cliprects) 458 struct drm_clip_rect *cliprects)
461{ 459{
460 struct drm_i915_private *dev_priv = dev->dev_private;
462 int nbox = batch->num_cliprects; 461 int nbox = batch->num_cliprects;
463 int i = 0, count; 462 int i, count, ret;
464 463
465 if ((batch->start | batch->used) & 0x7) { 464 if ((batch->start | batch->used) & 0x7) {
466 DRM_ERROR("alignment"); 465 DRM_ERROR("alignment");
@@ -470,17 +469,19 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
470 i915_kernel_lost_context(dev); 469 i915_kernel_lost_context(dev);
471 470
472 count = nbox ? nbox : 1; 471 count = nbox ? nbox : 1;
473
474 for (i = 0; i < count; i++) { 472 for (i = 0; i < count; i++) {
475 if (i < nbox) { 473 if (i < nbox) {
476 int ret = i915_emit_box(dev, cliprects, i, 474 ret = i915_emit_box(dev, &cliprects[i],
477 batch->DR1, batch->DR4); 475 batch->DR1, batch->DR4);
478 if (ret) 476 if (ret)
479 return ret; 477 return ret;
480 } 478 }
481 479
482 if (!IS_I830(dev) && !IS_845G(dev)) { 480 if (!IS_I830(dev) && !IS_845G(dev)) {
483 BEGIN_LP_RING(2); 481 ret = BEGIN_LP_RING(2);
482 if (ret)
483 return ret;
484
484 if (INTEL_INFO(dev)->gen >= 4) { 485 if (INTEL_INFO(dev)->gen >= 4) {
485 OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965); 486 OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
486 OUT_RING(batch->start); 487 OUT_RING(batch->start);
@@ -488,26 +489,29 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
488 OUT_RING(MI_BATCH_BUFFER_START | (2 << 6)); 489 OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
489 OUT_RING(batch->start | MI_BATCH_NON_SECURE); 490 OUT_RING(batch->start | MI_BATCH_NON_SECURE);
490 } 491 }
491 ADVANCE_LP_RING();
492 } else { 492 } else {
493 BEGIN_LP_RING(4); 493 ret = BEGIN_LP_RING(4);
494 if (ret)
495 return ret;
496
494 OUT_RING(MI_BATCH_BUFFER); 497 OUT_RING(MI_BATCH_BUFFER);
495 OUT_RING(batch->start | MI_BATCH_NON_SECURE); 498 OUT_RING(batch->start | MI_BATCH_NON_SECURE);
496 OUT_RING(batch->start + batch->used - 4); 499 OUT_RING(batch->start + batch->used - 4);
497 OUT_RING(0); 500 OUT_RING(0);
498 ADVANCE_LP_RING();
499 } 501 }
502 ADVANCE_LP_RING();
500 } 503 }
501 504
502 505
503 if (IS_G4X(dev) || IS_GEN5(dev)) { 506 if (IS_G4X(dev) || IS_GEN5(dev)) {
504 BEGIN_LP_RING(2); 507 if (BEGIN_LP_RING(2) == 0) {
505 OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP); 508 OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
506 OUT_RING(MI_NOOP); 509 OUT_RING(MI_NOOP);
507 ADVANCE_LP_RING(); 510 ADVANCE_LP_RING();
511 }
508 } 512 }
509 i915_emit_breadcrumb(dev);
510 513
514 i915_emit_breadcrumb(dev);
511 return 0; 515 return 0;
512} 516}
513 517
@@ -516,6 +520,7 @@ static int i915_dispatch_flip(struct drm_device * dev)
516 drm_i915_private_t *dev_priv = dev->dev_private; 520 drm_i915_private_t *dev_priv = dev->dev_private;
517 struct drm_i915_master_private *master_priv = 521 struct drm_i915_master_private *master_priv =
518 dev->primary->master->driver_priv; 522 dev->primary->master->driver_priv;
523 int ret;
519 524
520 if (!master_priv->sarea_priv) 525 if (!master_priv->sarea_priv)
521 return -EINVAL; 526 return -EINVAL;
@@ -527,12 +532,13 @@ static int i915_dispatch_flip(struct drm_device * dev)
527 532
528 i915_kernel_lost_context(dev); 533 i915_kernel_lost_context(dev);
529 534
530 BEGIN_LP_RING(2); 535 ret = BEGIN_LP_RING(10);
536 if (ret)
537 return ret;
538
531 OUT_RING(MI_FLUSH | MI_READ_FLUSH); 539 OUT_RING(MI_FLUSH | MI_READ_FLUSH);
532 OUT_RING(0); 540 OUT_RING(0);
533 ADVANCE_LP_RING();
534 541
535 BEGIN_LP_RING(6);
536 OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP); 542 OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
537 OUT_RING(0); 543 OUT_RING(0);
538 if (dev_priv->current_page == 0) { 544 if (dev_priv->current_page == 0) {
@@ -543,33 +549,32 @@ static int i915_dispatch_flip(struct drm_device * dev)
543 dev_priv->current_page = 0; 549 dev_priv->current_page = 0;
544 } 550 }
545 OUT_RING(0); 551 OUT_RING(0);
546 ADVANCE_LP_RING();
547 552
548 BEGIN_LP_RING(2);
549 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP); 553 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
550 OUT_RING(0); 554 OUT_RING(0);
555
551 ADVANCE_LP_RING(); 556 ADVANCE_LP_RING();
552 557
553 master_priv->sarea_priv->last_enqueue = dev_priv->counter++; 558 master_priv->sarea_priv->last_enqueue = dev_priv->counter++;
554 559
555 BEGIN_LP_RING(4); 560 if (BEGIN_LP_RING(4) == 0) {
556 OUT_RING(MI_STORE_DWORD_INDEX); 561 OUT_RING(MI_STORE_DWORD_INDEX);
557 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 562 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
558 OUT_RING(dev_priv->counter); 563 OUT_RING(dev_priv->counter);
559 OUT_RING(0); 564 OUT_RING(0);
560 ADVANCE_LP_RING(); 565 ADVANCE_LP_RING();
566 }
561 567
562 master_priv->sarea_priv->pf_current_page = dev_priv->current_page; 568 master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
563 return 0; 569 return 0;
564} 570}
565 571
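
The three separate begin/advance pairs in i915_dispatch_flip() are merged into a single reservation: 2 dwords for the flush, 6 for the display-buffer info and 2 for the wait-for-event add up to the BEGIN_LP_RING(10) above. Stated as a trivial standalone check:

#include <assert.h>

int main(void)
{
	int flush = 2, dbuf_info = 6, wait_event = 2;
	assert(flush + dbuf_info + wait_event == 10);
	return 0;
}
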
566static int i915_quiescent(struct drm_device * dev) 572static int i915_quiescent(struct drm_device *dev)
567{ 573{
568 drm_i915_private_t *dev_priv = dev->dev_private; 574 struct intel_ring_buffer *ring = LP_RING(dev->dev_private);
569 575
570 i915_kernel_lost_context(dev); 576 i915_kernel_lost_context(dev);
571 return intel_wait_ring_buffer(dev, &dev_priv->render_ring, 577 return intel_wait_ring_buffer(ring, ring->size - 8);
572 dev_priv->render_ring.size - 8);
573} 578}
574 579
575static int i915_flush_ioctl(struct drm_device *dev, void *data, 580static int i915_flush_ioctl(struct drm_device *dev, void *data,
@@ -768,9 +773,15 @@ static int i915_getparam(struct drm_device *dev, void *data,
768 case I915_PARAM_HAS_BLT: 773 case I915_PARAM_HAS_BLT:
769 value = HAS_BLT(dev); 774 value = HAS_BLT(dev);
770 break; 775 break;
776 case I915_PARAM_HAS_RELAXED_FENCING:
777 value = 1;
778 break;
771 case I915_PARAM_HAS_COHERENT_RINGS: 779 case I915_PARAM_HAS_COHERENT_RINGS:
772 value = 1; 780 value = 1;
773 break; 781 break;
782 case I915_PARAM_HAS_EXEC_CONSTANTS:
783 value = INTEL_INFO(dev)->gen >= 4;
784 break;
774 default: 785 default:
775 DRM_DEBUG_DRIVER("Unknown parameter %d\n", 786 DRM_DEBUG_DRIVER("Unknown parameter %d\n",
776 param->param); 787 param->param);
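
Userspace probes the two new parameters through the ordinary i915 getparam ioctl. A hedged sketch of that query, assuming the usual uapi/libdrm headers are installed and trimming error handling:

#include <stdio.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);
	int value = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_HAS_RELAXED_FENCING,
		.value = &value,
	};

	if (fd >= 0 && ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
		printf("relaxed fencing: %d\n", value);
	return 0;
}
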
@@ -826,7 +837,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
826{ 837{
827 drm_i915_private_t *dev_priv = dev->dev_private; 838 drm_i915_private_t *dev_priv = dev->dev_private;
828 drm_i915_hws_addr_t *hws = data; 839 drm_i915_hws_addr_t *hws = data;
829 struct intel_ring_buffer *ring = &dev_priv->render_ring; 840 struct intel_ring_buffer *ring = LP_RING(dev_priv);
830 841
831 if (!I915_NEED_GFX_HWS(dev)) 842 if (!I915_NEED_GFX_HWS(dev))
832 return -EINVAL; 843 return -EINVAL;
@@ -1005,73 +1016,47 @@ intel_teardown_mchbar(struct drm_device *dev)
1005#define PTE_VALID (1 << 0) 1016#define PTE_VALID (1 << 0)
1006 1017
1007/** 1018/**
1008 * i915_gtt_to_phys - take a GTT address and turn it into a physical one 1019 * i915_stolen_to_phys - take an offset into stolen memory and turn it into
1020 * a physical one
1009 * @dev: drm device 1021 * @dev: drm device
1010 * @gtt_addr: address to translate 1022 * @offset: address to translate
1011 * 1023 *
1012 * Some chip functions require allocations from stolen space but need the 1024 * Some chip functions require allocations from stolen space and need the
1013 * physical address of the memory in question. We use this routine 1025 * physical address of the memory in question.
1014 * to get a physical address suitable for register programming from a given
1015 * GTT address.
1016 */ 1026 */
1017static unsigned long i915_gtt_to_phys(struct drm_device *dev, 1027static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
1018 unsigned long gtt_addr)
1019{ 1028{
1020 unsigned long *gtt; 1029 struct drm_i915_private *dev_priv = dev->dev_private;
1021 unsigned long entry, phys; 1030 struct pci_dev *pdev = dev_priv->bridge_dev;
1022 int gtt_bar = IS_GEN2(dev) ? 1 : 0; 1031 u32 base;
1023 int gtt_offset, gtt_size; 1032
1024 1033#if 0
1025 if (INTEL_INFO(dev)->gen >= 4) { 1034 /* On the machines I have tested the Graphics Base of Stolen Memory
1026 if (IS_G4X(dev) || INTEL_INFO(dev)->gen > 4) { 1035 * is unreliable, so compute the base by subtracting the stolen memory
1027 gtt_offset = 2*1024*1024; 1036 * from the Top of Low Usable DRAM which is where the BIOS places
1028 gtt_size = 2*1024*1024; 1037 * the graphics stolen memory.
1029 } else { 1038 */
1030 gtt_offset = 512*1024; 1039 if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
1031 gtt_size = 512*1024; 1040 /* top 32bits are reserved = 0 */
1032 } 1041 pci_read_config_dword(pdev, 0xA4, &base);
1033 } else { 1042 } else {
1034 gtt_bar = 3; 1043 /* XXX presume 8xx is the same as i915 */
1035 gtt_offset = 0; 1044 pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
1036 gtt_size = pci_resource_len(dev->pdev, gtt_bar); 1045 }
1037 } 1046#else
1038 1047 if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
1039 gtt = ioremap_wc(pci_resource_start(dev->pdev, gtt_bar) + gtt_offset, 1048 u16 val;
1040 gtt_size); 1049 pci_read_config_word(pdev, 0xb0, &val);
1041 if (!gtt) { 1050 base = val >> 4 << 20;
1042 DRM_ERROR("ioremap of GTT failed\n"); 1051 } else {
1043 return 0; 1052 u8 val;
1044 } 1053 pci_read_config_byte(pdev, 0x9c, &val);
1045 1054 base = val >> 3 << 27;
1046 entry = *(volatile u32 *)(gtt + (gtt_addr / 1024));
1047
1048 DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);
1049
1050 /* Mask out these reserved bits on this hardware. */
1051 if (INTEL_INFO(dev)->gen < 4 && !IS_G33(dev))
1052 entry &= ~PTE_ADDRESS_MASK_HIGH;
1053
1054 /* If it's not a mapping type we know, then bail. */
1055 if ((entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_UNCACHED &&
1056 (entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_CACHED) {
1057 iounmap(gtt);
1058 return 0;
1059 }
1060
1061 if (!(entry & PTE_VALID)) {
1062 DRM_ERROR("bad GTT entry in stolen space\n");
1063 iounmap(gtt);
1064 return 0;
1065 } 1055 }
1056 base -= dev_priv->mm.gtt->stolen_size;
1057#endif
1066 1058
1067 iounmap(gtt); 1059 return base + offset;
1068
1069 phys =(entry & PTE_ADDRESS_MASK) |
1070 ((uint64_t)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4));
1071
1072 DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys);
1073
1074 return phys;
1075} 1060}
1076 1061
1077static void i915_warn_stolen(struct drm_device *dev) 1062static void i915_warn_stolen(struct drm_device *dev)
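
i915_stolen_to_phys() replaces the old GTT-PTE walk: it reads the stolen-memory base straight from bridge config space, a 16-bit field at offset 0xb0 on gen4+/G33 and an 8-bit field at 0x9c on older parts, then subtracts the stolen size before adding the offset. The two shift decodes restated standalone (sample config values invented):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t val16 = 0x7f0;          /* as read from config offset 0xb0 */
	uint32_t base_new = (uint32_t)(val16 >> 4) << 20;  /* 1 MiB granules */

	uint8_t val8 = 0x18;             /* as read from config offset 0x9c */
	uint32_t base_old = (uint32_t)(val8 >> 3) << 27;   /* 128 MiB granules */

	printf("gen4+/G33 base: 0x%08x\n", base_new);
	printf("older base:     0x%08x\n", base_old);
	return 0;
}
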
@@ -1087,54 +1072,35 @@ static void i915_setup_compression(struct drm_device *dev, int size)
1087 unsigned long cfb_base; 1072 unsigned long cfb_base;
1088 unsigned long ll_base = 0; 1073 unsigned long ll_base = 0;
1089 1074
1090 /* Leave 1M for line length buffer & misc. */ 1075 compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
1091 compressed_fb = drm_mm_search_free(&dev_priv->mm.vram, size, 4096, 0); 1076 if (compressed_fb)
1092 if (!compressed_fb) { 1077 compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
1093 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; 1078 if (!compressed_fb)
1094 i915_warn_stolen(dev); 1079 goto err;
1095 return;
1096 }
1097 1080
1098 compressed_fb = drm_mm_get_block(compressed_fb, size, 4096); 1081 cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
1099 if (!compressed_fb) { 1082 if (!cfb_base)
1100 i915_warn_stolen(dev); 1083 goto err_fb;
1101 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
1102 return;
1103 }
1104 1084
1105 cfb_base = i915_gtt_to_phys(dev, compressed_fb->start); 1085 if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) {
1106 if (!cfb_base) { 1086 compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
1107 DRM_ERROR("failed to get stolen phys addr, disabling FBC\n"); 1087 4096, 4096, 0);
1108 drm_mm_put_block(compressed_fb); 1088 if (compressed_llb)
1109 } 1089 compressed_llb = drm_mm_get_block(compressed_llb,
1090 4096, 4096);
1091 if (!compressed_llb)
1092 goto err_fb;
1110 1093
1111 if (!(IS_GM45(dev) || IS_IRONLAKE_M(dev))) { 1094 ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
1112 compressed_llb = drm_mm_search_free(&dev_priv->mm.vram, 4096, 1095 if (!ll_base)
1113 4096, 0); 1096 goto err_llb;
1114 if (!compressed_llb) {
1115 i915_warn_stolen(dev);
1116 return;
1117 }
1118
1119 compressed_llb = drm_mm_get_block(compressed_llb, 4096, 4096);
1120 if (!compressed_llb) {
1121 i915_warn_stolen(dev);
1122 return;
1123 }
1124
1125 ll_base = i915_gtt_to_phys(dev, compressed_llb->start);
1126 if (!ll_base) {
1127 DRM_ERROR("failed to get stolen phys addr, disabling FBC\n");
1128 drm_mm_put_block(compressed_fb);
1129 drm_mm_put_block(compressed_llb);
1130 }
1131 } 1097 }
1132 1098
1133 dev_priv->cfb_size = size; 1099 dev_priv->cfb_size = size;
1134 1100
1135 intel_disable_fbc(dev); 1101 intel_disable_fbc(dev);
1136 dev_priv->compressed_fb = compressed_fb; 1102 dev_priv->compressed_fb = compressed_fb;
1137 if (IS_IRONLAKE_M(dev)) 1103 if (HAS_PCH_SPLIT(dev))
1138 I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start); 1104 I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
1139 else if (IS_GM45(dev)) { 1105 else if (IS_GM45(dev)) {
1140 I915_WRITE(DPFC_CB_BASE, compressed_fb->start); 1106 I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
@@ -1144,8 +1110,17 @@ static void i915_setup_compression(struct drm_device *dev, int size)
1144 dev_priv->compressed_llb = compressed_llb; 1110 dev_priv->compressed_llb = compressed_llb;
1145 } 1111 }
1146 1112
1147 DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base, 1113 DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
1148 ll_base, size >> 20); 1114 cfb_base, ll_base, size >> 20);
1115 return;
1116
1117err_llb:
1118 drm_mm_put_block(compressed_llb);
1119err_fb:
1120 drm_mm_put_block(compressed_fb);
1121err:
1122 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
1123 i915_warn_stolen(dev);
1149} 1124}
1150 1125
1151static void i915_cleanup_compression(struct drm_device *dev) 1126static void i915_cleanup_compression(struct drm_device *dev)
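
The rewritten i915_setup_compression() adopts the kernel's reverse-order goto unwind: each label releases only what was acquired before the failing step, so the FBC buffers are freed exactly once on any exit path. The pattern in generic standalone form:

#include <stdio.h>
#include <stdlib.h>

static int setup(void)
{
	char *fb = NULL, *llb = NULL;

	fb = malloc(4096);
	if (!fb)
		goto err;
	llb = malloc(4096);
	if (!llb)
		goto err_fb;

	printf("both buffers allocated\n");
	free(llb);
	free(fb);
	return 0;

err_fb:
	free(fb);
err:
	fprintf(stderr, "setup failed\n");
	return -1;
}

int main(void) { return setup() ? 1 : 0; }
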
@@ -1176,12 +1151,16 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
1176 pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; 1151 pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
1177 if (state == VGA_SWITCHEROO_ON) { 1152 if (state == VGA_SWITCHEROO_ON) {
1178 printk(KERN_INFO "i915: switched on\n"); 1153 printk(KERN_INFO "i915: switched on\n");
1154 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1179 /* i915 resume handler doesn't set to D0 */ 1155 /* i915 resume handler doesn't set to D0 */
1180 pci_set_power_state(dev->pdev, PCI_D0); 1156 pci_set_power_state(dev->pdev, PCI_D0);
1181 i915_resume(dev); 1157 i915_resume(dev);
1158 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1182 } else { 1159 } else {
1183 printk(KERN_ERR "i915: switched off\n"); 1160 printk(KERN_ERR "i915: switched off\n");
1161 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1184 i915_suspend(dev, pmm); 1162 i915_suspend(dev, pmm);
1163 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1185 } 1164 }
1186} 1165}
1187 1166
@@ -1196,17 +1175,20 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
1196 return can_switch; 1175 return can_switch;
1197} 1176}
1198 1177
1199static int i915_load_modeset_init(struct drm_device *dev, 1178static int i915_load_modeset_init(struct drm_device *dev)
1200 unsigned long prealloc_size,
1201 unsigned long agp_size)
1202{ 1179{
1203 struct drm_i915_private *dev_priv = dev->dev_private; 1180 struct drm_i915_private *dev_priv = dev->dev_private;
1181 unsigned long prealloc_size, gtt_size, mappable_size;
1204 int ret = 0; 1182 int ret = 0;
1205 1183
1206 /* Basic memrange allocator for stolen space (aka mm.vram) */ 1184 prealloc_size = dev_priv->mm.gtt->stolen_size;
1207 drm_mm_init(&dev_priv->mm.vram, 0, prealloc_size); 1185 gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
1186 mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
1208 1187
1209 /* Let GEM Manage from end of prealloc space to end of aperture. 1188 /* Basic memrange allocator for stolen space */
1189 drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
1190
1191 /* Let GEM Manage all of the aperture.
1210 * 1192 *
1211 * However, leave one page at the end still bound to the scratch page. 1193 * However, leave one page at the end still bound to the scratch page.
1212 * There are a number of places where the hardware apparently 1194 * There are a number of places where the hardware apparently
@@ -1215,7 +1197,7 @@ static int i915_load_modeset_init(struct drm_device *dev,
1215 * at the last page of the aperture. One page should be enough to 1197 * at the last page of the aperture. One page should be enough to
1216 * keep any prefetching inside of the aperture. 1198 * keep any prefetching inside of the aperture.
1217 */ 1199 */
1218 i915_gem_do_init(dev, prealloc_size, agp_size - 4096); 1200 i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);
1219 1201
1220 mutex_lock(&dev->struct_mutex); 1202 mutex_lock(&dev->struct_mutex);
1221 ret = i915_gem_init_ringbuffer(dev); 1203 ret = i915_gem_init_ringbuffer(dev);
@@ -1227,16 +1209,17 @@ static int i915_load_modeset_init(struct drm_device *dev,
1227 if (I915_HAS_FBC(dev) && i915_powersave) { 1209 if (I915_HAS_FBC(dev) && i915_powersave) {
1228 int cfb_size; 1210 int cfb_size;
1229 1211
1230 /* Try to get an 8M buffer... */ 1212 /* Leave 1M for line length buffer & misc. */
1231 if (prealloc_size > (9*1024*1024)) 1213
1232 cfb_size = 8*1024*1024; 1214 /* Try to get a 32M buffer... */
1215 if (prealloc_size > (36*1024*1024))
1216 cfb_size = 32*1024*1024;
1233 else /* fall back to 7/8 of the stolen space */ 1217 else /* fall back to 7/8 of the stolen space */
1234 cfb_size = prealloc_size * 7 / 8; 1218 cfb_size = prealloc_size * 7 / 8;
1235 i915_setup_compression(dev, cfb_size); 1219 i915_setup_compression(dev, cfb_size);
1236 } 1220 }
1237 1221
1238 /* Allow hardware batchbuffers unless told otherwise. 1222 /* Allow hardware batchbuffers unless told otherwise. */
1239 */
1240 dev_priv->allow_batchbuffer = 1; 1223 dev_priv->allow_batchbuffer = 1;
1241 1224
1242 ret = intel_parse_bios(dev); 1225 ret = intel_parse_bios(dev);
@@ -1252,6 +1235,7 @@ static int i915_load_modeset_init(struct drm_device *dev,
1252 1235
1253 ret = vga_switcheroo_register_client(dev->pdev, 1236 ret = vga_switcheroo_register_client(dev->pdev,
1254 i915_switcheroo_set_state, 1237 i915_switcheroo_set_state,
1238 NULL,
1255 i915_switcheroo_can_switch); 1239 i915_switcheroo_can_switch);
1256 if (ret) 1240 if (ret)
1257 goto cleanup_vga_client; 1241 goto cleanup_vga_client;
@@ -1426,152 +1410,12 @@ static void i915_ironlake_get_mem_freq(struct drm_device *dev)
1426 } 1410 }
1427} 1411}
1428 1412
1429struct v_table { 1413static const struct cparams {
1430 u8 vid; 1414 u16 i;
1431 unsigned long vd; /* in .1 mil */ 1415 u16 t;
1432 unsigned long vm; /* in .1 mil */ 1416 u16 m;
1433 u8 pvid; 1417 u16 c;
1434}; 1418} cparams[] = {
1435
1436static struct v_table v_table[] = {
1437 { 0, 16125, 15000, 0x7f, },
1438 { 1, 16000, 14875, 0x7e, },
1439 { 2, 15875, 14750, 0x7d, },
1440 { 3, 15750, 14625, 0x7c, },
1441 { 4, 15625, 14500, 0x7b, },
1442 { 5, 15500, 14375, 0x7a, },
1443 { 6, 15375, 14250, 0x79, },
1444 { 7, 15250, 14125, 0x78, },
1445 { 8, 15125, 14000, 0x77, },
1446 { 9, 15000, 13875, 0x76, },
1447 { 10, 14875, 13750, 0x75, },
1448 { 11, 14750, 13625, 0x74, },
1449 { 12, 14625, 13500, 0x73, },
1450 { 13, 14500, 13375, 0x72, },
1451 { 14, 14375, 13250, 0x71, },
1452 { 15, 14250, 13125, 0x70, },
1453 { 16, 14125, 13000, 0x6f, },
1454 { 17, 14000, 12875, 0x6e, },
1455 { 18, 13875, 12750, 0x6d, },
1456 { 19, 13750, 12625, 0x6c, },
1457 { 20, 13625, 12500, 0x6b, },
1458 { 21, 13500, 12375, 0x6a, },
1459 { 22, 13375, 12250, 0x69, },
1460 { 23, 13250, 12125, 0x68, },
1461 { 24, 13125, 12000, 0x67, },
1462 { 25, 13000, 11875, 0x66, },
1463 { 26, 12875, 11750, 0x65, },
1464 { 27, 12750, 11625, 0x64, },
1465 { 28, 12625, 11500, 0x63, },
1466 { 29, 12500, 11375, 0x62, },
1467 { 30, 12375, 11250, 0x61, },
1468 { 31, 12250, 11125, 0x60, },
1469 { 32, 12125, 11000, 0x5f, },
1470 { 33, 12000, 10875, 0x5e, },
1471 { 34, 11875, 10750, 0x5d, },
1472 { 35, 11750, 10625, 0x5c, },
1473 { 36, 11625, 10500, 0x5b, },
1474 { 37, 11500, 10375, 0x5a, },
1475 { 38, 11375, 10250, 0x59, },
1476 { 39, 11250, 10125, 0x58, },
1477 { 40, 11125, 10000, 0x57, },
1478 { 41, 11000, 9875, 0x56, },
1479 { 42, 10875, 9750, 0x55, },
1480 { 43, 10750, 9625, 0x54, },
1481 { 44, 10625, 9500, 0x53, },
1482 { 45, 10500, 9375, 0x52, },
1483 { 46, 10375, 9250, 0x51, },
1484 { 47, 10250, 9125, 0x50, },
1485 { 48, 10125, 9000, 0x4f, },
1486 { 49, 10000, 8875, 0x4e, },
1487 { 50, 9875, 8750, 0x4d, },
1488 { 51, 9750, 8625, 0x4c, },
1489 { 52, 9625, 8500, 0x4b, },
1490 { 53, 9500, 8375, 0x4a, },
1491 { 54, 9375, 8250, 0x49, },
1492 { 55, 9250, 8125, 0x48, },
1493 { 56, 9125, 8000, 0x47, },
1494 { 57, 9000, 7875, 0x46, },
1495 { 58, 8875, 7750, 0x45, },
1496 { 59, 8750, 7625, 0x44, },
1497 { 60, 8625, 7500, 0x43, },
1498 { 61, 8500, 7375, 0x42, },
1499 { 62, 8375, 7250, 0x41, },
1500 { 63, 8250, 7125, 0x40, },
1501 { 64, 8125, 7000, 0x3f, },
1502 { 65, 8000, 6875, 0x3e, },
1503 { 66, 7875, 6750, 0x3d, },
1504 { 67, 7750, 6625, 0x3c, },
1505 { 68, 7625, 6500, 0x3b, },
1506 { 69, 7500, 6375, 0x3a, },
1507 { 70, 7375, 6250, 0x39, },
1508 { 71, 7250, 6125, 0x38, },
1509 { 72, 7125, 6000, 0x37, },
1510 { 73, 7000, 5875, 0x36, },
1511 { 74, 6875, 5750, 0x35, },
1512 { 75, 6750, 5625, 0x34, },
1513 { 76, 6625, 5500, 0x33, },
1514 { 77, 6500, 5375, 0x32, },
1515 { 78, 6375, 5250, 0x31, },
1516 { 79, 6250, 5125, 0x30, },
1517 { 80, 6125, 5000, 0x2f, },
1518 { 81, 6000, 4875, 0x2e, },
1519 { 82, 5875, 4750, 0x2d, },
1520 { 83, 5750, 4625, 0x2c, },
1521 { 84, 5625, 4500, 0x2b, },
1522 { 85, 5500, 4375, 0x2a, },
1523 { 86, 5375, 4250, 0x29, },
1524 { 87, 5250, 4125, 0x28, },
1525 { 88, 5125, 4000, 0x27, },
1526 { 89, 5000, 3875, 0x26, },
1527 { 90, 4875, 3750, 0x25, },
1528 { 91, 4750, 3625, 0x24, },
1529 { 92, 4625, 3500, 0x23, },
1530 { 93, 4500, 3375, 0x22, },
1531 { 94, 4375, 3250, 0x21, },
1532 { 95, 4250, 3125, 0x20, },
1533 { 96, 4125, 3000, 0x1f, },
1534 { 97, 4125, 3000, 0x1e, },
1535 { 98, 4125, 3000, 0x1d, },
1536 { 99, 4125, 3000, 0x1c, },
1537 { 100, 4125, 3000, 0x1b, },
1538 { 101, 4125, 3000, 0x1a, },
1539 { 102, 4125, 3000, 0x19, },
1540 { 103, 4125, 3000, 0x18, },
1541 { 104, 4125, 3000, 0x17, },
1542 { 105, 4125, 3000, 0x16, },
1543 { 106, 4125, 3000, 0x15, },
1544 { 107, 4125, 3000, 0x14, },
1545 { 108, 4125, 3000, 0x13, },
1546 { 109, 4125, 3000, 0x12, },
1547 { 110, 4125, 3000, 0x11, },
1548 { 111, 4125, 3000, 0x10, },
1549 { 112, 4125, 3000, 0x0f, },
1550 { 113, 4125, 3000, 0x0e, },
1551 { 114, 4125, 3000, 0x0d, },
1552 { 115, 4125, 3000, 0x0c, },
1553 { 116, 4125, 3000, 0x0b, },
1554 { 117, 4125, 3000, 0x0a, },
1555 { 118, 4125, 3000, 0x09, },
1556 { 119, 4125, 3000, 0x08, },
1557 { 120, 1125, 0, 0x07, },
1558 { 121, 1000, 0, 0x06, },
1559 { 122, 875, 0, 0x05, },
1560 { 123, 750, 0, 0x04, },
1561 { 124, 625, 0, 0x03, },
1562 { 125, 500, 0, 0x02, },
1563 { 126, 375, 0, 0x01, },
1564 { 127, 0, 0, 0x00, },
1565};
1566
1567struct cparams {
1568 int i;
1569 int t;
1570 int m;
1571 int c;
1572};
1573
1574static struct cparams cparams[] = {
1575 { 1, 1333, 301, 28664 }, 1419 { 1, 1333, 301, 28664 },
1576 { 1, 1066, 294, 24460 }, 1420 { 1, 1066, 294, 24460 },
1577 { 1, 800, 294, 25192 }, 1421 { 1, 800, 294, 25192 },
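
[Editor's note: besides dropping the 128-entry v_table (it reappears, reworked, inside pvid_to_extvid below), the hunk above makes cparams static const with u16 fields, moving the table to read-only data and halving each entry. A self-contained illustration of the size effect, assuming a 32-bit int:]

	#include <stdint.h>
	#include <stdio.h>

	struct cparams_old { int i, t, m, c; };       /* 4 x 4 = 16 bytes */
	struct cparams_new { uint16_t i, t, m, c; };  /* 4 x 2 =  8 bytes */

	int main(void)
	{
		printf("%zu -> %zu bytes per entry\n",
		       sizeof(struct cparams_old), sizeof(struct cparams_new));
		return 0;
	}
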
@@ -1637,21 +1481,145 @@ unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
1637 return ((m * x) / 127) - b; 1481 return ((m * x) / 127) - b;
1638} 1482}
1639 1483
1640static unsigned long pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) 1484static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
1641{ 1485{
1642 unsigned long val = 0; 1486 static const struct v_table {
1643 int i; 1487 u16 vd; /* in .1 mil */
1644 1488 u16 vm; /* in .1 mil */
1645 for (i = 0; i < ARRAY_SIZE(v_table); i++) { 1489 } v_table[] = {
1646 if (v_table[i].pvid == pxvid) { 1490 { 0, 0, },
1647 if (IS_MOBILE(dev_priv->dev)) 1491 { 375, 0, },
1648 val = v_table[i].vm; 1492 { 500, 0, },
1649 else 1493 { 625, 0, },
1650 val = v_table[i].vd; 1494 { 750, 0, },
1651 } 1495 { 875, 0, },
1652 } 1496 { 1000, 0, },
1653 1497 { 1125, 0, },
1654 return val; 1498 { 4125, 3000, },
1499 { 4125, 3000, },
1500 { 4125, 3000, },
1501 { 4125, 3000, },
1502 { 4125, 3000, },
1503 { 4125, 3000, },
1504 { 4125, 3000, },
1505 { 4125, 3000, },
1506 { 4125, 3000, },
1507 { 4125, 3000, },
1508 { 4125, 3000, },
1509 { 4125, 3000, },
1510 { 4125, 3000, },
1511 { 4125, 3000, },
1512 { 4125, 3000, },
1513 { 4125, 3000, },
1514 { 4125, 3000, },
1515 { 4125, 3000, },
1516 { 4125, 3000, },
1517 { 4125, 3000, },
1518 { 4125, 3000, },
1519 { 4125, 3000, },
1520 { 4125, 3000, },
1521 { 4125, 3000, },
1522 { 4250, 3125, },
1523 { 4375, 3250, },
1524 { 4500, 3375, },
1525 { 4625, 3500, },
1526 { 4750, 3625, },
1527 { 4875, 3750, },
1528 { 5000, 3875, },
1529 { 5125, 4000, },
1530 { 5250, 4125, },
1531 { 5375, 4250, },
1532 { 5500, 4375, },
1533 { 5625, 4500, },
1534 { 5750, 4625, },
1535 { 5875, 4750, },
1536 { 6000, 4875, },
1537 { 6125, 5000, },
1538 { 6250, 5125, },
1539 { 6375, 5250, },
1540 { 6500, 5375, },
1541 { 6625, 5500, },
1542 { 6750, 5625, },
1543 { 6875, 5750, },
1544 { 7000, 5875, },
1545 { 7125, 6000, },
1546 { 7250, 6125, },
1547 { 7375, 6250, },
1548 { 7500, 6375, },
1549 { 7625, 6500, },
1550 { 7750, 6625, },
1551 { 7875, 6750, },
1552 { 8000, 6875, },
1553 { 8125, 7000, },
1554 { 8250, 7125, },
1555 { 8375, 7250, },
1556 { 8500, 7375, },
1557 { 8625, 7500, },
1558 { 8750, 7625, },
1559 { 8875, 7750, },
1560 { 9000, 7875, },
1561 { 9125, 8000, },
1562 { 9250, 8125, },
1563 { 9375, 8250, },
1564 { 9500, 8375, },
1565 { 9625, 8500, },
1566 { 9750, 8625, },
1567 { 9875, 8750, },
1568 { 10000, 8875, },
1569 { 10125, 9000, },
1570 { 10250, 9125, },
1571 { 10375, 9250, },
1572 { 10500, 9375, },
1573 { 10625, 9500, },
1574 { 10750, 9625, },
1575 { 10875, 9750, },
1576 { 11000, 9875, },
1577 { 11125, 10000, },
1578 { 11250, 10125, },
1579 { 11375, 10250, },
1580 { 11500, 10375, },
1581 { 11625, 10500, },
1582 { 11750, 10625, },
1583 { 11875, 10750, },
1584 { 12000, 10875, },
1585 { 12125, 11000, },
1586 { 12250, 11125, },
1587 { 12375, 11250, },
1588 { 12500, 11375, },
1589 { 12625, 11500, },
1590 { 12750, 11625, },
1591 { 12875, 11750, },
1592 { 13000, 11875, },
1593 { 13125, 12000, },
1594 { 13250, 12125, },
1595 { 13375, 12250, },
1596 { 13500, 12375, },
1597 { 13625, 12500, },
1598 { 13750, 12625, },
1599 { 13875, 12750, },
1600 { 14000, 12875, },
1601 { 14125, 13000, },
1602 { 14250, 13125, },
1603 { 14375, 13250, },
1604 { 14500, 13375, },
1605 { 14625, 13500, },
1606 { 14750, 13625, },
1607 { 14875, 13750, },
1608 { 15000, 13875, },
1609 { 15125, 14000, },
1610 { 15250, 14125, },
1611 { 15375, 14250, },
1612 { 15500, 14375, },
1613 { 15625, 14500, },
1614 { 15750, 14625, },
1615 { 15875, 14750, },
1616 { 16000, 14875, },
1617 { 16125, 15000, },
1618 };
1619 if (dev_priv->info->is_mobile)
1620 return v_table[pxvid].vm;
1621 else
1622 return v_table[pxvid].vd;
1655} 1623}
1656 1624
1657void i915_update_gfx_val(struct drm_i915_private *dev_priv) 1625void i915_update_gfx_val(struct drm_i915_private *dev_priv)
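
[Editor's note: the rewrite above turns pvid_to_extvid from a linear scan over a module-level table, matching on a stored .pvid field, into a direct index: the new local table is ordered so the PXVID value itself is the row number. A stand-alone model of the two lookup shapes, with illustrative names; units are ".1 mil" as in the source comment.]

	#include <stdint.h>

	struct vid_entry { uint16_t vd, vm; };

	/* Old shape: O(n) scan keyed on a per-row pvid. */
	static uint16_t lookup_scan(const struct vid_entry *t, const uint8_t *pvid,
				    int n, uint8_t key, int mobile)
	{
		uint16_t val = 0;
		for (int i = 0; i < n; i++)
			if (pvid[i] == key)
				val = mobile ? t[i].vm : t[i].vd;
		return val;
	}

	/* New shape: O(1); the table is ordered by pxvid. */
	static uint16_t lookup_index(const struct vid_entry *t, uint8_t key,
				     int mobile)
	{
		return mobile ? t[key].vm : t[key].vd;
	}
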
@@ -1905,9 +1873,9 @@ ips_ping_for_i915_load(void)
1905int i915_driver_load(struct drm_device *dev, unsigned long flags) 1873int i915_driver_load(struct drm_device *dev, unsigned long flags)
1906{ 1874{
1907 struct drm_i915_private *dev_priv; 1875 struct drm_i915_private *dev_priv;
1908 resource_size_t base, size;
1909 int ret = 0, mmio_bar; 1876 int ret = 0, mmio_bar;
1910 uint32_t agp_size, prealloc_size; 1877 uint32_t agp_size;
1878
1911 /* i915 has 4 more counters */ 1879 /* i915 has 4 more counters */
1912 dev->counters += 4; 1880 dev->counters += 4;
1913 dev->types[6] = _DRM_STAT_IRQ; 1881 dev->types[6] = _DRM_STAT_IRQ;
@@ -1923,11 +1891,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1923 dev_priv->dev = dev; 1891 dev_priv->dev = dev;
1924 dev_priv->info = (struct intel_device_info *) flags; 1892 dev_priv->info = (struct intel_device_info *) flags;
1925 1893
1926 /* Add register map (needed for suspend/resume) */
1927 mmio_bar = IS_GEN2(dev) ? 1 : 0;
1928 base = pci_resource_start(dev->pdev, mmio_bar);
1929 size = pci_resource_len(dev->pdev, mmio_bar);
1930
1931 if (i915_get_bridge_dev(dev)) { 1894 if (i915_get_bridge_dev(dev)) {
1932 ret = -EIO; 1895 ret = -EIO;
1933 goto free_priv; 1896 goto free_priv;
@@ -1937,16 +1900,25 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1937 if (IS_GEN2(dev)) 1900 if (IS_GEN2(dev))
1938 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); 1901 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
1939 1902
1940 dev_priv->regs = ioremap(base, size); 1903 mmio_bar = IS_GEN2(dev) ? 1 : 0;
1904 dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
1941 if (!dev_priv->regs) { 1905 if (!dev_priv->regs) {
1942 DRM_ERROR("failed to map registers\n"); 1906 DRM_ERROR("failed to map registers\n");
1943 ret = -EIO; 1907 ret = -EIO;
1944 goto put_bridge; 1908 goto put_bridge;
1945 } 1909 }
1946 1910
1911 dev_priv->mm.gtt = intel_gtt_get();
1912 if (!dev_priv->mm.gtt) {
1913 DRM_ERROR("Failed to initialize GTT\n");
1914 ret = -ENODEV;
1915 goto out_iomapfree;
1916 }
1917
1918 agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
1919
1947 dev_priv->mm.gtt_mapping = 1920 dev_priv->mm.gtt_mapping =
1948 io_mapping_create_wc(dev->agp->base, 1921 io_mapping_create_wc(dev->agp->base, agp_size);
1949 dev->agp->agp_info.aper_size * 1024*1024);
1950 if (dev_priv->mm.gtt_mapping == NULL) { 1922 if (dev_priv->mm.gtt_mapping == NULL) {
1951 ret = -EIO; 1923 ret = -EIO;
1952 goto out_rmmap; 1924 goto out_rmmap;
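
[Editor's note: the mapping change above replaces a three-step ioremap of the MMIO BAR with pci_iomap(), which looks up the BAR's start and length itself (a maxlen of 0 maps the whole BAR) and pairs with pci_iounmap() in the error and unload paths further down. Side by side, as a fragment sketch:]

	/* before: caller fetches the resource and maps it by hand */
	base = pci_resource_start(pdev, mmio_bar);
	size = pci_resource_len(pdev, mmio_bar);
	regs = ioremap(base, size);
	/* ... */
	iounmap(regs);

	/* after: one call each way; 0 means map the entire BAR */
	regs = pci_iomap(pdev, mmio_bar, 0);
	/* ... */
	pci_iounmap(pdev, regs);
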
@@ -1958,24 +1930,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1958 * MTRR if present. Even if a UC MTRR isn't present. 1930 * MTRR if present. Even if a UC MTRR isn't present.
1959 */ 1931 */
1960 dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base, 1932 dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base,
1961 dev->agp->agp_info.aper_size * 1933 agp_size,
1962 1024 * 1024,
1963 MTRR_TYPE_WRCOMB, 1); 1934 MTRR_TYPE_WRCOMB, 1);
1964 if (dev_priv->mm.gtt_mtrr < 0) { 1935 if (dev_priv->mm.gtt_mtrr < 0) {
1965 DRM_INFO("MTRR allocation failed. Graphics " 1936 DRM_INFO("MTRR allocation failed. Graphics "
1966 "performance may suffer.\n"); 1937 "performance may suffer.\n");
1967 } 1938 }
1968 1939
1969 dev_priv->mm.gtt = intel_gtt_get();
1970 if (!dev_priv->mm.gtt) {
1971 DRM_ERROR("Failed to initialize GTT\n");
1972 ret = -ENODEV;
1973 goto out_iomapfree;
1974 }
1975
1976 prealloc_size = dev_priv->mm.gtt->gtt_stolen_entries << PAGE_SHIFT;
1977 agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
1978
1979 /* The i915 workqueue is primarily used for batched retirement of 1940 /* The i915 workqueue is primarily used for batched retirement of
1980 * requests (and thus managing bo) once the task has been completed 1941 * requests (and thus managing bo) once the task has been completed
1981 * by the GPU. i915_gem_retire_requests() is called directly when we 1942 * by the GPU. i915_gem_retire_requests() is called directly when we
@@ -1983,7 +1944,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1983 * bo. 1944 * bo.
1984 * 1945 *
1985 * It is also used for periodic low-priority events, such as 1946 * It is also used for periodic low-priority events, such as
1986 * idle-timers and hangcheck. 1947 * idle-timers and recording error state.
1987 * 1948 *
1988 * All tasks on the workqueue are expected to acquire the dev mutex 1949 * All tasks on the workqueue are expected to acquire the dev mutex
1989 * so there is no point in running more than one instance of the 1950 * so there is no point in running more than one instance of the
@@ -2001,20 +1962,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
2001 /* enable GEM by default */ 1962 /* enable GEM by default */
2002 dev_priv->has_gem = 1; 1963 dev_priv->has_gem = 1;
2003 1964
2004 if (prealloc_size > agp_size * 3 / 4) {
2005 DRM_ERROR("Detected broken video BIOS with %d/%dkB of video "
2006 "memory stolen.\n",
2007 prealloc_size / 1024, agp_size / 1024);
2008 DRM_ERROR("Disabling GEM. (try reducing stolen memory or "
2009 "updating the BIOS to fix).\n");
2010 dev_priv->has_gem = 0;
2011 }
2012
2013 if (dev_priv->has_gem == 0 && 1965 if (dev_priv->has_gem == 0 &&
2014 drm_core_check_feature(dev, DRIVER_MODESET)) { 1966 drm_core_check_feature(dev, DRIVER_MODESET)) {
2015 DRM_ERROR("kernel modesetting requires GEM, disabling driver.\n"); 1967 DRM_ERROR("kernel modesetting requires GEM, disabling driver.\n");
2016 ret = -ENODEV; 1968 ret = -ENODEV;
2017 goto out_iomapfree; 1969 goto out_workqueue_free;
2018 } 1970 }
2019 1971
2020 dev->driver->get_vblank_counter = i915_get_vblank_counter; 1972 dev->driver->get_vblank_counter = i915_get_vblank_counter;
@@ -2037,8 +1989,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
2037 /* Init HWS */ 1989 /* Init HWS */
2038 if (!I915_NEED_GFX_HWS(dev)) { 1990 if (!I915_NEED_GFX_HWS(dev)) {
2039 ret = i915_init_phys_hws(dev); 1991 ret = i915_init_phys_hws(dev);
2040 if (ret != 0) 1992 if (ret)
2041 goto out_workqueue_free; 1993 goto out_gem_unload;
2042 } 1994 }
2043 1995
2044 if (IS_PINEVIEW(dev)) 1996 if (IS_PINEVIEW(dev))
@@ -2060,16 +2012,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
2060 if (!IS_I945G(dev) && !IS_I945GM(dev)) 2012 if (!IS_I945G(dev) && !IS_I945GM(dev))
2061 pci_enable_msi(dev->pdev); 2013 pci_enable_msi(dev->pdev);
2062 2014
2063 spin_lock_init(&dev_priv->user_irq_lock); 2015 spin_lock_init(&dev_priv->irq_lock);
2064 spin_lock_init(&dev_priv->error_lock); 2016 spin_lock_init(&dev_priv->error_lock);
2065 dev_priv->trace_irq_seqno = 0; 2017 dev_priv->trace_irq_seqno = 0;
2066 2018
2067 ret = drm_vblank_init(dev, I915_NUM_PIPE); 2019 ret = drm_vblank_init(dev, I915_NUM_PIPE);
2068 2020 if (ret)
2069 if (ret) { 2021 goto out_gem_unload;
2070 (void) i915_driver_unload(dev);
2071 return ret;
2072 }
2073 2022
2074 /* Start out suspended */ 2023 /* Start out suspended */
2075 dev_priv->mm.suspended = 1; 2024 dev_priv->mm.suspended = 1;
@@ -2077,10 +2026,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
2077 intel_detect_pch(dev); 2026 intel_detect_pch(dev);
2078 2027
2079 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 2028 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
2080 ret = i915_load_modeset_init(dev, prealloc_size, agp_size); 2029 ret = i915_load_modeset_init(dev);
2081 if (ret < 0) { 2030 if (ret < 0) {
2082 DRM_ERROR("failed to init modeset\n"); 2031 DRM_ERROR("failed to init modeset\n");
2083 goto out_workqueue_free; 2032 goto out_gem_unload;
2084 } 2033 }
2085 } 2034 }
2086 2035
@@ -2100,12 +2049,18 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
2100 2049
2101 return 0; 2050 return 0;
2102 2051
2052out_gem_unload:
2053 if (dev->pdev->msi_enabled)
2054 pci_disable_msi(dev->pdev);
2055
2056 intel_teardown_gmbus(dev);
2057 intel_teardown_mchbar(dev);
2103out_workqueue_free: 2058out_workqueue_free:
2104 destroy_workqueue(dev_priv->wq); 2059 destroy_workqueue(dev_priv->wq);
2105out_iomapfree: 2060out_iomapfree:
2106 io_mapping_free(dev_priv->mm.gtt_mapping); 2061 io_mapping_free(dev_priv->mm.gtt_mapping);
2107out_rmmap: 2062out_rmmap:
2108 iounmap(dev_priv->regs); 2063 pci_iounmap(dev->pdev, dev_priv->regs);
2109put_bridge: 2064put_bridge:
2110 pci_dev_put(dev_priv->bridge_dev); 2065 pci_dev_put(dev_priv->bridge_dev);
2111free_priv: 2066free_priv:
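
[Editor's note: the new out_gem_unload label extends the driver's usual LIFO goto-unwind: each label undoes only what had succeeded before the failure, and later labels fall through to earlier ones. The skeleton of the pattern, with hypothetical step and teardown names:]

	int load(void)
	{
		int ret;

		ret = step_a();
		if (ret)
			return ret;

		ret = step_b();
		if (ret)
			goto undo_a;

		ret = step_c();
		if (ret)
			goto undo_b;

		return 0;

	undo_b:
		teardown_b();	/* falls through to undo_a */
	undo_a:
		teardown_a();
		return ret;
	}
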
@@ -2122,6 +2077,9 @@ int i915_driver_unload(struct drm_device *dev)
2122 i915_mch_dev = NULL; 2077 i915_mch_dev = NULL;
2123 spin_unlock(&mchdev_lock); 2078 spin_unlock(&mchdev_lock);
2124 2079
2080 if (dev_priv->mm.inactive_shrinker.shrink)
2081 unregister_shrinker(&dev_priv->mm.inactive_shrinker);
2082
2125 mutex_lock(&dev->struct_mutex); 2083 mutex_lock(&dev->struct_mutex);
2126 ret = i915_gpu_idle(dev); 2084 ret = i915_gpu_idle(dev);
2127 if (ret) 2085 if (ret)
@@ -2179,7 +2137,7 @@ int i915_driver_unload(struct drm_device *dev)
2179 mutex_unlock(&dev->struct_mutex); 2137 mutex_unlock(&dev->struct_mutex);
2180 if (I915_HAS_FBC(dev) && i915_powersave) 2138 if (I915_HAS_FBC(dev) && i915_powersave)
2181 i915_cleanup_compression(dev); 2139 i915_cleanup_compression(dev);
2182 drm_mm_takedown(&dev_priv->mm.vram); 2140 drm_mm_takedown(&dev_priv->mm.stolen);
2183 2141
2184 intel_cleanup_overlay(dev); 2142 intel_cleanup_overlay(dev);
2185 2143
@@ -2188,7 +2146,7 @@ int i915_driver_unload(struct drm_device *dev)
2188 } 2146 }
2189 2147
2190 if (dev_priv->regs != NULL) 2148 if (dev_priv->regs != NULL)
2191 iounmap(dev_priv->regs); 2149 pci_iounmap(dev->pdev, dev_priv->regs);
2192 2150
2193 intel_teardown_gmbus(dev); 2151 intel_teardown_gmbus(dev);
2194 intel_teardown_mchbar(dev); 2152 intel_teardown_mchbar(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f737960712e6..872493331988 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -111,7 +111,7 @@ static const struct intel_device_info intel_i965g_info = {
111 111
112static const struct intel_device_info intel_i965gm_info = { 112static const struct intel_device_info intel_i965gm_info = {
113 .gen = 4, .is_crestline = 1, 113 .gen = 4, .is_crestline = 1,
114 .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1, 114 .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
115 .has_overlay = 1, 115 .has_overlay = 1,
116 .supports_tv = 1, 116 .supports_tv = 1,
117}; 117};
@@ -130,7 +130,7 @@ static const struct intel_device_info intel_g45_info = {
130 130
131static const struct intel_device_info intel_gm45_info = { 131static const struct intel_device_info intel_gm45_info = {
132 .gen = 4, .is_g4x = 1, 132 .gen = 4, .is_g4x = 1,
133 .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, 133 .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
134 .has_pipe_cxsr = 1, .has_hotplug = 1, 134 .has_pipe_cxsr = 1, .has_hotplug = 1,
135 .supports_tv = 1, 135 .supports_tv = 1,
136 .has_bsd_ring = 1, 136 .has_bsd_ring = 1,
@@ -150,7 +150,7 @@ static const struct intel_device_info intel_ironlake_d_info = {
150 150
151static const struct intel_device_info intel_ironlake_m_info = { 151static const struct intel_device_info intel_ironlake_m_info = {
152 .gen = 5, .is_mobile = 1, 152 .gen = 5, .is_mobile = 1,
153 .need_gfx_hws = 1, .has_rc6 = 1, .has_hotplug = 1, 153 .need_gfx_hws = 1, .has_hotplug = 1,
154 .has_fbc = 0, /* disabled due to buggy hardware */ 154 .has_fbc = 0, /* disabled due to buggy hardware */
155 .has_bsd_ring = 1, 155 .has_bsd_ring = 1,
156}; 156};
@@ -165,6 +165,7 @@ static const struct intel_device_info intel_sandybridge_d_info = {
165static const struct intel_device_info intel_sandybridge_m_info = { 165static const struct intel_device_info intel_sandybridge_m_info = {
166 .gen = 6, .is_mobile = 1, 166 .gen = 6, .is_mobile = 1,
167 .need_gfx_hws = 1, .has_hotplug = 1, 167 .need_gfx_hws = 1, .has_hotplug = 1,
168 .has_fbc = 1,
168 .has_bsd_ring = 1, 169 .has_bsd_ring = 1,
169 .has_blt_ring = 1, 170 .has_blt_ring = 1,
170}; 171};
@@ -244,10 +245,34 @@ void intel_detect_pch (struct drm_device *dev)
244 } 245 }
245} 246}
246 247
248void __gen6_force_wake_get(struct drm_i915_private *dev_priv)
249{
250 int count;
251
252 count = 0;
253 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
254 udelay(10);
255
256 I915_WRITE_NOTRACE(FORCEWAKE, 1);
257 POSTING_READ(FORCEWAKE);
258
259 count = 0;
260 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0)
261 udelay(10);
262}
263
264void __gen6_force_wake_put(struct drm_i915_private *dev_priv)
265{
266 I915_WRITE_NOTRACE(FORCEWAKE, 0);
267 POSTING_READ(FORCEWAKE);
268}
269
247static int i915_drm_freeze(struct drm_device *dev) 270static int i915_drm_freeze(struct drm_device *dev)
248{ 271{
249 struct drm_i915_private *dev_priv = dev->dev_private; 272 struct drm_i915_private *dev_priv = dev->dev_private;
250 273
274 drm_kms_helper_poll_disable(dev);
275
251 pci_save_state(dev->pdev); 276 pci_save_state(dev->pdev);
252 277
253 /* If KMS is active, we do the leavevt stuff here */ 278 /* If KMS is active, we do the leavevt stuff here */
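
[Editor's note: the new __gen6_force_wake_get() above is a bounded handshake: wait up to 50 x 10us for the ACK bit to clear, assert the wake request, then wait again for ACK to latch. Reduced to its essentials as a stand-alone poll helper; the driver uses udelay(10) where the stub comment sits.]

	#include <stdbool.h>

	/* Illustrative model of the bounded ack-polling above. */
	static bool wait_for_bit(bool (*read_ack)(void), bool want, int tries)
	{
		while (tries-- > 0) {
			if (read_ack() == want)
				return true;
			/* udelay(10) in the driver */
		}
		return false;
	}
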
@@ -284,7 +309,9 @@ int i915_suspend(struct drm_device *dev, pm_message_t state)
284 if (state.event == PM_EVENT_PRETHAW) 309 if (state.event == PM_EVENT_PRETHAW)
285 return 0; 310 return 0;
286 311
287 drm_kms_helper_poll_disable(dev); 312
313 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
314 return 0;
288 315
289 error = i915_drm_freeze(dev); 316 error = i915_drm_freeze(dev);
290 if (error) 317 if (error)
@@ -304,6 +331,12 @@ static int i915_drm_thaw(struct drm_device *dev)
304 struct drm_i915_private *dev_priv = dev->dev_private; 331 struct drm_i915_private *dev_priv = dev->dev_private;
305 int error = 0; 332 int error = 0;
306 333
334 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
335 mutex_lock(&dev->struct_mutex);
336 i915_gem_restore_gtt_mappings(dev);
337 mutex_unlock(&dev->struct_mutex);
338 }
339
307 i915_restore_state(dev); 340 i915_restore_state(dev);
308 intel_opregion_setup(dev); 341 intel_opregion_setup(dev);
309 342
@@ -332,6 +365,9 @@ int i915_resume(struct drm_device *dev)
332{ 365{
333 int ret; 366 int ret;
334 367
368 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
369 return 0;
370
335 if (pci_enable_device(dev->pdev)) 371 if (pci_enable_device(dev->pdev))
336 return -EIO; 372 return -EIO;
337 373
@@ -405,6 +441,14 @@ static int ironlake_do_reset(struct drm_device *dev, u8 flags)
405 return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500); 441 return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
406} 442}
407 443
444static int gen6_do_reset(struct drm_device *dev, u8 flags)
445{
446 struct drm_i915_private *dev_priv = dev->dev_private;
447
448 I915_WRITE(GEN6_GDRST, GEN6_GRDOM_FULL);
449 return wait_for((I915_READ(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
450}
451
408/** 452/**
409 * i965_reset - reset chip after a hang 453 * i965_reset - reset chip after a hang
410 * @dev: drm device to reset 454 * @dev: drm device to reset
@@ -431,7 +475,8 @@ int i915_reset(struct drm_device *dev, u8 flags)
431 bool need_display = true; 475 bool need_display = true;
432 int ret; 476 int ret;
433 477
434 mutex_lock(&dev->struct_mutex); 478 if (!mutex_trylock(&dev->struct_mutex))
479 return -EBUSY;
435 480
436 i915_gem_reset(dev); 481 i915_gem_reset(dev);
437 482
@@ -439,6 +484,9 @@ int i915_reset(struct drm_device *dev, u8 flags)
439 if (get_seconds() - dev_priv->last_gpu_reset < 5) { 484 if (get_seconds() - dev_priv->last_gpu_reset < 5) {
440 DRM_ERROR("GPU hanging too fast, declaring wedged!\n"); 485 DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
441 } else switch (INTEL_INFO(dev)->gen) { 486 } else switch (INTEL_INFO(dev)->gen) {
487 case 6:
488 ret = gen6_do_reset(dev, flags);
489 break;
442 case 5: 490 case 5:
443 ret = ironlake_do_reset(dev, flags); 491 ret = ironlake_do_reset(dev, flags);
444 break; 492 break;
@@ -472,9 +520,14 @@ int i915_reset(struct drm_device *dev, u8 flags)
472 */ 520 */
473 if (drm_core_check_feature(dev, DRIVER_MODESET) || 521 if (drm_core_check_feature(dev, DRIVER_MODESET) ||
474 !dev_priv->mm.suspended) { 522 !dev_priv->mm.suspended) {
475 struct intel_ring_buffer *ring = &dev_priv->render_ring;
476 dev_priv->mm.suspended = 0; 523 dev_priv->mm.suspended = 0;
477 ring->init(dev, ring); 524
525 dev_priv->ring[RCS].init(&dev_priv->ring[RCS]);
526 if (HAS_BSD(dev))
527 dev_priv->ring[VCS].init(&dev_priv->ring[VCS]);
528 if (HAS_BLT(dev))
529 dev_priv->ring[BCS].init(&dev_priv->ring[BCS]);
530
478 mutex_unlock(&dev->struct_mutex); 531 mutex_unlock(&dev->struct_mutex);
479 drm_irq_uninstall(dev); 532 drm_irq_uninstall(dev);
480 drm_irq_install(dev); 533 drm_irq_install(dev);
@@ -523,6 +576,9 @@ static int i915_pm_suspend(struct device *dev)
523 return -ENODEV; 576 return -ENODEV;
524 } 577 }
525 578
579 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
580 return 0;
581
526 error = i915_drm_freeze(drm_dev); 582 error = i915_drm_freeze(drm_dev);
527 if (error) 583 if (error)
528 return error; 584 return error;
@@ -606,6 +662,8 @@ static struct drm_driver driver = {
606 .device_is_agp = i915_driver_device_is_agp, 662 .device_is_agp = i915_driver_device_is_agp,
607 .enable_vblank = i915_enable_vblank, 663 .enable_vblank = i915_enable_vblank,
608 .disable_vblank = i915_disable_vblank, 664 .disable_vblank = i915_disable_vblank,
665 .get_vblank_timestamp = i915_get_vblank_timestamp,
666 .get_scanout_position = i915_get_crtc_scanoutpos,
609 .irq_preinstall = i915_driver_irq_preinstall, 667 .irq_preinstall = i915_driver_irq_preinstall,
610 .irq_postinstall = i915_driver_irq_postinstall, 668 .irq_postinstall = i915_driver_irq_postinstall,
611 .irq_uninstall = i915_driver_irq_uninstall, 669 .irq_uninstall = i915_driver_irq_uninstall,
@@ -661,8 +719,6 @@ static int __init i915_init(void)
661 719
662 driver.num_ioctls = i915_max_ioctl; 720 driver.num_ioctls = i915_max_ioctl;
663 721
664 i915_gem_shrinker_init();
665
666 /* 722 /*
667 * If CONFIG_DRM_I915_KMS is set, default to KMS unless 723 * If CONFIG_DRM_I915_KMS is set, default to KMS unless
668 * explicitly disabled with the module parameter. 724 * explicitly disabled with the module parameter.
@@ -684,17 +740,11 @@ static int __init i915_init(void)
684 driver.driver_features &= ~DRIVER_MODESET; 740 driver.driver_features &= ~DRIVER_MODESET;
685#endif 741#endif
686 742
687 if (!(driver.driver_features & DRIVER_MODESET)) {
688 driver.suspend = i915_suspend;
689 driver.resume = i915_resume;
690 }
691
692 return drm_init(&driver); 743 return drm_init(&driver);
693} 744}
694 745
695static void __exit i915_exit(void) 746static void __exit i915_exit(void)
696{ 747{
697 i915_gem_shrinker_exit();
698 drm_exit(&driver); 748 drm_exit(&driver);
699} 749}
700 750
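
[Editor's note: the module-wide i915_gem_shrinker_init()/_exit() hooks removed above give way to a per-device struct shrinker (the inactive_shrinker field added to drm_i915_private below, and unregistered in i915_driver_unload earlier). Registration then follows the standard kernel pattern of the era; the callback name here is illustrative:]

	static int i915_gem_inactive_shrink(struct shrinker *shrinker,
					    int nr_to_scan, gfp_t gfp_mask);

	/* at load time */
	dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
	dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
	register_shrinker(&dev_priv->mm.inactive_shrinker);

	/* at unload time (guarded, as in the unload hunk above) */
	if (dev_priv->mm.inactive_shrinker.shrink)
		unregister_shrinker(&dev_priv->mm.inactive_shrinker);
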
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 409826da3099..aac1bf332f75 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -89,7 +89,7 @@ struct drm_i915_gem_phys_object {
89 int id; 89 int id;
90 struct page **page_list; 90 struct page **page_list;
91 drm_dma_handle_t *handle; 91 drm_dma_handle_t *handle;
92 struct drm_gem_object *cur_obj; 92 struct drm_i915_gem_object *cur_obj;
93}; 93};
94 94
95struct mem_block { 95struct mem_block {
@@ -124,9 +124,9 @@ struct drm_i915_master_private {
124#define I915_FENCE_REG_NONE -1 124#define I915_FENCE_REG_NONE -1
125 125
126struct drm_i915_fence_reg { 126struct drm_i915_fence_reg {
127 struct drm_gem_object *obj;
128 struct list_head lru_list; 127 struct list_head lru_list;
129 bool gpu; 128 struct drm_i915_gem_object *obj;
129 uint32_t setup_seqno;
130}; 130};
131 131
132struct sdvo_device_mapping { 132struct sdvo_device_mapping {
@@ -139,6 +139,8 @@ struct sdvo_device_mapping {
139 u8 ddc_pin; 139 u8 ddc_pin;
140}; 140};
141 141
142struct intel_display_error_state;
143
142struct drm_i915_error_state { 144struct drm_i915_error_state {
143 u32 eir; 145 u32 eir;
144 u32 pgtbl_er; 146 u32 pgtbl_er;
@@ -148,11 +150,23 @@ struct drm_i915_error_state {
148 u32 ipehr; 150 u32 ipehr;
149 u32 instdone; 151 u32 instdone;
150 u32 acthd; 152 u32 acthd;
153 u32 error; /* gen6+ */
154 u32 bcs_acthd; /* gen6+ blt engine */
155 u32 bcs_ipehr;
156 u32 bcs_ipeir;
157 u32 bcs_instdone;
158 u32 bcs_seqno;
159 u32 vcs_acthd; /* gen6+ bsd engine */
160 u32 vcs_ipehr;
161 u32 vcs_ipeir;
162 u32 vcs_instdone;
163 u32 vcs_seqno;
151 u32 instpm; 164 u32 instpm;
152 u32 instps; 165 u32 instps;
153 u32 instdone1; 166 u32 instdone1;
154 u32 seqno; 167 u32 seqno;
155 u64 bbaddr; 168 u64 bbaddr;
169 u64 fence[16];
156 struct timeval time; 170 struct timeval time;
157 struct drm_i915_error_object { 171 struct drm_i915_error_object {
158 int page_count; 172 int page_count;
@@ -171,9 +185,11 @@ struct drm_i915_error_state {
171 u32 tiling:2; 185 u32 tiling:2;
172 u32 dirty:1; 186 u32 dirty:1;
173 u32 purgeable:1; 187 u32 purgeable:1;
174 } *active_bo; 188 u32 ring:4;
175 u32 active_bo_count; 189 } *active_bo, *pinned_bo;
190 u32 active_bo_count, pinned_bo_count;
176 struct intel_overlay_error_state *overlay; 191 struct intel_overlay_error_state *overlay;
192 struct intel_display_error_state *display;
177}; 193};
178 194
179struct drm_i915_display_funcs { 195struct drm_i915_display_funcs {
@@ -207,7 +223,6 @@ struct intel_device_info {
207 u8 is_broadwater : 1; 223 u8 is_broadwater : 1;
208 u8 is_crestline : 1; 224 u8 is_crestline : 1;
209 u8 has_fbc : 1; 225 u8 has_fbc : 1;
210 u8 has_rc6 : 1;
211 u8 has_pipe_cxsr : 1; 226 u8 has_pipe_cxsr : 1;
212 u8 has_hotplug : 1; 227 u8 has_hotplug : 1;
213 u8 cursor_needs_physical : 1; 228 u8 cursor_needs_physical : 1;
@@ -243,6 +258,7 @@ typedef struct drm_i915_private {
243 const struct intel_device_info *info; 258 const struct intel_device_info *info;
244 259
245 int has_gem; 260 int has_gem;
261 int relative_constants_mode;
246 262
247 void __iomem *regs; 263 void __iomem *regs;
248 264
@@ -253,20 +269,15 @@ typedef struct drm_i915_private {
253 } *gmbus; 269 } *gmbus;
254 270
255 struct pci_dev *bridge_dev; 271 struct pci_dev *bridge_dev;
256 struct intel_ring_buffer render_ring; 272 struct intel_ring_buffer ring[I915_NUM_RINGS];
257 struct intel_ring_buffer bsd_ring;
258 struct intel_ring_buffer blt_ring;
259 uint32_t next_seqno; 273 uint32_t next_seqno;
260 274
261 drm_dma_handle_t *status_page_dmah; 275 drm_dma_handle_t *status_page_dmah;
262 void *seqno_page;
263 dma_addr_t dma_status_page; 276 dma_addr_t dma_status_page;
264 uint32_t counter; 277 uint32_t counter;
265 unsigned int seqno_gfx_addr;
266 drm_local_map_t hws_map; 278 drm_local_map_t hws_map;
267 struct drm_gem_object *seqno_obj; 279 struct drm_i915_gem_object *pwrctx;
268 struct drm_gem_object *pwrctx; 280 struct drm_i915_gem_object *renderctx;
269 struct drm_gem_object *renderctx;
270 281
271 struct resource mch_res; 282 struct resource mch_res;
272 283
@@ -275,25 +286,17 @@ typedef struct drm_i915_private {
275 int front_offset; 286 int front_offset;
276 int current_page; 287 int current_page;
277 int page_flipping; 288 int page_flipping;
278#define I915_DEBUG_READ (1<<0)
279#define I915_DEBUG_WRITE (1<<1)
280 unsigned long debug_flags;
281 289
282 wait_queue_head_t irq_queue;
283 atomic_t irq_received; 290 atomic_t irq_received;
284 /** Protects user_irq_refcount and irq_mask_reg */
285 spinlock_t user_irq_lock;
286 u32 trace_irq_seqno; 291 u32 trace_irq_seqno;
292
293 /* protects the irq masks */
294 spinlock_t irq_lock;
287 /** Cached value of IMR to avoid reads in updating the bitfield */ 295 /** Cached value of IMR to avoid reads in updating the bitfield */
288 u32 irq_mask_reg;
289 u32 pipestat[2]; 296 u32 pipestat[2];
290 /** splitted irq regs for graphics and display engine on Ironlake, 297 u32 irq_mask;
291 irq_mask_reg is still used for display irq. */ 298 u32 gt_irq_mask;
292 u32 gt_irq_mask_reg; 299 u32 pch_irq_mask;
293 u32 gt_irq_enable_reg;
294 u32 de_irq_enable_reg;
295 u32 pch_irq_mask_reg;
296 u32 pch_irq_enable_reg;
297 300
298 u32 hotplug_supported_mask; 301 u32 hotplug_supported_mask;
299 struct work_struct hotplug_work; 302 struct work_struct hotplug_work;
@@ -306,7 +309,7 @@ typedef struct drm_i915_private {
306 int num_pipe; 309 int num_pipe;
307 310
308 /* For hangcheck timer */ 311 /* For hangcheck timer */
309#define DRM_I915_HANGCHECK_PERIOD 250 /* in ms */ 312#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
310 struct timer_list hangcheck_timer; 313 struct timer_list hangcheck_timer;
311 int hangcheck_count; 314 int hangcheck_count;
312 uint32_t last_acthd; 315 uint32_t last_acthd;
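
[Editor's note: the hunk above relaxes the hangcheck period from 250 ms to 1500 ms. The constant feeds the timer re-arm in the usual way; a sketch of the idiom, since the actual call site is not shown in this diff:]

	mod_timer(&dev_priv->hangcheck_timer,
		  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
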
@@ -530,23 +533,21 @@ typedef struct drm_i915_private {
530 533
531 struct { 534 struct {
532 /** Bridge to intel-gtt-ko */ 535 /** Bridge to intel-gtt-ko */
533 struct intel_gtt *gtt; 536 const struct intel_gtt *gtt;
534 /** Memory allocator for GTT stolen memory */ 537 /** Memory allocator for GTT stolen memory */
535 struct drm_mm vram; 538 struct drm_mm stolen;
536 /** Memory allocator for GTT */ 539 /** Memory allocator for GTT */
537 struct drm_mm gtt_space; 540 struct drm_mm gtt_space;
541 /** List of all objects in gtt_space. Used to restore gtt
542 * mappings on resume */
543 struct list_head gtt_list;
544 /** End of mappable part of GTT */
545 unsigned long gtt_mappable_end;
538 546
539 struct io_mapping *gtt_mapping; 547 struct io_mapping *gtt_mapping;
540 int gtt_mtrr; 548 int gtt_mtrr;
541 549
542 /** 550 struct shrinker inactive_shrinker;
543 * Membership on list of all loaded devices, used to evict
544 * inactive buffers under memory pressure.
545 *
546 * Modifications should only be done whilst holding the
547 * shrink_list_lock spinlock.
548 */
549 struct list_head shrink_list;
550 551
551 /** 552 /**
552 * List of objects currently involved in rendering. 553 * List of objects currently involved in rendering.
@@ -609,16 +610,6 @@ typedef struct drm_i915_private {
609 struct delayed_work retire_work; 610 struct delayed_work retire_work;
610 611
611 /** 612 /**
612 * Waiting sequence number, if any
613 */
614 uint32_t waiting_gem_seqno;
615
616 /**
617 * Last seq seen at irq time
618 */
619 uint32_t irq_gem_seqno;
620
621 /**
622 * Flag if the X Server, and thus DRM, is not currently in 613 * Flag if the X Server, and thus DRM, is not currently in
623 * control of the device. 614 * control of the device.
624 * 615 *
@@ -645,16 +636,11 @@ typedef struct drm_i915_private {
645 /* storage for physical objects */ 636 /* storage for physical objects */
646 struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT]; 637 struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
647 638
648 uint32_t flush_rings;
649
650 /* accounting, useful for userland debugging */ 639 /* accounting, useful for userland debugging */
651 size_t object_memory;
652 size_t pin_memory;
653 size_t gtt_memory;
654 size_t gtt_total; 640 size_t gtt_total;
641 size_t mappable_gtt_total;
642 size_t object_memory;
655 u32 object_count; 643 u32 object_count;
656 u32 pin_count;
657 u32 gtt_count;
658 } mm; 644 } mm;
659 struct sdvo_device_mapping sdvo_mappings[2]; 645 struct sdvo_device_mapping sdvo_mappings[2];
660 /* indicate whether the LVDS_BORDER should be enabled or not */ 646 /* indicate whether the LVDS_BORDER should be enabled or not */
@@ -688,14 +674,14 @@ typedef struct drm_i915_private {
688 u8 fmax; 674 u8 fmax;
689 u8 fstart; 675 u8 fstart;
690 676
691 u64 last_count1; 677 u64 last_count1;
692 unsigned long last_time1; 678 unsigned long last_time1;
693 u64 last_count2; 679 u64 last_count2;
694 struct timespec last_time2; 680 struct timespec last_time2;
695 unsigned long gfx_power; 681 unsigned long gfx_power;
696 int c_m; 682 int c_m;
697 int r_t; 683 int r_t;
698 u8 corr; 684 u8 corr;
699 spinlock_t *mchdev_lock; 685 spinlock_t *mchdev_lock;
700 686
701 enum no_fbc_reason no_fbc_reason; 687 enum no_fbc_reason no_fbc_reason;
@@ -709,20 +695,20 @@ typedef struct drm_i915_private {
709 struct intel_fbdev *fbdev; 695 struct intel_fbdev *fbdev;
710} drm_i915_private_t; 696} drm_i915_private_t;
711 697
712/** driver private structure attached to each drm_gem_object */
713struct drm_i915_gem_object { 698struct drm_i915_gem_object {
714 struct drm_gem_object base; 699 struct drm_gem_object base;
715 700
716 /** Current space allocated to this object in the GTT, if any. */ 701 /** Current space allocated to this object in the GTT, if any. */
717 struct drm_mm_node *gtt_space; 702 struct drm_mm_node *gtt_space;
703 struct list_head gtt_list;
718 704
719 /** This object's place on the active/flushing/inactive lists */ 705 /** This object's place on the active/flushing/inactive lists */
720 struct list_head ring_list; 706 struct list_head ring_list;
721 struct list_head mm_list; 707 struct list_head mm_list;
722 /** This object's place on GPU write list */ 708 /** This object's place on GPU write list */
723 struct list_head gpu_write_list; 709 struct list_head gpu_write_list;
724 /** This object's place on eviction list */ 710 /** This object's place in the batchbuffer or on the eviction list */
725 struct list_head evict_list; 711 struct list_head exec_list;
726 712
727 /** 713 /**
728 * This is set if the object is on the active or flushing lists 714 * This is set if the object is on the active or flushing lists
@@ -738,6 +724,12 @@ struct drm_i915_gem_object {
738 unsigned int dirty : 1; 724 unsigned int dirty : 1;
739 725
740 /** 726 /**
727 * This is set if the object has been written to since the last
728 * GPU flush.
729 */
730 unsigned int pending_gpu_write : 1;
731
732 /**
741 * Fence register bits (if any) for this object. Will be set 733 * Fence register bits (if any) for this object. Will be set
742 * as needed when mapped into the GTT. 734 * as needed when mapped into the GTT.
743 * Protected by dev->struct_mutex. 735 * Protected by dev->struct_mutex.
@@ -747,29 +739,15 @@ struct drm_i915_gem_object {
747 signed int fence_reg : 5; 739 signed int fence_reg : 5;
748 740
749 /** 741 /**
750 * Used for checking the object doesn't appear more than once
751 * in an execbuffer object list.
752 */
753 unsigned int in_execbuffer : 1;
754
755 /**
756 * Advice: are the backing pages purgeable? 742 * Advice: are the backing pages purgeable?
757 */ 743 */
758 unsigned int madv : 2; 744 unsigned int madv : 2;
759 745
760 /** 746 /**
761 * Refcount for the pages array. With the current locking scheme, there
762 * are at most two concurrent users: Binding a bo to the gtt and
763 * pwrite/pread using physical addresses. So two bits for a maximum
764 * of two users are enough.
765 */
766 unsigned int pages_refcount : 2;
767#define DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT 0x3
768
769 /**
770 * Current tiling mode for the object. 747 * Current tiling mode for the object.
771 */ 748 */
772 unsigned int tiling_mode : 2; 749 unsigned int tiling_mode : 2;
750 unsigned int tiling_changed : 1;
773 751
774 /** How many users have pinned this object in GTT space. The following 752 /** How many users have pinned this object in GTT space. The following
775 * users can each hold at most one reference: pwrite/pread, pin_ioctl 753 * users can each hold at most one reference: pwrite/pread, pin_ioctl
@@ -783,28 +761,54 @@ struct drm_i915_gem_object {
783 unsigned int pin_count : 4; 761 unsigned int pin_count : 4;
784#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf 762#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
785 763
786 /** AGP memory structure for our GTT binding. */ 764 /**
787 DRM_AGP_MEM *agp_mem; 765 * Is the object at the current location in the gtt mappable and
766 * fenceable? Used to avoid costly recalculations.
767 */
768 unsigned int map_and_fenceable : 1;
769
770 /**
771 * Whether the current gtt mapping needs to be mappable (and isn't just
772 * mappable by accident). Track pin and fault separate for a more
773 * accurate mappable working set.
774 */
775 unsigned int fault_mappable : 1;
776 unsigned int pin_mappable : 1;
777
778 /*
779 * Is the GPU currently using a fence to access this buffer?
780 */
781 unsigned int pending_fenced_gpu_access:1;
782 unsigned int fenced_gpu_access:1;
788 783
789 struct page **pages; 784 struct page **pages;
790 785
791 /** 786 /**
792 * Current offset of the object in GTT space. 787 * DMAR support
793 *
794 * This is the same as gtt_space->start
795 */ 788 */
796 uint32_t gtt_offset; 789 struct scatterlist *sg_list;
790 int num_sg;
797 791
798 /* Which ring is refering to is this object */ 792 /**
799 struct intel_ring_buffer *ring; 793 * Used for performing relocations during execbuffer insertion.
794 */
795 struct hlist_node exec_node;
796 unsigned long exec_handle;
800 797
801 /** 798 /**
802 * Fake offset for use by mmap(2) 799 * Current offset of the object in GTT space.
800 *
801 * This is the same as gtt_space->start
803 */ 802 */
804 uint64_t mmap_offset; 803 uint32_t gtt_offset;
805 804
806 /** Breadcrumb of last rendering to the buffer. */ 805 /** Breadcrumb of last rendering to the buffer. */
807 uint32_t last_rendering_seqno; 806 uint32_t last_rendering_seqno;
807 struct intel_ring_buffer *ring;
808
809 /** Breadcrumb of last fenced GPU access to the buffer. */
810 uint32_t last_fenced_seqno;
811 struct intel_ring_buffer *last_fenced_ring;
808 812
809 /** Current tiling stride for the object, if it's tiled. */ 813 /** Current tiling stride for the object, if it's tiled. */
810 uint32_t stride; 814 uint32_t stride;
@@ -880,6 +884,68 @@ enum intel_chip_family {
880 CHIP_I965 = 0x08, 884 CHIP_I965 = 0x08,
881}; 885};
882 886
887#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
888
889#define IS_I830(dev) ((dev)->pci_device == 0x3577)
890#define IS_845G(dev) ((dev)->pci_device == 0x2562)
891#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
892#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
893#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
894#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
895#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
896#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
897#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
898#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
899#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
900#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
901#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
902#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
903#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
904#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
905#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
906#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
907#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
908
909#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
910#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3)
911#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4)
912#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
913#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
914
915#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring)
916#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
917#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
918
919#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
920#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
921
922/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
923 * rows, which changed the alignment requirements and fence programming.
924 */
925#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
926 IS_I915GM(dev)))
927#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
928#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev))
929#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev))
930#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
931#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
932#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
933/* dsparb controlled by hw only */
934#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
935
936#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
937#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
938#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
939
940#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev))
941#define HAS_PIPE_CONTROL(dev) (IS_GEN5(dev) || IS_GEN6(dev))
942
943#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
944#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
945#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
946
947#include "i915_trace.h"
948
883extern struct drm_ioctl_desc i915_ioctls[]; 949extern struct drm_ioctl_desc i915_ioctls[];
884extern int i915_max_ioctl; 950extern int i915_max_ioctl;
885extern unsigned int i915_fbpercrtc; 951extern unsigned int i915_fbpercrtc;
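
[Editor's note: the block above centralizes device identification in i915_drv.h, so callers test features (HAS_*) and generations (IS_GENn) rather than raw PCI IDs. Typical usage, with hypothetical callees:]

	if (HAS_PCH_SPLIT(dev))
		pch_init_clock_gating(dev);	/* hypothetical callee */
	else if (IS_GEN2(dev))
		i8xx_init_clock_gating(dev);	/* hypothetical callee */

	if (HAS_BSD(dev))
		init_bsd_ring(dev);		/* hypothetical callee */
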
@@ -907,8 +973,8 @@ extern int i915_driver_device_is_agp(struct drm_device * dev);
907extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, 973extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
908 unsigned long arg); 974 unsigned long arg);
909extern int i915_emit_box(struct drm_device *dev, 975extern int i915_emit_box(struct drm_device *dev,
910 struct drm_clip_rect *boxes, 976 struct drm_clip_rect *box,
911 int i, int DR1, int DR4); 977 int DR1, int DR4);
912extern int i915_reset(struct drm_device *dev, u8 flags); 978extern int i915_reset(struct drm_device *dev, u8 flags);
913extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); 979extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
914extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); 980extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
@@ -918,6 +984,7 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
918 984
919/* i915_irq.c */ 985/* i915_irq.c */
920void i915_hangcheck_elapsed(unsigned long data); 986void i915_hangcheck_elapsed(unsigned long data);
987void i915_handle_error(struct drm_device *dev, bool wedged);
921extern int i915_irq_emit(struct drm_device *dev, void *data, 988extern int i915_irq_emit(struct drm_device *dev, void *data,
922 struct drm_file *file_priv); 989 struct drm_file *file_priv);
923extern int i915_irq_wait(struct drm_device *dev, void *data, 990extern int i915_irq_wait(struct drm_device *dev, void *data,
@@ -953,6 +1020,13 @@ void
953i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); 1020i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
954 1021
955void intel_enable_asle (struct drm_device *dev); 1022void intel_enable_asle (struct drm_device *dev);
1023int i915_get_vblank_timestamp(struct drm_device *dev, int crtc,
1024 int *max_error,
1025 struct timeval *vblank_time,
1026 unsigned flags);
1027
1028int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
1029 int *vpos, int *hpos);
956 1030
957#ifdef CONFIG_DEBUG_FS 1031#ifdef CONFIG_DEBUG_FS
958extern void i915_destroy_error_state(struct drm_device *dev); 1032extern void i915_destroy_error_state(struct drm_device *dev);
@@ -1017,15 +1091,28 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
1017 struct drm_file *file_priv); 1091 struct drm_file *file_priv);
1018void i915_gem_load(struct drm_device *dev); 1092void i915_gem_load(struct drm_device *dev);
1019int i915_gem_init_object(struct drm_gem_object *obj); 1093int i915_gem_init_object(struct drm_gem_object *obj);
1020struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, 1094void i915_gem_flush_ring(struct drm_device *dev,
1021 size_t size); 1095 struct intel_ring_buffer *ring,
1096 uint32_t invalidate_domains,
1097 uint32_t flush_domains);
1098struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
1099 size_t size);
1022void i915_gem_free_object(struct drm_gem_object *obj); 1100void i915_gem_free_object(struct drm_gem_object *obj);
1023int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment); 1101int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
1024void i915_gem_object_unpin(struct drm_gem_object *obj); 1102 uint32_t alignment,
1025int i915_gem_object_unbind(struct drm_gem_object *obj); 1103 bool map_and_fenceable);
1026void i915_gem_release_mmap(struct drm_gem_object *obj); 1104void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
1105int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
1106void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
1027void i915_gem_lastclose(struct drm_device *dev); 1107void i915_gem_lastclose(struct drm_device *dev);
1028 1108
1109int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
1110int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1111 bool interruptible);
1112void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1113 struct intel_ring_buffer *ring,
1114 u32 seqno);
1115
1029/** 1116/**
1030 * Returns true if seq1 is later than seq2. 1117 * Returns true if seq1 is later than seq2.
1031 */ 1118 */
@@ -1035,73 +1122,88 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
1035 return (int32_t)(seq1 - seq2) >= 0; 1122 return (int32_t)(seq1 - seq2) >= 0;
1036} 1123}
1037 1124
1038int i915_gem_object_get_fence_reg(struct drm_gem_object *obj, 1125static inline u32
1039 bool interruptible); 1126i915_gem_next_request_seqno(struct drm_device *dev,
1040int i915_gem_object_put_fence_reg(struct drm_gem_object *obj, 1127 struct intel_ring_buffer *ring)
1041 bool interruptible); 1128{
1129 drm_i915_private_t *dev_priv = dev->dev_private;
1130 return ring->outstanding_lazy_request = dev_priv->next_seqno;
1131}
1132
1133int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
1134 struct intel_ring_buffer *pipelined,
1135 bool interruptible);
1136int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
1137
1042void i915_gem_retire_requests(struct drm_device *dev); 1138void i915_gem_retire_requests(struct drm_device *dev);
1043void i915_gem_reset(struct drm_device *dev); 1139void i915_gem_reset(struct drm_device *dev);
1044void i915_gem_clflush_object(struct drm_gem_object *obj); 1140void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
1045int i915_gem_object_set_domain(struct drm_gem_object *obj, 1141int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
1046 uint32_t read_domains, 1142 uint32_t read_domains,
1047 uint32_t write_domain); 1143 uint32_t write_domain);
1048int i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj, 1144int __must_check i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
1049 bool interruptible); 1145 bool interruptible);
1050int i915_gem_init_ringbuffer(struct drm_device *dev); 1146int __must_check i915_gem_init_ringbuffer(struct drm_device *dev);
1051void i915_gem_cleanup_ringbuffer(struct drm_device *dev); 1147void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
1052int i915_gem_do_init(struct drm_device *dev, unsigned long start, 1148void i915_gem_do_init(struct drm_device *dev,
1053 unsigned long end); 1149 unsigned long start,
1054int i915_gpu_idle(struct drm_device *dev); 1150 unsigned long mappable_end,
1055int i915_gem_idle(struct drm_device *dev); 1151 unsigned long end);
1056uint32_t i915_add_request(struct drm_device *dev, 1152int __must_check i915_gpu_idle(struct drm_device *dev);
1057 struct drm_file *file_priv, 1153int __must_check i915_gem_idle(struct drm_device *dev);
1058 struct drm_i915_gem_request *request, 1154int __must_check i915_add_request(struct drm_device *dev,
1059 struct intel_ring_buffer *ring); 1155 struct drm_file *file_priv,
1060int i915_do_wait_request(struct drm_device *dev, 1156 struct drm_i915_gem_request *request,
1061 uint32_t seqno, 1157 struct intel_ring_buffer *ring);
1062 bool interruptible, 1158int __must_check i915_do_wait_request(struct drm_device *dev,
1063 struct intel_ring_buffer *ring); 1159 uint32_t seqno,
1160 bool interruptible,
1161 struct intel_ring_buffer *ring);
1064int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); 1162int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
1065int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, 1163int __must_check
1066 int write); 1164i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
1067int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj, 1165 bool write);
1068 bool pipelined); 1166int __must_check
1167i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
1168 struct intel_ring_buffer *pipelined);
1069int i915_gem_attach_phys_object(struct drm_device *dev, 1169int i915_gem_attach_phys_object(struct drm_device *dev,
1070 struct drm_gem_object *obj, 1170 struct drm_i915_gem_object *obj,
1071 int id, 1171 int id,
1072 int align); 1172 int align);
1073void i915_gem_detach_phys_object(struct drm_device *dev, 1173void i915_gem_detach_phys_object(struct drm_device *dev,
1074 struct drm_gem_object *obj); 1174 struct drm_i915_gem_object *obj);
1075void i915_gem_free_all_phys_object(struct drm_device *dev); 1175void i915_gem_free_all_phys_object(struct drm_device *dev);
1076void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv); 1176void i915_gem_release(struct drm_device *dev, struct drm_file *file);
1077 1177
1078void i915_gem_shrinker_init(void); 1178/* i915_gem_gtt.c */
1079void i915_gem_shrinker_exit(void); 1179void i915_gem_restore_gtt_mappings(struct drm_device *dev);
1180int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
1181void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
1080 1182
1081/* i915_gem_evict.c */ 1183/* i915_gem_evict.c */
1082int i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment); 1184int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
1083int i915_gem_evict_everything(struct drm_device *dev); 1185 unsigned alignment, bool mappable);
1084int i915_gem_evict_inactive(struct drm_device *dev); 1186int __must_check i915_gem_evict_everything(struct drm_device *dev,
1187 bool purgeable_only);
1188int __must_check i915_gem_evict_inactive(struct drm_device *dev,
1189 bool purgeable_only);
1085 1190
1086/* i915_gem_tiling.c */ 1191/* i915_gem_tiling.c */
1087void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); 1192void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
1088void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj); 1193void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
1089void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj); 1194void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
1090bool i915_tiling_ok(struct drm_device *dev, int stride, int size,
1091 int tiling_mode);
1092bool i915_gem_object_fence_offset_ok(struct drm_gem_object *obj,
1093 int tiling_mode);
1094 1195
1095/* i915_gem_debug.c */ 1196/* i915_gem_debug.c */
1096void i915_gem_dump_object(struct drm_gem_object *obj, int len, 1197void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
1097 const char *where, uint32_t mark); 1198 const char *where, uint32_t mark);
1098#if WATCH_LISTS 1199#if WATCH_LISTS
1099int i915_verify_lists(struct drm_device *dev); 1200int i915_verify_lists(struct drm_device *dev);
1100#else 1201#else
1101#define i915_verify_lists(dev) 0 1202#define i915_verify_lists(dev) 0
1102#endif 1203#endif
1103void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle); 1204void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj,
1104void i915_gem_dump_object(struct drm_gem_object *obj, int len, 1205 int handle);
1206void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
1105 const char *where, uint32_t mark); 1207 const char *where, uint32_t mark);
1106 1208
1107/* i915_debugfs.c */ 1209/* i915_debugfs.c */
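
[Editor's note: many declarations above gain __must_check, the kernel's wrapper for the compiler's warn_unused_result attribute, so silently dropping one of these return codes now produces a build warning. A minimal stand-alone demonstration:]

	#define __must_check __attribute__((warn_unused_result))

	static __must_check int try_bind(void) { return -1; }

	static void caller(void)
	{
		try_bind();		/* compiler warns: result ignored */
		if (try_bind() < 0)	/* fine: result is checked */
			return;
	}
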
@@ -1163,6 +1265,7 @@ extern void intel_disable_fbc(struct drm_device *dev);
1163extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval); 1265extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
1164extern bool intel_fbc_enabled(struct drm_device *dev); 1266extern bool intel_fbc_enabled(struct drm_device *dev);
1165extern bool ironlake_set_drps(struct drm_device *dev, u8 val); 1267extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
1268extern void gen6_set_rps(struct drm_device *dev, u8 val);
1166extern void intel_detect_pch (struct drm_device *dev); 1269extern void intel_detect_pch (struct drm_device *dev);
1167extern int intel_trans_dp_port_sel (struct drm_crtc *crtc); 1270extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
1168 1271
@@ -1170,79 +1273,120 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
1170#ifdef CONFIG_DEBUG_FS 1273#ifdef CONFIG_DEBUG_FS
1171extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); 1274extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
1172extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error); 1275extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error);
1276
1277extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
1278extern void intel_display_print_error_state(struct seq_file *m,
1279 struct drm_device *dev,
1280 struct intel_display_error_state *error);
1173#endif 1281#endif
1174 1282
1283#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
1284
1285#define BEGIN_LP_RING(n) \
1286 intel_ring_begin(LP_RING(dev_priv), (n))
1287
1288#define OUT_RING(x) \
1289 intel_ring_emit(LP_RING(dev_priv), x)
1290
1291#define ADVANCE_LP_RING() \
1292 intel_ring_advance(LP_RING(dev_priv))
1293
1175/** 1294/**
1176 * Lock test for when it's just for synchronization of ring access. 1295 * Lock test for when it's just for synchronization of ring access.
1177 * 1296 *
1178 * In that case, we don't need to do it when GEM is initialized as nobody else 1297 * In that case, we don't need to do it when GEM is initialized as nobody else
1179 * has access to the ring. 1298 * has access to the ring.
1180 */ 1299 */
1181#define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do { \ 1300#define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \
1182 if (((drm_i915_private_t *)dev->dev_private)->render_ring.gem_object \ 1301 if (LP_RING(dev->dev_private)->obj == NULL) \
1183 == NULL) \ 1302 LOCK_TEST_WITH_RETURN(dev, file); \
1184 LOCK_TEST_WITH_RETURN(dev, file_priv); \
1185} while (0) 1303} while (0)
1186 1304
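
A usage sketch for the lock-test macro above, in the shape of a hypothetical legacy ioctl (dev and file are the usual DRM ioctl parameters; LOCK_TEST_WITH_RETURN hands -EINVAL back to the caller when the HW lock is not held):

	static int i915_example_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file)
	{
		RING_LOCK_TEST_WITH_RETURN(dev, file);
		/* ... safe to touch the ring from here on ... */
		return 0;
	}
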
1187static inline u32 i915_read(struct drm_i915_private *dev_priv, u32 reg) 1305
1306#define __i915_read(x, y) \
1307static inline u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
1308 u##x val = read##y(dev_priv->regs + reg); \
1309 trace_i915_reg_rw('R', reg, val, sizeof(val)); \
1310 return val; \
1311}
1312__i915_read(8, b)
1313__i915_read(16, w)
1314__i915_read(32, l)
1315__i915_read(64, q)
1316#undef __i915_read
1317
1318#define __i915_write(x, y) \
1319static inline void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
1320 trace_i915_reg_rw('W', reg, val, sizeof(val)); \
1321 write##y(val, dev_priv->regs + reg); \
1322}
1323__i915_write(8, b)
1324__i915_write(16, w)
1325__i915_write(32, l)
1326__i915_write(64, q)
1327#undef __i915_write
1328
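
For reference, the generator above stamps out one accessor per register width; __i915_read(32, l) expands to the following (whitespace added):

	static inline u32 i915_read32(struct drm_i915_private *dev_priv, u32 reg)
	{
		u32 val = readl(dev_priv->regs + reg);		/* y == l selects readl */
		trace_i915_reg_rw('R', reg, val, sizeof(val));	/* log the access */
		return val;
	}
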
1329#define I915_READ8(reg) i915_read8(dev_priv, (reg))
1330#define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val))
1331
1332#define I915_READ16(reg) i915_read16(dev_priv, (reg))
1333#define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val))
1334#define I915_READ16_NOTRACE(reg) readw(dev_priv->regs + (reg))
1335#define I915_WRITE16_NOTRACE(reg, val) writew(val, dev_priv->regs + (reg))
1336
1337#define I915_READ(reg) i915_read32(dev_priv, (reg))
1338#define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val))
1339#define I915_READ_NOTRACE(reg) readl(dev_priv->regs + (reg))
1340#define I915_WRITE_NOTRACE(reg, val) writel(val, dev_priv->regs + (reg))
1341
1342#define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val))
1343#define I915_READ64(reg) i915_read64(dev_priv, (reg))
1344
1345#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
1346#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
1347
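
The NOTRACE/POSTING_READ variants above exist to flush posted MMIO writes: reading a register back forces preceding writes out to the device before the CPU proceeds. A typical pattern (illustrative; mask is a placeholder value, DEIMR the existing display-interrupt mask register):

	I915_WRITE(DEIMR, mask);	/* posted write, may still sit in a buffer */
	POSTING_READ(DEIMR);		/* read back: write is now guaranteed to land */
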
1348
1349/* On the SNB platform, the forcewake bit must be set before reading
1350 * ring registers, to keep the GT core from powering down and returning
1351 * stale values.
1352 */
1353void __gen6_force_wake_get(struct drm_i915_private *dev_priv);
1354void __gen6_force_wake_put(struct drm_i915_private *dev_priv);
1355static inline u32 i915_safe_read(struct drm_i915_private *dev_priv, u32 reg)
1188{ 1356{
1189 u32 val; 1357 u32 val;
1190 1358
1191 val = readl(dev_priv->regs + reg); 1359 if (dev_priv->info->gen >= 6) {
1192 if (dev_priv->debug_flags & I915_DEBUG_READ) 1360 __gen6_force_wake_get(dev_priv);
1193 printk(KERN_ERR "read 0x%08x from 0x%08x\n", val, reg); 1361 val = I915_READ(reg);
1362 __gen6_force_wake_put(dev_priv);
1363 } else
1364 val = I915_READ(reg);
1365
1194 return val; 1366 return val;
1195} 1367}
1196 1368
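
A usage sketch for i915_safe_read() above, which brackets the read with a forcewake get/put pair on gen6 so ring state is not stale. RING_TAIL() and RENDER_RING_BASE are assumed to be the register helpers from i915_reg.h:

	u32 tail = i915_safe_read(dev_priv, RING_TAIL(RENDER_RING_BASE));
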
1197static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg, 1369static inline void
1198 u32 val) 1370i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len)
1199{ 1371{
1200 writel(val, dev_priv->regs + reg); 1372 /* Trace the write before it reaches the hardware */
1201 if (dev_priv->debug_flags & I915_DEBUG_WRITE) 1373 trace_i915_reg_rw('W', reg, val, len);
1202 printk(KERN_ERR "wrote 0x%08x to 0x%08x\n", val, reg); 1374 switch (len) {
1375 case 8:
1376 writeq(val, dev_priv->regs + reg);
1377 break;
1378 case 4:
1379 writel(val, dev_priv->regs + reg);
1380 break;
1381 case 2:
1382 writew(val, dev_priv->regs + reg);
1383 break;
1384 case 1:
1385 writeb(val, dev_priv->regs + reg);
1386 break;
1387 }
1203} 1388}
1204 1389
1205#define I915_READ(reg) i915_read(dev_priv, (reg))
1206#define I915_WRITE(reg, val) i915_write(dev_priv, (reg), (val))
1207#define I915_READ16(reg) readw(dev_priv->regs + (reg))
1208#define I915_WRITE16(reg, val) writel(val, dev_priv->regs + (reg))
1209#define I915_READ8(reg) readb(dev_priv->regs + (reg))
1210#define I915_WRITE8(reg, val) writeb(val, dev_priv->regs + (reg))
1211#define I915_WRITE64(reg, val) writeq(val, dev_priv->regs + (reg))
1212#define I915_READ64(reg) readq(dev_priv->regs + (reg))
1213#define POSTING_READ(reg) (void)I915_READ(reg)
1214#define POSTING_READ16(reg) (void)I915_READ16(reg)
1215
1216#define I915_DEBUG_ENABLE_IO() (dev_priv->debug_flags |= I915_DEBUG_READ | \
1217 I915_DEBUG_WRITE)
1218#define I915_DEBUG_DISABLE_IO() (dev_priv->debug_flags &= ~(I915_DEBUG_READ | \
1219 I915_DEBUG_WRITE))
1220
1221#define I915_VERBOSE 0
1222
1223#define BEGIN_LP_RING(n) do { \
1224 drm_i915_private_t *dev_priv__ = dev->dev_private; \
1225 if (I915_VERBOSE) \
1226 DRM_DEBUG(" BEGIN_LP_RING %x\n", (int)(n)); \
1227 intel_ring_begin(dev, &dev_priv__->render_ring, (n)); \
1228} while (0)
1229
1230
1231#define OUT_RING(x) do { \
1232 drm_i915_private_t *dev_priv__ = dev->dev_private; \
1233 if (I915_VERBOSE) \
1234 DRM_DEBUG(" OUT_RING %x\n", (int)(x)); \
1235 intel_ring_emit(dev, &dev_priv__->render_ring, x); \
1236} while (0)
1237
1238#define ADVANCE_LP_RING() do { \
1239 drm_i915_private_t *dev_priv__ = dev->dev_private; \
1240 if (I915_VERBOSE) \
1241 DRM_DEBUG("ADVANCE_LP_RING %x\n", \
1242 dev_priv__->render_ring.tail); \
1243 intel_ring_advance(dev, &dev_priv__->render_ring); \
1244} while(0)
1245
1246/** 1390/**
1247 * Reads a dword out of the status page, which is written to from the command 1391 * Reads a dword out of the status page, which is written to from the command
1248 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or 1392 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
@@ -1259,72 +1403,9 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
1259 * The area from dword 0x20 to 0x3ff is available for driver usage. 1403 * The area from dword 0x20 to 0x3ff is available for driver usage.
1260 */ 1404 */
1261#define READ_HWSP(dev_priv, reg) (((volatile u32 *)\ 1405#define READ_HWSP(dev_priv, reg) (((volatile u32 *)\
1262 (dev_priv->render_ring.status_page.page_addr))[reg]) 1406 (LP_RING(dev_priv)->status_page.page_addr))[reg])
1263#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX) 1407#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
1264#define I915_GEM_HWS_INDEX 0x20 1408#define I915_GEM_HWS_INDEX 0x20
1265#define I915_BREADCRUMB_INDEX 0x21 1409#define I915_BREADCRUMB_INDEX 0x21
1266 1410
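
The breadcrumb dword above is where the ring reports the most recently completed sequence number; the legacy wait path polls it roughly like this (sketch; irq_nr is a sequence number emitted earlier):

	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
		/* the request that emitted irq_nr has completed */
	}
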
1267#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
1268
1269#define IS_I830(dev) ((dev)->pci_device == 0x3577)
1270#define IS_845G(dev) ((dev)->pci_device == 0x2562)
1271#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
1272#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
1273#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
1274#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
1275#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
1276#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
1277#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
1278#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
1279#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
1280#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
1281#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
1282#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
1283#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
1284#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
1285#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
1286#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
1287#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
1288
1289#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
1290#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3)
1291#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4)
1292#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
1293#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
1294
1295#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring)
1296#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
1297#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
1298
1299#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
1300#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
1301
1302/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
1303 * rows, which changed the alignment requirements and fence programming.
1304 */
1305#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
1306 IS_I915GM(dev)))
1307#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
1308#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev))
1309#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev))
1310#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
1311#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
1312#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
1313/* dsparb controlled by hw only */
1314#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
1315
1316#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
1317#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
1318#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
1319#define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)
1320
1321#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev))
1322#define HAS_PIPE_CONTROL(dev) (IS_GEN5(dev) || IS_GEN6(dev))
1323
1324#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
1325#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
1326#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
1327
1328#define PRIMARY_RINGBUFFER_SIZE (128*1024)
1329
1330#endif 1411#endif
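
The block removed above is the device-feature predicate table; the matching additions fall outside this excerpt, so it is presumably being relocated within the header rather than dropped. For orientation, a typical consumer of these predicates (illustrative snippet):

	if (HAS_PIPE_CONTROL(dev)) {
		/* gen5/6: flush via PIPE_CONTROL rather than MI_FLUSH */
	} else if (IS_GEN2(dev)) {
		/* gen2: pre-945 fence and tiling rules apply */
	}
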
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 275ec6ed43ae..c79c0b62ef60 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -34,38 +34,31 @@
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <linux/swap.h> 35#include <linux/swap.h>
36#include <linux/pci.h> 36#include <linux/pci.h>
37#include <linux/intel-gtt.h>
38 37
39static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj); 38static void i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
40 39static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
41static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj); 40static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
42static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj); 41static int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
43static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj); 42 bool write);
44static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, 43static int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
45 int write);
46static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
47 uint64_t offset, 44 uint64_t offset,
48 uint64_t size); 45 uint64_t size);
49static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj); 46static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
50static int i915_gem_object_wait_rendering(struct drm_gem_object *obj, 47static int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
51 bool interruptible); 48 unsigned alignment,
52static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, 49 bool map_and_fenceable);
53 unsigned alignment); 50static void i915_gem_clear_fence_reg(struct drm_device *dev,
54static void i915_gem_clear_fence_reg(struct drm_gem_object *obj); 51 struct drm_i915_fence_reg *reg);
55static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, 52static int i915_gem_phys_pwrite(struct drm_device *dev,
53 struct drm_i915_gem_object *obj,
56 struct drm_i915_gem_pwrite *args, 54 struct drm_i915_gem_pwrite *args,
57 struct drm_file *file_priv); 55 struct drm_file *file);
58static void i915_gem_free_object_tail(struct drm_gem_object *obj); 56static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);
59 57
60static int 58static int i915_gem_inactive_shrink(struct shrinker *shrinker,
61i915_gem_object_get_pages(struct drm_gem_object *obj, 59 int nr_to_scan,
62 gfp_t gfpmask); 60 gfp_t gfp_mask);
63 61
64static void
65i915_gem_object_put_pages(struct drm_gem_object *obj);
66
67static LIST_HEAD(shrink_list);
68static DEFINE_SPINLOCK(shrink_list_lock);
69 62
70/* some bookkeeping */ 63/* some bookkeeping */
71static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv, 64static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
@@ -82,34 +75,6 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
82 dev_priv->mm.object_memory -= size; 75 dev_priv->mm.object_memory -= size;
83} 76}
84 77
85static void i915_gem_info_add_gtt(struct drm_i915_private *dev_priv,
86 size_t size)
87{
88 dev_priv->mm.gtt_count++;
89 dev_priv->mm.gtt_memory += size;
90}
91
92static void i915_gem_info_remove_gtt(struct drm_i915_private *dev_priv,
93 size_t size)
94{
95 dev_priv->mm.gtt_count--;
96 dev_priv->mm.gtt_memory -= size;
97}
98
99static void i915_gem_info_add_pin(struct drm_i915_private *dev_priv,
100 size_t size)
101{
102 dev_priv->mm.pin_count++;
103 dev_priv->mm.pin_memory += size;
104}
105
106static void i915_gem_info_remove_pin(struct drm_i915_private *dev_priv,
107 size_t size)
108{
109 dev_priv->mm.pin_count--;
110 dev_priv->mm.pin_memory -= size;
111}
112
113int 78int
114i915_gem_check_is_wedged(struct drm_device *dev) 79i915_gem_check_is_wedged(struct drm_device *dev)
115{ 80{
@@ -140,7 +105,7 @@ i915_gem_check_is_wedged(struct drm_device *dev)
140 return -EIO; 105 return -EIO;
141} 106}
142 107
143static int i915_mutex_lock_interruptible(struct drm_device *dev) 108int i915_mutex_lock_interruptible(struct drm_device *dev)
144{ 109{
145 struct drm_i915_private *dev_priv = dev->dev_private; 110 struct drm_i915_private *dev_priv = dev->dev_private;
146 int ret; 111 int ret;
@@ -163,75 +128,76 @@ static int i915_mutex_lock_interruptible(struct drm_device *dev)
163} 128}
164 129
165static inline bool 130static inline bool
166i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv) 131i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
167{ 132{
168 return obj_priv->gtt_space && 133 return obj->gtt_space && !obj->active && obj->pin_count == 0;
169 !obj_priv->active &&
170 obj_priv->pin_count == 0;
171} 134}
172 135
173int i915_gem_do_init(struct drm_device *dev, 136void i915_gem_do_init(struct drm_device *dev,
174 unsigned long start, 137 unsigned long start,
175 unsigned long end) 138 unsigned long mappable_end,
139 unsigned long end)
176{ 140{
177 drm_i915_private_t *dev_priv = dev->dev_private; 141 drm_i915_private_t *dev_priv = dev->dev_private;
178 142
179 if (start >= end ||
180 (start & (PAGE_SIZE - 1)) != 0 ||
181 (end & (PAGE_SIZE - 1)) != 0) {
182 return -EINVAL;
183 }
184
185 drm_mm_init(&dev_priv->mm.gtt_space, start, 143 drm_mm_init(&dev_priv->mm.gtt_space, start,
186 end - start); 144 end - start);
187 145
188 dev_priv->mm.gtt_total = end - start; 146 dev_priv->mm.gtt_total = end - start;
189 147 dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
190 return 0; 148 dev_priv->mm.gtt_mappable_end = mappable_end;
191} 149}
192 150
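
i915_gem_do_init() now takes the CPU-mappable end separately from the total GTT end (and returns void, the range validation having moved into the ioctl below). A load-time call sketch with placeholder sizes:

	/* GTT space past mappable_size is GPU-usable but not CPU-mappable. */
	i915_gem_do_init(dev, 0, mappable_size, total_gtt_size);
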
193int 151int
194i915_gem_init_ioctl(struct drm_device *dev, void *data, 152i915_gem_init_ioctl(struct drm_device *dev, void *data,
195 struct drm_file *file_priv) 153 struct drm_file *file)
196{ 154{
197 struct drm_i915_gem_init *args = data; 155 struct drm_i915_gem_init *args = data;
198 int ret; 156
157 if (args->gtt_start >= args->gtt_end ||
158 (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
159 return -EINVAL;
199 160
200 mutex_lock(&dev->struct_mutex); 161 mutex_lock(&dev->struct_mutex);
201 ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end); 162 i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end);
202 mutex_unlock(&dev->struct_mutex); 163 mutex_unlock(&dev->struct_mutex);
203 164
204 return ret; 165 return 0;
205} 166}
206 167
207int 168int
208i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, 169i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
209 struct drm_file *file_priv) 170 struct drm_file *file)
210{ 171{
211 struct drm_i915_private *dev_priv = dev->dev_private; 172 struct drm_i915_private *dev_priv = dev->dev_private;
212 struct drm_i915_gem_get_aperture *args = data; 173 struct drm_i915_gem_get_aperture *args = data;
174 struct drm_i915_gem_object *obj;
175 size_t pinned;
213 176
214 if (!(dev->driver->driver_features & DRIVER_GEM)) 177 if (!(dev->driver->driver_features & DRIVER_GEM))
215 return -ENODEV; 178 return -ENODEV;
216 179
180 pinned = 0;
217 mutex_lock(&dev->struct_mutex); 181 mutex_lock(&dev->struct_mutex);
218 args->aper_size = dev_priv->mm.gtt_total; 182 list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
219 args->aper_available_size = args->aper_size - dev_priv->mm.pin_memory; 183 pinned += obj->gtt_space->size;
220 mutex_unlock(&dev->struct_mutex); 184 mutex_unlock(&dev->struct_mutex);
221 185
186 args->aper_size = dev_priv->mm.gtt_total;
187 args->aper_available_size = args->aper_size - pinned;
188
222 return 0; 189 return 0;
223} 190}
224 191
225
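
A userspace-side sketch of the aperture query above (assumes libdrm's drmIoctl and an open DRM fd; not part of this patch):

	struct drm_i915_gem_get_aperture ap;
	memset(&ap, 0, sizeof(ap));
	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &ap) == 0)
		printf("available %llu of %llu bytes\n",
		       (unsigned long long)ap.aper_available_size,
		       (unsigned long long)ap.aper_size);
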
226/** 192/**
227 * Creates a new mm object and returns a handle to it. 193 * Creates a new mm object and returns a handle to it.
228 */ 194 */
229int 195int
230i915_gem_create_ioctl(struct drm_device *dev, void *data, 196i915_gem_create_ioctl(struct drm_device *dev, void *data,
231 struct drm_file *file_priv) 197 struct drm_file *file)
232{ 198{
233 struct drm_i915_gem_create *args = data; 199 struct drm_i915_gem_create *args = data;
234 struct drm_gem_object *obj; 200 struct drm_i915_gem_object *obj;
235 int ret; 201 int ret;
236 u32 handle; 202 u32 handle;
237 203
@@ -242,45 +208,28 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
242 if (obj == NULL) 208 if (obj == NULL)
243 return -ENOMEM; 209 return -ENOMEM;
244 210
245 ret = drm_gem_handle_create(file_priv, obj, &handle); 211 ret = drm_gem_handle_create(file, &obj->base, &handle);
246 if (ret) { 212 if (ret) {
247 drm_gem_object_release(obj); 213 drm_gem_object_release(&obj->base);
248 i915_gem_info_remove_obj(dev->dev_private, obj->size); 214 i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
249 kfree(obj); 215 kfree(obj);
250 return ret; 216 return ret;
251 } 217 }
252 218
253 /* drop reference from allocate - handle holds it now */ 219 /* drop reference from allocate - handle holds it now */
254 drm_gem_object_unreference(obj); 220 drm_gem_object_unreference(&obj->base);
255 trace_i915_gem_object_create(obj); 221 trace_i915_gem_object_create(obj);
256 222
257 args->handle = handle; 223 args->handle = handle;
258 return 0; 224 return 0;
259} 225}
260 226
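
And the userspace-side counterpart of the create ioctl (again assuming libdrm; illustrative only):

	struct drm_i915_gem_create create;
	memset(&create, 0, sizeof(create));
	create.size = 4096;		/* one page */
	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) == 0) {
		/* create.handle now names the new object */
	}
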
261static inline int 227static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
262fast_shmem_read(struct page **pages,
263 loff_t page_base, int page_offset,
264 char __user *data,
265 int length)
266{
267 char *vaddr;
268 int ret;
269
270 vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
271 ret = __copy_to_user_inatomic(data, vaddr + page_offset, length);
272 kunmap_atomic(vaddr);
273
274 return ret;
275}
276
277static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
278{ 228{
279 drm_i915_private_t *dev_priv = obj->dev->dev_private; 229 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
280 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
281 230
282 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && 231 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
283 obj_priv->tiling_mode != I915_TILING_NONE; 232 obj->tiling_mode != I915_TILING_NONE;
284} 233}
285 234
286static inline void 235static inline void
@@ -356,38 +305,51 @@ slow_shmem_bit17_copy(struct page *gpu_page,
356 * fault, it fails so we can fall back to i915_gem_shmem_pread_slow(). 305 * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
357 */ 306 */
358static int 307static int
359i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, 308i915_gem_shmem_pread_fast(struct drm_device *dev,
309 struct drm_i915_gem_object *obj,
360 struct drm_i915_gem_pread *args, 310 struct drm_i915_gem_pread *args,
361 struct drm_file *file_priv) 311 struct drm_file *file)
362{ 312{
363 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 313 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
364 ssize_t remain; 314 ssize_t remain;
365 loff_t offset, page_base; 315 loff_t offset;
366 char __user *user_data; 316 char __user *user_data;
367 int page_offset, page_length; 317 int page_offset, page_length;
368 318
369 user_data = (char __user *) (uintptr_t) args->data_ptr; 319 user_data = (char __user *) (uintptr_t) args->data_ptr;
370 remain = args->size; 320 remain = args->size;
371 321
372 obj_priv = to_intel_bo(obj);
373 offset = args->offset; 322 offset = args->offset;
374 323
375 while (remain > 0) { 324 while (remain > 0) {
325 struct page *page;
326 char *vaddr;
327 int ret;
328
376 /* Operation in this page 329 /* Operation in this page
377 * 330 *
378 * page_base = page offset within aperture
379 * page_offset = offset within page 331 * page_offset = offset within page
380 * page_length = bytes to copy for this page 332 * page_length = bytes to copy for this page
381 */ 333 */
382 page_base = (offset & ~(PAGE_SIZE-1));
383 page_offset = offset & (PAGE_SIZE-1); 334 page_offset = offset & (PAGE_SIZE-1);
384 page_length = remain; 335 page_length = remain;
385 if ((page_offset + remain) > PAGE_SIZE) 336 if ((page_offset + remain) > PAGE_SIZE)
386 page_length = PAGE_SIZE - page_offset; 337 page_length = PAGE_SIZE - page_offset;
387 338
388 if (fast_shmem_read(obj_priv->pages, 339 page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
389 page_base, page_offset, 340 GFP_HIGHUSER | __GFP_RECLAIMABLE);
390 user_data, page_length)) 341 if (IS_ERR(page))
342 return PTR_ERR(page);
343
344 vaddr = kmap_atomic(page);
345 ret = __copy_to_user_inatomic(user_data,
346 vaddr + page_offset,
347 page_length);
348 kunmap_atomic(vaddr);
349
350 mark_page_accessed(page);
351 page_cache_release(page);
352 if (ret)
391 return -EFAULT; 353 return -EFAULT;
392 354
393 remain -= page_length; 355 remain -= page_length;
@@ -398,30 +360,6 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
398 return 0; 360 return 0;
399} 361}
400 362
401static int
402i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
403{
404 int ret;
405
406 ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);
407
408 /* If we've insufficient memory to map in the pages, attempt
409 * to make some space by throwing out some old buffers.
410 */
411 if (ret == -ENOMEM) {
412 struct drm_device *dev = obj->dev;
413
414 ret = i915_gem_evict_something(dev, obj->size,
415 i915_gem_get_gtt_alignment(obj));
416 if (ret)
417 return ret;
418
419 ret = i915_gem_object_get_pages(obj, 0);
420 }
421
422 return ret;
423}
424
425/** 363/**
426 * This is the fallback shmem pread path, which allocates temporary storage 364 * This is the fallback shmem pread path, which allocates temporary storage
427 * in kernel space to copy_to_user into outside of the struct_mutex, so we 365 * in kernel space to copy_to_user into outside of the struct_mutex, so we
@@ -429,18 +367,19 @@ i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
429 * and not take page faults. 367 * and not take page faults.
430 */ 368 */
431static int 369static int
432i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, 370i915_gem_shmem_pread_slow(struct drm_device *dev,
371 struct drm_i915_gem_object *obj,
433 struct drm_i915_gem_pread *args, 372 struct drm_i915_gem_pread *args,
434 struct drm_file *file_priv) 373 struct drm_file *file)
435{ 374{
436 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 375 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
437 struct mm_struct *mm = current->mm; 376 struct mm_struct *mm = current->mm;
438 struct page **user_pages; 377 struct page **user_pages;
439 ssize_t remain; 378 ssize_t remain;
440 loff_t offset, pinned_pages, i; 379 loff_t offset, pinned_pages, i;
441 loff_t first_data_page, last_data_page, num_pages; 380 loff_t first_data_page, last_data_page, num_pages;
442 int shmem_page_index, shmem_page_offset; 381 int shmem_page_offset;
443 int data_page_index, data_page_offset; 382 int data_page_index, data_page_offset;
444 int page_length; 383 int page_length;
445 int ret; 384 int ret;
446 uint64_t data_ptr = args->data_ptr; 385 uint64_t data_ptr = args->data_ptr;
@@ -479,19 +418,18 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
479 418
480 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); 419 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
481 420
482 obj_priv = to_intel_bo(obj);
483 offset = args->offset; 421 offset = args->offset;
484 422
485 while (remain > 0) { 423 while (remain > 0) {
424 struct page *page;
425
486 /* Operation in this page 426 /* Operation in this page
487 * 427 *
488 * shmem_page_index = page number within shmem file
489 * shmem_page_offset = offset within page in shmem file 428 * shmem_page_offset = offset within page in shmem file
490 * data_page_index = page number in get_user_pages return 429 * data_page_index = page number in get_user_pages return
491 * data_page_offset = offset with data_page_index page. 430 * data_page_offset = offset with data_page_index page.
492 * page_length = bytes to copy for this page 431 * page_length = bytes to copy for this page
493 */ 432 */
494 shmem_page_index = offset / PAGE_SIZE;
495 shmem_page_offset = offset & ~PAGE_MASK; 433 shmem_page_offset = offset & ~PAGE_MASK;
496 data_page_index = data_ptr / PAGE_SIZE - first_data_page; 434 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
497 data_page_offset = data_ptr & ~PAGE_MASK; 435 data_page_offset = data_ptr & ~PAGE_MASK;
@@ -502,8 +440,13 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
502 if ((data_page_offset + page_length) > PAGE_SIZE) 440 if ((data_page_offset + page_length) > PAGE_SIZE)
503 page_length = PAGE_SIZE - data_page_offset; 441 page_length = PAGE_SIZE - data_page_offset;
504 442
443 page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
444 GFP_HIGHUSER | __GFP_RECLAIMABLE);
445 if (IS_ERR(page))
446 return PTR_ERR(page);
447
505 if (do_bit17_swizzling) { 448 if (do_bit17_swizzling) {
506 slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index], 449 slow_shmem_bit17_copy(page,
507 shmem_page_offset, 450 shmem_page_offset,
508 user_pages[data_page_index], 451 user_pages[data_page_index],
509 data_page_offset, 452 data_page_offset,
@@ -512,11 +455,14 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
512 } else { 455 } else {
513 slow_shmem_copy(user_pages[data_page_index], 456 slow_shmem_copy(user_pages[data_page_index],
514 data_page_offset, 457 data_page_offset,
515 obj_priv->pages[shmem_page_index], 458 page,
516 shmem_page_offset, 459 shmem_page_offset,
517 page_length); 460 page_length);
518 } 461 }
519 462
463 mark_page_accessed(page);
464 page_cache_release(page);
465
520 remain -= page_length; 466 remain -= page_length;
521 data_ptr += page_length; 467 data_ptr += page_length;
522 offset += page_length; 468 offset += page_length;
@@ -525,6 +471,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
525out: 471out:
526 for (i = 0; i < pinned_pages; i++) { 472 for (i = 0; i < pinned_pages; i++) {
527 SetPageDirty(user_pages[i]); 473 SetPageDirty(user_pages[i]);
474 mark_page_accessed(user_pages[i]);
528 page_cache_release(user_pages[i]); 475 page_cache_release(user_pages[i]);
529 } 476 }
530 drm_free_large(user_pages); 477 drm_free_large(user_pages);
@@ -539,11 +486,10 @@ out:
539 */ 486 */
540int 487int
541i915_gem_pread_ioctl(struct drm_device *dev, void *data, 488i915_gem_pread_ioctl(struct drm_device *dev, void *data,
542 struct drm_file *file_priv) 489 struct drm_file *file)
543{ 490{
544 struct drm_i915_gem_pread *args = data; 491 struct drm_i915_gem_pread *args = data;
545 struct drm_gem_object *obj; 492 struct drm_i915_gem_object *obj;
546 struct drm_i915_gem_object *obj_priv;
547 int ret = 0; 493 int ret = 0;
548 494
549 if (args->size == 0) 495 if (args->size == 0)
@@ -563,39 +509,33 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
563 if (ret) 509 if (ret)
564 return ret; 510 return ret;
565 511
566 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 512 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
567 if (obj == NULL) { 513 if (obj == NULL) {
568 ret = -ENOENT; 514 ret = -ENOENT;
569 goto unlock; 515 goto unlock;
570 } 516 }
571 obj_priv = to_intel_bo(obj);
572 517
573 /* Bounds check source. */ 518 /* Bounds check source. */
574 if (args->offset > obj->size || args->size > obj->size - args->offset) { 519 if (args->offset > obj->base.size ||
520 args->size > obj->base.size - args->offset) {
575 ret = -EINVAL; 521 ret = -EINVAL;
576 goto out; 522 goto out;
577 } 523 }
578 524
579 ret = i915_gem_object_get_pages_or_evict(obj);
580 if (ret)
581 goto out;
582
583 ret = i915_gem_object_set_cpu_read_domain_range(obj, 525 ret = i915_gem_object_set_cpu_read_domain_range(obj,
584 args->offset, 526 args->offset,
585 args->size); 527 args->size);
586 if (ret) 528 if (ret)
587 goto out_put; 529 goto out;
588 530
589 ret = -EFAULT; 531 ret = -EFAULT;
590 if (!i915_gem_object_needs_bit17_swizzle(obj)) 532 if (!i915_gem_object_needs_bit17_swizzle(obj))
591 ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv); 533 ret = i915_gem_shmem_pread_fast(dev, obj, args, file);
592 if (ret == -EFAULT) 534 if (ret == -EFAULT)
593 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv); 535 ret = i915_gem_shmem_pread_slow(dev, obj, args, file);
594 536
595out_put:
596 i915_gem_object_put_pages(obj);
597out: 537out:
598 drm_gem_object_unreference(obj); 538 drm_gem_object_unreference(&obj->base);
599unlock: 539unlock:
600 mutex_unlock(&dev->struct_mutex); 540 mutex_unlock(&dev->struct_mutex);
601 return ret; 541 return ret;
@@ -645,32 +585,16 @@ slow_kernel_write(struct io_mapping *mapping,
645 io_mapping_unmap(dst_vaddr); 585 io_mapping_unmap(dst_vaddr);
646} 586}
647 587
648static inline int
649fast_shmem_write(struct page **pages,
650 loff_t page_base, int page_offset,
651 char __user *data,
652 int length)
653{
654 char *vaddr;
655 int ret;
656
657 vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
658 ret = __copy_from_user_inatomic(vaddr + page_offset, data, length);
659 kunmap_atomic(vaddr);
660
661 return ret;
662}
663
664/** 588/**
665 * This is the fast pwrite path, where we copy the data directly from the 589 * This is the fast pwrite path, where we copy the data directly from the
666 * user into the GTT, uncached. 590 * user into the GTT, uncached.
667 */ 591 */
668static int 592static int
669i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, 593i915_gem_gtt_pwrite_fast(struct drm_device *dev,
594 struct drm_i915_gem_object *obj,
670 struct drm_i915_gem_pwrite *args, 595 struct drm_i915_gem_pwrite *args,
671 struct drm_file *file_priv) 596 struct drm_file *file)
672{ 597{
673 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
674 drm_i915_private_t *dev_priv = dev->dev_private; 598 drm_i915_private_t *dev_priv = dev->dev_private;
675 ssize_t remain; 599 ssize_t remain;
676 loff_t offset, page_base; 600 loff_t offset, page_base;
@@ -680,8 +604,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
680 user_data = (char __user *) (uintptr_t) args->data_ptr; 604 user_data = (char __user *) (uintptr_t) args->data_ptr;
681 remain = args->size; 605 remain = args->size;
682 606
683 obj_priv = to_intel_bo(obj); 607 offset = obj->gtt_offset + args->offset;
684 offset = obj_priv->gtt_offset + args->offset;
685 608
686 while (remain > 0) { 609 while (remain > 0) {
687 /* Operation in this page 610 /* Operation in this page
@@ -721,11 +644,11 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
721 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit). 644 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
722 */ 645 */
723static int 646static int
724i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, 647i915_gem_gtt_pwrite_slow(struct drm_device *dev,
648 struct drm_i915_gem_object *obj,
725 struct drm_i915_gem_pwrite *args, 649 struct drm_i915_gem_pwrite *args,
726 struct drm_file *file_priv) 650 struct drm_file *file)
727{ 651{
728 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
729 drm_i915_private_t *dev_priv = dev->dev_private; 652 drm_i915_private_t *dev_priv = dev->dev_private;
730 ssize_t remain; 653 ssize_t remain;
731 loff_t gtt_page_base, offset; 654 loff_t gtt_page_base, offset;
@@ -762,12 +685,15 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
762 goto out_unpin_pages; 685 goto out_unpin_pages;
763 } 686 }
764 687
765 ret = i915_gem_object_set_to_gtt_domain(obj, 1); 688 ret = i915_gem_object_set_to_gtt_domain(obj, true);
766 if (ret) 689 if (ret)
767 goto out_unpin_pages; 690 goto out_unpin_pages;
768 691
769 obj_priv = to_intel_bo(obj); 692 ret = i915_gem_object_put_fence(obj);
770 offset = obj_priv->gtt_offset + args->offset; 693 if (ret)
694 goto out_unpin_pages;
695
696 offset = obj->gtt_offset + args->offset;
771 697
772 while (remain > 0) { 698 while (remain > 0) {
773 /* Operation in this page 699 /* Operation in this page
@@ -813,39 +739,58 @@ out_unpin_pages:
813 * copy_from_user into the kmapped pages backing the object. 739 * copy_from_user into the kmapped pages backing the object.
814 */ 740 */
815static int 741static int
816i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, 742i915_gem_shmem_pwrite_fast(struct drm_device *dev,
743 struct drm_i915_gem_object *obj,
817 struct drm_i915_gem_pwrite *args, 744 struct drm_i915_gem_pwrite *args,
818 struct drm_file *file_priv) 745 struct drm_file *file)
819{ 746{
820 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 747 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
821 ssize_t remain; 748 ssize_t remain;
822 loff_t offset, page_base; 749 loff_t offset;
823 char __user *user_data; 750 char __user *user_data;
824 int page_offset, page_length; 751 int page_offset, page_length;
825 752
826 user_data = (char __user *) (uintptr_t) args->data_ptr; 753 user_data = (char __user *) (uintptr_t) args->data_ptr;
827 remain = args->size; 754 remain = args->size;
828 755
829 obj_priv = to_intel_bo(obj);
830 offset = args->offset; 756 offset = args->offset;
831 obj_priv->dirty = 1; 757 obj->dirty = 1;
832 758
833 while (remain > 0) { 759 while (remain > 0) {
760 struct page *page;
761 char *vaddr;
762 int ret;
763
834 /* Operation in this page 764 /* Operation in this page
835 * 765 *
836 * page_base = page offset within aperture
837 * page_offset = offset within page 766 * page_offset = offset within page
838 * page_length = bytes to copy for this page 767 * page_length = bytes to copy for this page
839 */ 768 */
840 page_base = (offset & ~(PAGE_SIZE-1));
841 page_offset = offset & (PAGE_SIZE-1); 769 page_offset = offset & (PAGE_SIZE-1);
842 page_length = remain; 770 page_length = remain;
843 if ((page_offset + remain) > PAGE_SIZE) 771 if ((page_offset + remain) > PAGE_SIZE)
844 page_length = PAGE_SIZE - page_offset; 772 page_length = PAGE_SIZE - page_offset;
845 773
846 if (fast_shmem_write(obj_priv->pages, 774 page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
847 page_base, page_offset, 775 GFP_HIGHUSER | __GFP_RECLAIMABLE);
848 user_data, page_length)) 776 if (IS_ERR(page))
777 return PTR_ERR(page);
778
779 vaddr = kmap_atomic(page, KM_USER0);
780 ret = __copy_from_user_inatomic(vaddr + page_offset,
781 user_data,
782 page_length);
783 kunmap_atomic(vaddr, KM_USER0);
784
785 set_page_dirty(page);
786 mark_page_accessed(page);
787 page_cache_release(page);
788
789 /* If we get a fault while copying data, then (presumably) our
790 * source page isn't available. Return the error and we'll
791 * retry in the slow path.
792 */
793 if (ret)
849 return -EFAULT; 794 return -EFAULT;
850 795
851 remain -= page_length; 796 remain -= page_length;
@@ -864,17 +809,18 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
864 * struct_mutex is held. 809 * struct_mutex is held.
865 */ 810 */
866static int 811static int
867i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, 812i915_gem_shmem_pwrite_slow(struct drm_device *dev,
813 struct drm_i915_gem_object *obj,
868 struct drm_i915_gem_pwrite *args, 814 struct drm_i915_gem_pwrite *args,
869 struct drm_file *file_priv) 815 struct drm_file *file)
870{ 816{
871 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 817 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
872 struct mm_struct *mm = current->mm; 818 struct mm_struct *mm = current->mm;
873 struct page **user_pages; 819 struct page **user_pages;
874 ssize_t remain; 820 ssize_t remain;
875 loff_t offset, pinned_pages, i; 821 loff_t offset, pinned_pages, i;
876 loff_t first_data_page, last_data_page, num_pages; 822 loff_t first_data_page, last_data_page, num_pages;
877 int shmem_page_index, shmem_page_offset; 823 int shmem_page_offset;
878 int data_page_index, data_page_offset; 824 int data_page_index, data_page_offset;
879 int page_length; 825 int page_length;
880 int ret; 826 int ret;
@@ -912,20 +858,19 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
912 858
913 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); 859 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
914 860
915 obj_priv = to_intel_bo(obj);
916 offset = args->offset; 861 offset = args->offset;
917 obj_priv->dirty = 1; 862 obj->dirty = 1;
918 863
919 while (remain > 0) { 864 while (remain > 0) {
865 struct page *page;
866
920 /* Operation in this page 867 /* Operation in this page
921 * 868 *
922 * shmem_page_index = page number within shmem file
923 * shmem_page_offset = offset within page in shmem file 869 * shmem_page_offset = offset within page in shmem file
924 * data_page_index = page number in get_user_pages return 870 * data_page_index = page number in get_user_pages return
925 * data_page_offset = offset with data_page_index page. 871 * data_page_offset = offset with data_page_index page.
926 * page_length = bytes to copy for this page 872 * page_length = bytes to copy for this page
927 */ 873 */
928 shmem_page_index = offset / PAGE_SIZE;
929 shmem_page_offset = offset & ~PAGE_MASK; 874 shmem_page_offset = offset & ~PAGE_MASK;
930 data_page_index = data_ptr / PAGE_SIZE - first_data_page; 875 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
931 data_page_offset = data_ptr & ~PAGE_MASK; 876 data_page_offset = data_ptr & ~PAGE_MASK;
@@ -936,21 +881,32 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
936 if ((data_page_offset + page_length) > PAGE_SIZE) 881 if ((data_page_offset + page_length) > PAGE_SIZE)
937 page_length = PAGE_SIZE - data_page_offset; 882 page_length = PAGE_SIZE - data_page_offset;
938 883
884 page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
885 GFP_HIGHUSER | __GFP_RECLAIMABLE);
886 if (IS_ERR(page)) {
887 ret = PTR_ERR(page);
888 goto out;
889 }
890
939 if (do_bit17_swizzling) { 891 if (do_bit17_swizzling) {
940 slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index], 892 slow_shmem_bit17_copy(page,
941 shmem_page_offset, 893 shmem_page_offset,
942 user_pages[data_page_index], 894 user_pages[data_page_index],
943 data_page_offset, 895 data_page_offset,
944 page_length, 896 page_length,
945 0); 897 0);
946 } else { 898 } else {
947 slow_shmem_copy(obj_priv->pages[shmem_page_index], 899 slow_shmem_copy(page,
948 shmem_page_offset, 900 shmem_page_offset,
949 user_pages[data_page_index], 901 user_pages[data_page_index],
950 data_page_offset, 902 data_page_offset,
951 page_length); 903 page_length);
952 } 904 }
953 905
906 set_page_dirty(page);
907 mark_page_accessed(page);
908 page_cache_release(page);
909
954 remain -= page_length; 910 remain -= page_length;
955 data_ptr += page_length; 911 data_ptr += page_length;
956 offset += page_length; 912 offset += page_length;
@@ -974,8 +930,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
974 struct drm_file *file) 930 struct drm_file *file)
975{ 931{
976 struct drm_i915_gem_pwrite *args = data; 932 struct drm_i915_gem_pwrite *args = data;
977 struct drm_gem_object *obj; 933 struct drm_i915_gem_object *obj;
978 struct drm_i915_gem_object *obj_priv;
979 int ret; 934 int ret;
980 935
981 if (args->size == 0) 936 if (args->size == 0)
@@ -995,15 +950,15 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
995 if (ret) 950 if (ret)
996 return ret; 951 return ret;
997 952
998 obj = drm_gem_object_lookup(dev, file, args->handle); 953 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
999 if (obj == NULL) { 954 if (obj == NULL) {
1000 ret = -ENOENT; 955 ret = -ENOENT;
1001 goto unlock; 956 goto unlock;
1002 } 957 }
1003 obj_priv = to_intel_bo(obj);
1004 958
1005 /* Bounds check destination. */ 959 /* Bounds check destination. */
1006 if (args->offset > obj->size || args->size > obj->size - args->offset) { 960 if (args->offset > obj->base.size ||
961 args->size > obj->base.size - args->offset) {
1007 ret = -EINVAL; 962 ret = -EINVAL;
1008 goto out; 963 goto out;
1009 } 964 }
@@ -1014,16 +969,19 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1014 * pread/pwrite currently are reading and writing from the CPU 969 * pread/pwrite currently are reading and writing from the CPU
1015 * perspective, requiring manual detiling by the client. 970 * perspective, requiring manual detiling by the client.
1016 */ 971 */
1017 if (obj_priv->phys_obj) 972 if (obj->phys_obj)
1018 ret = i915_gem_phys_pwrite(dev, obj, args, file); 973 ret = i915_gem_phys_pwrite(dev, obj, args, file);
1019 else if (obj_priv->tiling_mode == I915_TILING_NONE && 974 else if (obj->gtt_space &&
1020 obj_priv->gtt_space && 975 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
1021 obj->write_domain != I915_GEM_DOMAIN_CPU) { 976 ret = i915_gem_object_pin(obj, 0, true);
1022 ret = i915_gem_object_pin(obj, 0);
1023 if (ret) 977 if (ret)
1024 goto out; 978 goto out;
1025 979
1026 ret = i915_gem_object_set_to_gtt_domain(obj, 1); 980 ret = i915_gem_object_set_to_gtt_domain(obj, true);
981 if (ret)
982 goto out_unpin;
983
984 ret = i915_gem_object_put_fence(obj);
1027 if (ret) 985 if (ret)
1028 goto out_unpin; 986 goto out_unpin;
1029 987
@@ -1034,26 +992,19 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1034out_unpin: 992out_unpin:
1035 i915_gem_object_unpin(obj); 993 i915_gem_object_unpin(obj);
1036 } else { 994 } else {
1037 ret = i915_gem_object_get_pages_or_evict(obj);
1038 if (ret)
1039 goto out;
1040
1041 ret = i915_gem_object_set_to_cpu_domain(obj, 1); 995 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
1042 if (ret) 996 if (ret)
1043 goto out_put; 997 goto out;
1044 998
1045 ret = -EFAULT; 999 ret = -EFAULT;
1046 if (!i915_gem_object_needs_bit17_swizzle(obj)) 1000 if (!i915_gem_object_needs_bit17_swizzle(obj))
1047 ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file); 1001 ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
1048 if (ret == -EFAULT) 1002 if (ret == -EFAULT)
1049 ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file); 1003 ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
1050
1051out_put:
1052 i915_gem_object_put_pages(obj);
1053 } 1004 }
1054 1005
1055out: 1006out:
1056 drm_gem_object_unreference(obj); 1007 drm_gem_object_unreference(&obj->base);
1057unlock: 1008unlock:
1058 mutex_unlock(&dev->struct_mutex); 1009 mutex_unlock(&dev->struct_mutex);
1059 return ret; 1010 return ret;
@@ -1065,12 +1016,10 @@ unlock:
1065 */ 1016 */
1066int 1017int
1067i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, 1018i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1068 struct drm_file *file_priv) 1019 struct drm_file *file)
1069{ 1020{
1070 struct drm_i915_private *dev_priv = dev->dev_private;
1071 struct drm_i915_gem_set_domain *args = data; 1021 struct drm_i915_gem_set_domain *args = data;
1072 struct drm_gem_object *obj; 1022 struct drm_i915_gem_object *obj;
1073 struct drm_i915_gem_object *obj_priv;
1074 uint32_t read_domains = args->read_domains; 1023 uint32_t read_domains = args->read_domains;
1075 uint32_t write_domain = args->write_domain; 1024 uint32_t write_domain = args->write_domain;
1076 int ret; 1025 int ret;
@@ -1095,28 +1044,15 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1095 if (ret) 1044 if (ret)
1096 return ret; 1045 return ret;
1097 1046
1098 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 1047 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1099 if (obj == NULL) { 1048 if (obj == NULL) {
1100 ret = -ENOENT; 1049 ret = -ENOENT;
1101 goto unlock; 1050 goto unlock;
1102 } 1051 }
1103 obj_priv = to_intel_bo(obj);
1104
1105 intel_mark_busy(dev, obj);
1106 1052
1107 if (read_domains & I915_GEM_DOMAIN_GTT) { 1053 if (read_domains & I915_GEM_DOMAIN_GTT) {
1108 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0); 1054 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1109 1055
1110 /* Update the LRU on the fence for the CPU access that's
1111 * about to occur.
1112 */
1113 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
1114 struct drm_i915_fence_reg *reg =
1115 &dev_priv->fence_regs[obj_priv->fence_reg];
1116 list_move_tail(&reg->lru_list,
1117 &dev_priv->mm.fence_list);
1118 }
1119
1120 /* Silently promote "you're not bound, there was nothing to do" 1056 /* Silently promote "you're not bound, there was nothing to do"
1121 * to success, since the client was just asking us to 1057 * to success, since the client was just asking us to
1122 * make sure everything was done. 1058 * make sure everything was done.
@@ -1127,11 +1063,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1127 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0); 1063 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1128 } 1064 }
1129 1065
1130 /* Maintain LRU order of "inactive" objects */ 1066 drm_gem_object_unreference(&obj->base);
1131 if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
1132 list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
1133
1134 drm_gem_object_unreference(obj);
1135unlock: 1067unlock:
1136 mutex_unlock(&dev->struct_mutex); 1068 mutex_unlock(&dev->struct_mutex);
1137 return ret; 1069 return ret;
@@ -1142,10 +1074,10 @@ unlock:
1142 */ 1074 */
1143int 1075int
1144i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, 1076i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1145 struct drm_file *file_priv) 1077 struct drm_file *file)
1146{ 1078{
1147 struct drm_i915_gem_sw_finish *args = data; 1079 struct drm_i915_gem_sw_finish *args = data;
1148 struct drm_gem_object *obj; 1080 struct drm_i915_gem_object *obj;
1149 int ret = 0; 1081 int ret = 0;
1150 1082
1151 if (!(dev->driver->driver_features & DRIVER_GEM)) 1083 if (!(dev->driver->driver_features & DRIVER_GEM))
@@ -1155,17 +1087,17 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1155 if (ret) 1087 if (ret)
1156 return ret; 1088 return ret;
1157 1089
1158 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 1090 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1159 if (obj == NULL) { 1091 if (obj == NULL) {
1160 ret = -ENOENT; 1092 ret = -ENOENT;
1161 goto unlock; 1093 goto unlock;
1162 } 1094 }
1163 1095
1164 /* Pinned buffers may be scanout, so flush the cache */ 1096 /* Pinned buffers may be scanout, so flush the cache */
1165 if (to_intel_bo(obj)->pin_count) 1097 if (obj->pin_count)
1166 i915_gem_object_flush_cpu_write_domain(obj); 1098 i915_gem_object_flush_cpu_write_domain(obj);
1167 1099
1168 drm_gem_object_unreference(obj); 1100 drm_gem_object_unreference(&obj->base);
1169unlock: 1101unlock:
1170 mutex_unlock(&dev->struct_mutex); 1102 mutex_unlock(&dev->struct_mutex);
1171 return ret; 1103 return ret;
@@ -1180,8 +1112,9 @@ unlock:
1180 */ 1112 */
1181int 1113int
1182i915_gem_mmap_ioctl(struct drm_device *dev, void *data, 1114i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1183 struct drm_file *file_priv) 1115 struct drm_file *file)
1184{ 1116{
1117 struct drm_i915_private *dev_priv = dev->dev_private;
1185 struct drm_i915_gem_mmap *args = data; 1118 struct drm_i915_gem_mmap *args = data;
1186 struct drm_gem_object *obj; 1119 struct drm_gem_object *obj;
1187 loff_t offset; 1120 loff_t offset;
@@ -1190,10 +1123,15 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1190 if (!(dev->driver->driver_features & DRIVER_GEM)) 1123 if (!(dev->driver->driver_features & DRIVER_GEM))
1191 return -ENODEV; 1124 return -ENODEV;
1192 1125
1193 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 1126 obj = drm_gem_object_lookup(dev, file, args->handle);
1194 if (obj == NULL) 1127 if (obj == NULL)
1195 return -ENOENT; 1128 return -ENOENT;
1196 1129
1130 if (obj->size > dev_priv->mm.gtt_mappable_end) {
1131 drm_gem_object_unreference_unlocked(obj);
1132 return -E2BIG;
1133 }
1134
1197 offset = args->offset; 1135 offset = args->offset;
1198 1136
1199 down_write(&current->mm->mmap_sem); 1137 down_write(&current->mm->mmap_sem);
@@ -1228,10 +1166,9 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1228 */ 1166 */
1229int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 1167int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1230{ 1168{
1231 struct drm_gem_object *obj = vma->vm_private_data; 1169 struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1232 struct drm_device *dev = obj->dev; 1170 struct drm_device *dev = obj->base.dev;
1233 drm_i915_private_t *dev_priv = dev->dev_private; 1171 drm_i915_private_t *dev_priv = dev->dev_private;
1234 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1235 pgoff_t page_offset; 1172 pgoff_t page_offset;
1236 unsigned long pfn; 1173 unsigned long pfn;
1237 int ret = 0; 1174 int ret = 0;
@@ -1243,27 +1180,35 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1243 1180
1244 /* Now bind it into the GTT if needed */ 1181 /* Now bind it into the GTT if needed */
1245 mutex_lock(&dev->struct_mutex); 1182 mutex_lock(&dev->struct_mutex);
1246 if (!obj_priv->gtt_space) {
1247 ret = i915_gem_object_bind_to_gtt(obj, 0);
1248 if (ret)
1249 goto unlock;
1250 1183
1251 ret = i915_gem_object_set_to_gtt_domain(obj, write); 1184 if (!obj->map_and_fenceable) {
1185 ret = i915_gem_object_unbind(obj);
1252 if (ret) 1186 if (ret)
1253 goto unlock; 1187 goto unlock;
1254 } 1188 }
1255 1189 if (!obj->gtt_space) {
1256 /* Need a new fence register? */ 1190 ret = i915_gem_object_bind_to_gtt(obj, 0, true);
1257 if (obj_priv->tiling_mode != I915_TILING_NONE) {
1258 ret = i915_gem_object_get_fence_reg(obj, true);
1259 if (ret) 1191 if (ret)
1260 goto unlock; 1192 goto unlock;
1261 } 1193 }
1262 1194
1263 if (i915_gem_object_is_inactive(obj_priv)) 1195 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1264 list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); 1196 if (ret)
1197 goto unlock;
1198
1199 if (obj->tiling_mode == I915_TILING_NONE)
1200 ret = i915_gem_object_put_fence(obj);
1201 else
1202 ret = i915_gem_object_get_fence(obj, NULL, true);
1203 if (ret)
1204 goto unlock;
1205
1206 if (i915_gem_object_is_inactive(obj))
1207 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1208
1209 obj->fault_mappable = true;
1265 1210
1266 pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) + 1211 pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) +
1267 page_offset; 1212 page_offset;
1268 1213
1269 /* Finally, remap it using the new GTT offset */ 1214 /* Finally, remap it using the new GTT offset */
@@ -1272,11 +1217,12 @@ unlock:
1272 mutex_unlock(&dev->struct_mutex); 1217 mutex_unlock(&dev->struct_mutex);
1273 1218
1274 switch (ret) { 1219 switch (ret) {
1220 case -EAGAIN:
1221 set_need_resched();
1275 case 0: 1222 case 0:
1276 case -ERESTARTSYS: 1223 case -ERESTARTSYS:
1277 return VM_FAULT_NOPAGE; 1224 return VM_FAULT_NOPAGE;
1278 case -ENOMEM: 1225 case -ENOMEM:
1279 case -EAGAIN:
1280 return VM_FAULT_OOM; 1226 return VM_FAULT_OOM;
1281 default: 1227 default:
1282 return VM_FAULT_SIGBUS; 1228 return VM_FAULT_SIGBUS;
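
The reordered switch above deliberately lets -EAGAIN fall through: after nudging the scheduler with set_need_resched(), the fault returns VM_FAULT_NOPAGE so the access is retried once the wedged GPU recovers, instead of being misreported as OOM. The equivalent control flow, spelled out:

	if (ret == -EAGAIN)
		set_need_resched();	/* give the reset handler a chance to run */
	if (ret == 0 || ret == -ERESTARTSYS || ret == -EAGAIN)
		return VM_FAULT_NOPAGE;	/* retry the faulting access */
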
@@ -1295,37 +1241,39 @@ unlock:
1295 * This routine allocates and attaches a fake offset for @obj. 1241 * This routine allocates and attaches a fake offset for @obj.
1296 */ 1242 */
1297static int 1243static int
1298i915_gem_create_mmap_offset(struct drm_gem_object *obj) 1244i915_gem_create_mmap_offset(struct drm_i915_gem_object *obj)
1299{ 1245{
1300 struct drm_device *dev = obj->dev; 1246 struct drm_device *dev = obj->base.dev;
1301 struct drm_gem_mm *mm = dev->mm_private; 1247 struct drm_gem_mm *mm = dev->mm_private;
1302 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1303 struct drm_map_list *list; 1248 struct drm_map_list *list;
1304 struct drm_local_map *map; 1249 struct drm_local_map *map;
1305 int ret = 0; 1250 int ret = 0;
1306 1251
1307 /* Set the object up for mmap'ing */ 1252 /* Set the object up for mmap'ing */
1308 list = &obj->map_list; 1253 list = &obj->base.map_list;
1309 list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL); 1254 list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
1310 if (!list->map) 1255 if (!list->map)
1311 return -ENOMEM; 1256 return -ENOMEM;
1312 1257
1313 map = list->map; 1258 map = list->map;
1314 map->type = _DRM_GEM; 1259 map->type = _DRM_GEM;
1315 map->size = obj->size; 1260 map->size = obj->base.size;
1316 map->handle = obj; 1261 map->handle = obj;
1317 1262
1318 /* Get a DRM GEM mmap offset allocated... */ 1263 /* Get a DRM GEM mmap offset allocated... */
1319 list->file_offset_node = drm_mm_search_free(&mm->offset_manager, 1264 list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
1320 obj->size / PAGE_SIZE, 0, 0); 1265 obj->base.size / PAGE_SIZE,
1266 0, 0);
1321 if (!list->file_offset_node) { 1267 if (!list->file_offset_node) {
1322 DRM_ERROR("failed to allocate offset for bo %d\n", obj->name); 1268 DRM_ERROR("failed to allocate offset for bo %d\n",
1269 obj->base.name);
1323 ret = -ENOSPC; 1270 ret = -ENOSPC;
1324 goto out_free_list; 1271 goto out_free_list;
1325 } 1272 }
1326 1273
1327 list->file_offset_node = drm_mm_get_block(list->file_offset_node, 1274 list->file_offset_node = drm_mm_get_block(list->file_offset_node,
1328 obj->size / PAGE_SIZE, 0); 1275 obj->base.size / PAGE_SIZE,
1276 0);
1329 if (!list->file_offset_node) { 1277 if (!list->file_offset_node) {
1330 ret = -ENOMEM; 1278 ret = -ENOMEM;
1331 goto out_free_list; 1279 goto out_free_list;
@@ -1338,16 +1286,13 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
1338 goto out_free_mm; 1286 goto out_free_mm;
1339 } 1287 }
1340 1288
1341 /* By now we should be all set, any drm_mmap request on the offset
1342 * below will get to our mmap & fault handler */
1343 obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;
1344
1345 return 0; 1289 return 0;
1346 1290
1347out_free_mm: 1291out_free_mm:
1348 drm_mm_put_block(list->file_offset_node); 1292 drm_mm_put_block(list->file_offset_node);
1349out_free_list: 1293out_free_list:
1350 kfree(list->map); 1294 kfree(list->map);
1295 list->map = NULL;
1351 1296
1352 return ret; 1297 return ret;
1353} 1298}
@@ -1367,38 +1312,51 @@ out_free_list:
1367 * fixup by i915_gem_fault(). 1312 * fixup by i915_gem_fault().
1368 */ 1313 */
1369void 1314void
1370i915_gem_release_mmap(struct drm_gem_object *obj) 1315i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1371{ 1316{
1372 struct drm_device *dev = obj->dev; 1317 if (!obj->fault_mappable)
1373 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 1318 return;
1374 1319
1375 if (dev->dev_mapping) 1320 unmap_mapping_range(obj->base.dev->dev_mapping,
1376 unmap_mapping_range(dev->dev_mapping, 1321 (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
1377 obj_priv->mmap_offset, obj->size, 1); 1322 obj->base.size, 1);
1323
1324 obj->fault_mappable = false;
1378} 1325}
1379 1326
1380static void 1327static void
1381i915_gem_free_mmap_offset(struct drm_gem_object *obj) 1328i915_gem_free_mmap_offset(struct drm_i915_gem_object *obj)
1382{ 1329{
1383 struct drm_device *dev = obj->dev; 1330 struct drm_device *dev = obj->base.dev;
1384 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1385 struct drm_gem_mm *mm = dev->mm_private; 1331 struct drm_gem_mm *mm = dev->mm_private;
1386 struct drm_map_list *list; 1332 struct drm_map_list *list = &obj->base.map_list;
1387 1333
1388 list = &obj->map_list;
1389 drm_ht_remove_item(&mm->offset_hash, &list->hash); 1334 drm_ht_remove_item(&mm->offset_hash, &list->hash);
1335 drm_mm_put_block(list->file_offset_node);
1336 kfree(list->map);
1337 list->map = NULL;
1338}
1390 1339
1391 if (list->file_offset_node) { 1340static uint32_t
1392 drm_mm_put_block(list->file_offset_node); 1341i915_gem_get_gtt_size(struct drm_i915_gem_object *obj)
1393 list->file_offset_node = NULL; 1342{
1394 } 1343 struct drm_device *dev = obj->base.dev;
1344 uint32_t size;
1395 1345
1396 if (list->map) { 1346 if (INTEL_INFO(dev)->gen >= 4 ||
1397 kfree(list->map); 1347 obj->tiling_mode == I915_TILING_NONE)
1398 list->map = NULL; 1348 return obj->base.size;
1399 } 1349
1350 /* Previous chips need a power-of-two fence region when tiling */
1351 if (INTEL_INFO(dev)->gen == 3)
1352 size = 1024*1024;
1353 else
1354 size = 512*1024;
1355
1356 while (size < obj->base.size)
1357 size <<= 1;
1400 1358
1401 obj_priv->mmap_offset = 0; 1359 return size;
1402} 1360}
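
The new i915_gem_get_gtt_size() is the classic round-up-to-the-next-power-of-two loop with a per-generation floor. A minimal standalone sketch of just that arithmetic (the 1MB/512KB floors and the doubling loop come from the hunk above; everything else is illustrative):

#include <stdint.h>
#include <stdio.h>

/* Pre-965 chips need a power-of-two fence region: start at the
 * generation's minimum and double until the object fits. */
static uint32_t fence_size(uint32_t obj_size, int gen)
{
    uint32_t size = (gen == 3) ? 1024 * 1024 : 512 * 1024;

    while (size < obj_size)
        size <<= 1;

    return size;
}

int main(void)
{
    /* a 1.5MB object on gen3 needs a 2MB fence region */
    printf("%u\n", (unsigned)fence_size(3 * 512 * 1024, 3));
    return 0;
}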
1403 1361
1404/** 1362/**
@@ -1406,42 +1364,68 @@ i915_gem_free_mmap_offset(struct drm_gem_object *obj)
1406 * @obj: object to check 1364 * @obj: object to check
1407 * 1365 *
1408 * Return the required GTT alignment for an object, taking into account 1366 * Return the required GTT alignment for an object, taking into account
1409 * potential fence register mapping if needed. 1367 * potential fence register mapping.
1410 */ 1368 */
1411static uint32_t 1369static uint32_t
1412i915_gem_get_gtt_alignment(struct drm_gem_object *obj) 1370i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj)
1413{ 1371{
1414 struct drm_device *dev = obj->dev; 1372 struct drm_device *dev = obj->base.dev;
1415 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1416 int start, i;
1417 1373
1418 /* 1374 /*
1419 * Minimum alignment is 4k (GTT page size), but might be greater 1375 * Minimum alignment is 4k (GTT page size), but might be greater
1420 * if a fence register is needed for the object. 1376 * if a fence register is needed for the object.
1421 */ 1377 */
1422 if (INTEL_INFO(dev)->gen >= 4 || obj_priv->tiling_mode == I915_TILING_NONE) 1378 if (INTEL_INFO(dev)->gen >= 4 ||
1379 obj->tiling_mode == I915_TILING_NONE)
1423 return 4096; 1380 return 4096;
1424 1381
1425 /* 1382 /*
1426 * Previous chips need to be aligned to the size of the smallest 1383 * Previous chips need to be aligned to the size of the smallest
1427 * fence register that can contain the object. 1384 * fence register that can contain the object.
1428 */ 1385 */
1429 if (INTEL_INFO(dev)->gen == 3) 1386 return i915_gem_get_gtt_size(obj);
1430 start = 1024*1024; 1387}
1431 else 1388
1432 start = 512*1024; 1389/**
1390 * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
1391 * unfenced object
1392 * @obj: object to check
1393 *
1394 * Return the required GTT alignment for an object, only taking into account
1395 * unfenced tiled surface requirements.
1396 */
1397static uint32_t
1398i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
1399{
1400 struct drm_device *dev = obj->base.dev;
1401 int tile_height;
1433 1402
1434 for (i = start; i < obj->size; i <<= 1) 1403 /*
1435 ; 1404 * Minimum alignment is 4k (GTT page size) for sane hw.
1405 */
1406 if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
1407 obj->tiling_mode == I915_TILING_NONE)
1408 return 4096;
1436 1409
1437 return i; 1410 /*
1411 * Older chips need unfenced tiled buffers to be aligned to the left
1412 * edge of an even tile row (where tile rows are counted as if the bo is
1413 * placed in a fenced gtt region).
1414 */
1415 if (IS_GEN2(dev) ||
1416 (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
1417 tile_height = 32;
1418 else
1419 tile_height = 8;
1420
1421 return tile_height * obj->stride * 2;
1438} 1422}
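
The unfenced alignment computed above boils down to two tile rows of bytes, so the bo can start on the left edge of an even tile row. A sketch of that rule, with the 32/8 tile heights taken from the hunk and the sample stride purely hypothetical:

#include <stdint.h>
#include <stdio.h>

/* Two tile rows' worth of bytes: tile_height depends on gen2 vs.
 * 128-byte Y-tiling, exactly as in the code above. */
static uint32_t unfenced_alignment(uint32_t stride, int tall_tiles)
{
    uint32_t tile_height = tall_tiles ? 32 : 8;

    return tile_height * stride * 2;
}

int main(void)
{
    printf("%u\n", (unsigned)unfenced_alignment(2048, 0)); /* 8*2048*2 = 32768 */
    return 0;
}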
1439 1423
1440/** 1424/**
1441 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing 1425 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1442 * @dev: DRM device 1426 * @dev: DRM device
1443 * @data: GTT mapping ioctl data 1427 * @data: GTT mapping ioctl data
1444 * @file_priv: GEM object info 1428 * @file: GEM object info
1445 * 1429 *
1446 * Simply returns the fake offset to userspace so it can mmap it. 1430 * Simply returns the fake offset to userspace so it can mmap it.
1447 * The mmap call will end up in drm_gem_mmap(), which will set things 1431 * The mmap call will end up in drm_gem_mmap(), which will set things
@@ -1454,11 +1438,11 @@ i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
1454 */ 1438 */
1455int 1439int
1456i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, 1440i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1457 struct drm_file *file_priv) 1441 struct drm_file *file)
1458{ 1442{
1443 struct drm_i915_private *dev_priv = dev->dev_private;
1459 struct drm_i915_gem_mmap_gtt *args = data; 1444 struct drm_i915_gem_mmap_gtt *args = data;
1460 struct drm_gem_object *obj; 1445 struct drm_i915_gem_object *obj;
1461 struct drm_i915_gem_object *obj_priv;
1462 int ret; 1446 int ret;
1463 1447
1464 if (!(dev->driver->driver_features & DRIVER_GEM)) 1448 if (!(dev->driver->driver_features & DRIVER_GEM))
@@ -1468,130 +1452,196 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1468 if (ret) 1452 if (ret)
1469 return ret; 1453 return ret;
1470 1454
1471 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 1455 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1472 if (obj == NULL) { 1456 if (obj == NULL) {
1473 ret = -ENOENT; 1457 ret = -ENOENT;
1474 goto unlock; 1458 goto unlock;
1475 } 1459 }
1476 obj_priv = to_intel_bo(obj);
1477 1460
1478 if (obj_priv->madv != I915_MADV_WILLNEED) { 1461 if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
1462 ret = -E2BIG;
1463 goto unlock;
1464 }
1465
1466 if (obj->madv != I915_MADV_WILLNEED) {
1479 DRM_ERROR("Attempting to mmap a purgeable buffer\n"); 1467 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1480 ret = -EINVAL; 1468 ret = -EINVAL;
1481 goto out; 1469 goto out;
1482 } 1470 }
1483 1471
1484 if (!obj_priv->mmap_offset) { 1472 if (!obj->base.map_list.map) {
1485 ret = i915_gem_create_mmap_offset(obj); 1473 ret = i915_gem_create_mmap_offset(obj);
1486 if (ret) 1474 if (ret)
1487 goto out; 1475 goto out;
1488 } 1476 }
1489 1477
1490 args->offset = obj_priv->mmap_offset; 1478 args->offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
1491
1492 /*
1493 * Pull it into the GTT so that we have a page list (makes the
1494 * initial fault faster and any subsequent flushing possible).
1495 */
1496 if (!obj_priv->agp_mem) {
1497 ret = i915_gem_object_bind_to_gtt(obj, 0);
1498 if (ret)
1499 goto out;
1500 }
1501 1479
1502out: 1480out:
1503 drm_gem_object_unreference(obj); 1481 drm_gem_object_unreference(&obj->base);
1504unlock: 1482unlock:
1505 mutex_unlock(&dev->struct_mutex); 1483 mutex_unlock(&dev->struct_mutex);
1506 return ret; 1484 return ret;
1507} 1485}
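
Note that args->offset now encodes nothing but the object's page index in the DRM mmap offset space, so drm_gem_mmap() can recover the object by shifting back down. A toy round trip of that arithmetic (the PAGE_SHIFT value and the key are assumptions):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12 /* assumed 4K pages */

int main(void)
{
    uint64_t hash_key = 0x1234;               /* hypothetical page index */
    uint64_t offset = hash_key << PAGE_SHIFT; /* what userspace mmaps at */

    printf("offset 0x%llx -> key 0x%llx\n",
           (unsigned long long)offset,
           (unsigned long long)(offset >> PAGE_SHIFT));
    return 0;
}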
1508 1486
1487static int
1488i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
1489 gfp_t gfpmask)
1490{
1491 int page_count, i;
1492 struct address_space *mapping;
1493 struct inode *inode;
1494 struct page *page;
1495
1496 /* Get the list of pages out of our struct file. They'll be pinned
1497 * at this point until we release them.
1498 */
1499 page_count = obj->base.size / PAGE_SIZE;
1500 BUG_ON(obj->pages != NULL);
1501 obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
1502 if (obj->pages == NULL)
1503 return -ENOMEM;
1504
1505 inode = obj->base.filp->f_path.dentry->d_inode;
1506 mapping = inode->i_mapping;
1507 for (i = 0; i < page_count; i++) {
1508 page = read_cache_page_gfp(mapping, i,
1509 GFP_HIGHUSER |
1510 __GFP_COLD |
1511 __GFP_RECLAIMABLE |
1512 gfpmask);
1513 if (IS_ERR(page))
1514 goto err_pages;
1515
1516 obj->pages[i] = page;
1517 }
1518
1519 if (obj->tiling_mode != I915_TILING_NONE)
1520 i915_gem_object_do_bit_17_swizzle(obj);
1521
1522 return 0;
1523
1524err_pages:
1525 while (i--)
1526 page_cache_release(obj->pages[i]);
1527
1528 drm_free_large(obj->pages);
1529 obj->pages = NULL;
1530 return PTR_ERR(page);
1531}
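
The err_pages label above relies on the while (i--) idiom to release exactly the pages acquired before the failing iteration. A self-contained model of that unwind, with get()/put() standing in for read_cache_page_gfp()/page_cache_release():

#include <stdlib.h>
#include <stdio.h>

static void *get(int i) { return i == 3 ? NULL : malloc(16); } /* fail at 3 */
static void put(void *p) { free(p); }

int main(void)
{
    void *pages[8];
    int i, n = 8;

    for (i = 0; i < n; i++) {
        pages[i] = get(i);
        if (!pages[i])
            goto err_pages;
    }
    return 0;

err_pages:
    while (i--)           /* releases pages[2], pages[1], pages[0] */
        put(pages[i]);
    fprintf(stderr, "page lookup failed\n");
    return 1;
}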
1532
1509static void 1533static void
1510i915_gem_object_put_pages(struct drm_gem_object *obj) 1534i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1511{ 1535{
1512 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 1536 int page_count = obj->base.size / PAGE_SIZE;
1513 int page_count = obj->size / PAGE_SIZE;
1514 int i; 1537 int i;
1515 1538
1516 BUG_ON(obj_priv->pages_refcount == 0); 1539 BUG_ON(obj->madv == __I915_MADV_PURGED);
1517 BUG_ON(obj_priv->madv == __I915_MADV_PURGED);
1518
1519 if (--obj_priv->pages_refcount != 0)
1520 return;
1521 1540
1522 if (obj_priv->tiling_mode != I915_TILING_NONE) 1541 if (obj->tiling_mode != I915_TILING_NONE)
1523 i915_gem_object_save_bit_17_swizzle(obj); 1542 i915_gem_object_save_bit_17_swizzle(obj);
1524 1543
1525 if (obj_priv->madv == I915_MADV_DONTNEED) 1544 if (obj->madv == I915_MADV_DONTNEED)
1526 obj_priv->dirty = 0; 1545 obj->dirty = 0;
1527 1546
1528 for (i = 0; i < page_count; i++) { 1547 for (i = 0; i < page_count; i++) {
1529 if (obj_priv->dirty) 1548 if (obj->dirty)
1530 set_page_dirty(obj_priv->pages[i]); 1549 set_page_dirty(obj->pages[i]);
1531 1550
1532 if (obj_priv->madv == I915_MADV_WILLNEED) 1551 if (obj->madv == I915_MADV_WILLNEED)
1533 mark_page_accessed(obj_priv->pages[i]); 1552 mark_page_accessed(obj->pages[i]);
1534 1553
1535 page_cache_release(obj_priv->pages[i]); 1554 page_cache_release(obj->pages[i]);
1536 } 1555 }
1537 obj_priv->dirty = 0; 1556 obj->dirty = 0;
1538 1557
1539 drm_free_large(obj_priv->pages); 1558 drm_free_large(obj->pages);
1540 obj_priv->pages = NULL; 1559 obj->pages = NULL;
1541} 1560}
1542 1561
1543static uint32_t 1562void
1544i915_gem_next_request_seqno(struct drm_device *dev, 1563i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1545 struct intel_ring_buffer *ring) 1564 struct intel_ring_buffer *ring,
1546{ 1565 u32 seqno)
1547 drm_i915_private_t *dev_priv = dev->dev_private;
1548
1549 ring->outstanding_lazy_request = true;
1550 return dev_priv->next_seqno;
1551}
1552
1553static void
1554i915_gem_object_move_to_active(struct drm_gem_object *obj,
1555 struct intel_ring_buffer *ring)
1556{ 1566{
1557 struct drm_device *dev = obj->dev; 1567 struct drm_device *dev = obj->base.dev;
1558 struct drm_i915_private *dev_priv = dev->dev_private; 1568 struct drm_i915_private *dev_priv = dev->dev_private;
1559 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1560 uint32_t seqno = i915_gem_next_request_seqno(dev, ring);
1561 1569
1562 BUG_ON(ring == NULL); 1570 BUG_ON(ring == NULL);
1563 obj_priv->ring = ring; 1571 obj->ring = ring;
1564 1572
1565 /* Add a reference if we're newly entering the active list. */ 1573 /* Add a reference if we're newly entering the active list. */
1566 if (!obj_priv->active) { 1574 if (!obj->active) {
1567 drm_gem_object_reference(obj); 1575 drm_gem_object_reference(&obj->base);
1568 obj_priv->active = 1; 1576 obj->active = 1;
1569 } 1577 }
1570 1578
1571 /* Move from whatever list we were on to the tail of execution. */ 1579 /* Move from whatever list we were on to the tail of execution. */
1572 list_move_tail(&obj_priv->mm_list, &dev_priv->mm.active_list); 1580 list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
1573 list_move_tail(&obj_priv->ring_list, &ring->active_list); 1581 list_move_tail(&obj->ring_list, &ring->active_list);
1574 obj_priv->last_rendering_seqno = seqno; 1582
1583 obj->last_rendering_seqno = seqno;
1584 if (obj->fenced_gpu_access) {
1585 struct drm_i915_fence_reg *reg;
1586
1587 BUG_ON(obj->fence_reg == I915_FENCE_REG_NONE);
1588
1589 obj->last_fenced_seqno = seqno;
1590 obj->last_fenced_ring = ring;
1591
1592 reg = &dev_priv->fence_regs[obj->fence_reg];
1593 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
1594 }
1575} 1595}
1576 1596
1577static void 1597static void
1578i915_gem_object_move_to_flushing(struct drm_gem_object *obj) 1598i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
1579{ 1599{
1580 struct drm_device *dev = obj->dev; 1600 list_del_init(&obj->ring_list);
1601 obj->last_rendering_seqno = 0;
1602}
1603
1604static void
1605i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
1606{
1607 struct drm_device *dev = obj->base.dev;
1581 drm_i915_private_t *dev_priv = dev->dev_private; 1608 drm_i915_private_t *dev_priv = dev->dev_private;
1582 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1583 1609
1584 BUG_ON(!obj_priv->active); 1610 BUG_ON(!obj->active);
1585 list_move_tail(&obj_priv->mm_list, &dev_priv->mm.flushing_list); 1611 list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
1586 list_del_init(&obj_priv->ring_list); 1612
1587 obj_priv->last_rendering_seqno = 0; 1613 i915_gem_object_move_off_active(obj);
1614}
1615
1616static void
1617i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1618{
1619 struct drm_device *dev = obj->base.dev;
1620 struct drm_i915_private *dev_priv = dev->dev_private;
1621
1622 if (obj->pin_count != 0)
1623 list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list);
1624 else
1625 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1626
1627 BUG_ON(!list_empty(&obj->gpu_write_list));
1628 BUG_ON(!obj->active);
1629 obj->ring = NULL;
1630
1631 i915_gem_object_move_off_active(obj);
1632 obj->fenced_gpu_access = false;
1633
1634 obj->active = 0;
1635 obj->pending_gpu_write = false;
1636 drm_gem_object_unreference(&obj->base);
1637
1638 WARN_ON(i915_verify_lists(dev));
1588} 1639}
1589 1640
1590/* Immediately discard the backing storage */ 1641/* Immediately discard the backing storage */
1591static void 1642static void
1592i915_gem_object_truncate(struct drm_gem_object *obj) 1643i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1593{ 1644{
1594 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1595 struct inode *inode; 1645 struct inode *inode;
1596 1646
1597 /* Our goal here is to return as much of the memory as 1647 /* Our goal here is to return as much of the memory as
@@ -1600,42 +1650,18 @@ i915_gem_object_truncate(struct drm_gem_object *obj)
1600 * backing pages, *now*. Here we mirror the actions taken 1650 * backing pages, *now*. Here we mirror the actions taken
 1601 * by shmem_delete_inode() to release the backing store. 1651 * by shmem_delete_inode() to release the backing store.
1602 */ 1652 */
1603 inode = obj->filp->f_path.dentry->d_inode; 1653 inode = obj->base.filp->f_path.dentry->d_inode;
1604 truncate_inode_pages(inode->i_mapping, 0); 1654 truncate_inode_pages(inode->i_mapping, 0);
1605 if (inode->i_op->truncate_range) 1655 if (inode->i_op->truncate_range)
1606 inode->i_op->truncate_range(inode, 0, (loff_t)-1); 1656 inode->i_op->truncate_range(inode, 0, (loff_t)-1);
1607 1657
1608 obj_priv->madv = __I915_MADV_PURGED; 1658 obj->madv = __I915_MADV_PURGED;
1609} 1659}
1610 1660
1611static inline int 1661static inline int
1612i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv) 1662i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1613{ 1663{
1614 return obj_priv->madv == I915_MADV_DONTNEED; 1664 return obj->madv == I915_MADV_DONTNEED;
1615}
1616
1617static void
1618i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
1619{
1620 struct drm_device *dev = obj->dev;
1621 drm_i915_private_t *dev_priv = dev->dev_private;
1622 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1623
1624 if (obj_priv->pin_count != 0)
1625 list_move_tail(&obj_priv->mm_list, &dev_priv->mm.pinned_list);
1626 else
1627 list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
1628 list_del_init(&obj_priv->ring_list);
1629
1630 BUG_ON(!list_empty(&obj_priv->gpu_write_list));
1631
1632 obj_priv->last_rendering_seqno = 0;
1633 obj_priv->ring = NULL;
1634 if (obj_priv->active) {
1635 obj_priv->active = 0;
1636 drm_gem_object_unreference(obj);
1637 }
1638 WARN_ON(i915_verify_lists(dev));
1639} 1665}
1640 1666
1641static void 1667static void
@@ -1643,37 +1669,27 @@ i915_gem_process_flushing_list(struct drm_device *dev,
1643 uint32_t flush_domains, 1669 uint32_t flush_domains,
1644 struct intel_ring_buffer *ring) 1670 struct intel_ring_buffer *ring)
1645{ 1671{
1646 drm_i915_private_t *dev_priv = dev->dev_private; 1672 struct drm_i915_gem_object *obj, *next;
1647 struct drm_i915_gem_object *obj_priv, *next;
1648 1673
1649 list_for_each_entry_safe(obj_priv, next, 1674 list_for_each_entry_safe(obj, next,
1650 &ring->gpu_write_list, 1675 &ring->gpu_write_list,
1651 gpu_write_list) { 1676 gpu_write_list) {
1652 struct drm_gem_object *obj = &obj_priv->base; 1677 if (obj->base.write_domain & flush_domains) {
1678 uint32_t old_write_domain = obj->base.write_domain;
1653 1679
1654 if (obj->write_domain & flush_domains) { 1680 obj->base.write_domain = 0;
1655 uint32_t old_write_domain = obj->write_domain; 1681 list_del_init(&obj->gpu_write_list);
1656 1682 i915_gem_object_move_to_active(obj, ring,
1657 obj->write_domain = 0; 1683 i915_gem_next_request_seqno(dev, ring));
1658 list_del_init(&obj_priv->gpu_write_list);
1659 i915_gem_object_move_to_active(obj, ring);
1660
1661 /* update the fence lru list */
1662 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
1663 struct drm_i915_fence_reg *reg =
1664 &dev_priv->fence_regs[obj_priv->fence_reg];
1665 list_move_tail(&reg->lru_list,
1666 &dev_priv->mm.fence_list);
1667 }
1668 1684
1669 trace_i915_gem_object_change_domain(obj, 1685 trace_i915_gem_object_change_domain(obj,
1670 obj->read_domains, 1686 obj->base.read_domains,
1671 old_write_domain); 1687 old_write_domain);
1672 } 1688 }
1673 } 1689 }
1674} 1690}
1675 1691
1676uint32_t 1692int
1677i915_add_request(struct drm_device *dev, 1693i915_add_request(struct drm_device *dev,
1678 struct drm_file *file, 1694 struct drm_file *file,
1679 struct drm_i915_gem_request *request, 1695 struct drm_i915_gem_request *request,
@@ -1683,17 +1699,17 @@ i915_add_request(struct drm_device *dev,
1683 struct drm_i915_file_private *file_priv = NULL; 1699 struct drm_i915_file_private *file_priv = NULL;
1684 uint32_t seqno; 1700 uint32_t seqno;
1685 int was_empty; 1701 int was_empty;
1702 int ret;
1703
1704 BUG_ON(request == NULL);
1686 1705
1687 if (file != NULL) 1706 if (file != NULL)
1688 file_priv = file->driver_priv; 1707 file_priv = file->driver_priv;
1689 1708
1690 if (request == NULL) { 1709 ret = ring->add_request(ring, &seqno);
1691 request = kzalloc(sizeof(*request), GFP_KERNEL); 1710 if (ret)
1692 if (request == NULL) 1711 return ret;
1693 return 0;
1694 }
1695 1712
1696 seqno = ring->add_request(dev, ring, 0);
1697 ring->outstanding_lazy_request = false; 1713 ring->outstanding_lazy_request = false;
1698 1714
1699 request->seqno = seqno; 1715 request->seqno = seqno;
@@ -1717,26 +1733,7 @@ i915_add_request(struct drm_device *dev,
1717 queue_delayed_work(dev_priv->wq, 1733 queue_delayed_work(dev_priv->wq,
1718 &dev_priv->mm.retire_work, HZ); 1734 &dev_priv->mm.retire_work, HZ);
1719 } 1735 }
1720 return seqno; 1736 return 0;
1721}
1722
1723/**
1724 * Command execution barrier
1725 *
1726 * Ensures that all commands in the ring are finished
1727 * before signalling the CPU
1728 */
1729static void
1730i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
1731{
1732 uint32_t flush_domains = 0;
1733
1734 /* The sampler always gets flushed on i965 (sigh) */
1735 if (INTEL_INFO(dev)->gen >= 4)
1736 flush_domains |= I915_GEM_DOMAIN_SAMPLER;
1737
1738 ring->flush(dev, ring,
1739 I915_GEM_DOMAIN_COMMAND, flush_domains);
1740} 1737}
1741 1738
1742static inline void 1739static inline void
@@ -1769,62 +1766,76 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
1769 } 1766 }
1770 1767
1771 while (!list_empty(&ring->active_list)) { 1768 while (!list_empty(&ring->active_list)) {
1772 struct drm_i915_gem_object *obj_priv; 1769 struct drm_i915_gem_object *obj;
1773 1770
1774 obj_priv = list_first_entry(&ring->active_list, 1771 obj = list_first_entry(&ring->active_list,
1775 struct drm_i915_gem_object, 1772 struct drm_i915_gem_object,
1776 ring_list); 1773 ring_list);
1777 1774
1778 obj_priv->base.write_domain = 0; 1775 obj->base.write_domain = 0;
1779 list_del_init(&obj_priv->gpu_write_list); 1776 list_del_init(&obj->gpu_write_list);
1780 i915_gem_object_move_to_inactive(&obj_priv->base); 1777 i915_gem_object_move_to_inactive(obj);
1778 }
1779}
1780
1781static void i915_gem_reset_fences(struct drm_device *dev)
1782{
1783 struct drm_i915_private *dev_priv = dev->dev_private;
1784 int i;
1785
1786 for (i = 0; i < 16; i++) {
1787 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
1788 struct drm_i915_gem_object *obj = reg->obj;
1789
1790 if (!obj)
1791 continue;
1792
1793 if (obj->tiling_mode)
1794 i915_gem_release_mmap(obj);
1795
1796 reg->obj->fence_reg = I915_FENCE_REG_NONE;
1797 reg->obj->fenced_gpu_access = false;
1798 reg->obj->last_fenced_seqno = 0;
1799 reg->obj->last_fenced_ring = NULL;
1800 i915_gem_clear_fence_reg(dev, reg);
1781 } 1801 }
1782} 1802}
1783 1803
1784void i915_gem_reset(struct drm_device *dev) 1804void i915_gem_reset(struct drm_device *dev)
1785{ 1805{
1786 struct drm_i915_private *dev_priv = dev->dev_private; 1806 struct drm_i915_private *dev_priv = dev->dev_private;
1787 struct drm_i915_gem_object *obj_priv; 1807 struct drm_i915_gem_object *obj;
1788 int i; 1808 int i;
1789 1809
1790 i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring); 1810 for (i = 0; i < I915_NUM_RINGS; i++)
1791 i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring); 1811 i915_gem_reset_ring_lists(dev_priv, &dev_priv->ring[i]);
1792 i915_gem_reset_ring_lists(dev_priv, &dev_priv->blt_ring);
1793 1812
1794 /* Remove anything from the flushing lists. The GPU cache is likely 1813 /* Remove anything from the flushing lists. The GPU cache is likely
1795 * to be lost on reset along with the data, so simply move the 1814 * to be lost on reset along with the data, so simply move the
1796 * lost bo to the inactive list. 1815 * lost bo to the inactive list.
1797 */ 1816 */
1798 while (!list_empty(&dev_priv->mm.flushing_list)) { 1817 while (!list_empty(&dev_priv->mm.flushing_list)) {
 1799 obj_priv = list_first_entry(&dev_priv->mm.flushing_list, 1818 obj = list_first_entry(&dev_priv->mm.flushing_list,
1800 struct drm_i915_gem_object, 1819 struct drm_i915_gem_object,
1801 mm_list); 1820 mm_list);
1802 1821
1803 obj_priv->base.write_domain = 0; 1822 obj->base.write_domain = 0;
1804 list_del_init(&obj_priv->gpu_write_list); 1823 list_del_init(&obj->gpu_write_list);
1805 i915_gem_object_move_to_inactive(&obj_priv->base); 1824 i915_gem_object_move_to_inactive(obj);
1806 } 1825 }
1807 1826
1808 /* Move everything out of the GPU domains to ensure we do any 1827 /* Move everything out of the GPU domains to ensure we do any
1809 * necessary invalidation upon reuse. 1828 * necessary invalidation upon reuse.
1810 */ 1829 */
1811 list_for_each_entry(obj_priv, 1830 list_for_each_entry(obj,
1812 &dev_priv->mm.inactive_list, 1831 &dev_priv->mm.inactive_list,
1813 mm_list) 1832 mm_list)
1814 { 1833 {
1815 obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS; 1834 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
1816 } 1835 }
1817 1836
1818 /* The fence registers are invalidated so clear them out */ 1837 /* The fence registers are invalidated so clear them out */
1819 for (i = 0; i < 16; i++) { 1838 i915_gem_reset_fences(dev);
1820 struct drm_i915_fence_reg *reg;
1821
1822 reg = &dev_priv->fence_regs[i];
1823 if (!reg->obj)
1824 continue;
1825
1826 i915_gem_clear_fence_reg(reg->obj);
1827 }
1828} 1839}
1829 1840
1830/** 1841/**
@@ -1836,6 +1847,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
1836{ 1847{
1837 drm_i915_private_t *dev_priv = dev->dev_private; 1848 drm_i915_private_t *dev_priv = dev->dev_private;
1838 uint32_t seqno; 1849 uint32_t seqno;
1850 int i;
1839 1851
1840 if (!ring->status_page.page_addr || 1852 if (!ring->status_page.page_addr ||
1841 list_empty(&ring->request_list)) 1853 list_empty(&ring->request_list))
@@ -1843,7 +1855,12 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
1843 1855
1844 WARN_ON(i915_verify_lists(dev)); 1856 WARN_ON(i915_verify_lists(dev));
1845 1857
1846 seqno = ring->get_seqno(dev, ring); 1858 seqno = ring->get_seqno(ring);
1859
1860 for (i = 0; i < I915_NUM_RINGS; i++)
1861 if (seqno >= ring->sync_seqno[i])
1862 ring->sync_seqno[i] = 0;
1863
1847 while (!list_empty(&ring->request_list)) { 1864 while (!list_empty(&ring->request_list)) {
1848 struct drm_i915_gem_request *request; 1865 struct drm_i915_gem_request *request;
1849 1866
@@ -1865,18 +1882,16 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
1865 * by the ringbuffer to the flushing/inactive lists as appropriate. 1882 * by the ringbuffer to the flushing/inactive lists as appropriate.
1866 */ 1883 */
1867 while (!list_empty(&ring->active_list)) { 1884 while (!list_empty(&ring->active_list)) {
1868 struct drm_gem_object *obj; 1885 struct drm_i915_gem_object *obj;
1869 struct drm_i915_gem_object *obj_priv;
1870 1886
 1871 obj_priv = list_first_entry(&ring->active_list, 1887 obj = list_first_entry(&ring->active_list,
1872 struct drm_i915_gem_object, 1888 struct drm_i915_gem_object,
1873 ring_list); 1889 ring_list);
1874 1890
1875 if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno)) 1891 if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
1876 break; 1892 break;
1877 1893
1878 obj = &obj_priv->base; 1894 if (obj->base.write_domain != 0)
1879 if (obj->write_domain != 0)
1880 i915_gem_object_move_to_flushing(obj); 1895 i915_gem_object_move_to_flushing(obj);
1881 else 1896 else
1882 i915_gem_object_move_to_inactive(obj); 1897 i915_gem_object_move_to_inactive(obj);
@@ -1884,7 +1899,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
1884 1899
 1885 if (unlikely(dev_priv->trace_irq_seqno && 1900 if (unlikely(dev_priv->trace_irq_seqno &&
1886 i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) { 1901 i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
1887 ring->user_irq_put(dev, ring); 1902 ring->irq_put(ring);
1888 dev_priv->trace_irq_seqno = 0; 1903 dev_priv->trace_irq_seqno = 0;
1889 } 1904 }
1890 1905
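
All of the retiring above leans on i915_seqno_passed(); its implementation is not part of this hunk, but it is conventionally a signed 32-bit subtraction so that comparisons stay correct when the seqno counter wraps. A sketch of that idiom, under that assumption:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* "has seq1 reached seq2?" -- valid while the two values are within
 * 2^31 of each other, even across wraparound. */
static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
    return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
    printf("%d\n", seqno_passed(5, 3));           /* 1 */
    printf("%d\n", seqno_passed(2, 0xfffffff0u)); /* 1: counter wrapped */
    return 0;
}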
@@ -1895,24 +1910,24 @@ void
1895i915_gem_retire_requests(struct drm_device *dev) 1910i915_gem_retire_requests(struct drm_device *dev)
1896{ 1911{
1897 drm_i915_private_t *dev_priv = dev->dev_private; 1912 drm_i915_private_t *dev_priv = dev->dev_private;
1913 int i;
1898 1914
1899 if (!list_empty(&dev_priv->mm.deferred_free_list)) { 1915 if (!list_empty(&dev_priv->mm.deferred_free_list)) {
1900 struct drm_i915_gem_object *obj_priv, *tmp; 1916 struct drm_i915_gem_object *obj, *next;
1901 1917
1902 /* We must be careful that during unbind() we do not 1918 /* We must be careful that during unbind() we do not
1903 * accidentally infinitely recurse into retire requests. 1919 * accidentally infinitely recurse into retire requests.
1904 * Currently: 1920 * Currently:
1905 * retire -> free -> unbind -> wait -> retire_ring 1921 * retire -> free -> unbind -> wait -> retire_ring
1906 */ 1922 */
1907 list_for_each_entry_safe(obj_priv, tmp, 1923 list_for_each_entry_safe(obj, next,
1908 &dev_priv->mm.deferred_free_list, 1924 &dev_priv->mm.deferred_free_list,
1909 mm_list) 1925 mm_list)
1910 i915_gem_free_object_tail(&obj_priv->base); 1926 i915_gem_free_object_tail(obj);
1911 } 1927 }
1912 1928
1913 i915_gem_retire_requests_ring(dev, &dev_priv->render_ring); 1929 for (i = 0; i < I915_NUM_RINGS; i++)
1914 i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring); 1930 i915_gem_retire_requests_ring(dev, &dev_priv->ring[i]);
1915 i915_gem_retire_requests_ring(dev, &dev_priv->blt_ring);
1916} 1931}
1917 1932
1918static void 1933static void
@@ -1934,9 +1949,9 @@ i915_gem_retire_work_handler(struct work_struct *work)
1934 i915_gem_retire_requests(dev); 1949 i915_gem_retire_requests(dev);
1935 1950
1936 if (!dev_priv->mm.suspended && 1951 if (!dev_priv->mm.suspended &&
1937 (!list_empty(&dev_priv->render_ring.request_list) || 1952 (!list_empty(&dev_priv->ring[RCS].request_list) ||
1938 !list_empty(&dev_priv->bsd_ring.request_list) || 1953 !list_empty(&dev_priv->ring[VCS].request_list) ||
1939 !list_empty(&dev_priv->blt_ring.request_list))) 1954 !list_empty(&dev_priv->ring[BCS].request_list)))
1940 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); 1955 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1941 mutex_unlock(&dev->struct_mutex); 1956 mutex_unlock(&dev->struct_mutex);
1942} 1957}
@@ -1954,14 +1969,23 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
1954 if (atomic_read(&dev_priv->mm.wedged)) 1969 if (atomic_read(&dev_priv->mm.wedged))
1955 return -EAGAIN; 1970 return -EAGAIN;
1956 1971
1957 if (ring->outstanding_lazy_request) { 1972 if (seqno == ring->outstanding_lazy_request) {
1958 seqno = i915_add_request(dev, NULL, NULL, ring); 1973 struct drm_i915_gem_request *request;
1959 if (seqno == 0) 1974
1975 request = kzalloc(sizeof(*request), GFP_KERNEL);
1976 if (request == NULL)
1960 return -ENOMEM; 1977 return -ENOMEM;
1978
1979 ret = i915_add_request(dev, NULL, request, ring);
1980 if (ret) {
1981 kfree(request);
1982 return ret;
1983 }
1984
1985 seqno = request->seqno;
1961 } 1986 }
1962 BUG_ON(seqno == dev_priv->next_seqno);
1963 1987
1964 if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) { 1988 if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
1965 if (HAS_PCH_SPLIT(dev)) 1989 if (HAS_PCH_SPLIT(dev))
1966 ier = I915_READ(DEIER) | I915_READ(GTIER); 1990 ier = I915_READ(DEIER) | I915_READ(GTIER);
1967 else 1991 else
@@ -1975,21 +1999,23 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
1975 1999
1976 trace_i915_gem_request_wait_begin(dev, seqno); 2000 trace_i915_gem_request_wait_begin(dev, seqno);
1977 2001
1978 ring->waiting_gem_seqno = seqno; 2002 ring->waiting_seqno = seqno;
1979 ring->user_irq_get(dev, ring); 2003 if (ring->irq_get(ring)) {
1980 if (interruptible) 2004 if (interruptible)
1981 ret = wait_event_interruptible(ring->irq_queue, 2005 ret = wait_event_interruptible(ring->irq_queue,
1982 i915_seqno_passed( 2006 i915_seqno_passed(ring->get_seqno(ring), seqno)
1983 ring->get_seqno(dev, ring), seqno) 2007 || atomic_read(&dev_priv->mm.wedged));
1984 || atomic_read(&dev_priv->mm.wedged)); 2008 else
1985 else 2009 wait_event(ring->irq_queue,
1986 wait_event(ring->irq_queue, 2010 i915_seqno_passed(ring->get_seqno(ring), seqno)
1987 i915_seqno_passed( 2011 || atomic_read(&dev_priv->mm.wedged));
1988 ring->get_seqno(dev, ring), seqno)
1989 || atomic_read(&dev_priv->mm.wedged));
1990 2012
1991 ring->user_irq_put(dev, ring); 2013 ring->irq_put(ring);
1992 ring->waiting_gem_seqno = 0; 2014 } else if (wait_for(i915_seqno_passed(ring->get_seqno(ring),
2015 seqno) ||
2016 atomic_read(&dev_priv->mm.wedged), 3000))
2017 ret = -EBUSY;
2018 ring->waiting_seqno = 0;
1993 2019
1994 trace_i915_gem_request_wait_end(dev, seqno); 2020 trace_i915_gem_request_wait_end(dev, seqno);
1995 } 2021 }
@@ -1998,7 +2024,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
1998 2024
1999 if (ret && ret != -ERESTARTSYS) 2025 if (ret && ret != -ERESTARTSYS)
2000 DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n", 2026 DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
2001 __func__, ret, seqno, ring->get_seqno(dev, ring), 2027 __func__, ret, seqno, ring->get_seqno(ring),
2002 dev_priv->next_seqno); 2028 dev_priv->next_seqno);
2003 2029
2004 /* Directly dispatch request retiring. While we have the work queue 2030 /* Directly dispatch request retiring. While we have the work queue
@@ -2023,70 +2049,30 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno,
2023 return i915_do_wait_request(dev, seqno, 1, ring); 2049 return i915_do_wait_request(dev, seqno, 1, ring);
2024} 2050}
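
With i915_add_request() now returning an error code instead of a seqno, allocation moves to the caller: it passes in a request it owns and must free it when the add path fails, as the wait code above shows. A stripped-down model of that ownership contract (all names illustrative):

#include <stdio.h>
#include <stdlib.h>

struct request { unsigned int seqno; };

static int add_request(struct request *req)
{
    static unsigned int next_seqno = 1;

    req->seqno = next_seqno++; /* pretend the ring assigned one */
    return 0;                  /* or a negative errno on failure */
}

int main(void)
{
    struct request *req = calloc(1, sizeof(*req));

    if (!req)
        return 1;
    if (add_request(req)) {
        free(req);             /* caller frees on error */
        return 1;
    }
    printf("waiting for seqno %u\n", req->seqno);
    free(req);
    return 0;
}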
2025 2051
2026static void
2027i915_gem_flush_ring(struct drm_device *dev,
2028 struct drm_file *file_priv,
2029 struct intel_ring_buffer *ring,
2030 uint32_t invalidate_domains,
2031 uint32_t flush_domains)
2032{
2033 ring->flush(dev, ring, invalidate_domains, flush_domains);
2034 i915_gem_process_flushing_list(dev, flush_domains, ring);
2035}
2036
2037static void
2038i915_gem_flush(struct drm_device *dev,
2039 struct drm_file *file_priv,
2040 uint32_t invalidate_domains,
2041 uint32_t flush_domains,
2042 uint32_t flush_rings)
2043{
2044 drm_i915_private_t *dev_priv = dev->dev_private;
2045
2046 if (flush_domains & I915_GEM_DOMAIN_CPU)
2047 drm_agp_chipset_flush(dev);
2048
2049 if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
2050 if (flush_rings & RING_RENDER)
2051 i915_gem_flush_ring(dev, file_priv,
2052 &dev_priv->render_ring,
2053 invalidate_domains, flush_domains);
2054 if (flush_rings & RING_BSD)
2055 i915_gem_flush_ring(dev, file_priv,
2056 &dev_priv->bsd_ring,
2057 invalidate_domains, flush_domains);
2058 if (flush_rings & RING_BLT)
2059 i915_gem_flush_ring(dev, file_priv,
2060 &dev_priv->blt_ring,
2061 invalidate_domains, flush_domains);
2062 }
2063}
2064
2065/** 2052/**
2066 * Ensures that all rendering to the object has completed and the object is 2053 * Ensures that all rendering to the object has completed and the object is
2067 * safe to unbind from the GTT or access from the CPU. 2054 * safe to unbind from the GTT or access from the CPU.
2068 */ 2055 */
2069static int 2056int
2070i915_gem_object_wait_rendering(struct drm_gem_object *obj, 2057i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
2071 bool interruptible) 2058 bool interruptible)
2072{ 2059{
2073 struct drm_device *dev = obj->dev; 2060 struct drm_device *dev = obj->base.dev;
2074 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2075 int ret; 2061 int ret;
2076 2062
2077 /* This function only exists to support waiting for existing rendering, 2063 /* This function only exists to support waiting for existing rendering,
2078 * not for emitting required flushes. 2064 * not for emitting required flushes.
2079 */ 2065 */
2080 BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0); 2066 BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
2081 2067
2082 /* If there is rendering queued on the buffer being evicted, wait for 2068 /* If there is rendering queued on the buffer being evicted, wait for
2083 * it. 2069 * it.
2084 */ 2070 */
2085 if (obj_priv->active) { 2071 if (obj->active) {
2086 ret = i915_do_wait_request(dev, 2072 ret = i915_do_wait_request(dev,
2087 obj_priv->last_rendering_seqno, 2073 obj->last_rendering_seqno,
2088 interruptible, 2074 interruptible,
2089 obj_priv->ring); 2075 obj->ring);
2090 if (ret) 2076 if (ret)
2091 return ret; 2077 return ret;
2092 } 2078 }
@@ -2098,17 +2084,14 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj,
2098 * Unbinds an object from the GTT aperture. 2084 * Unbinds an object from the GTT aperture.
2099 */ 2085 */
2100int 2086int
2101i915_gem_object_unbind(struct drm_gem_object *obj) 2087i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2102{ 2088{
2103 struct drm_device *dev = obj->dev;
2104 struct drm_i915_private *dev_priv = dev->dev_private;
2105 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2106 int ret = 0; 2089 int ret = 0;
2107 2090
2108 if (obj_priv->gtt_space == NULL) 2091 if (obj->gtt_space == NULL)
2109 return 0; 2092 return 0;
2110 2093
2111 if (obj_priv->pin_count != 0) { 2094 if (obj->pin_count != 0) {
2112 DRM_ERROR("Attempting to unbind pinned buffer\n"); 2095 DRM_ERROR("Attempting to unbind pinned buffer\n");
2113 return -EINVAL; 2096 return -EINVAL;
2114 } 2097 }
@@ -2131,27 +2114,27 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
2131 */ 2114 */
2132 if (ret) { 2115 if (ret) {
2133 i915_gem_clflush_object(obj); 2116 i915_gem_clflush_object(obj);
2134 obj->read_domains = obj->write_domain = I915_GEM_DOMAIN_CPU; 2117 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2135 } 2118 }
2136 2119
2137 /* release the fence reg _after_ flushing */ 2120 /* release the fence reg _after_ flushing */
2138 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) 2121 ret = i915_gem_object_put_fence(obj);
2139 i915_gem_clear_fence_reg(obj); 2122 if (ret == -ERESTARTSYS)
2140 2123 return ret;
2141 drm_unbind_agp(obj_priv->agp_mem);
2142 drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
2143 2124
2144 i915_gem_object_put_pages(obj); 2125 i915_gem_gtt_unbind_object(obj);
2145 BUG_ON(obj_priv->pages_refcount); 2126 i915_gem_object_put_pages_gtt(obj);
2146 2127
2147 i915_gem_info_remove_gtt(dev_priv, obj->size); 2128 list_del_init(&obj->gtt_list);
2148 list_del_init(&obj_priv->mm_list); 2129 list_del_init(&obj->mm_list);
2130 /* Avoid an unnecessary call to unbind on rebind. */
2131 obj->map_and_fenceable = true;
2149 2132
2150 drm_mm_put_block(obj_priv->gtt_space); 2133 drm_mm_put_block(obj->gtt_space);
2151 obj_priv->gtt_space = NULL; 2134 obj->gtt_space = NULL;
2152 obj_priv->gtt_offset = 0; 2135 obj->gtt_offset = 0;
2153 2136
2154 if (i915_gem_object_is_purgeable(obj_priv)) 2137 if (i915_gem_object_is_purgeable(obj))
2155 i915_gem_object_truncate(obj); 2138 i915_gem_object_truncate(obj);
2156 2139
2157 trace_i915_gem_object_unbind(obj); 2140 trace_i915_gem_object_unbind(obj);
@@ -2159,14 +2142,25 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
2159 return ret; 2142 return ret;
2160} 2143}
2161 2144
2145void
2146i915_gem_flush_ring(struct drm_device *dev,
2147 struct intel_ring_buffer *ring,
2148 uint32_t invalidate_domains,
2149 uint32_t flush_domains)
2150{
2151 ring->flush(ring, invalidate_domains, flush_domains);
2152 i915_gem_process_flushing_list(dev, flush_domains, ring);
2153}
2154
2162static int i915_ring_idle(struct drm_device *dev, 2155static int i915_ring_idle(struct drm_device *dev,
2163 struct intel_ring_buffer *ring) 2156 struct intel_ring_buffer *ring)
2164{ 2157{
2165 if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list)) 2158 if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
2166 return 0; 2159 return 0;
2167 2160
2168 i915_gem_flush_ring(dev, NULL, ring, 2161 if (!list_empty(&ring->gpu_write_list))
2169 I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); 2162 i915_gem_flush_ring(dev, ring,
2163 I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
2170 return i915_wait_request(dev, 2164 return i915_wait_request(dev,
2171 i915_gem_next_request_seqno(dev, ring), 2165 i915_gem_next_request_seqno(dev, ring),
2172 ring); 2166 ring);
@@ -2177,7 +2171,7 @@ i915_gpu_idle(struct drm_device *dev)
2177{ 2171{
2178 drm_i915_private_t *dev_priv = dev->dev_private; 2172 drm_i915_private_t *dev_priv = dev->dev_private;
2179 bool lists_empty; 2173 bool lists_empty;
2180 int ret; 2174 int ret, i;
2181 2175
2182 lists_empty = (list_empty(&dev_priv->mm.flushing_list) && 2176 lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
2183 list_empty(&dev_priv->mm.active_list)); 2177 list_empty(&dev_priv->mm.active_list));
@@ -2185,258 +2179,296 @@ i915_gpu_idle(struct drm_device *dev)
2185 return 0; 2179 return 0;
2186 2180
2187 /* Flush everything onto the inactive list. */ 2181 /* Flush everything onto the inactive list. */
2188 ret = i915_ring_idle(dev, &dev_priv->render_ring); 2182 for (i = 0; i < I915_NUM_RINGS; i++) {
2189 if (ret) 2183 ret = i915_ring_idle(dev, &dev_priv->ring[i]);
2190 return ret; 2184 if (ret)
2191 2185 return ret;
2192 ret = i915_ring_idle(dev, &dev_priv->bsd_ring);
2193 if (ret)
2194 return ret;
2195
2196 ret = i915_ring_idle(dev, &dev_priv->blt_ring);
2197 if (ret)
2198 return ret;
2199
2200 return 0;
2201}
2202
2203static int
2204i915_gem_object_get_pages(struct drm_gem_object *obj,
2205 gfp_t gfpmask)
2206{
2207 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2208 int page_count, i;
2209 struct address_space *mapping;
2210 struct inode *inode;
2211 struct page *page;
2212
2213 BUG_ON(obj_priv->pages_refcount
2214 == DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT);
2215
2216 if (obj_priv->pages_refcount++ != 0)
2217 return 0;
2218
2219 /* Get the list of pages out of our struct file. They'll be pinned
2220 * at this point until we release them.
2221 */
2222 page_count = obj->size / PAGE_SIZE;
2223 BUG_ON(obj_priv->pages != NULL);
2224 obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
2225 if (obj_priv->pages == NULL) {
2226 obj_priv->pages_refcount--;
2227 return -ENOMEM;
2228 }
2229
2230 inode = obj->filp->f_path.dentry->d_inode;
2231 mapping = inode->i_mapping;
2232 for (i = 0; i < page_count; i++) {
2233 page = read_cache_page_gfp(mapping, i,
2234 GFP_HIGHUSER |
2235 __GFP_COLD |
2236 __GFP_RECLAIMABLE |
2237 gfpmask);
2238 if (IS_ERR(page))
2239 goto err_pages;
2240
2241 obj_priv->pages[i] = page;
2242 } 2186 }
2243 2187
2244 if (obj_priv->tiling_mode != I915_TILING_NONE)
2245 i915_gem_object_do_bit_17_swizzle(obj);
2246
2247 return 0; 2188 return 0;
2248
2249err_pages:
2250 while (i--)
2251 page_cache_release(obj_priv->pages[i]);
2252
2253 drm_free_large(obj_priv->pages);
2254 obj_priv->pages = NULL;
2255 obj_priv->pages_refcount--;
2256 return PTR_ERR(page);
2257} 2189}
2258 2190
2259static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg) 2191static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj,
2192 struct intel_ring_buffer *pipelined)
2260{ 2193{
2261 struct drm_gem_object *obj = reg->obj; 2194 struct drm_device *dev = obj->base.dev;
2262 struct drm_device *dev = obj->dev;
2263 drm_i915_private_t *dev_priv = dev->dev_private; 2195 drm_i915_private_t *dev_priv = dev->dev_private;
2264 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 2196 u32 size = obj->gtt_space->size;
2265 int regnum = obj_priv->fence_reg; 2197 int regnum = obj->fence_reg;
2266 uint64_t val; 2198 uint64_t val;
2267 2199
2268 val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) & 2200 val = (uint64_t)((obj->gtt_offset + size - 4096) &
2269 0xfffff000) << 32; 2201 0xfffff000) << 32;
2270 val |= obj_priv->gtt_offset & 0xfffff000; 2202 val |= obj->gtt_offset & 0xfffff000;
2271 val |= (uint64_t)((obj_priv->stride / 128) - 1) << 2203 val |= (uint64_t)((obj->stride / 128) - 1) <<
2272 SANDYBRIDGE_FENCE_PITCH_SHIFT; 2204 SANDYBRIDGE_FENCE_PITCH_SHIFT;
2273 2205
2274 if (obj_priv->tiling_mode == I915_TILING_Y) 2206 if (obj->tiling_mode == I915_TILING_Y)
2275 val |= 1 << I965_FENCE_TILING_Y_SHIFT; 2207 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2276 val |= I965_FENCE_REG_VALID; 2208 val |= I965_FENCE_REG_VALID;
2277 2209
2278 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val); 2210 if (pipelined) {
2211 int ret = intel_ring_begin(pipelined, 6);
2212 if (ret)
2213 return ret;
2214
2215 intel_ring_emit(pipelined, MI_NOOP);
2216 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
2217 intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8);
2218 intel_ring_emit(pipelined, (u32)val);
2219 intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8 + 4);
2220 intel_ring_emit(pipelined, (u32)(val >> 32));
2221 intel_ring_advance(pipelined);
2222 } else
2223 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val);
2224
2225 return 0;
2279} 2226}
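
The 64-bit fence value packs the last page of the region into the high dword, the start address into the low dword, and the pitch in 128-byte units (minus one) between them. A standalone repack of that arithmetic; the shift and bit positions below are assumptions for illustration, only the field layout is taken from the hunk above:

#include <stdint.h>
#include <stdio.h>

#define FENCE_PITCH_SHIFT 32          /* assumed */
#define FENCE_TILING_Y    (1ull << 1) /* assumed */
#define FENCE_VALID       (1ull << 0) /* assumed */

static uint64_t pack_fence(uint32_t gtt_offset, uint32_t size,
                           uint32_t stride, int tiling_y)
{
    uint64_t val;

    val = (uint64_t)((gtt_offset + size - 4096) & 0xfffff000) << 32;
    val |= gtt_offset & 0xfffff000;
    val |= (uint64_t)(stride / 128 - 1) << FENCE_PITCH_SHIFT;
    if (tiling_y)
        val |= FENCE_TILING_Y;
    val |= FENCE_VALID;
    return val;
}

int main(void)
{
    printf("0x%016llx\n", (unsigned long long)
           pack_fence(0x100000, 0x100000, 4096, 0));
    return 0;
}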
2280 2227
2281static void i965_write_fence_reg(struct drm_i915_fence_reg *reg) 2228static int i965_write_fence_reg(struct drm_i915_gem_object *obj,
2229 struct intel_ring_buffer *pipelined)
2282{ 2230{
2283 struct drm_gem_object *obj = reg->obj; 2231 struct drm_device *dev = obj->base.dev;
2284 struct drm_device *dev = obj->dev;
2285 drm_i915_private_t *dev_priv = dev->dev_private; 2232 drm_i915_private_t *dev_priv = dev->dev_private;
2286 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 2233 u32 size = obj->gtt_space->size;
2287 int regnum = obj_priv->fence_reg; 2234 int regnum = obj->fence_reg;
2288 uint64_t val; 2235 uint64_t val;
2289 2236
2290 val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) & 2237 val = (uint64_t)((obj->gtt_offset + size - 4096) &
2291 0xfffff000) << 32; 2238 0xfffff000) << 32;
2292 val |= obj_priv->gtt_offset & 0xfffff000; 2239 val |= obj->gtt_offset & 0xfffff000;
2293 val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT; 2240 val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2294 if (obj_priv->tiling_mode == I915_TILING_Y) 2241 if (obj->tiling_mode == I915_TILING_Y)
2295 val |= 1 << I965_FENCE_TILING_Y_SHIFT; 2242 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2296 val |= I965_FENCE_REG_VALID; 2243 val |= I965_FENCE_REG_VALID;
2297 2244
2298 I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val); 2245 if (pipelined) {
2246 int ret = intel_ring_begin(pipelined, 6);
2247 if (ret)
2248 return ret;
2249
2250 intel_ring_emit(pipelined, MI_NOOP);
2251 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
2252 intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8);
2253 intel_ring_emit(pipelined, (u32)val);
2254 intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8 + 4);
2255 intel_ring_emit(pipelined, (u32)(val >> 32));
2256 intel_ring_advance(pipelined);
2257 } else
2258 I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val);
2259
2260 return 0;
2299} 2261}
2300 2262
2301static void i915_write_fence_reg(struct drm_i915_fence_reg *reg) 2263static int i915_write_fence_reg(struct drm_i915_gem_object *obj,
2264 struct intel_ring_buffer *pipelined)
2302{ 2265{
2303 struct drm_gem_object *obj = reg->obj; 2266 struct drm_device *dev = obj->base.dev;
2304 struct drm_device *dev = obj->dev;
2305 drm_i915_private_t *dev_priv = dev->dev_private; 2267 drm_i915_private_t *dev_priv = dev->dev_private;
2306 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 2268 u32 size = obj->gtt_space->size;
2307 int regnum = obj_priv->fence_reg; 2269 u32 fence_reg, val, pitch_val;
2308 int tile_width; 2270 int tile_width;
2309 uint32_t fence_reg, val;
2310 uint32_t pitch_val;
2311 2271
2312 if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) || 2272 if (WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
2313 (obj_priv->gtt_offset & (obj->size - 1))) { 2273 (size & -size) != size ||
2314 WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n", 2274 (obj->gtt_offset & (size - 1)),
2315 __func__, obj_priv->gtt_offset, obj->size); 2275 "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2316 return; 2276 obj->gtt_offset, obj->map_and_fenceable, size))
2317 } 2277 return -EINVAL;
2318 2278
2319 if (obj_priv->tiling_mode == I915_TILING_Y && 2279 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2320 HAS_128_BYTE_Y_TILING(dev))
2321 tile_width = 128; 2280 tile_width = 128;
2322 else 2281 else
2323 tile_width = 512; 2282 tile_width = 512;
2324 2283
2325 /* Note: pitch better be a power of two tile widths */ 2284 /* Note: pitch better be a power of two tile widths */
2326 pitch_val = obj_priv->stride / tile_width; 2285 pitch_val = obj->stride / tile_width;
2327 pitch_val = ffs(pitch_val) - 1; 2286 pitch_val = ffs(pitch_val) - 1;
2328 2287
2329 if (obj_priv->tiling_mode == I915_TILING_Y && 2288 val = obj->gtt_offset;
2330 HAS_128_BYTE_Y_TILING(dev)) 2289 if (obj->tiling_mode == I915_TILING_Y)
2331 WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
2332 else
2333 WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);
2334
2335 val = obj_priv->gtt_offset;
2336 if (obj_priv->tiling_mode == I915_TILING_Y)
2337 val |= 1 << I830_FENCE_TILING_Y_SHIFT; 2290 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2338 val |= I915_FENCE_SIZE_BITS(obj->size); 2291 val |= I915_FENCE_SIZE_BITS(size);
2339 val |= pitch_val << I830_FENCE_PITCH_SHIFT; 2292 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2340 val |= I830_FENCE_REG_VALID; 2293 val |= I830_FENCE_REG_VALID;
2341 2294
2342 if (regnum < 8) 2295 fence_reg = obj->fence_reg;
2343 fence_reg = FENCE_REG_830_0 + (regnum * 4); 2296 if (fence_reg < 8)
2297 fence_reg = FENCE_REG_830_0 + fence_reg * 4;
2344 else 2298 else
2345 fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4); 2299 fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
2346 I915_WRITE(fence_reg, val); 2300
2301 if (pipelined) {
2302 int ret = intel_ring_begin(pipelined, 4);
2303 if (ret)
2304 return ret;
2305
2306 intel_ring_emit(pipelined, MI_NOOP);
2307 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
2308 intel_ring_emit(pipelined, fence_reg);
2309 intel_ring_emit(pipelined, val);
2310 intel_ring_advance(pipelined);
2311 } else
2312 I915_WRITE(fence_reg, val);
2313
2314 return 0;
2347} 2315}
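
Two bit tricks carry the new WARN condition and the pitch encoding: (size & -size) != size rejects any size that is not a power of two, and ffs(x) - 1 yields log2 of a power of two. A quick demonstration of both:

#include <stdbool.h>
#include <stdio.h>
#include <strings.h> /* ffs() */

static bool is_pot(unsigned int size)
{
    return size && (size & -size) == size;
}

int main(void)
{
    unsigned int pitch_val = 2048 / 512; /* stride in 512B tile widths */

    printf("pot(0x100000)=%d pot(0x180000)=%d log2(pitch)=%d\n",
           is_pot(0x100000), is_pot(0x180000), ffs(pitch_val) - 1);
    return 0;
}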
2348 2316
2349static void i830_write_fence_reg(struct drm_i915_fence_reg *reg) 2317static int i830_write_fence_reg(struct drm_i915_gem_object *obj,
2318 struct intel_ring_buffer *pipelined)
2350{ 2319{
2351 struct drm_gem_object *obj = reg->obj; 2320 struct drm_device *dev = obj->base.dev;
2352 struct drm_device *dev = obj->dev;
2353 drm_i915_private_t *dev_priv = dev->dev_private; 2321 drm_i915_private_t *dev_priv = dev->dev_private;
2354 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 2322 u32 size = obj->gtt_space->size;
2355 int regnum = obj_priv->fence_reg; 2323 int regnum = obj->fence_reg;
2356 uint32_t val; 2324 uint32_t val;
2357 uint32_t pitch_val; 2325 uint32_t pitch_val;
2358 uint32_t fence_size_bits;
2359 2326
2360 if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) || 2327 if (WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
2361 (obj_priv->gtt_offset & (obj->size - 1))) { 2328 (size & -size) != size ||
2362 WARN(1, "%s: object 0x%08x not 512K or size aligned\n", 2329 (obj->gtt_offset & (size - 1)),
2363 __func__, obj_priv->gtt_offset); 2330 "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
2364 return; 2331 obj->gtt_offset, size))
2365 } 2332 return -EINVAL;
2366 2333
2367 pitch_val = obj_priv->stride / 128; 2334 pitch_val = obj->stride / 128;
2368 pitch_val = ffs(pitch_val) - 1; 2335 pitch_val = ffs(pitch_val) - 1;
2369 WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
2370 2336
2371 val = obj_priv->gtt_offset; 2337 val = obj->gtt_offset;
2372 if (obj_priv->tiling_mode == I915_TILING_Y) 2338 if (obj->tiling_mode == I915_TILING_Y)
2373 val |= 1 << I830_FENCE_TILING_Y_SHIFT; 2339 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2374 fence_size_bits = I830_FENCE_SIZE_BITS(obj->size); 2340 val |= I830_FENCE_SIZE_BITS(size);
2375 WARN_ON(fence_size_bits & ~0x00000f00);
2376 val |= fence_size_bits;
2377 val |= pitch_val << I830_FENCE_PITCH_SHIFT; 2341 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2378 val |= I830_FENCE_REG_VALID; 2342 val |= I830_FENCE_REG_VALID;
2379 2343
2380 I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val); 2344 if (pipelined) {
2345 int ret = intel_ring_begin(pipelined, 4);
2346 if (ret)
2347 return ret;
2348
2349 intel_ring_emit(pipelined, MI_NOOP);
2350 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
2351 intel_ring_emit(pipelined, FENCE_REG_830_0 + regnum*4);
2352 intel_ring_emit(pipelined, val);
2353 intel_ring_advance(pipelined);
2354 } else
2355 I915_WRITE(FENCE_REG_830_0 + regnum * 4, val);
2356
2357 return 0;
2381} 2358}
2382 2359
2383static int i915_find_fence_reg(struct drm_device *dev, 2360static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
2384 bool interruptible) 2361{
2362 return i915_seqno_passed(ring->get_seqno(ring), seqno);
2363}
2364
2365static int
2366i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
2367 struct intel_ring_buffer *pipelined,
2368 bool interruptible)
2369{
2370 int ret;
2371
2372 if (obj->fenced_gpu_access) {
2373 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
2374 i915_gem_flush_ring(obj->base.dev,
2375 obj->last_fenced_ring,
2376 0, obj->base.write_domain);
2377
2378 obj->fenced_gpu_access = false;
2379 }
2380
2381 if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
2382 if (!ring_passed_seqno(obj->last_fenced_ring,
2383 obj->last_fenced_seqno)) {
2384 ret = i915_do_wait_request(obj->base.dev,
2385 obj->last_fenced_seqno,
2386 interruptible,
2387 obj->last_fenced_ring);
2388 if (ret)
2389 return ret;
2390 }
2391
2392 obj->last_fenced_seqno = 0;
2393 obj->last_fenced_ring = NULL;
2394 }
2395
2396 return 0;
2397}
2398
2399int
2400i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2401{
2402 int ret;
2403
2404 if (obj->tiling_mode)
2405 i915_gem_release_mmap(obj);
2406
2407 ret = i915_gem_object_flush_fence(obj, NULL, true);
2408 if (ret)
2409 return ret;
2410
2411 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2412 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2413 i915_gem_clear_fence_reg(obj->base.dev,
2414 &dev_priv->fence_regs[obj->fence_reg]);
2415
2416 obj->fence_reg = I915_FENCE_REG_NONE;
2417 }
2418
2419 return 0;
2420}
2421
2422static struct drm_i915_fence_reg *
2423i915_find_fence_reg(struct drm_device *dev,
2424 struct intel_ring_buffer *pipelined)
2385{ 2425{
2386 struct drm_i915_fence_reg *reg = NULL;
2387 struct drm_i915_gem_object *obj_priv = NULL;
2388 struct drm_i915_private *dev_priv = dev->dev_private; 2426 struct drm_i915_private *dev_priv = dev->dev_private;
2389 struct drm_gem_object *obj = NULL; 2427 struct drm_i915_fence_reg *reg, *first, *avail;
2390 int i, avail, ret; 2428 int i;
2391 2429
2392 /* First try to find a free reg */ 2430 /* First try to find a free reg */
2393 avail = 0; 2431 avail = NULL;
2394 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) { 2432 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2395 reg = &dev_priv->fence_regs[i]; 2433 reg = &dev_priv->fence_regs[i];
2396 if (!reg->obj) 2434 if (!reg->obj)
2397 return i; 2435 return reg;
2398 2436
2399 obj_priv = to_intel_bo(reg->obj); 2437 if (!reg->obj->pin_count)
2400 if (!obj_priv->pin_count) 2438 avail = reg;
2401 avail++;
2402 } 2439 }
2403 2440
2404 if (avail == 0) 2441 if (avail == NULL)
2405 return -ENOSPC; 2442 return NULL;
2406 2443
2407 /* None available, try to steal one or wait for a user to finish */ 2444 /* None available, try to steal one or wait for a user to finish */
2408 i = I915_FENCE_REG_NONE; 2445 avail = first = NULL;
2409 list_for_each_entry(reg, &dev_priv->mm.fence_list, 2446 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
2410 lru_list) { 2447 if (reg->obj->pin_count)
2411 obj = reg->obj;
2412 obj_priv = to_intel_bo(obj);
2413
2414 if (obj_priv->pin_count)
2415 continue; 2448 continue;
2416 2449
2417 /* found one! */ 2450 if (first == NULL)
2418 i = obj_priv->fence_reg; 2451 first = reg;
2419 break;
2420 }
2421 2452
2422 BUG_ON(i == I915_FENCE_REG_NONE); 2453 if (!pipelined ||
2454 !reg->obj->last_fenced_ring ||
2455 reg->obj->last_fenced_ring == pipelined) {
2456 avail = reg;
2457 break;
2458 }
2459 }
2423 2460
2424 /* We only have a reference on obj from the active list. put_fence_reg 2461 if (avail == NULL)
2425 * might drop that one, causing a use-after-free in it. So hold a 2462 avail = first;
2426 * private reference to obj like the other callers of put_fence_reg
2427 * (set_tiling ioctl) do. */
2428 drm_gem_object_reference(obj);
2429 ret = i915_gem_object_put_fence_reg(obj, interruptible);
2430 drm_gem_object_unreference(obj);
2431 if (ret != 0)
2432 return ret;
2433 2463
2434 return i; 2464 return avail;
2435} 2465}
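
The rewritten search makes two passes in effect: any completely free register wins immediately; otherwise the first unpinned register on the LRU list is remembered as the steal candidate. A toy model of that policy (ignoring the pipelined-ring preference of the real code), with an oldest-first array standing in for the LRU list:

#include <stdio.h>

struct fence { int in_use; int pinned; };

static int find_fence(const struct fence *regs, int n)
{
    int i, first = -1;

    for (i = 0; i < n; i++) {
        if (!regs[i].in_use)
            return i;          /* free register: take it */
        if (!regs[i].pinned && first < 0)
            first = i;         /* oldest stealable candidate */
    }
    return first;              /* -1 if every register is pinned */
}

int main(void)
{
    const struct fence regs[3] = { {1, 1}, {1, 0}, {1, 0} };

    printf("steal reg %d\n", find_fence(regs, 3)); /* 1 */
    return 0;
}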
2436 2466
2437/** 2467/**
2438 * i915_gem_object_get_fence_reg - set up a fence reg for an object 2468 * i915_gem_object_get_fence - set up a fence reg for an object
2439 * @obj: object to map through a fence reg 2469 * @obj: object to map through a fence reg
2470 * @pipelined: ring on which to queue the change, or NULL for CPU access
 2471 * @interruptible: may the wait for the register to retire be interrupted by signals?
2440 * 2472 *
2441 * When mapping objects through the GTT, userspace wants to be able to write 2473 * When mapping objects through the GTT, userspace wants to be able to write
2442 * to them without having to worry about swizzling if the object is tiled. 2474 * to them without having to worry about swizzling if the object is tiled.
@@ -2448,72 +2480,138 @@ static int i915_find_fence_reg(struct drm_device *dev,
2448 * and tiling format. 2480 * and tiling format.
2449 */ 2481 */
2450int 2482int
2451i915_gem_object_get_fence_reg(struct drm_gem_object *obj, 2483i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
2452 bool interruptible) 2484 struct intel_ring_buffer *pipelined,
2485 bool interruptible)
2453{ 2486{
2454 struct drm_device *dev = obj->dev; 2487 struct drm_device *dev = obj->base.dev;
2455 struct drm_i915_private *dev_priv = dev->dev_private; 2488 struct drm_i915_private *dev_priv = dev->dev_private;
2456 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 2489 struct drm_i915_fence_reg *reg;
2457 struct drm_i915_fence_reg *reg = NULL;
2458 int ret; 2490 int ret;
2459 2491
2460 /* Just update our place in the LRU if our fence is getting used. */ 2492 /* XXX disable pipelining. There are bugs. Shocking. */
2461 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { 2493 pipelined = NULL;
2462 reg = &dev_priv->fence_regs[obj_priv->fence_reg]; 2494
2495 /* Just update our place in the LRU if our fence is getting reused. */
2496 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2497 reg = &dev_priv->fence_regs[obj->fence_reg];
2463 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list); 2498 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
2499
2500 if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
2501 pipelined = NULL;
2502
2503 if (!pipelined) {
2504 if (reg->setup_seqno) {
2505 if (!ring_passed_seqno(obj->last_fenced_ring,
2506 reg->setup_seqno)) {
2507 ret = i915_do_wait_request(obj->base.dev,
2508 reg->setup_seqno,
2509 interruptible,
2510 obj->last_fenced_ring);
2511 if (ret)
2512 return ret;
2513 }
2514
2515 reg->setup_seqno = 0;
2516 }
2517 } else if (obj->last_fenced_ring &&
2518 obj->last_fenced_ring != pipelined) {
2519 ret = i915_gem_object_flush_fence(obj,
2520 pipelined,
2521 interruptible);
2522 if (ret)
2523 return ret;
2524 } else if (obj->tiling_changed) {
2525 if (obj->fenced_gpu_access) {
2526 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
2527 i915_gem_flush_ring(obj->base.dev, obj->ring,
2528 0, obj->base.write_domain);
2529
2530 obj->fenced_gpu_access = false;
2531 }
2532 }
2533
2534 if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
2535 pipelined = NULL;
2536 BUG_ON(!pipelined && reg->setup_seqno);
2537
2538 if (obj->tiling_changed) {
2539 if (pipelined) {
2540 reg->setup_seqno =
2541 i915_gem_next_request_seqno(dev, pipelined);
2542 obj->last_fenced_seqno = reg->setup_seqno;
2543 obj->last_fenced_ring = pipelined;
2544 }
2545 goto update;
2546 }
2547
2464 return 0; 2548 return 0;
2465 } 2549 }
2466 2550
2467 switch (obj_priv->tiling_mode) { 2551 reg = i915_find_fence_reg(dev, pipelined);
2468 case I915_TILING_NONE: 2552 if (reg == NULL)
2469 WARN(1, "allocating a fence for non-tiled object?\n"); 2553 return -ENOSPC;
2470 break;
2471 case I915_TILING_X:
2472 if (!obj_priv->stride)
2473 return -EINVAL;
2474 WARN((obj_priv->stride & (512 - 1)),
2475 "object 0x%08x is X tiled but has non-512B pitch\n",
2476 obj_priv->gtt_offset);
2477 break;
2478 case I915_TILING_Y:
2479 if (!obj_priv->stride)
2480 return -EINVAL;
2481 WARN((obj_priv->stride & (128 - 1)),
2482 "object 0x%08x is Y tiled but has non-128B pitch\n",
2483 obj_priv->gtt_offset);
2484 break;
2485 }
2486 2554
2487 ret = i915_find_fence_reg(dev, interruptible); 2555 ret = i915_gem_object_flush_fence(obj, pipelined, interruptible);
2488 if (ret < 0) 2556 if (ret)
2489 return ret; 2557 return ret;
2490 2558
2491 obj_priv->fence_reg = ret; 2559 if (reg->obj) {
2492 reg = &dev_priv->fence_regs[obj_priv->fence_reg]; 2560 struct drm_i915_gem_object *old = reg->obj;
2493 list_add_tail(&reg->lru_list, &dev_priv->mm.fence_list); 2561
2562 drm_gem_object_reference(&old->base);
2563
2564 if (old->tiling_mode)
2565 i915_gem_release_mmap(old);
2566
2567 ret = i915_gem_object_flush_fence(old,
2568 pipelined,
2569 interruptible);
2570 if (ret) {
2571 drm_gem_object_unreference(&old->base);
2572 return ret;
2573 }
2574
2575 if (old->last_fenced_seqno == 0 && obj->last_fenced_seqno == 0)
2576 pipelined = NULL;
2577
2578 old->fence_reg = I915_FENCE_REG_NONE;
2579 old->last_fenced_ring = pipelined;
2580 old->last_fenced_seqno =
2581 pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0;
2582
2583 drm_gem_object_unreference(&old->base);
2584 } else if (obj->last_fenced_seqno == 0)
2585 pipelined = NULL;
2494 2586
2495 reg->obj = obj; 2587 reg->obj = obj;
2588 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
2589 obj->fence_reg = reg - dev_priv->fence_regs;
2590 obj->last_fenced_ring = pipelined;
2496 2591
2592 reg->setup_seqno =
2593 pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0;
2594 obj->last_fenced_seqno = reg->setup_seqno;
2595
2596update:
2597 obj->tiling_changed = false;
2497 switch (INTEL_INFO(dev)->gen) { 2598 switch (INTEL_INFO(dev)->gen) {
2498 case 6: 2599 case 6:
2499 sandybridge_write_fence_reg(reg); 2600 ret = sandybridge_write_fence_reg(obj, pipelined);
2500 break; 2601 break;
2501 case 5: 2602 case 5:
2502 case 4: 2603 case 4:
2503 i965_write_fence_reg(reg); 2604 ret = i965_write_fence_reg(obj, pipelined);
2504 break; 2605 break;
2505 case 3: 2606 case 3:
2506 i915_write_fence_reg(reg); 2607 ret = i915_write_fence_reg(obj, pipelined);
2507 break; 2608 break;
2508 case 2: 2609 case 2:
2509 i830_write_fence_reg(reg); 2610 ret = i830_write_fence_reg(obj, pipelined);
2510 break; 2611 break;
2511 } 2612 }
2512 2613
2513 trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg, 2614 return ret;
2514 obj_priv->tiling_mode);
2515
2516 return 0;
2517} 2615}
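
[Editor's note: when i915_gem_object_get_fence() steals a register, it flushes the old owner, detaches it, installs the new object, and records a setup_seqno (0 when not pipelined). A sketch of that hand-over under hypothetical types, with error handling trimmed to the shape of the code above:]

	struct obj { int fence_reg; };
	struct freg { struct obj *owner; unsigned setup_seqno; };

	/* Hand a fence register from its old owner to a new object; the
	 * old user must be flushed and disassociated before reuse. */
	static int steal_fence(struct freg *reg, struct obj *new_owner,
			       int new_idx, unsigned next_seqno,
			       int (*flush)(struct obj *))
	{
		if (reg->owner) {
			int ret = flush(reg->owner);	/* wait for old user */
			if (ret)
				return ret;
			reg->owner->fence_reg = -1;	/* I915_FENCE_REG_NONE */
		}
		reg->owner = new_owner;
		new_owner->fence_reg = new_idx;
		reg->setup_seqno = next_seqno;		/* 0 when not pipelined */
		return 0;
	}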
2518 2616
2519/** 2617/**
@@ -2521,154 +2619,127 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
2521 * @obj: object to clear 2619 * @obj: object to clear
2522 * 2620 *
2523 * Zeroes out the fence register itself and clears out the associated 2621 * Zeroes out the fence register itself and clears out the associated
2524 * data structures in dev_priv and obj_priv. 2622 * data structures in dev_priv and obj.
2525 */ 2623 */
2526static void 2624static void
2527i915_gem_clear_fence_reg(struct drm_gem_object *obj) 2625i915_gem_clear_fence_reg(struct drm_device *dev,
2626 struct drm_i915_fence_reg *reg)
2528{ 2627{
2529 struct drm_device *dev = obj->dev;
2530 drm_i915_private_t *dev_priv = dev->dev_private; 2628 drm_i915_private_t *dev_priv = dev->dev_private;
2531 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 2629 uint32_t fence_reg = reg - dev_priv->fence_regs;
2532 struct drm_i915_fence_reg *reg =
2533 &dev_priv->fence_regs[obj_priv->fence_reg];
2534 uint32_t fence_reg;
2535 2630
2536 switch (INTEL_INFO(dev)->gen) { 2631 switch (INTEL_INFO(dev)->gen) {
2537 case 6: 2632 case 6:
2538 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + 2633 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0);
2539 (obj_priv->fence_reg * 8), 0);
2540 break; 2634 break;
2541 case 5: 2635 case 5:
2542 case 4: 2636 case 4:
2543 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0); 2637 I915_WRITE64(FENCE_REG_965_0 + fence_reg*8, 0);
2544 break; 2638 break;
2545 case 3: 2639 case 3:
2546 if (obj_priv->fence_reg >= 8) 2640 if (fence_reg >= 8)
2547 fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4; 2641 fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
2548 else 2642 else
2549 case 2: 2643 case 2:
2550 fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4; 2644 fence_reg = FENCE_REG_830_0 + fence_reg * 4;
2551 2645
2552 I915_WRITE(fence_reg, 0); 2646 I915_WRITE(fence_reg, 0);
2553 break; 2647 break;
2554 } 2648 }
2555 2649
2556 reg->obj = NULL;
2557 obj_priv->fence_reg = I915_FENCE_REG_NONE;
2558 list_del_init(&reg->lru_list); 2650 list_del_init(&reg->lru_list);
2559} 2651 reg->obj = NULL;
2560 2652 reg->setup_seqno = 0;
2561/**
2562 * i915_gem_object_put_fence_reg - waits on outstanding fenced access
2563 * to the buffer to finish, and then resets the fence register.
2564 * @obj: tiled object holding a fence register.
2565 * @bool: whether the wait upon the fence is interruptible
2566 *
2567 * Zeroes out the fence register itself and clears out the associated
2568 * data structures in dev_priv and obj_priv.
2569 */
2570int
2571i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
2572 bool interruptible)
2573{
2574 struct drm_device *dev = obj->dev;
2575 struct drm_i915_private *dev_priv = dev->dev_private;
2576 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2577 struct drm_i915_fence_reg *reg;
2578
2579 if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
2580 return 0;
2581
2582 /* If we've changed tiling, GTT-mappings of the object
2583 * need to re-fault to ensure that the correct fence register
2584 * setup is in place.
2585 */
2586 i915_gem_release_mmap(obj);
2587
2588 /* On the i915, GPU access to tiled buffers is via a fence,
2589 * therefore we must wait for any outstanding access to complete
2590 * before clearing the fence.
2591 */
2592 reg = &dev_priv->fence_regs[obj_priv->fence_reg];
2593 if (reg->gpu) {
2594 int ret;
2595
2596 ret = i915_gem_object_flush_gpu_write_domain(obj);
2597 if (ret)
2598 return ret;
2599
2600 ret = i915_gem_object_wait_rendering(obj, interruptible);
2601 if (ret)
2602 return ret;
2603
2604 reg->gpu = false;
2605 }
2606
2607 i915_gem_object_flush_gtt_write_domain(obj);
2608 i915_gem_clear_fence_reg(obj);
2609
2610 return 0;
2611} 2653}
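
[Editor's note: the unusual "else case 2:" fallthrough in i915_gem_clear_fence_reg() works because gen3 hardware keeps fence registers 8-15 in a separate bank (FENCE_REG_945_8) while registers 0-7 share gen2's FENCE_REG_830_0 block, so gen3's low registers reuse the gen2 computation. The same control flow in more conventional form, with illustrative offsets standing in for the real register bases:]

	/* Equivalent of the "else case 2:" fallthrough: gen3 regs >= 8
	 * live in their own bank; everything else uses the gen2 block. */
	static unsigned fence_mmio_offset(int gen, unsigned reg)
	{
		if (gen == 3 && reg >= 8)
			return 0x3000 + (reg - 8) * 4;	/* ~FENCE_REG_945_8 */
		return 0x2000 + reg * 4;		/* ~FENCE_REG_830_0 */
	}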
2612 2654
2613/** 2655/**
2614 * Finds free space in the GTT aperture and binds the object there. 2656 * Finds free space in the GTT aperture and binds the object there.
2615 */ 2657 */
2616static int 2658static int
2617i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) 2659i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2660 unsigned alignment,
2661 bool map_and_fenceable)
2618{ 2662{
2619 struct drm_device *dev = obj->dev; 2663 struct drm_device *dev = obj->base.dev;
2620 drm_i915_private_t *dev_priv = dev->dev_private; 2664 drm_i915_private_t *dev_priv = dev->dev_private;
2621 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2622 struct drm_mm_node *free_space; 2665 struct drm_mm_node *free_space;
2623 gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN; 2666 gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
2667 u32 size, fence_size, fence_alignment, unfenced_alignment;
2668 bool mappable, fenceable;
2624 int ret; 2669 int ret;
2625 2670
2626 if (obj_priv->madv != I915_MADV_WILLNEED) { 2671 if (obj->madv != I915_MADV_WILLNEED) {
2627 DRM_ERROR("Attempting to bind a purgeable object\n"); 2672 DRM_ERROR("Attempting to bind a purgeable object\n");
2628 return -EINVAL; 2673 return -EINVAL;
2629 } 2674 }
2630 2675
2676 fence_size = i915_gem_get_gtt_size(obj);
2677 fence_alignment = i915_gem_get_gtt_alignment(obj);
2678 unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(obj);
2679
2631 if (alignment == 0) 2680 if (alignment == 0)
2632 alignment = i915_gem_get_gtt_alignment(obj); 2681 alignment = map_and_fenceable ? fence_alignment :
2633 if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) { 2682 unfenced_alignment;
2683 if (map_and_fenceable && alignment & (fence_alignment - 1)) {
2634 DRM_ERROR("Invalid object alignment requested %u\n", alignment); 2684 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2635 return -EINVAL; 2685 return -EINVAL;
2636 } 2686 }
2637 2687
2688 size = map_and_fenceable ? fence_size : obj->base.size;
2689
2638 /* If the object is bigger than the entire aperture, reject it early 2690 /* If the object is bigger than the entire aperture, reject it early
2639 * before evicting everything in a vain attempt to find space. 2691 * before evicting everything in a vain attempt to find space.
2640 */ 2692 */
2641 if (obj->size > dev_priv->mm.gtt_total) { 2693 if (obj->base.size >
2694 (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
2642 DRM_ERROR("Attempting to bind an object larger than the aperture\n"); 2695 DRM_ERROR("Attempting to bind an object larger than the aperture\n");
2643 return -E2BIG; 2696 return -E2BIG;
2644 } 2697 }
2645 2698
2646 search_free: 2699 search_free:
2647 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space, 2700 if (map_and_fenceable)
2648 obj->size, alignment, 0); 2701 free_space =
2649 if (free_space != NULL) 2702 drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
2650 obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size, 2703 size, alignment, 0,
2651 alignment); 2704 dev_priv->mm.gtt_mappable_end,
2652 if (obj_priv->gtt_space == NULL) { 2705 0);
2706 else
2707 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
2708 size, alignment, 0);
2709
2710 if (free_space != NULL) {
2711 if (map_and_fenceable)
2712 obj->gtt_space =
2713 drm_mm_get_block_range_generic(free_space,
2714 size, alignment, 0,
2715 dev_priv->mm.gtt_mappable_end,
2716 0);
2717 else
2718 obj->gtt_space =
2719 drm_mm_get_block(free_space, size, alignment);
2720 }
2721 if (obj->gtt_space == NULL) {
2653 /* If the gtt is empty and we're still having trouble 2722 /* If the gtt is empty and we're still having trouble
2654 * fitting our object in, we're out of memory. 2723 * fitting our object in, we're out of memory.
2655 */ 2724 */
2656 ret = i915_gem_evict_something(dev, obj->size, alignment); 2725 ret = i915_gem_evict_something(dev, size, alignment,
2726 map_and_fenceable);
2657 if (ret) 2727 if (ret)
2658 return ret; 2728 return ret;
2659 2729
2660 goto search_free; 2730 goto search_free;
2661 } 2731 }
2662 2732
2663 ret = i915_gem_object_get_pages(obj, gfpmask); 2733 ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
2664 if (ret) { 2734 if (ret) {
2665 drm_mm_put_block(obj_priv->gtt_space); 2735 drm_mm_put_block(obj->gtt_space);
2666 obj_priv->gtt_space = NULL; 2736 obj->gtt_space = NULL;
2667 2737
2668 if (ret == -ENOMEM) { 2738 if (ret == -ENOMEM) {
2669 /* first try to clear up some space from the GTT */ 2739 /* first try to clear up some space from the GTT */
2670 ret = i915_gem_evict_something(dev, obj->size, 2740 ret = i915_gem_evict_something(dev, size,
2671 alignment); 2741 alignment,
2742 map_and_fenceable);
2672 if (ret) { 2743 if (ret) {
2673 /* now try to shrink everyone else */ 2744 /* now try to shrink everyone else */
2674 if (gfpmask) { 2745 if (gfpmask) {
@@ -2685,122 +2756,113 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2685 return ret; 2756 return ret;
2686 } 2757 }
2687 2758
2688 /* Create an AGP memory structure pointing at our pages, and bind it 2759 ret = i915_gem_gtt_bind_object(obj);
2689 * into the GTT. 2760 if (ret) {
2690 */ 2761 i915_gem_object_put_pages_gtt(obj);
2691 obj_priv->agp_mem = drm_agp_bind_pages(dev, 2762 drm_mm_put_block(obj->gtt_space);
2692 obj_priv->pages, 2763 obj->gtt_space = NULL;
2693 obj->size >> PAGE_SHIFT, 2764
2694 obj_priv->gtt_space->start, 2765 ret = i915_gem_evict_something(dev, size,
2695 obj_priv->agp_type); 2766 alignment, map_and_fenceable);
2696 if (obj_priv->agp_mem == NULL) {
2697 i915_gem_object_put_pages(obj);
2698 drm_mm_put_block(obj_priv->gtt_space);
2699 obj_priv->gtt_space = NULL;
2700
2701 ret = i915_gem_evict_something(dev, obj->size, alignment);
2702 if (ret) 2767 if (ret)
2703 return ret; 2768 return ret;
2704 2769
2705 goto search_free; 2770 goto search_free;
2706 } 2771 }
2707 2772
2708 /* keep track of the bound object by adding it to the inactive list */ 2773 list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
2709 list_add_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); 2774 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
2710 i915_gem_info_add_gtt(dev_priv, obj->size);
2711 2775
2712 /* Assert that the object is not currently in any GPU domain. As it 2776 /* Assert that the object is not currently in any GPU domain. As it
2713 * wasn't in the GTT, there shouldn't be any way it could have been in 2777 * wasn't in the GTT, there shouldn't be any way it could have been in
2714 * a GPU cache 2778 * a GPU cache
2715 */ 2779 */
2716 BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS); 2780 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2717 BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS); 2781 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2782
2783 obj->gtt_offset = obj->gtt_space->start;
2718 2784
2719 obj_priv->gtt_offset = obj_priv->gtt_space->start; 2785 fenceable =
2720 trace_i915_gem_object_bind(obj, obj_priv->gtt_offset); 2786 obj->gtt_space->size == fence_size &&
2787 (obj->gtt_space->start & (fence_alignment - 1)) == 0;
2721 2788
2789 mappable =
2790 obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
2791
2792 obj->map_and_fenceable = mappable && fenceable;
2793
2794 trace_i915_gem_object_bind(obj, obj->gtt_offset, map_and_fenceable);
2722 return 0; 2795 return 0;
2723} 2796}
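
[Editor's note: the new map_and_fenceable bookkeeping at the end of i915_gem_object_bind_to_gtt() declares an object fenceable when its GTT node is exactly fence_size and fence-aligned, and mappable when it ends at or below gtt_mappable_end. A self-contained restatement of that predicate, with plain integers standing in for the drm_mm node fields:]

	#include <stdbool.h>
	#include <stdint.h>

	/* Mirrors the fenceable/mappable tests performed after binding. */
	static bool is_map_and_fenceable(uint64_t start, uint64_t node_size,
					 uint64_t obj_size,
					 uint64_t fence_size,
					 uint64_t fence_alignment,
					 uint64_t mappable_end)
	{
		bool fenceable = node_size == fence_size &&
				 (start & (fence_alignment - 1)) == 0;
		bool mappable = start + obj_size <= mappable_end;
		return fenceable && mappable;
	}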
2724 2797
2725void 2798void
2726i915_gem_clflush_object(struct drm_gem_object *obj) 2799i915_gem_clflush_object(struct drm_i915_gem_object *obj)
2727{ 2800{
2728 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2729
2730 /* If we don't have a page list set up, then we're not pinned 2801 /* If we don't have a page list set up, then we're not pinned
2731 * to GPU, and we can ignore the cache flush because it'll happen 2802 * to GPU, and we can ignore the cache flush because it'll happen
2732 * again at bind time. 2803 * again at bind time.
2733 */ 2804 */
2734 if (obj_priv->pages == NULL) 2805 if (obj->pages == NULL)
2735 return; 2806 return;
2736 2807
2737 trace_i915_gem_object_clflush(obj); 2808 trace_i915_gem_object_clflush(obj);
2738 2809
2739 drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE); 2810 drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
2740} 2811}
2741 2812
2742/** Flushes any GPU write domain for the object if it's dirty. */ 2813/** Flushes any GPU write domain for the object if it's dirty. */
2743static int 2814static void
2744i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) 2815i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
2745{ 2816{
2746 struct drm_device *dev = obj->dev; 2817 struct drm_device *dev = obj->base.dev;
2747 uint32_t old_write_domain;
2748 2818
2749 if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) 2819 if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
2750 return 0; 2820 return;
2751 2821
2752 /* Queue the GPU write cache flushing we need. */ 2822 /* Queue the GPU write cache flushing we need. */
2753 old_write_domain = obj->write_domain; 2823 i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain);
2754 i915_gem_flush_ring(dev, NULL, 2824 BUG_ON(obj->base.write_domain);
2755 to_intel_bo(obj)->ring,
2756 0, obj->write_domain);
2757 BUG_ON(obj->write_domain);
2758
2759 trace_i915_gem_object_change_domain(obj,
2760 obj->read_domains,
2761 old_write_domain);
2762
2763 return 0;
2764} 2825}
2765 2826
2766/** Flushes the GTT write domain for the object if it's dirty. */ 2827/** Flushes the GTT write domain for the object if it's dirty. */
2767static void 2828static void
2768i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj) 2829i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
2769{ 2830{
2770 uint32_t old_write_domain; 2831 uint32_t old_write_domain;
2771 2832
2772 if (obj->write_domain != I915_GEM_DOMAIN_GTT) 2833 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
2773 return; 2834 return;
2774 2835
2775 /* No actual flushing is required for the GTT write domain. Writes 2836 /* No actual flushing is required for the GTT write domain. Writes
2776 * to it immediately go to main memory as far as we know, so there's 2837 * to it immediately go to main memory as far as we know, so there's
2777 * no chipset flush. It also doesn't land in render cache. 2838 * no chipset flush. It also doesn't land in render cache.
2778 */ 2839 */
2779 old_write_domain = obj->write_domain; 2840 i915_gem_release_mmap(obj);
2780 obj->write_domain = 0; 2841
2842 old_write_domain = obj->base.write_domain;
2843 obj->base.write_domain = 0;
2781 2844
2782 trace_i915_gem_object_change_domain(obj, 2845 trace_i915_gem_object_change_domain(obj,
2783 obj->read_domains, 2846 obj->base.read_domains,
2784 old_write_domain); 2847 old_write_domain);
2785} 2848}
2786 2849
2787/** Flushes the CPU write domain for the object if it's dirty. */ 2850/** Flushes the CPU write domain for the object if it's dirty. */
2788static void 2851static void
2789i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj) 2852i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
2790{ 2853{
2791 struct drm_device *dev = obj->dev;
2792 uint32_t old_write_domain; 2854 uint32_t old_write_domain;
2793 2855
2794 if (obj->write_domain != I915_GEM_DOMAIN_CPU) 2856 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
2795 return; 2857 return;
2796 2858
2797 i915_gem_clflush_object(obj); 2859 i915_gem_clflush_object(obj);
2798 drm_agp_chipset_flush(dev); 2860 intel_gtt_chipset_flush();
2799 old_write_domain = obj->write_domain; 2861 old_write_domain = obj->base.write_domain;
2800 obj->write_domain = 0; 2862 obj->base.write_domain = 0;
2801 2863
2802 trace_i915_gem_object_change_domain(obj, 2864 trace_i915_gem_object_change_domain(obj,
2803 obj->read_domains, 2865 obj->base.read_domains,
2804 old_write_domain); 2866 old_write_domain);
2805} 2867}
2806 2868
@@ -2811,37 +2873,36 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
2811 * flushes to occur. 2873 * flushes to occur.
2812 */ 2874 */
2813int 2875int
2814i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) 2876i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
2815{ 2877{
2816 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2817 uint32_t old_write_domain, old_read_domains; 2878 uint32_t old_write_domain, old_read_domains;
2818 int ret; 2879 int ret;
2819 2880
2820 /* Not valid to be called on unbound objects. */ 2881 /* Not valid to be called on unbound objects. */
2821 if (obj_priv->gtt_space == NULL) 2882 if (obj->gtt_space == NULL)
2822 return -EINVAL; 2883 return -EINVAL;
2823 2884
2824 ret = i915_gem_object_flush_gpu_write_domain(obj); 2885 i915_gem_object_flush_gpu_write_domain(obj);
2825 if (ret != 0) 2886 if (obj->pending_gpu_write || write) {
2826 return ret; 2887 ret = i915_gem_object_wait_rendering(obj, true);
2827 ret = i915_gem_object_wait_rendering(obj, true); 2888 if (ret)
2828 if (ret) 2889 return ret;
2829 return ret; 2890 }
2830 2891
2831 i915_gem_object_flush_cpu_write_domain(obj); 2892 i915_gem_object_flush_cpu_write_domain(obj);
2832 2893
2833 old_write_domain = obj->write_domain; 2894 old_write_domain = obj->base.write_domain;
2834 old_read_domains = obj->read_domains; 2895 old_read_domains = obj->base.read_domains;
2835 2896
2836 /* It should now be out of any other write domains, and we can update 2897 /* It should now be out of any other write domains, and we can update
2837 * the domain values for our changes. 2898 * the domain values for our changes.
2838 */ 2899 */
2839 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0); 2900 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
2840 obj->read_domains |= I915_GEM_DOMAIN_GTT; 2901 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
2841 if (write) { 2902 if (write) {
2842 obj->read_domains = I915_GEM_DOMAIN_GTT; 2903 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
2843 obj->write_domain = I915_GEM_DOMAIN_GTT; 2904 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
2844 obj_priv->dirty = 1; 2905 obj->dirty = 1;
2845 } 2906 }
2846 2907
2847 trace_i915_gem_object_change_domain(obj, 2908 trace_i915_gem_object_change_domain(obj,
@@ -2856,23 +2917,20 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
2856 * wait, as in the modesetting process we're not supposed to be interrupted. 2917 * wait, as in the modesetting process we're not supposed to be interrupted.
2857 */ 2918 */
2858int 2919int
2859i915_gem_object_set_to_display_plane(struct drm_gem_object *obj, 2920i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
2860 bool pipelined) 2921 struct intel_ring_buffer *pipelined)
2861{ 2922{
2862 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2863 uint32_t old_read_domains; 2923 uint32_t old_read_domains;
2864 int ret; 2924 int ret;
2865 2925
2866 /* Not valid to be called on unbound objects. */ 2926 /* Not valid to be called on unbound objects. */
2867 if (obj_priv->gtt_space == NULL) 2927 if (obj->gtt_space == NULL)
2868 return -EINVAL; 2928 return -EINVAL;
2869 2929
2870 ret = i915_gem_object_flush_gpu_write_domain(obj); 2930 i915_gem_object_flush_gpu_write_domain(obj);
2871 if (ret)
2872 return ret;
2873 2931
2874 /* Currently, we are always called from a non-interruptible context. */ 2932 if (pipelined != obj->ring) {
2875 if (!pipelined) { 2933 if (pipelined != obj->ring) {
2876 ret = i915_gem_object_wait_rendering(obj, false); 2934 ret = i915_gem_object_wait_rendering(obj, false);
2877 if (ret) 2935 if (ret)
2878 return ret; 2936 return ret;
@@ -2880,12 +2938,12 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
2880 2938
2881 i915_gem_object_flush_cpu_write_domain(obj); 2939 i915_gem_object_flush_cpu_write_domain(obj);
2882 2940
2883 old_read_domains = obj->read_domains; 2941 old_read_domains = obj->base.read_domains;
2884 obj->read_domains |= I915_GEM_DOMAIN_GTT; 2942 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
2885 2943
2886 trace_i915_gem_object_change_domain(obj, 2944 trace_i915_gem_object_change_domain(obj,
2887 old_read_domains, 2945 old_read_domains,
2888 obj->write_domain); 2946 obj->base.write_domain);
2889 2947
2890 return 0; 2948 return 0;
2891} 2949}
@@ -2898,10 +2956,10 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
2898 return 0; 2956 return 0;
2899 2957
2900 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) 2958 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
2901 i915_gem_flush_ring(obj->base.dev, NULL, obj->ring, 2959 i915_gem_flush_ring(obj->base.dev, obj->ring,
2902 0, obj->base.write_domain); 2960 0, obj->base.write_domain);
2903 2961
2904 return i915_gem_object_wait_rendering(&obj->base, interruptible); 2962 return i915_gem_object_wait_rendering(obj, interruptible);
2905} 2963}
2906 2964
2907/** 2965/**
@@ -2911,14 +2969,12 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
2911 * flushes to occur. 2969 * flushes to occur.
2912 */ 2970 */
2913static int 2971static int
2914i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) 2972i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
2915{ 2973{
2916 uint32_t old_write_domain, old_read_domains; 2974 uint32_t old_write_domain, old_read_domains;
2917 int ret; 2975 int ret;
2918 2976
2919 ret = i915_gem_object_flush_gpu_write_domain(obj); 2977 i915_gem_object_flush_gpu_write_domain(obj);
2920 if (ret != 0)
2921 return ret;
2922 ret = i915_gem_object_wait_rendering(obj, true); 2978 ret = i915_gem_object_wait_rendering(obj, true);
2923 if (ret) 2979 if (ret)
2924 return ret; 2980 return ret;
@@ -2930,27 +2986,27 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
2930 */ 2986 */
2931 i915_gem_object_set_to_full_cpu_read_domain(obj); 2987 i915_gem_object_set_to_full_cpu_read_domain(obj);
2932 2988
2933 old_write_domain = obj->write_domain; 2989 old_write_domain = obj->base.write_domain;
2934 old_read_domains = obj->read_domains; 2990 old_read_domains = obj->base.read_domains;
2935 2991
2936 /* Flush the CPU cache if it's still invalid. */ 2992 /* Flush the CPU cache if it's still invalid. */
2937 if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) { 2993 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
2938 i915_gem_clflush_object(obj); 2994 i915_gem_clflush_object(obj);
2939 2995
2940 obj->read_domains |= I915_GEM_DOMAIN_CPU; 2996 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
2941 } 2997 }
2942 2998
2943 /* It should now be out of any other write domains, and we can update 2999 /* It should now be out of any other write domains, and we can update
2944 * the domain values for our changes. 3000 * the domain values for our changes.
2945 */ 3001 */
2946 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0); 3002 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
2947 3003
2948 /* If we're writing through the CPU, then the GPU read domains will 3004 /* If we're writing through the CPU, then the GPU read domains will
2949 * need to be invalidated at next use. 3005 * need to be invalidated at next use.
2950 */ 3006 */
2951 if (write) { 3007 if (write) {
2952 obj->read_domains = I915_GEM_DOMAIN_CPU; 3008 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
2953 obj->write_domain = I915_GEM_DOMAIN_CPU; 3009 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2954 } 3010 }
2955 3011
2956 trace_i915_gem_object_change_domain(obj, 3012 trace_i915_gem_object_change_domain(obj,
@@ -2960,184 +3016,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
2960 return 0; 3016 return 0;
2961} 3017}
2962 3018
2963/*
2964 * Set the next domain for the specified object. This
2965 * may not actually perform the necessary flushing/invalidating though,
2966 * as that may want to be batched with other set_domain operations
2967 *
2968 * This is (we hope) the only really tricky part of gem. The goal
2969 * is fairly simple -- track which caches hold bits of the object
2970 * and make sure they remain coherent. A few concrete examples may
2971 * help to explain how it works. For shorthand, we use the notation
2972 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
2973 * a pair of read and write domain masks.
2974 *
2975 * Case 1: the batch buffer
2976 *
2977 * 1. Allocated
2978 * 2. Written by CPU
2979 * 3. Mapped to GTT
2980 * 4. Read by GPU
2981 * 5. Unmapped from GTT
2982 * 6. Freed
2983 *
2984 * Let's take these a step at a time
2985 *
2986 * 1. Allocated
2987 * Pages allocated from the kernel may still have
2988 * cache contents, so we set them to (CPU, CPU) always.
2989 * 2. Written by CPU (using pwrite)
2990 * The pwrite function calls set_domain (CPU, CPU) and
2991 * this function does nothing (as nothing changes)
2992 * 3. Mapped to GTT
2993 * This function asserts that the object is not
2994 * currently in any GPU-based read or write domains
2995 * 4. Read by GPU
2996 * i915_gem_execbuffer calls set_domain (COMMAND, 0).
2997 * As write_domain is zero, this function adds in the
2998 * current read domains (CPU+COMMAND, 0).
2999 * flush_domains is set to CPU.
3000 * invalidate_domains is set to COMMAND
3001 * clflush is run to get data out of the CPU caches
3002 * then i915_dev_set_domain calls i915_gem_flush to
3003 * emit an MI_FLUSH and drm_agp_chipset_flush
3004 * 5. Unmapped from GTT
3005 * i915_gem_object_unbind calls set_domain (CPU, CPU)
3006 * flush_domains and invalidate_domains end up both zero
3007 * so no flushing/invalidating happens
3008 * 6. Freed
3009 * yay, done
3010 *
3011 * Case 2: The shared render buffer
3012 *
3013 * 1. Allocated
3014 * 2. Mapped to GTT
3015 * 3. Read/written by GPU
3016 * 4. set_domain to (CPU,CPU)
3017 * 5. Read/written by CPU
3018 * 6. Read/written by GPU
3019 *
3020 * 1. Allocated
3021 * Same as last example, (CPU, CPU)
3022 * 2. Mapped to GTT
3023 * Nothing changes (assertions find that it is not in the GPU)
3024 * 3. Read/written by GPU
3025 * execbuffer calls set_domain (RENDER, RENDER)
3026 * flush_domains gets CPU
3027 * invalidate_domains gets GPU
3028 * clflush (obj)
3029 * MI_FLUSH and drm_agp_chipset_flush
3030 * 4. set_domain (CPU, CPU)
3031 * flush_domains gets GPU
3032 * invalidate_domains gets CPU
3033 * wait_rendering (obj) to make sure all drawing is complete.
3034 * This will include an MI_FLUSH to get the data from GPU
3035 * to memory
3036 * clflush (obj) to invalidate the CPU cache
3037 * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
3038 * 5. Read/written by CPU
3039 * cache lines are loaded and dirtied
3040 * 6. Read/written by GPU
3041 * Same as last GPU access
3042 *
3043 * Case 3: The constant buffer
3044 *
3045 * 1. Allocated
3046 * 2. Written by CPU
3047 * 3. Read by GPU
3048 * 4. Updated (written) by CPU again
3049 * 5. Read by GPU
3050 *
3051 * 1. Allocated
3052 * (CPU, CPU)
3053 * 2. Written by CPU
3054 * (CPU, CPU)
3055 * 3. Read by GPU
3056 * (CPU+RENDER, 0)
3057 * flush_domains = CPU
3058 * invalidate_domains = RENDER
3059 * clflush (obj)
3060 * MI_FLUSH
3061 * drm_agp_chipset_flush
3062 * 4. Updated (written) by CPU again
3063 * (CPU, CPU)
3064 * flush_domains = 0 (no previous write domain)
3065 * invalidate_domains = 0 (no new read domains)
3066 * 5. Read by GPU
3067 * (CPU+RENDER, 0)
3068 * flush_domains = CPU
3069 * invalidate_domains = RENDER
3070 * clflush (obj)
3071 * MI_FLUSH
3072 * drm_agp_chipset_flush
3073 */
3074static void
3075i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
3076 struct intel_ring_buffer *ring)
3077{
3078 struct drm_device *dev = obj->dev;
3079 struct drm_i915_private *dev_priv = dev->dev_private;
3080 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
3081 uint32_t invalidate_domains = 0;
3082 uint32_t flush_domains = 0;
3083 uint32_t old_read_domains;
3084
3085 intel_mark_busy(dev, obj);
3086
3087 /*
3088 * If the object isn't moving to a new write domain,
3089 * let the object stay in multiple read domains
3090 */
3091 if (obj->pending_write_domain == 0)
3092 obj->pending_read_domains |= obj->read_domains;
3093 else
3094 obj_priv->dirty = 1;
3095
3096 /*
3097 * Flush the current write domain if
3098 * the new read domains don't match. Invalidate
3099 * any read domains which differ from the old
3100 * write domain
3101 */
3102 if (obj->write_domain &&
3103 (obj->write_domain != obj->pending_read_domains ||
3104 obj_priv->ring != ring)) {
3105 flush_domains |= obj->write_domain;
3106 invalidate_domains |=
3107 obj->pending_read_domains & ~obj->write_domain;
3108 }
3109 /*
3110 * Invalidate any read caches which may have
3111 * stale data. That is, any new read domains.
3112 */
3113 invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
3114 if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
3115 i915_gem_clflush_object(obj);
3116
3117 old_read_domains = obj->read_domains;
3118
3119 /* The actual obj->write_domain will be updated with
3120 * pending_write_domain after we emit the accumulated flush for all
3121 * of our domain changes in execbuffers (which clears objects'
3122 * write_domains). So if we have a current write domain that we
3123 * aren't changing, set pending_write_domain to that.
3124 */
3125 if (flush_domains == 0 && obj->pending_write_domain == 0)
3126 obj->pending_write_domain = obj->write_domain;
3127 obj->read_domains = obj->pending_read_domains;
3128
3129 dev->invalidate_domains |= invalidate_domains;
3130 dev->flush_domains |= flush_domains;
3131 if (flush_domains & I915_GEM_GPU_DOMAINS)
3132 dev_priv->mm.flush_rings |= obj_priv->ring->id;
3133 if (invalidate_domains & I915_GEM_GPU_DOMAINS)
3134 dev_priv->mm.flush_rings |= ring->id;
3135
3136 trace_i915_gem_object_change_domain(obj,
3137 old_read_domains,
3138 obj->write_domain);
3139}
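
[Editor's note: the removed i915_gem_object_set_to_gpu_domain() above implements the (read_domains, write_domain) bookkeeping described in the long comment: moving an object flushes its old write domain and invalidates any newly-added read domains. A toy model of that accumulation, with illustrative domain bitmasks rather than the kernel's and only the first flush condition modeled:]

	#include <stdint.h>

	struct domains { uint32_t read, write; };

	/* Compute which caches must be flushed (the old write domain) and
	 * which must be invalidated (newly added read domains) for a move. */
	static void move_to_domain(struct domains *obj,
				   uint32_t new_read, uint32_t new_write,
				   uint32_t *flush, uint32_t *invalidate)
	{
		*flush = 0;
		*invalidate = 0;
		if (obj->write && obj->write != new_read)
			*flush |= obj->write;		/* old writer goes stale */
		*invalidate |= new_read & ~obj->read;	/* fresh readers clflush */
		obj->read = new_write ? new_read : (obj->read | new_read);
		obj->write = new_write;
	}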
3140
3141/** 3019/**
3142 * Moves the object from a partially CPU read to a full one. 3020 * Moves the object from a partially CPU read to a full one.
3143 * 3021 *
@@ -3145,30 +3023,28 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
3145 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU). 3023 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
3146 */ 3024 */
3147static void 3025static void
3148i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) 3026i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj)
3149{ 3027{
3150 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 3028 if (!obj->page_cpu_valid)
3151
3152 if (!obj_priv->page_cpu_valid)
3153 return; 3029 return;
3154 3030
3155 /* If we're partially in the CPU read domain, finish moving it in. 3031 /* If we're partially in the CPU read domain, finish moving it in.
3156 */ 3032 */
3157 if (obj->read_domains & I915_GEM_DOMAIN_CPU) { 3033 if (obj->base.read_domains & I915_GEM_DOMAIN_CPU) {
3158 int i; 3034 int i;
3159 3035
3160 for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) { 3036 for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) {
3161 if (obj_priv->page_cpu_valid[i]) 3037 if (obj->page_cpu_valid[i])
3162 continue; 3038 continue;
3163 drm_clflush_pages(obj_priv->pages + i, 1); 3039 drm_clflush_pages(obj->pages + i, 1);
3164 } 3040 }
3165 } 3041 }
3166 3042
3167 /* Free the page_cpu_valid mappings which are now stale, whether 3043 /* Free the page_cpu_valid mappings which are now stale, whether
3168 * or not we've got I915_GEM_DOMAIN_CPU. 3044 * or not we've got I915_GEM_DOMAIN_CPU.
3169 */ 3045 */
3170 kfree(obj_priv->page_cpu_valid); 3046 kfree(obj->page_cpu_valid);
3171 obj_priv->page_cpu_valid = NULL; 3047 obj->page_cpu_valid = NULL;
3172} 3048}
3173 3049
3174/** 3050/**
@@ -3184,19 +3060,16 @@ i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
3184 * flushes to occur. 3060 * flushes to occur.
3185 */ 3061 */
3186static int 3062static int
3187i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, 3063i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
3188 uint64_t offset, uint64_t size) 3064 uint64_t offset, uint64_t size)
3189{ 3065{
3190 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
3191 uint32_t old_read_domains; 3066 uint32_t old_read_domains;
3192 int i, ret; 3067 int i, ret;
3193 3068
3194 if (offset == 0 && size == obj->size) 3069 if (offset == 0 && size == obj->base.size)
3195 return i915_gem_object_set_to_cpu_domain(obj, 0); 3070 return i915_gem_object_set_to_cpu_domain(obj, 0);
3196 3071
3197 ret = i915_gem_object_flush_gpu_write_domain(obj); 3072 i915_gem_object_flush_gpu_write_domain(obj);
3198 if (ret != 0)
3199 return ret;
3200 ret = i915_gem_object_wait_rendering(obj, true); 3073 ret = i915_gem_object_wait_rendering(obj, true);
3201 if (ret) 3074 if (ret)
3202 return ret; 3075 return ret;
@@ -3204,457 +3077,45 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
3204 i915_gem_object_flush_gtt_write_domain(obj); 3077 i915_gem_object_flush_gtt_write_domain(obj);
3205 3078
3206 /* If we're already fully in the CPU read domain, we're done. */ 3079 /* If we're already fully in the CPU read domain, we're done. */
3207 if (obj_priv->page_cpu_valid == NULL && 3080 if (obj->page_cpu_valid == NULL &&
3208 (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0) 3081 (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0)
3209 return 0; 3082 return 0;
3210 3083
3211 /* Otherwise, create/clear the per-page CPU read domain flag if we're 3084 /* Otherwise, create/clear the per-page CPU read domain flag if we're
3212 * newly adding I915_GEM_DOMAIN_CPU 3085 * newly adding I915_GEM_DOMAIN_CPU
3213 */ 3086 */
3214 if (obj_priv->page_cpu_valid == NULL) { 3087 if (obj->page_cpu_valid == NULL) {
3215 obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE, 3088 obj->page_cpu_valid = kzalloc(obj->base.size / PAGE_SIZE,
3216 GFP_KERNEL); 3089 GFP_KERNEL);
3217 if (obj_priv->page_cpu_valid == NULL) 3090 if (obj->page_cpu_valid == NULL)
3218 return -ENOMEM; 3091 return -ENOMEM;
3219 } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) 3092 } else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
3220 memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE); 3093 memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE);
3221 3094
3222 /* Flush the cache on any pages that are still invalid from the CPU's 3095 /* Flush the cache on any pages that are still invalid from the CPU's
3223 * perspective. 3096 * perspective.
3224 */ 3097 */
3225 for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; 3098 for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
3226 i++) { 3099 i++) {
3227 if (obj_priv->page_cpu_valid[i]) 3100 if (obj->page_cpu_valid[i])
3228 continue; 3101 continue;
3229 3102
3230 drm_clflush_pages(obj_priv->pages + i, 1); 3103 drm_clflush_pages(obj->pages + i, 1);
3231 3104
3232 obj_priv->page_cpu_valid[i] = 1; 3105 obj->page_cpu_valid[i] = 1;
3233 } 3106 }
3234 3107
3235 /* It should now be out of any other write domains, and we can update 3108 /* It should now be out of any other write domains, and we can update
3236 * the domain values for our changes. 3109 * the domain values for our changes.
3237 */ 3110 */
3238 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0); 3111 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3239 3112
3240 old_read_domains = obj->read_domains; 3113 old_read_domains = obj->base.read_domains;
3241 obj->read_domains |= I915_GEM_DOMAIN_CPU; 3114 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3242 3115
3243 trace_i915_gem_object_change_domain(obj, 3116 trace_i915_gem_object_change_domain(obj,
3244 old_read_domains, 3117 old_read_domains,
3245 obj->write_domain); 3118 obj->base.write_domain);
3246
3247 return 0;
3248}
3249
3250static int
3251i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
3252 struct drm_file *file_priv,
3253 struct drm_i915_gem_exec_object2 *entry,
3254 struct drm_i915_gem_relocation_entry *reloc)
3255{
3256 struct drm_device *dev = obj->base.dev;
3257 struct drm_gem_object *target_obj;
3258 uint32_t target_offset;
3259 int ret = -EINVAL;
3260
3261 target_obj = drm_gem_object_lookup(dev, file_priv,
3262 reloc->target_handle);
3263 if (target_obj == NULL)
3264 return -ENOENT;
3265
3266 target_offset = to_intel_bo(target_obj)->gtt_offset;
3267
3268#if WATCH_RELOC
3269 DRM_INFO("%s: obj %p offset %08x target %d "
3270 "read %08x write %08x gtt %08x "
3271 "presumed %08x delta %08x\n",
3272 __func__,
3273 obj,
3274 (int) reloc->offset,
3275 (int) reloc->target_handle,
3276 (int) reloc->read_domains,
3277 (int) reloc->write_domain,
3278 (int) target_offset,
3279 (int) reloc->presumed_offset,
3280 reloc->delta);
3281#endif
3282
3283 /* The target buffer should have appeared before us in the
3284 * exec_object list, so it should have a GTT space bound by now.
3285 */
3286 if (target_offset == 0) {
3287 DRM_ERROR("No GTT space found for object %d\n",
3288 reloc->target_handle);
3289 goto err;
3290 }
3291
3292 /* Validate that the target is in a valid r/w GPU domain */
3293 if (reloc->write_domain & (reloc->write_domain - 1)) {
3294 DRM_ERROR("reloc with multiple write domains: "
3295 "obj %p target %d offset %d "
3296 "read %08x write %08x",
3297 obj, reloc->target_handle,
3298 (int) reloc->offset,
3299 reloc->read_domains,
3300 reloc->write_domain);
3301 goto err;
3302 }
3303 if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
3304 reloc->read_domains & I915_GEM_DOMAIN_CPU) {
3305 DRM_ERROR("reloc with read/write CPU domains: "
3306 "obj %p target %d offset %d "
3307 "read %08x write %08x",
3308 obj, reloc->target_handle,
3309 (int) reloc->offset,
3310 reloc->read_domains,
3311 reloc->write_domain);
3312 goto err;
3313 }
3314 if (reloc->write_domain && target_obj->pending_write_domain &&
3315 reloc->write_domain != target_obj->pending_write_domain) {
3316 DRM_ERROR("Write domain conflict: "
3317 "obj %p target %d offset %d "
3318 "new %08x old %08x\n",
3319 obj, reloc->target_handle,
3320 (int) reloc->offset,
3321 reloc->write_domain,
3322 target_obj->pending_write_domain);
3323 goto err;
3324 }
3325
3326 target_obj->pending_read_domains |= reloc->read_domains;
3327 target_obj->pending_write_domain |= reloc->write_domain;
3328
3329 /* If the relocation already has the right value in it, no
3330 * more work needs to be done.
3331 */
3332 if (target_offset == reloc->presumed_offset)
3333 goto out;
3334
3335 /* Check that the relocation address is valid... */
3336 if (reloc->offset > obj->base.size - 4) {
3337 DRM_ERROR("Relocation beyond object bounds: "
3338 "obj %p target %d offset %d size %d.\n",
3339 obj, reloc->target_handle,
3340 (int) reloc->offset,
3341 (int) obj->base.size);
3342 goto err;
3343 }
3344 if (reloc->offset & 3) {
3345 DRM_ERROR("Relocation not 4-byte aligned: "
3346 "obj %p target %d offset %d.\n",
3347 obj, reloc->target_handle,
3348 (int) reloc->offset);
3349 goto err;
3350 }
3351
3352 /* and points to somewhere within the target object. */
3353 if (reloc->delta >= target_obj->size) {
3354 DRM_ERROR("Relocation beyond target object bounds: "
3355 "obj %p target %d delta %d size %d.\n",
3356 obj, reloc->target_handle,
3357 (int) reloc->delta,
3358 (int) target_obj->size);
3359 goto err;
3360 }
3361
3362 reloc->delta += target_offset;
3363 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
3364 uint32_t page_offset = reloc->offset & ~PAGE_MASK;
3365 char *vaddr;
3366
3367 vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
3368 *(uint32_t *)(vaddr + page_offset) = reloc->delta;
3369 kunmap_atomic(vaddr);
3370 } else {
3371 struct drm_i915_private *dev_priv = dev->dev_private;
3372 uint32_t __iomem *reloc_entry;
3373 void __iomem *reloc_page;
3374
3375 ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1);
3376 if (ret)
3377 goto err;
3378
3379 /* Map the page containing the relocation we're going to perform. */
3380 reloc->offset += obj->gtt_offset;
3381 reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
3382 reloc->offset & PAGE_MASK);
3383 reloc_entry = (uint32_t __iomem *)
3384 (reloc_page + (reloc->offset & ~PAGE_MASK));
3385 iowrite32(reloc->delta, reloc_entry);
3386 io_mapping_unmap_atomic(reloc_page);
3387 }
3388
3389 /* and update the user's relocation entry */
3390 reloc->presumed_offset = target_offset;
3391
3392out:
3393 ret = 0;
3394err:
3395 drm_gem_object_unreference(target_obj);
3396 return ret;
3397}
3398
3399static int
3400i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
3401 struct drm_file *file_priv,
3402 struct drm_i915_gem_exec_object2 *entry)
3403{
3404 struct drm_i915_gem_relocation_entry __user *user_relocs;
3405 int i, ret;
3406
3407 user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
3408 for (i = 0; i < entry->relocation_count; i++) {
3409 struct drm_i915_gem_relocation_entry reloc;
3410
3411 if (__copy_from_user_inatomic(&reloc,
3412 user_relocs+i,
3413 sizeof(reloc)))
3414 return -EFAULT;
3415
3416 ret = i915_gem_execbuffer_relocate_entry(obj, file_priv, entry, &reloc);
3417 if (ret)
3418 return ret;
3419
3420 if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
3421 &reloc.presumed_offset,
3422 sizeof(reloc.presumed_offset)))
3423 return -EFAULT;
3424 }
3425
3426 return 0;
3427}
3428
3429static int
3430i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
3431 struct drm_file *file_priv,
3432 struct drm_i915_gem_exec_object2 *entry,
3433 struct drm_i915_gem_relocation_entry *relocs)
3434{
3435 int i, ret;
3436
3437 for (i = 0; i < entry->relocation_count; i++) {
3438 ret = i915_gem_execbuffer_relocate_entry(obj, file_priv, entry, &relocs[i]);
3439 if (ret)
3440 return ret;
3441 }
3442
3443 return 0;
3444}
3445
3446static int
3447i915_gem_execbuffer_relocate(struct drm_device *dev,
3448 struct drm_file *file,
3449 struct drm_gem_object **object_list,
3450 struct drm_i915_gem_exec_object2 *exec_list,
3451 int count)
3452{
3453 int i, ret;
3454
3455 for (i = 0; i < count; i++) {
3456 struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
3457 obj->base.pending_read_domains = 0;
3458 obj->base.pending_write_domain = 0;
3459 ret = i915_gem_execbuffer_relocate_object(obj, file,
3460 &exec_list[i]);
3461 if (ret)
3462 return ret;
3463 }
3464
3465 return 0;
3466}
3467
3468static int
3469i915_gem_execbuffer_reserve(struct drm_device *dev,
3470 struct drm_file *file,
3471 struct drm_gem_object **object_list,
3472 struct drm_i915_gem_exec_object2 *exec_list,
3473 int count)
3474{
3475 struct drm_i915_private *dev_priv = dev->dev_private;
3476 int ret, i, retry;
3477
3478 /* attempt to pin all of the buffers into the GTT */
3479 for (retry = 0; retry < 2; retry++) {
3480 ret = 0;
3481 for (i = 0; i < count; i++) {
3482 struct drm_i915_gem_exec_object2 *entry = &exec_list[i];
3483 struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
3484 bool need_fence =
3485 entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
3486 obj->tiling_mode != I915_TILING_NONE;
3487
3488 /* Check fence reg constraints and rebind if necessary */
3489 if (need_fence &&
3490 !i915_gem_object_fence_offset_ok(&obj->base,
3491 obj->tiling_mode)) {
3492 ret = i915_gem_object_unbind(&obj->base);
3493 if (ret)
3494 break;
3495 }
3496
3497 ret = i915_gem_object_pin(&obj->base, entry->alignment);
3498 if (ret)
3499 break;
3500
3501 /*
3502 * Pre-965 chips need a fence register set up in order
3503 * to properly handle blits to/from tiled surfaces.
3504 */
3505 if (need_fence) {
3506 ret = i915_gem_object_get_fence_reg(&obj->base, true);
3507 if (ret) {
3508 i915_gem_object_unpin(&obj->base);
3509 break;
3510 }
3511
3512 dev_priv->fence_regs[obj->fence_reg].gpu = true;
3513 }
3514
3515 entry->offset = obj->gtt_offset;
3516 }
3517
3518 while (i--)
3519 i915_gem_object_unpin(object_list[i]);
3520
3521 if (ret == 0)
3522 break;
3523
3524 if (ret != -ENOSPC || retry)
3525 return ret;
3526
3527 ret = i915_gem_evict_everything(dev);
3528 if (ret)
3529 return ret;
3530 }
3531
3532 return 0;
3533}
3534
3535static int
3536i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
3537 struct drm_file *file,
3538 struct drm_gem_object **object_list,
3539 struct drm_i915_gem_exec_object2 *exec_list,
3540 int count)
3541{
3542 struct drm_i915_gem_relocation_entry *reloc;
3543 int i, total, ret;
3544
3545 for (i = 0; i < count; i++) {
3546 struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
3547 obj->in_execbuffer = false;
3548 }
3549
3550 mutex_unlock(&dev->struct_mutex);
3551
3552 total = 0;
3553 for (i = 0; i < count; i++)
3554 total += exec_list[i].relocation_count;
3555
3556 reloc = drm_malloc_ab(total, sizeof(*reloc));
3557 if (reloc == NULL) {
3558 mutex_lock(&dev->struct_mutex);
3559 return -ENOMEM;
3560 }
3561
3562 total = 0;
3563 for (i = 0; i < count; i++) {
3564 struct drm_i915_gem_relocation_entry __user *user_relocs;
3565
3566 user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
3567
3568 if (copy_from_user(reloc+total, user_relocs,
3569 exec_list[i].relocation_count *
3570 sizeof(*reloc))) {
3571 ret = -EFAULT;
3572 mutex_lock(&dev->struct_mutex);
3573 goto err;
3574 }
3575
3576 total += exec_list[i].relocation_count;
3577 }
3578
3579 ret = i915_mutex_lock_interruptible(dev);
3580 if (ret) {
3581 mutex_lock(&dev->struct_mutex);
3582 goto err;
3583 }
3584
3585 ret = i915_gem_execbuffer_reserve(dev, file,
3586 object_list, exec_list,
3587 count);
3588 if (ret)
3589 goto err;
3590
3591 total = 0;
3592 for (i = 0; i < count; i++) {
3593 struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
3594 obj->base.pending_read_domains = 0;
3595 obj->base.pending_write_domain = 0;
3596 ret = i915_gem_execbuffer_relocate_object_slow(obj, file,
3597 &exec_list[i],
3598 reloc + total);
3599 if (ret)
3600 goto err;
3601
3602 total += exec_list[i].relocation_count;
3603 }
3604
3605 /* Leave the user relocations as they are: this is the painfully slow path,
3606 * and we want to avoid the complication of dropping the lock whilst
3607 * having buffers reserved in the aperture and so causing spurious
3608 * ENOSPC for random operations.
3609 */
3610
3611err:
3612 drm_free_large(reloc);
3613 return ret;
3614}
3615
3616static int
3617i915_gem_execbuffer_move_to_gpu(struct drm_device *dev,
3618 struct drm_file *file,
3619 struct intel_ring_buffer *ring,
3620 struct drm_gem_object **objects,
3621 int count)
3622{
3623 struct drm_i915_private *dev_priv = dev->dev_private;
3624 int ret, i;
3625
3626 /* Zero the global flush/invalidate flags. These
3627 * will be modified as new domains are computed
3628 * for each object
3629 */
3630 dev->invalidate_domains = 0;
3631 dev->flush_domains = 0;
3632 dev_priv->mm.flush_rings = 0;
3633 for (i = 0; i < count; i++)
3634 i915_gem_object_set_to_gpu_domain(objects[i], ring);
3635
3636 if (dev->invalidate_domains | dev->flush_domains) {
3637#if WATCH_EXEC
3638 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
3639 __func__,
3640 dev->invalidate_domains,
3641 dev->flush_domains);
3642#endif
3643 i915_gem_flush(dev, file,
3644 dev->invalidate_domains,
3645 dev->flush_domains,
3646 dev_priv->mm.flush_rings);
3647 }
3648
3649 for (i = 0; i < count; i++) {
3650 struct drm_i915_gem_object *obj = to_intel_bo(objects[i]);
3651 /* XXX replace with semaphores */
3652 if (obj->ring && ring != obj->ring) {
3653 ret = i915_gem_object_wait_rendering(&obj->base, true);
3654 if (ret)
3655 return ret;
3656 }
3657 }
3658 3119
3659 return 0; 3120 return 0;
3660} 3121}
@@ -3694,599 +3155,129 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3694 return 0; 3155 return 0;
3695 3156
3696 ret = 0; 3157 ret = 0;
3697 if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) { 3158 if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
3698 /* And wait for the seqno passing without holding any locks and 3159 /* And wait for the seqno passing without holding any locks and
3699 * causing extra latency for others. This is safe as the irq 3160 * causing extra latency for others. This is safe as the irq
3700 * generation is designed to be run atomically and so is 3161 * generation is designed to be run atomically and so is
3701 * lockless. 3162 * lockless.
3702 */ 3163 */
3703 ring->user_irq_get(dev, ring); 3164 if (ring->irq_get(ring)) {
3704 ret = wait_event_interruptible(ring->irq_queue, 3165 ret = wait_event_interruptible(ring->irq_queue,
3705 i915_seqno_passed(ring->get_seqno(dev, ring), seqno) 3166 i915_seqno_passed(ring->get_seqno(ring), seqno)
3706 || atomic_read(&dev_priv->mm.wedged)); 3167 || atomic_read(&dev_priv->mm.wedged));
3707 ring->user_irq_put(dev, ring); 3168 ring->irq_put(ring);
3708
3709 if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
3710 ret = -EIO;
3711 }
3712
3713 if (ret == 0)
3714 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
3715
3716 return ret;
3717}
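
[Editor's note: the throttle wait above depends on a seqno comparison that survives 32-bit wraparound; in this driver i915_seqno_passed() is a one-line signed-difference test. A self-contained restatement with a quick check, runnable on its own:]

	#include <assert.h>
	#include <stdbool.h>
	#include <stdint.h>

	/* Wrap-safe "has seq1 reached seq2?" test: the signed difference
	 * keeps working across the 2^32 wraparound as long as the two
	 * values are within 2^31 of each other. */
	static bool seqno_passed(uint32_t seq1, uint32_t seq2)
	{
		return (int32_t)(seq1 - seq2) >= 0;
	}

	int main(void)
	{
		assert(seqno_passed(5, 5));
		assert(seqno_passed(6, 5));
		assert(!seqno_passed(5, 6));
		/* across wraparound: 3 is "after" 0xfffffffe */
		assert(seqno_passed(3, 0xfffffffeu));
		return 0;
	}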
3718
3719static int
3720i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec,
3721 uint64_t exec_offset)
3722{
3723 uint32_t exec_start, exec_len;
3724
3725 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
3726 exec_len = (uint32_t) exec->batch_len;
3727
3728 if ((exec_start | exec_len) & 0x7)
3729 return -EINVAL;
3730
3731 if (!exec_start)
3732 return -EINVAL;
3733
3734 return 0;
3735}
3736
3737static int
3738validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
3739 int count)
3740{
3741 int i;
3742
3743 for (i = 0; i < count; i++) {
3744 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
3745 int length; /* limited by fault_in_pages_readable() */
3746
3747 /* First check for malicious input causing overflow */
3748 if (exec[i].relocation_count >
3749 INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
3750 return -EINVAL;
3751
3752 length = exec[i].relocation_count *
3753 sizeof(struct drm_i915_gem_relocation_entry);
3754 if (!access_ok(VERIFY_READ, ptr, length))
3755 return -EFAULT;
3756
3757 /* we may also need to update the presumed offsets */
3758 if (!access_ok(VERIFY_WRITE, ptr, length))
3759 return -EFAULT;
3760
3761 if (fault_in_pages_readable(ptr, length))
3762 return -EFAULT;
3763 }
3764
3765 return 0;
3766}
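
[Editor's note: the INT_MAX / sizeof() guard in validate_exec_list() above rejects malicious relocation counts before the multiplication, so the byte length can never wrap to a small value that access_ok() would accept. A sketch of the same pattern; the 32-byte struct is a stand-in for drm_i915_gem_relocation_entry:]

	#include <limits.h>
	#include <stddef.h>

	struct reloc_entry { char payload[32]; };

	/* Overflow-safe length computation: validate the count first, so
	 * count * sizeof(entry) cannot wrap around. Returns -1 (the
	 * kernel's -EINVAL) on an out-of-range count. */
	static int reloc_bytes(unsigned int count, size_t *out)
	{
		if (count > INT_MAX / sizeof(struct reloc_entry))
			return -1;
		*out = count * sizeof(struct reloc_entry);
		return 0;
	}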
3767
3768static int
3769i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3770 struct drm_file *file,
3771 struct drm_i915_gem_execbuffer2 *args,
3772 struct drm_i915_gem_exec_object2 *exec_list)
3773{
3774 drm_i915_private_t *dev_priv = dev->dev_private;
3775 struct drm_gem_object **object_list = NULL;
3776 struct drm_gem_object *batch_obj;
3777 struct drm_i915_gem_object *obj_priv;
3778 struct drm_clip_rect *cliprects = NULL;
3779 struct drm_i915_gem_request *request = NULL;
3780 int ret, i, flips;
3781 uint64_t exec_offset;
3782
3783 struct intel_ring_buffer *ring = NULL;
3784
3785 ret = i915_gem_check_is_wedged(dev);
3786 if (ret)
3787 return ret;
3788
3789 ret = validate_exec_list(exec_list, args->buffer_count);
3790 if (ret)
3791 return ret;
3792
3793#if WATCH_EXEC
3794 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
3795 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
3796#endif
3797 switch (args->flags & I915_EXEC_RING_MASK) {
3798 case I915_EXEC_DEFAULT:
3799 case I915_EXEC_RENDER:
3800 ring = &dev_priv->render_ring;
3801 break;
3802 case I915_EXEC_BSD:
3803 if (!HAS_BSD(dev)) {
3804 DRM_ERROR("execbuf with invalid ring (BSD)\n");
3805 return -EINVAL;
3806 }
3807 ring = &dev_priv->bsd_ring;
3808 break;
3809 case I915_EXEC_BLT:
3810 if (!HAS_BLT(dev)) {
3811 DRM_ERROR("execbuf with invalid ring (BLT)\n");
3812 return -EINVAL;
3813 }
3814 ring = &dev_priv->blt_ring;
3815 break;
3816 default:
3817 DRM_ERROR("execbuf with unknown ring: %d\n",
3818 (int)(args->flags & I915_EXEC_RING_MASK));
3819 return -EINVAL;
3820 }
3821
3822 if (args->buffer_count < 1) {
3823 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
3824 return -EINVAL;
3825 }
3826 object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
3827 if (object_list == NULL) {
3828 DRM_ERROR("Failed to allocate object list for %d buffers\n",
3829 args->buffer_count);
3830 ret = -ENOMEM;
3831 goto pre_mutex_err;
3832 }
3833
3834 if (args->num_cliprects != 0) {
3835 cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
3836 GFP_KERNEL);
3837 if (cliprects == NULL) {
3838 ret = -ENOMEM;
3839 goto pre_mutex_err;
3840 }
3841
3842 ret = copy_from_user(cliprects,
3843 (struct drm_clip_rect __user *)
3844 (uintptr_t) args->cliprects_ptr,
3845 sizeof(*cliprects) * args->num_cliprects);
3846 if (ret != 0) {
3847 DRM_ERROR("copy %d cliprects failed: %d\n",
3848 args->num_cliprects, ret);
3849 ret = -EFAULT;
3850 goto pre_mutex_err;
3851 }
3852 }
3853
3854 request = kzalloc(sizeof(*request), GFP_KERNEL);
3855 if (request == NULL) {
3856 ret = -ENOMEM;
3857 goto pre_mutex_err;
3858 }
3859
3860 ret = i915_mutex_lock_interruptible(dev);
3861 if (ret)
3862 goto pre_mutex_err;
3863
3864 if (dev_priv->mm.suspended) {
3865 mutex_unlock(&dev->struct_mutex);
3866 ret = -EBUSY;
3867 goto pre_mutex_err;
3868 }
3869
3870 /* Look up object handles */
3871 for (i = 0; i < args->buffer_count; i++) {
3872 object_list[i] = drm_gem_object_lookup(dev, file,
3873 exec_list[i].handle);
3874 if (object_list[i] == NULL) {
3875 DRM_ERROR("Invalid object handle %d at index %d\n",
3876 exec_list[i].handle, i);
3877 /* prevent error path from reading uninitialized data */
3878 args->buffer_count = i + 1;
3879 ret = -ENOENT;
3880 goto err;
3881 }
3882
3883 obj_priv = to_intel_bo(object_list[i]);
3884 if (obj_priv->in_execbuffer) {
3885 DRM_ERROR("Object %p appears more than once in object list\n",
3886 object_list[i]);
3887 /* prevent error path from reading uninitialized data */
3888 args->buffer_count = i + 1;
3889 ret = -EINVAL;
3890 goto err;
3891 }
3892 obj_priv->in_execbuffer = true;
3893 }
3894
3895 /* Move the objects en-masse into the GTT, evicting if necessary. */
3896 ret = i915_gem_execbuffer_reserve(dev, file,
3897 object_list, exec_list,
3898 args->buffer_count);
3899 if (ret)
3900 goto err;
3901
3902 /* The objects are in their final locations, apply the relocations. */
3903 ret = i915_gem_execbuffer_relocate(dev, file,
3904 object_list, exec_list,
3905 args->buffer_count);
3906 if (ret) {
3907 if (ret == -EFAULT) {
3908 ret = i915_gem_execbuffer_relocate_slow(dev, file,
3909 object_list,
3910 exec_list,
3911 args->buffer_count);
3912 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
3913 }
3914 if (ret)
3915 goto err;
3916 }
3917
3918 /* Set the pending read domains for the batch buffer to COMMAND */
3919 batch_obj = object_list[args->buffer_count-1];
3920 if (batch_obj->pending_write_domain) {
3921 DRM_ERROR("Attempting to use self-modifying batch buffer\n");
3922 ret = -EINVAL;
3923 goto err;
3924 }
3925 batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
3926
3927 /* Sanity check the batch buffer */
3928 exec_offset = to_intel_bo(batch_obj)->gtt_offset;
3929 ret = i915_gem_check_execbuffer(args, exec_offset);
3930 if (ret != 0) {
3931 DRM_ERROR("execbuf with invalid offset/length\n");
3932 goto err;
3933 }
3934
3935 ret = i915_gem_execbuffer_move_to_gpu(dev, file, ring,
3936 object_list, args->buffer_count);
3937 if (ret)
3938 goto err;
3939
3940 for (i = 0; i < args->buffer_count; i++) {
3941 struct drm_gem_object *obj = object_list[i];
3942 uint32_t old_write_domain = obj->write_domain;
3943 obj->write_domain = obj->pending_write_domain;
3944 trace_i915_gem_object_change_domain(obj,
3945 obj->read_domains,
3946 old_write_domain);
3947 }
3948
3949#if WATCH_COHERENCY
3950 for (i = 0; i < args->buffer_count; i++) {
3951 i915_gem_object_check_coherency(object_list[i],
3952 exec_list[i].handle);
3953 }
3954#endif
3955
3956#if WATCH_EXEC
3957 i915_gem_dump_object(batch_obj,
3958 args->batch_len,
3959 __func__,
3960 ~0);
3961#endif
3962
3963 /* Check for any pending flips. As we only maintain a flip queue depth
3964 * of 1, we can simply insert a WAIT for the next display flip prior
3965 * to executing the batch and avoid stalling the CPU.
3966 */
3967 flips = 0;
3968 for (i = 0; i < args->buffer_count; i++) {
3969 if (object_list[i]->write_domain)
3970 flips |= atomic_read(&to_intel_bo(object_list[i])->pending_flip);
3971 }
3972 if (flips) {
3973 int plane, flip_mask;
3974
3975 for (plane = 0; flips >> plane; plane++) {
3976 if (((flips >> plane) & 1) == 0)
3977 continue;
3978
3979 if (plane)
3980 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
3981 else
3982 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
3983
3984 intel_ring_begin(dev, ring, 2);
3985 intel_ring_emit(dev, ring,
3986 MI_WAIT_FOR_EVENT | flip_mask);
3987 intel_ring_emit(dev, ring, MI_NOOP);
3988 intel_ring_advance(dev, ring);
3989 }
3990 }
3991
3992 /* Exec the batchbuffer */
3993 ret = ring->dispatch_gem_execbuffer(dev, ring, args,
3994 cliprects, exec_offset);
3995 if (ret) {
3996 DRM_ERROR("dispatch failed %d\n", ret);
3997 goto err;
3998 }
3999
4000 /*
4001 * Ensure that the commands in the batch buffer are
4002 * finished before the interrupt fires
4003 */
4004 i915_retire_commands(dev, ring);
4005
4006 for (i = 0; i < args->buffer_count; i++) {
4007 struct drm_gem_object *obj = object_list[i];
4008
4009 i915_gem_object_move_to_active(obj, ring);
4010 if (obj->write_domain)
4011 list_move_tail(&to_intel_bo(obj)->gpu_write_list,
4012 &ring->gpu_write_list);
4013 }
4014
4015 i915_add_request(dev, file, request, ring);
4016 request = NULL;
4017
4018err:
4019 for (i = 0; i < args->buffer_count; i++) {
4020 if (object_list[i]) {
4021 obj_priv = to_intel_bo(object_list[i]);
4022 obj_priv->in_execbuffer = false;
4023 }
4024 drm_gem_object_unreference(object_list[i]);
4025 }
4026
4027 mutex_unlock(&dev->struct_mutex);
4028
4029pre_mutex_err:
4030 drm_free_large(object_list);
4031 kfree(cliprects);
4032 kfree(request);
4033
4034 return ret;
4035}
4036
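i915_gem_do_execbuffer() above uses the kernel's usual goto-unwind shape: resources are acquired in order, each failure jumps to the label that tears down exactly what has been acquired so far (err for post-lock failures, pre_mutex_err before the mutex is held), and every path funnels through one exit. A minimal userspace sketch of that shape, assuming nothing from the driver; the names and the two allocations are purely illustrative:

    #include <stdlib.h>

    static int do_submit(void)
    {
        int ret = 0;
        void *objects = malloc(64);
        if (!objects)
            return -1;

        void *request = calloc(1, 32);
        if (!request) {
            ret = -1;
            goto free_objects;   /* skip the label for what we never got */
        }

        /* ... work that may set ret and 'goto free_request' on failure ... */

    free_request:
        free(request);           /* teardown runs in reverse acquisition order */
    free_objects:
        free(objects);
        return ret;
    }

    int main(void) { return do_submit(); }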
4037/*
4038 * Legacy execbuffer just creates an exec2 list from the original exec object
4039 * list array and passes it to the real function.
4040 */
4041int
4042i915_gem_execbuffer(struct drm_device *dev, void *data,
4043 struct drm_file *file_priv)
4044{
4045 struct drm_i915_gem_execbuffer *args = data;
4046 struct drm_i915_gem_execbuffer2 exec2;
4047 struct drm_i915_gem_exec_object *exec_list = NULL;
4048 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
4049 int ret, i;
4050
4051#if WATCH_EXEC
4052 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
4053 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
4054#endif
4055
4056 if (args->buffer_count < 1) {
4057 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
4058 return -EINVAL;
4059 }
4060
4061 /* Copy in the exec list from userland */
4062 exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
4063 exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
4064 if (exec_list == NULL || exec2_list == NULL) {
4065 DRM_ERROR("Failed to allocate exec list for %d buffers\n",
4066 args->buffer_count);
4067 drm_free_large(exec_list);
4068 drm_free_large(exec2_list);
4069 return -ENOMEM;
4070 }
4071 ret = copy_from_user(exec_list,
4072 (struct drm_i915_relocation_entry __user *)
4073 (uintptr_t) args->buffers_ptr,
4074 sizeof(*exec_list) * args->buffer_count);
4075 if (ret != 0) {
4076 DRM_ERROR("copy %d exec entries failed %d\n",
4077 args->buffer_count, ret);
4078 drm_free_large(exec_list);
4079 drm_free_large(exec2_list);
4080 return -EFAULT;
4081 }
4082
4083 for (i = 0; i < args->buffer_count; i++) {
4084 exec2_list[i].handle = exec_list[i].handle;
4085 exec2_list[i].relocation_count = exec_list[i].relocation_count;
4086 exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
4087 exec2_list[i].alignment = exec_list[i].alignment;
4088 exec2_list[i].offset = exec_list[i].offset;
4089 if (INTEL_INFO(dev)->gen < 4)
4090 exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
4091 else
4092 exec2_list[i].flags = 0;
4093 }
4094	exec2.buffers_ptr = args->buffers_ptr;
4095	exec2.buffer_count = args->buffer_count;
4096	exec2.batch_start_offset = args->batch_start_offset;
4097	exec2.batch_len = args->batch_len;
4098	exec2.DR1 = args->DR1;
4099	exec2.DR4 = args->DR4;
4100	exec2.num_cliprects = args->num_cliprects;
4101	exec2.cliprects_ptr = args->cliprects_ptr;
4102	exec2.flags = I915_EXEC_RENDER;
4103
4104	ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
4105	if (!ret) {
4106		/* Copy the new buffer offsets back to the user's exec list. */
4107		for (i = 0; i < args->buffer_count; i++)
4108			exec_list[i].offset = exec2_list[i].offset;
4109		/* ... and back out to userspace */
4110		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
4111			 (uintptr_t) args->buffers_ptr,
4112			 exec_list,
4113			 sizeof(*exec_list) * args->buffer_count);
4114		if (ret) {
4115			ret = -EFAULT;
4116			DRM_ERROR("failed to copy %d exec entries "
4117				 "back to user (%d)\n",
4118				 args->buffer_count, ret);
4119		}
4120	}
4121
4122	drm_free_large(exec_list);
4123	drm_free_large(exec2_list);
4124
3169
3170		if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
3171			ret = -EIO;
3172	}
3173	}
3174
3175	if (ret == 0)
3176		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
4125 return ret;
4126}
4127
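The legacy wrapper above is a pure widening shim: it copies the fields the old exec object shares with the exec2 object and synthesizes a value for the one field the old ABI could not express (flags, set to EXEC_OBJECT_NEEDS_FENCE on gen < 4, 0 otherwise). A compilable sketch of the same shim with hypothetical struct layouts, not the real uapi definitions:

    #include <stdint.h>
    #include <string.h>

    struct exec_obj_v1 { uint32_t handle; uint64_t offset; };
    struct exec_obj_v2 { uint32_t handle; uint64_t offset; uint64_t flags; };

    #define OBJ_NEEDS_FENCE 0x1   /* stand-in for EXEC_OBJECT_NEEDS_FENCE */

    /* Widen a v1 entry into a v2 entry: copy the shared fields and pick
     * a default for the field v1 could not express. */
    static void widen(const struct exec_obj_v1 *in, struct exec_obj_v2 *out,
                      int needs_fence)
    {
        memset(out, 0, sizeof(*out));
        out->handle = in->handle;
        out->offset = in->offset;
        out->flags  = needs_fence ? OBJ_NEEDS_FENCE : 0;
    }

    int main(void)
    {
        struct exec_obj_v1 a = { .handle = 3, .offset = 0x1000 };
        struct exec_obj_v2 b;
        widen(&a, &b, 1);
        return b.flags == OBJ_NEEDS_FENCE ? 0 : 1;
    }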
4128int
4129i915_gem_execbuffer2(struct drm_device *dev, void *data,
4130 struct drm_file *file_priv)
4131{
4132 struct drm_i915_gem_execbuffer2 *args = data;
4133 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
4134 int ret;
4135
4136#if WATCH_EXEC
4137 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
4138 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
4139#endif
4140
4141 if (args->buffer_count < 1) {
4142 DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
4143 return -EINVAL;
4144 }
4145
4146 exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
4147 if (exec2_list == NULL) {
4148 DRM_ERROR("Failed to allocate exec list for %d buffers\n",
4149 args->buffer_count);
4150 return -ENOMEM;
4151 }
4152 ret = copy_from_user(exec2_list,
4153 (struct drm_i915_relocation_entry __user *)
4154 (uintptr_t) args->buffers_ptr,
4155 sizeof(*exec2_list) * args->buffer_count);
4156 if (ret != 0) {
4157 DRM_ERROR("copy %d exec entries failed %d\n",
4158 args->buffer_count, ret);
4159 drm_free_large(exec2_list);
4160 return -EFAULT;
4161 }
4162
4163 ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list);
4164 if (!ret) {
4165 /* Copy the new buffer offsets back to the user's exec list. */
4166 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
4167 (uintptr_t) args->buffers_ptr,
4168 exec2_list,
4169 sizeof(*exec2_list) * args->buffer_count);
4170 if (ret) {
4171 ret = -EFAULT;
4172 DRM_ERROR("failed to copy %d exec entries "
4173 "back to user (%d)\n",
4174 args->buffer_count, ret);
4175 }
4176 }
4177 3177
4178 drm_free_large(exec2_list);
4179 return ret; 3178 return ret;
4180} 3179}
4181 3180
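Both execbuffer paths size their temporary arrays with drm_malloc_ab(), whose point is the count-times-size allocation with an overflow guard (the real helper also picks between kmalloc and vmalloc, which this sketch omits). A minimal userspace version of the guard; malloc_ab is an illustrative name, not the DRM function itself:

    #include <stdint.h>
    #include <stdlib.h>

    /* Refuse any count/size pair whose product would wrap, so a huge
     * user-supplied buffer_count can't turn into a tiny allocation
     * followed by out-of-bounds writes. */
    static void *malloc_ab(size_t nmemb, size_t size)
    {
        if (size != 0 && nmemb > SIZE_MAX / size)
            return NULL;
        return malloc(nmemb * size);
    }

    int main(void)
    {
        void *p = malloc_ab(SIZE_MAX / 2, 4);  /* product wraps: refused */
        return p == NULL ? 0 : 1;
    }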
4182int 3181int
4183i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) 3182i915_gem_object_pin(struct drm_i915_gem_object *obj,
3183 uint32_t alignment,
3184 bool map_and_fenceable)
4184{ 3185{
4185 struct drm_device *dev = obj->dev; 3186 struct drm_device *dev = obj->base.dev;
4186 struct drm_i915_private *dev_priv = dev->dev_private; 3187 struct drm_i915_private *dev_priv = dev->dev_private;
4187 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4188 int ret; 3188 int ret;
4189 3189
4190 BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT); 3190 BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
4191 WARN_ON(i915_verify_lists(dev)); 3191 WARN_ON(i915_verify_lists(dev));
4192 3192
4193 if (obj_priv->gtt_space != NULL) { 3193 if (obj->gtt_space != NULL) {
4194 if (alignment == 0) 3194 if ((alignment && obj->gtt_offset & (alignment - 1)) ||
4195 alignment = i915_gem_get_gtt_alignment(obj); 3195 (map_and_fenceable && !obj->map_and_fenceable)) {
4196 if (obj_priv->gtt_offset & (alignment - 1)) { 3196 WARN(obj->pin_count,
4197 WARN(obj_priv->pin_count, 3197 "bo is already pinned with incorrect alignment:"
4198 "bo is already pinned with incorrect alignment: offset=%x, req.alignment=%x\n", 3198 " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
4199 obj_priv->gtt_offset, alignment); 3199 " obj->map_and_fenceable=%d\n",
3200 obj->gtt_offset, alignment,
3201 map_and_fenceable,
3202 obj->map_and_fenceable);
4200 ret = i915_gem_object_unbind(obj); 3203 ret = i915_gem_object_unbind(obj);
4201 if (ret) 3204 if (ret)
4202 return ret; 3205 return ret;
4203 } 3206 }
4204 } 3207 }
4205 3208
4206 if (obj_priv->gtt_space == NULL) { 3209 if (obj->gtt_space == NULL) {
4207 ret = i915_gem_object_bind_to_gtt(obj, alignment); 3210 ret = i915_gem_object_bind_to_gtt(obj, alignment,
3211 map_and_fenceable);
4208 if (ret) 3212 if (ret)
4209 return ret; 3213 return ret;
4210 } 3214 }
4211 3215
4212 obj_priv->pin_count++; 3216 if (obj->pin_count++ == 0) {
4213 3217 if (!obj->active)
4214 /* If the object is not active and not pending a flush, 3218 list_move_tail(&obj->mm_list,
4215 * remove it from the inactive list
4216 */
4217 if (obj_priv->pin_count == 1) {
4218 i915_gem_info_add_pin(dev_priv, obj->size);
4219 if (!obj_priv->active)
4220 list_move_tail(&obj_priv->mm_list,
4221 &dev_priv->mm.pinned_list); 3219 &dev_priv->mm.pinned_list);
4222 } 3220 }
3221 obj->pin_mappable |= map_and_fenceable;
4223 3222
4224 WARN_ON(i915_verify_lists(dev)); 3223 WARN_ON(i915_verify_lists(dev));
4225 return 0; 3224 return 0;
4226} 3225}
4227 3226
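The rewritten pin path above makes the refcount transition explicit: the expensive work (binding, moving the object onto the pinned list) happens only on the 0-to-1 pin, and unpin undoes it only on the 1-to-0 transition. A tiny self-contained model of that discipline, with stand-in types rather than the driver's:

    /* First pin does the real work once; last unpin undoes it once;
     * intermediate pins only bump the counter. */
    struct bo { int pin_count; int bound; };

    static void bind(struct bo *b)   { b->bound = 1; }
    static void unbind(struct bo *b) { b->bound = 0; }

    static void pin(struct bo *b)
    {
        if (b->pin_count++ == 0)   /* 0 -> 1 transition */
            bind(b);
    }

    static void unpin(struct bo *b)
    {
        if (--b->pin_count == 0)   /* 1 -> 0 transition */
            unbind(b);
    }

    int main(void)
    {
        struct bo b = {0};
        pin(&b); pin(&b); unpin(&b);
        return b.bound == 1 ? 0 : 1;   /* still pinned once: stays bound */
    }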
4228void 3227void
4229i915_gem_object_unpin(struct drm_gem_object *obj) 3228i915_gem_object_unpin(struct drm_i915_gem_object *obj)
4230{ 3229{
4231 struct drm_device *dev = obj->dev; 3230 struct drm_device *dev = obj->base.dev;
4232 drm_i915_private_t *dev_priv = dev->dev_private; 3231 drm_i915_private_t *dev_priv = dev->dev_private;
4233 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4234 3232
4235 WARN_ON(i915_verify_lists(dev)); 3233 WARN_ON(i915_verify_lists(dev));
4236 obj_priv->pin_count--; 3234 BUG_ON(obj->pin_count == 0);
4237 BUG_ON(obj_priv->pin_count < 0); 3235 BUG_ON(obj->gtt_space == NULL);
4238 BUG_ON(obj_priv->gtt_space == NULL);
4239 3236
4240 /* If the object is no longer pinned, and is 3237 if (--obj->pin_count == 0) {
4241 * neither active nor being flushed, then stick it on 3238 if (!obj->active)
4242 * the inactive list 3239 list_move_tail(&obj->mm_list,
4243 */
4244 if (obj_priv->pin_count == 0) {
4245 if (!obj_priv->active)
4246 list_move_tail(&obj_priv->mm_list,
4247 &dev_priv->mm.inactive_list); 3240 &dev_priv->mm.inactive_list);
4248 i915_gem_info_remove_pin(dev_priv, obj->size); 3241 obj->pin_mappable = false;
4249 } 3242 }
4250 WARN_ON(i915_verify_lists(dev)); 3243 WARN_ON(i915_verify_lists(dev));
4251} 3244}
4252 3245
4253int 3246int
4254i915_gem_pin_ioctl(struct drm_device *dev, void *data, 3247i915_gem_pin_ioctl(struct drm_device *dev, void *data,
4255 struct drm_file *file_priv) 3248 struct drm_file *file)
4256{ 3249{
4257 struct drm_i915_gem_pin *args = data; 3250 struct drm_i915_gem_pin *args = data;
4258 struct drm_gem_object *obj; 3251 struct drm_i915_gem_object *obj;
4259 struct drm_i915_gem_object *obj_priv;
4260 int ret; 3252 int ret;
4261 3253
4262 ret = i915_mutex_lock_interruptible(dev); 3254 ret = i915_mutex_lock_interruptible(dev);
4263 if (ret) 3255 if (ret)
4264 return ret; 3256 return ret;
4265 3257
4266 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 3258 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
4267 if (obj == NULL) { 3259 if (obj == NULL) {
4268 ret = -ENOENT; 3260 ret = -ENOENT;
4269 goto unlock; 3261 goto unlock;
4270 } 3262 }
4271 obj_priv = to_intel_bo(obj);
4272 3263
4273 if (obj_priv->madv != I915_MADV_WILLNEED) { 3264 if (obj->madv != I915_MADV_WILLNEED) {
4274 DRM_ERROR("Attempting to pin a purgeable buffer\n"); 3265 DRM_ERROR("Attempting to pin a purgeable buffer\n");
4275 ret = -EINVAL; 3266 ret = -EINVAL;
4276 goto out; 3267 goto out;
4277 } 3268 }
4278 3269
4279 if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) { 3270 if (obj->pin_filp != NULL && obj->pin_filp != file) {
4280 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n", 3271 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
4281 args->handle); 3272 args->handle);
4282 ret = -EINVAL; 3273 ret = -EINVAL;
4283 goto out; 3274 goto out;
4284 } 3275 }
4285 3276
4286 obj_priv->user_pin_count++; 3277 obj->user_pin_count++;
4287 obj_priv->pin_filp = file_priv; 3278 obj->pin_filp = file;
4288 if (obj_priv->user_pin_count == 1) { 3279 if (obj->user_pin_count == 1) {
4289 ret = i915_gem_object_pin(obj, args->alignment); 3280 ret = i915_gem_object_pin(obj, args->alignment, true);
4290 if (ret) 3281 if (ret)
4291 goto out; 3282 goto out;
4292 } 3283 }
@@ -4295,9 +3286,9 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
4295 * as the X server doesn't manage domains yet 3286 * as the X server doesn't manage domains yet
4296 */ 3287 */
4297 i915_gem_object_flush_cpu_write_domain(obj); 3288 i915_gem_object_flush_cpu_write_domain(obj);
4298 args->offset = obj_priv->gtt_offset; 3289 args->offset = obj->gtt_offset;
4299out: 3290out:
4300 drm_gem_object_unreference(obj); 3291 drm_gem_object_unreference(&obj->base);
4301unlock: 3292unlock:
4302 mutex_unlock(&dev->struct_mutex); 3293 mutex_unlock(&dev->struct_mutex);
4303 return ret; 3294 return ret;
@@ -4305,38 +3296,36 @@ unlock:
4305 3296
4306int 3297int
4307i915_gem_unpin_ioctl(struct drm_device *dev, void *data, 3298i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
4308 struct drm_file *file_priv) 3299 struct drm_file *file)
4309{ 3300{
4310 struct drm_i915_gem_pin *args = data; 3301 struct drm_i915_gem_pin *args = data;
4311 struct drm_gem_object *obj; 3302 struct drm_i915_gem_object *obj;
4312 struct drm_i915_gem_object *obj_priv;
4313 int ret; 3303 int ret;
4314 3304
4315 ret = i915_mutex_lock_interruptible(dev); 3305 ret = i915_mutex_lock_interruptible(dev);
4316 if (ret) 3306 if (ret)
4317 return ret; 3307 return ret;
4318 3308
4319 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 3309 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
4320 if (obj == NULL) { 3310 if (obj == NULL) {
4321 ret = -ENOENT; 3311 ret = -ENOENT;
4322 goto unlock; 3312 goto unlock;
4323 } 3313 }
4324 obj_priv = to_intel_bo(obj);
4325 3314
4326 if (obj_priv->pin_filp != file_priv) { 3315 if (obj->pin_filp != file) {
4327 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n", 3316 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
4328 args->handle); 3317 args->handle);
4329 ret = -EINVAL; 3318 ret = -EINVAL;
4330 goto out; 3319 goto out;
4331 } 3320 }
4332 obj_priv->user_pin_count--; 3321 obj->user_pin_count--;
4333 if (obj_priv->user_pin_count == 0) { 3322 if (obj->user_pin_count == 0) {
4334 obj_priv->pin_filp = NULL; 3323 obj->pin_filp = NULL;
4335 i915_gem_object_unpin(obj); 3324 i915_gem_object_unpin(obj);
4336 } 3325 }
4337 3326
4338out: 3327out:
4339 drm_gem_object_unreference(obj); 3328 drm_gem_object_unreference(&obj->base);
4340unlock: 3329unlock:
4341 mutex_unlock(&dev->struct_mutex); 3330 mutex_unlock(&dev->struct_mutex);
4342 return ret; 3331 return ret;
@@ -4344,48 +3333,50 @@ unlock:
4344 3333
4345int 3334int
4346i915_gem_busy_ioctl(struct drm_device *dev, void *data, 3335i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4347 struct drm_file *file_priv) 3336 struct drm_file *file)
4348{ 3337{
4349 struct drm_i915_gem_busy *args = data; 3338 struct drm_i915_gem_busy *args = data;
4350 struct drm_gem_object *obj; 3339 struct drm_i915_gem_object *obj;
4351 struct drm_i915_gem_object *obj_priv;
4352 int ret; 3340 int ret;
4353 3341
4354 ret = i915_mutex_lock_interruptible(dev); 3342 ret = i915_mutex_lock_interruptible(dev);
4355 if (ret) 3343 if (ret)
4356 return ret; 3344 return ret;
4357 3345
4358 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 3346 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
4359 if (obj == NULL) { 3347 if (obj == NULL) {
4360 ret = -ENOENT; 3348 ret = -ENOENT;
4361 goto unlock; 3349 goto unlock;
4362 } 3350 }
4363 obj_priv = to_intel_bo(obj);
4364 3351
4365 /* Count all active objects as busy, even if they are currently not used 3352 /* Count all active objects as busy, even if they are currently not used
4366 * by the gpu. Users of this interface expect objects to eventually 3353 * by the gpu. Users of this interface expect objects to eventually
4367 * become non-busy without any further actions, therefore emit any 3354 * become non-busy without any further actions, therefore emit any
4368 * necessary flushes here. 3355 * necessary flushes here.
4369 */ 3356 */
4370 args->busy = obj_priv->active; 3357 args->busy = obj->active;
4371 if (args->busy) { 3358 if (args->busy) {
4372 /* Unconditionally flush objects, even when the gpu still uses this 3359 /* Unconditionally flush objects, even when the gpu still uses this
4373 * object. Userspace calling this function indicates that it wants to 3360 * object. Userspace calling this function indicates that it wants to
4374 * use this buffer rather sooner than later, so issuing the required 3361 * use this buffer rather sooner than later, so issuing the required
4375 * flush earlier is beneficial. 3362 * flush earlier is beneficial.
4376 */ 3363 */
4377 if (obj->write_domain & I915_GEM_GPU_DOMAINS) { 3364 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
4378 i915_gem_flush_ring(dev, file_priv, 3365 i915_gem_flush_ring(dev, obj->ring,
4379 obj_priv->ring, 3366 0, obj->base.write_domain);
4380 0, obj->write_domain); 3367 } else if (obj->ring->outstanding_lazy_request ==
4381 } else if (obj_priv->ring->outstanding_lazy_request) { 3368 obj->last_rendering_seqno) {
3369 struct drm_i915_gem_request *request;
3370
4382 /* This ring is not being cleared by active usage, 3371 /* This ring is not being cleared by active usage,
4383 * so emit a request to do so. 3372 * so emit a request to do so.
4384 */ 3373 */
4385 u32 seqno = i915_add_request(dev, 3374 request = kzalloc(sizeof(*request), GFP_KERNEL);
4386 NULL, NULL, 3375 if (request)
4387 obj_priv->ring); 3376 ret = i915_add_request(dev,
4388 if (seqno == 0) 3377 NULL, request,
3378 obj->ring);
3379 else
4389 ret = -ENOMEM; 3380 ret = -ENOMEM;
4390 } 3381 }
4391 3382
@@ -4394,12 +3385,12 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4394 * are actually unmasked, and our working set ends up being 3385 * are actually unmasked, and our working set ends up being
4395 * larger than required. 3386 * larger than required.
4396 */ 3387 */
4397 i915_gem_retire_requests_ring(dev, obj_priv->ring); 3388 i915_gem_retire_requests_ring(dev, obj->ring);
4398 3389
4399 args->busy = obj_priv->active; 3390 args->busy = obj->active;
4400 } 3391 }
4401 3392
4402 drm_gem_object_unreference(obj); 3393 drm_gem_object_unreference(&obj->base);
4403unlock: 3394unlock:
4404 mutex_unlock(&dev->struct_mutex); 3395 mutex_unlock(&dev->struct_mutex);
4405 return ret; 3396 return ret;
@@ -4417,8 +3408,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4417 struct drm_file *file_priv) 3408 struct drm_file *file_priv)
4418{ 3409{
4419 struct drm_i915_gem_madvise *args = data; 3410 struct drm_i915_gem_madvise *args = data;
4420 struct drm_gem_object *obj; 3411 struct drm_i915_gem_object *obj;
4421 struct drm_i915_gem_object *obj_priv;
4422 int ret; 3412 int ret;
4423 3413
4424 switch (args->madv) { 3414 switch (args->madv) {
@@ -4433,37 +3423,36 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4433 if (ret) 3423 if (ret)
4434 return ret; 3424 return ret;
4435 3425
4436 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 3426 obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
4437 if (obj == NULL) { 3427 if (obj == NULL) {
4438 ret = -ENOENT; 3428 ret = -ENOENT;
4439 goto unlock; 3429 goto unlock;
4440 } 3430 }
4441 obj_priv = to_intel_bo(obj);
4442 3431
4443 if (obj_priv->pin_count) { 3432 if (obj->pin_count) {
4444 ret = -EINVAL; 3433 ret = -EINVAL;
4445 goto out; 3434 goto out;
4446 } 3435 }
4447 3436
4448 if (obj_priv->madv != __I915_MADV_PURGED) 3437 if (obj->madv != __I915_MADV_PURGED)
4449 obj_priv->madv = args->madv; 3438 obj->madv = args->madv;
4450 3439
4451 /* if the object is no longer bound, discard its backing storage */ 3440 /* if the object is no longer bound, discard its backing storage */
4452 if (i915_gem_object_is_purgeable(obj_priv) && 3441 if (i915_gem_object_is_purgeable(obj) &&
4453 obj_priv->gtt_space == NULL) 3442 obj->gtt_space == NULL)
4454 i915_gem_object_truncate(obj); 3443 i915_gem_object_truncate(obj);
4455 3444
4456 args->retained = obj_priv->madv != __I915_MADV_PURGED; 3445 args->retained = obj->madv != __I915_MADV_PURGED;
4457 3446
4458out: 3447out:
4459 drm_gem_object_unreference(obj); 3448 drm_gem_object_unreference(&obj->base);
4460unlock: 3449unlock:
4461 mutex_unlock(&dev->struct_mutex); 3450 mutex_unlock(&dev->struct_mutex);
4462 return ret; 3451 return ret;
4463} 3452}
4464 3453
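The madvise ioctl above is a small state machine: userspace toggles an object between WILLNEED and DONTNEED, the kernel may move a DONTNEED object to the internal __I915_MADV_PURGED state once its backing store is actually discarded, and that purged state is sticky; the "retained" result tells userspace whether the pages survived. A sketch of those transitions under illustrative names:

    /* PURGED is final: once the pages are gone, no madvise call can
     * bring the object back to WILLNEED. */
    enum madv { MADV_WILLNEED, MADV_DONTNEED, MADV_PURGED };

    static int set_madv(enum madv *state, enum madv req)
    {
        if (*state != MADV_PURGED)
            *state = req;
        return *state != MADV_PURGED;   /* "retained" flag for the caller */
    }

    int main(void)
    {
        enum madv m = MADV_WILLNEED;
        set_madv(&m, MADV_DONTNEED);
        m = MADV_PURGED;                 /* kernel reclaimed the pages */
        return set_madv(&m, MADV_WILLNEED) ? 1 : 0; /* not retained -> 0 */
    }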
4465struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, 3454struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
4466 size_t size) 3455 size_t size)
4467{ 3456{
4468 struct drm_i915_private *dev_priv = dev->dev_private; 3457 struct drm_i915_private *dev_priv = dev->dev_private;
4469 struct drm_i915_gem_object *obj; 3458 struct drm_i915_gem_object *obj;
@@ -4486,11 +3475,15 @@ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
4486 obj->base.driver_private = NULL; 3475 obj->base.driver_private = NULL;
4487 obj->fence_reg = I915_FENCE_REG_NONE; 3476 obj->fence_reg = I915_FENCE_REG_NONE;
4488 INIT_LIST_HEAD(&obj->mm_list); 3477 INIT_LIST_HEAD(&obj->mm_list);
3478 INIT_LIST_HEAD(&obj->gtt_list);
4489 INIT_LIST_HEAD(&obj->ring_list); 3479 INIT_LIST_HEAD(&obj->ring_list);
3480 INIT_LIST_HEAD(&obj->exec_list);
4490 INIT_LIST_HEAD(&obj->gpu_write_list); 3481 INIT_LIST_HEAD(&obj->gpu_write_list);
4491 obj->madv = I915_MADV_WILLNEED; 3482 obj->madv = I915_MADV_WILLNEED;
3483 /* Avoid an unnecessary call to unbind on the first bind. */
3484 obj->map_and_fenceable = true;
4492 3485
4493 return &obj->base; 3486 return obj;
4494} 3487}
4495 3488
4496int i915_gem_init_object(struct drm_gem_object *obj) 3489int i915_gem_init_object(struct drm_gem_object *obj)
@@ -4500,42 +3493,41 @@ int i915_gem_init_object(struct drm_gem_object *obj)
4500 return 0; 3493 return 0;
4501} 3494}
4502 3495
4503static void i915_gem_free_object_tail(struct drm_gem_object *obj) 3496static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
4504{ 3497{
4505 struct drm_device *dev = obj->dev; 3498 struct drm_device *dev = obj->base.dev;
4506 drm_i915_private_t *dev_priv = dev->dev_private; 3499 drm_i915_private_t *dev_priv = dev->dev_private;
4507 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4508 int ret; 3500 int ret;
4509 3501
4510 ret = i915_gem_object_unbind(obj); 3502 ret = i915_gem_object_unbind(obj);
4511 if (ret == -ERESTARTSYS) { 3503 if (ret == -ERESTARTSYS) {
4512 list_move(&obj_priv->mm_list, 3504 list_move(&obj->mm_list,
4513 &dev_priv->mm.deferred_free_list); 3505 &dev_priv->mm.deferred_free_list);
4514 return; 3506 return;
4515 } 3507 }
4516 3508
4517 if (obj_priv->mmap_offset) 3509 if (obj->base.map_list.map)
4518 i915_gem_free_mmap_offset(obj); 3510 i915_gem_free_mmap_offset(obj);
4519 3511
4520 drm_gem_object_release(obj); 3512 drm_gem_object_release(&obj->base);
4521 i915_gem_info_remove_obj(dev_priv, obj->size); 3513 i915_gem_info_remove_obj(dev_priv, obj->base.size);
4522 3514
4523 kfree(obj_priv->page_cpu_valid); 3515 kfree(obj->page_cpu_valid);
4524 kfree(obj_priv->bit_17); 3516 kfree(obj->bit_17);
4525 kfree(obj_priv); 3517 kfree(obj);
4526} 3518}
4527 3519
4528void i915_gem_free_object(struct drm_gem_object *obj) 3520void i915_gem_free_object(struct drm_gem_object *gem_obj)
4529{ 3521{
4530 struct drm_device *dev = obj->dev; 3522 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4531 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 3523 struct drm_device *dev = obj->base.dev;
4532 3524
4533 trace_i915_gem_object_destroy(obj); 3525 trace_i915_gem_object_destroy(obj);
4534 3526
4535 while (obj_priv->pin_count > 0) 3527 while (obj->pin_count > 0)
4536 i915_gem_object_unpin(obj); 3528 i915_gem_object_unpin(obj);
4537 3529
4538 if (obj_priv->phys_obj) 3530 if (obj->phys_obj)
4539 i915_gem_detach_phys_object(dev, obj); 3531 i915_gem_detach_phys_object(dev, obj);
4540 3532
4541 i915_gem_free_object_tail(obj); 3533 i915_gem_free_object_tail(obj);
@@ -4562,13 +3554,15 @@ i915_gem_idle(struct drm_device *dev)
4562 3554
4563 /* Under UMS, be paranoid and evict. */ 3555 /* Under UMS, be paranoid and evict. */
4564 if (!drm_core_check_feature(dev, DRIVER_MODESET)) { 3556 if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
4565 ret = i915_gem_evict_inactive(dev); 3557 ret = i915_gem_evict_inactive(dev, false);
4566 if (ret) { 3558 if (ret) {
4567 mutex_unlock(&dev->struct_mutex); 3559 mutex_unlock(&dev->struct_mutex);
4568 return ret; 3560 return ret;
4569 } 3561 }
4570 } 3562 }
4571 3563
3564 i915_gem_reset_fences(dev);
3565
4572 /* Hack! Don't let anybody do execbuf while we don't control the chip. 3566 /* Hack! Don't let anybody do execbuf while we don't control the chip.
4573 * We need to replace this with a semaphore, or something. 3567 * We need to replace this with a semaphore, or something.
4574 * And not confound mm.suspended! 3568 * And not confound mm.suspended!
@@ -4587,82 +3581,15 @@ i915_gem_idle(struct drm_device *dev)
4587 return 0; 3581 return 0;
4588} 3582}
4589 3583
4590/*
4591 * 965+ support PIPE_CONTROL commands, which provide finer grained control
4592 * over cache flushing.
4593 */
4594static int
4595i915_gem_init_pipe_control(struct drm_device *dev)
4596{
4597 drm_i915_private_t *dev_priv = dev->dev_private;
4598 struct drm_gem_object *obj;
4599 struct drm_i915_gem_object *obj_priv;
4600 int ret;
4601
4602 obj = i915_gem_alloc_object(dev, 4096);
4603 if (obj == NULL) {
4604 DRM_ERROR("Failed to allocate seqno page\n");
4605 ret = -ENOMEM;
4606 goto err;
4607 }
4608 obj_priv = to_intel_bo(obj);
4609 obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
4610
4611 ret = i915_gem_object_pin(obj, 4096);
4612 if (ret)
4613 goto err_unref;
4614
4615 dev_priv->seqno_gfx_addr = obj_priv->gtt_offset;
4616 dev_priv->seqno_page = kmap(obj_priv->pages[0]);
4617 if (dev_priv->seqno_page == NULL)
4618 goto err_unpin;
4619
4620 dev_priv->seqno_obj = obj;
4621 memset(dev_priv->seqno_page, 0, PAGE_SIZE);
4622
4623 return 0;
4624
4625err_unpin:
4626 i915_gem_object_unpin(obj);
4627err_unref:
4628 drm_gem_object_unreference(obj);
4629err:
4630 return ret;
4631}
4632
4633
4634static void
4635i915_gem_cleanup_pipe_control(struct drm_device *dev)
4636{
4637 drm_i915_private_t *dev_priv = dev->dev_private;
4638 struct drm_gem_object *obj;
4639 struct drm_i915_gem_object *obj_priv;
4640
4641 obj = dev_priv->seqno_obj;
4642 obj_priv = to_intel_bo(obj);
4643 kunmap(obj_priv->pages[0]);
4644 i915_gem_object_unpin(obj);
4645 drm_gem_object_unreference(obj);
4646 dev_priv->seqno_obj = NULL;
4647
4648 dev_priv->seqno_page = NULL;
4649}
4650
4651int 3584int
4652i915_gem_init_ringbuffer(struct drm_device *dev) 3585i915_gem_init_ringbuffer(struct drm_device *dev)
4653{ 3586{
4654 drm_i915_private_t *dev_priv = dev->dev_private; 3587 drm_i915_private_t *dev_priv = dev->dev_private;
4655 int ret; 3588 int ret;
4656 3589
4657 if (HAS_PIPE_CONTROL(dev)) {
4658 ret = i915_gem_init_pipe_control(dev);
4659 if (ret)
4660 return ret;
4661 }
4662
4663 ret = intel_init_render_ring_buffer(dev); 3590 ret = intel_init_render_ring_buffer(dev);
4664 if (ret) 3591 if (ret)
4665 goto cleanup_pipe_control; 3592 return ret;
4666 3593
4667 if (HAS_BSD(dev)) { 3594 if (HAS_BSD(dev)) {
4668 ret = intel_init_bsd_ring_buffer(dev); 3595 ret = intel_init_bsd_ring_buffer(dev);
@@ -4681,12 +3608,9 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
4681 return 0; 3608 return 0;
4682 3609
4683cleanup_bsd_ring: 3610cleanup_bsd_ring:
4684 intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); 3611 intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
4685cleanup_render_ring: 3612cleanup_render_ring:
4686 intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); 3613 intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
4687cleanup_pipe_control:
4688 if (HAS_PIPE_CONTROL(dev))
4689 i915_gem_cleanup_pipe_control(dev);
4690 return ret; 3614 return ret;
4691} 3615}
4692 3616
@@ -4694,12 +3618,10 @@ void
4694i915_gem_cleanup_ringbuffer(struct drm_device *dev) 3618i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4695{ 3619{
4696 drm_i915_private_t *dev_priv = dev->dev_private; 3620 drm_i915_private_t *dev_priv = dev->dev_private;
3621 int i;
4697 3622
4698 intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); 3623 for (i = 0; i < I915_NUM_RINGS; i++)
4699 intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); 3624 intel_cleanup_ring_buffer(&dev_priv->ring[i]);
4700 intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring);
4701 if (HAS_PIPE_CONTROL(dev))
4702 i915_gem_cleanup_pipe_control(dev);
4703} 3625}
4704 3626
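The cleanup change above is the payoff of moving the named render_ring/bsd_ring/blt_ring fields into the indexed ring[] array: per-engine call sites collapse into loops bounded by I915_NUM_RINGS, and adding an engine no longer means touching every teardown path. A compilable sketch of the refactor shape, with stand-in types:

    /* Once the rings live in one indexed array, setup and teardown
     * become loops, and a new engine only extends the enum. */
    enum ring_id { RCS, VCS, BCS, NUM_RINGS };  /* render, video, blitter */

    struct ring { int initialized; };

    static void cleanup_ring(struct ring *r) { r->initialized = 0; }

    int main(void)
    {
        struct ring rings[NUM_RINGS] = { {1}, {1}, {1} };
        for (int i = 0; i < NUM_RINGS; i++)   /* was: three explicit calls */
            cleanup_ring(&rings[i]);
        return rings[VCS].initialized;        /* 0 on success */
    }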
4705int 3627int
@@ -4707,7 +3629,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4707 struct drm_file *file_priv) 3629 struct drm_file *file_priv)
4708{ 3630{
4709 drm_i915_private_t *dev_priv = dev->dev_private; 3631 drm_i915_private_t *dev_priv = dev->dev_private;
4710 int ret; 3632 int ret, i;
4711 3633
4712 if (drm_core_check_feature(dev, DRIVER_MODESET)) 3634 if (drm_core_check_feature(dev, DRIVER_MODESET))
4713 return 0; 3635 return 0;
@@ -4727,14 +3649,12 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4727 } 3649 }
4728 3650
4729 BUG_ON(!list_empty(&dev_priv->mm.active_list)); 3651 BUG_ON(!list_empty(&dev_priv->mm.active_list));
4730 BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
4731 BUG_ON(!list_empty(&dev_priv->bsd_ring.active_list));
4732 BUG_ON(!list_empty(&dev_priv->blt_ring.active_list));
4733 BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); 3652 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
4734 BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); 3653 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
4735 BUG_ON(!list_empty(&dev_priv->render_ring.request_list)); 3654 for (i = 0; i < I915_NUM_RINGS; i++) {
4736 BUG_ON(!list_empty(&dev_priv->bsd_ring.request_list)); 3655 BUG_ON(!list_empty(&dev_priv->ring[i].active_list));
4737 BUG_ON(!list_empty(&dev_priv->blt_ring.request_list)); 3656 BUG_ON(!list_empty(&dev_priv->ring[i].request_list));
3657 }
4738 mutex_unlock(&dev->struct_mutex); 3658 mutex_unlock(&dev->struct_mutex);
4739 3659
4740 ret = drm_irq_install(dev); 3660 ret = drm_irq_install(dev);
@@ -4796,17 +3716,14 @@ i915_gem_load(struct drm_device *dev)
4796 INIT_LIST_HEAD(&dev_priv->mm.pinned_list); 3716 INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
4797 INIT_LIST_HEAD(&dev_priv->mm.fence_list); 3717 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4798 INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list); 3718 INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
4799 init_ring_lists(&dev_priv->render_ring); 3719 INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
4800 init_ring_lists(&dev_priv->bsd_ring); 3720 for (i = 0; i < I915_NUM_RINGS; i++)
4801 init_ring_lists(&dev_priv->blt_ring); 3721 init_ring_lists(&dev_priv->ring[i]);
4802 for (i = 0; i < 16; i++) 3722 for (i = 0; i < 16; i++)
4803 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); 3723 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
4804 INIT_DELAYED_WORK(&dev_priv->mm.retire_work, 3724 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4805 i915_gem_retire_work_handler); 3725 i915_gem_retire_work_handler);
4806 init_completion(&dev_priv->error_completion); 3726 init_completion(&dev_priv->error_completion);
4807 spin_lock(&shrink_list_lock);
4808 list_add(&dev_priv->mm.shrink_list, &shrink_list);
4809 spin_unlock(&shrink_list_lock);
4810 3727
4811 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */ 3728 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4812 if (IS_GEN3(dev)) { 3729 if (IS_GEN3(dev)) {
@@ -4818,6 +3735,8 @@ i915_gem_load(struct drm_device *dev)
4818 } 3735 }
4819 } 3736 }
4820 3737
3738 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
3739
4821 /* Old X drivers will take 0-2 for front, back, depth buffers */ 3740 /* Old X drivers will take 0-2 for front, back, depth buffers */
4822 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 3741 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4823 dev_priv->fence_reg_start = 3; 3742 dev_priv->fence_reg_start = 3;
@@ -4849,6 +3768,10 @@ i915_gem_load(struct drm_device *dev)
4849 } 3768 }
4850 i915_gem_detect_bit_6_swizzle(dev); 3769 i915_gem_detect_bit_6_swizzle(dev);
4851 init_waitqueue_head(&dev_priv->pending_flip_queue); 3770 init_waitqueue_head(&dev_priv->pending_flip_queue);
3771
3772 dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
3773 dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
3774 register_shrinker(&dev_priv->mm.inactive_shrinker);
4852} 3775}
4853 3776
4854/* 3777/*
@@ -4918,47 +3841,47 @@ void i915_gem_free_all_phys_object(struct drm_device *dev)
4918} 3841}
4919 3842
4920void i915_gem_detach_phys_object(struct drm_device *dev, 3843void i915_gem_detach_phys_object(struct drm_device *dev,
4921 struct drm_gem_object *obj) 3844 struct drm_i915_gem_object *obj)
4922{ 3845{
4923 struct drm_i915_gem_object *obj_priv; 3846 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
3847 char *vaddr;
4924 int i; 3848 int i;
4925 int ret;
4926 int page_count; 3849 int page_count;
4927 3850
4928 obj_priv = to_intel_bo(obj); 3851 if (!obj->phys_obj)
4929 if (!obj_priv->phys_obj)
4930 return; 3852 return;
3853 vaddr = obj->phys_obj->handle->vaddr;
4931 3854
4932 ret = i915_gem_object_get_pages(obj, 0); 3855 page_count = obj->base.size / PAGE_SIZE;
4933 if (ret)
4934 goto out;
4935
4936 page_count = obj->size / PAGE_SIZE;
4937
4938 for (i = 0; i < page_count; i++) { 3856 for (i = 0; i < page_count; i++) {
4939 char *dst = kmap_atomic(obj_priv->pages[i]); 3857 struct page *page = read_cache_page_gfp(mapping, i,
4940 char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE); 3858 GFP_HIGHUSER | __GFP_RECLAIMABLE);
4941 3859 if (!IS_ERR(page)) {
4942 memcpy(dst, src, PAGE_SIZE); 3860 char *dst = kmap_atomic(page);
4943 kunmap_atomic(dst); 3861 memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
3862 kunmap_atomic(dst);
3863
3864 drm_clflush_pages(&page, 1);
3865
3866 set_page_dirty(page);
3867 mark_page_accessed(page);
3868 page_cache_release(page);
3869 }
4944 } 3870 }
4945 drm_clflush_pages(obj_priv->pages, page_count); 3871 intel_gtt_chipset_flush();
4946 drm_agp_chipset_flush(dev);
4947 3872
4948 i915_gem_object_put_pages(obj); 3873 obj->phys_obj->cur_obj = NULL;
4949out: 3874 obj->phys_obj = NULL;
4950 obj_priv->phys_obj->cur_obj = NULL;
4951 obj_priv->phys_obj = NULL;
4952} 3875}
4953 3876
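The detach path above copies between two views of the same data: the phys object is one contiguous kernel buffer (vaddr), while the GEM object is a set of discrete page-cache pages, each mapped briefly with kmap_atomic, copied, flushed, and dirtied. A userspace sketch of the page-walk shape only; plain pointers stand in for the page lookup and temporary mappings, which have no userspace equivalent:

    #include <string.h>

    #define PG 4096

    /* Walk page-sized chunks of the contiguous source, mapping each
     * destination page briefly (kmap_atomic in the kernel; a plain
     * pointer here) and copying one page at a time. */
    static void copy_back(char *const pages[], const char *vaddr, int npages)
    {
        for (int i = 0; i < npages; i++) {
            char *dst = pages[i];                 /* kmap_atomic(page) */
            memcpy(dst, vaddr + (size_t)i * PG, PG);
            /* kunmap_atomic(dst); then flush caches and dirty the page */
        }
    }

    int main(void)
    {
        static char p0[PG], p1[PG], src[2 * PG] = { 42 };
        char *pages[] = { p0, p1 };
        copy_back(pages, src, 2);
        return p0[0] == 42 ? 0 : 1;
    }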
4954int 3877int
4955i915_gem_attach_phys_object(struct drm_device *dev, 3878i915_gem_attach_phys_object(struct drm_device *dev,
4956 struct drm_gem_object *obj, 3879 struct drm_i915_gem_object *obj,
4957 int id, 3880 int id,
4958 int align) 3881 int align)
4959{ 3882{
3883 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
4960 drm_i915_private_t *dev_priv = dev->dev_private; 3884 drm_i915_private_t *dev_priv = dev->dev_private;
4961 struct drm_i915_gem_object *obj_priv;
4962 int ret = 0; 3885 int ret = 0;
4963 int page_count; 3886 int page_count;
4964 int i; 3887 int i;
@@ -4966,10 +3889,8 @@ i915_gem_attach_phys_object(struct drm_device *dev,
4966 if (id > I915_MAX_PHYS_OBJECT) 3889 if (id > I915_MAX_PHYS_OBJECT)
4967 return -EINVAL; 3890 return -EINVAL;
4968 3891
4969 obj_priv = to_intel_bo(obj); 3892 if (obj->phys_obj) {
4970 3893 if (obj->phys_obj->id == id)
4971 if (obj_priv->phys_obj) {
4972 if (obj_priv->phys_obj->id == id)
4973 return 0; 3894 return 0;
4974 i915_gem_detach_phys_object(dev, obj); 3895 i915_gem_detach_phys_object(dev, obj);
4975 } 3896 }
@@ -4977,51 +3898,50 @@ i915_gem_attach_phys_object(struct drm_device *dev,
4977 /* create a new object */ 3898 /* create a new object */
4978 if (!dev_priv->mm.phys_objs[id - 1]) { 3899 if (!dev_priv->mm.phys_objs[id - 1]) {
4979 ret = i915_gem_init_phys_object(dev, id, 3900 ret = i915_gem_init_phys_object(dev, id,
4980 obj->size, align); 3901 obj->base.size, align);
4981 if (ret) { 3902 if (ret) {
4982 DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size); 3903 DRM_ERROR("failed to init phys object %d size: %zu\n",
4983 goto out; 3904 id, obj->base.size);
3905 return ret;
4984 } 3906 }
4985 } 3907 }
4986 3908
4987 /* bind to the object */ 3909 /* bind to the object */
4988 obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1]; 3910 obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
4989 obj_priv->phys_obj->cur_obj = obj; 3911 obj->phys_obj->cur_obj = obj;
4990 3912
4991 ret = i915_gem_object_get_pages(obj, 0); 3913 page_count = obj->base.size / PAGE_SIZE;
4992 if (ret) {
4993 DRM_ERROR("failed to get page list\n");
4994 goto out;
4995 }
4996
4997 page_count = obj->size / PAGE_SIZE;
4998 3914
4999 for (i = 0; i < page_count; i++) { 3915 for (i = 0; i < page_count; i++) {
5000 char *src = kmap_atomic(obj_priv->pages[i]); 3916 struct page *page;
5001 char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE); 3917 char *dst, *src;
5002 3918
3919 page = read_cache_page_gfp(mapping, i,
3920 GFP_HIGHUSER | __GFP_RECLAIMABLE);
3921 if (IS_ERR(page))
3922 return PTR_ERR(page);
3923
3924 src = kmap_atomic(page);
3925 dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
5003 memcpy(dst, src, PAGE_SIZE); 3926 memcpy(dst, src, PAGE_SIZE);
5004 kunmap_atomic(src); 3927 kunmap_atomic(src);
5005 }
5006 3928
5007 i915_gem_object_put_pages(obj); 3929 mark_page_accessed(page);
3930 page_cache_release(page);
3931 }
5008 3932
5009 return 0; 3933 return 0;
5010out:
5011 return ret;
5012} 3934}
5013 3935
5014static int 3936static int
5015i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, 3937i915_gem_phys_pwrite(struct drm_device *dev,
3938 struct drm_i915_gem_object *obj,
5016 struct drm_i915_gem_pwrite *args, 3939 struct drm_i915_gem_pwrite *args,
5017 struct drm_file *file_priv) 3940 struct drm_file *file_priv)
5018{ 3941{
5019 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 3942 void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
5020 void *vaddr = obj_priv->phys_obj->handle->vaddr + args->offset;
5021 char __user *user_data = (char __user *) (uintptr_t) args->data_ptr; 3943 char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
5022 3944
5023 DRM_DEBUG_DRIVER("vaddr %p, %lld\n", vaddr, args->size);
5024
5025 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { 3945 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
5026 unsigned long unwritten; 3946 unsigned long unwritten;
5027 3947
@@ -5036,7 +3956,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
5036 return -EFAULT; 3956 return -EFAULT;
5037 } 3957 }
5038 3958
5039 drm_agp_chipset_flush(dev); 3959 intel_gtt_chipset_flush();
5040 return 0; 3960 return 0;
5041} 3961}
5042 3962
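i915_gem_phys_pwrite() above is a two-tier copy: first a fast variant that must not fault (__copy_from_user_inatomic_nocache, run without taking page faults), and only if that reports leftover bytes does the code retry with a slow path that may sleep to fault the user pages in. Userspace cannot reproduce the fault semantics, so this is only a shape sketch; copy_fast and copy_slow are stand-ins, not kernel APIs:

    #include <stddef.h>
    #include <string.h>

    /* Both helpers return the number of bytes NOT copied; 0 == success. */
    static size_t copy_fast(void *dst, const void *src, size_t n)
    {
        memcpy(dst, src, n);   /* kernel: atomic, must not fault */
        return 0;
    }

    static size_t copy_slow(void *dst, const void *src, size_t n)
    {
        memcpy(dst, src, n);   /* kernel: may sleep, faults pages in */
        return 0;
    }

    static int pwrite_phys(void *vaddr, const void *user, size_t n)
    {
        if (copy_fast(vaddr, user, n) == 0)
            return 0;                          /* fast path won */
        return copy_slow(vaddr, user, n) ? -14 /* -EFAULT */ : 0;
    }

    int main(void)
    {
        char dst[8], src[8] = "abcdefg";
        return pwrite_phys(dst, src, sizeof(src));
    }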
@@ -5074,144 +3994,68 @@ i915_gpu_is_active(struct drm_device *dev)
5074} 3994}
5075 3995
5076static int 3996static int
5077i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask) 3997i915_gem_inactive_shrink(struct shrinker *shrinker,
5078{ 3998 int nr_to_scan,
5079 drm_i915_private_t *dev_priv, *next_dev; 3999 gfp_t gfp_mask)
5080 struct drm_i915_gem_object *obj_priv, *next_obj; 4000{
5081 int cnt = 0; 4001 struct drm_i915_private *dev_priv =
5082 int would_deadlock = 1; 4002 container_of(shrinker,
4003 struct drm_i915_private,
4004 mm.inactive_shrinker);
4005 struct drm_device *dev = dev_priv->dev;
4006 struct drm_i915_gem_object *obj, *next;
4007 int cnt;
4008
4009 if (!mutex_trylock(&dev->struct_mutex))
4010 return 0;
5083 4011
5084 /* "fast-path" to count number of available objects */ 4012 /* "fast-path" to count number of available objects */
5085 if (nr_to_scan == 0) { 4013 if (nr_to_scan == 0) {
5086 spin_lock(&shrink_list_lock); 4014 cnt = 0;
5087 list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) { 4015 list_for_each_entry(obj,
5088 struct drm_device *dev = dev_priv->dev; 4016 &dev_priv->mm.inactive_list,
5089 4017 mm_list)
5090 if (mutex_trylock(&dev->struct_mutex)) { 4018 cnt++;
5091 list_for_each_entry(obj_priv, 4019 mutex_unlock(&dev->struct_mutex);
5092 &dev_priv->mm.inactive_list, 4020 return cnt / 100 * sysctl_vfs_cache_pressure;
5093 mm_list)
5094 cnt++;
5095 mutex_unlock(&dev->struct_mutex);
5096 }
5097 }
5098 spin_unlock(&shrink_list_lock);
5099
5100 return (cnt / 100) * sysctl_vfs_cache_pressure;
5101 } 4021 }
5102 4022
5103 spin_lock(&shrink_list_lock);
5104
5105rescan: 4023rescan:
5106 /* first scan for clean buffers */ 4024 /* first scan for clean buffers */
5107 list_for_each_entry_safe(dev_priv, next_dev, 4025 i915_gem_retire_requests(dev);
5108 &shrink_list, mm.shrink_list) {
5109 struct drm_device *dev = dev_priv->dev;
5110
5111 if (! mutex_trylock(&dev->struct_mutex))
5112 continue;
5113
5114 spin_unlock(&shrink_list_lock);
5115 i915_gem_retire_requests(dev);
5116 4026
5117 list_for_each_entry_safe(obj_priv, next_obj, 4027 list_for_each_entry_safe(obj, next,
5118 &dev_priv->mm.inactive_list, 4028 &dev_priv->mm.inactive_list,
5119 mm_list) { 4029 mm_list) {
5120 if (i915_gem_object_is_purgeable(obj_priv)) { 4030 if (i915_gem_object_is_purgeable(obj)) {
5121 i915_gem_object_unbind(&obj_priv->base); 4031 if (i915_gem_object_unbind(obj) == 0 &&
5122 if (--nr_to_scan <= 0) 4032 --nr_to_scan == 0)
5123 break; 4033 break;
5124 }
5125 } 4034 }
5126
5127 spin_lock(&shrink_list_lock);
5128 mutex_unlock(&dev->struct_mutex);
5129
5130 would_deadlock = 0;
5131
5132 if (nr_to_scan <= 0)
5133 break;
5134 } 4035 }
5135 4036
5136 /* second pass, evict/count anything still on the inactive list */ 4037 /* second pass, evict/count anything still on the inactive list */
5137 list_for_each_entry_safe(dev_priv, next_dev, 4038 cnt = 0;
5138 &shrink_list, mm.shrink_list) { 4039 list_for_each_entry_safe(obj, next,
5139 struct drm_device *dev = dev_priv->dev; 4040 &dev_priv->mm.inactive_list,
5140 4041 mm_list) {
5141 if (! mutex_trylock(&dev->struct_mutex)) 4042 if (nr_to_scan &&
5142 continue; 4043 i915_gem_object_unbind(obj) == 0)
5143 4044 nr_to_scan--;
5144 spin_unlock(&shrink_list_lock); 4045 else
5145 4046 cnt++;
5146 list_for_each_entry_safe(obj_priv, next_obj,
5147 &dev_priv->mm.inactive_list,
5148 mm_list) {
5149 if (nr_to_scan > 0) {
5150 i915_gem_object_unbind(&obj_priv->base);
5151 nr_to_scan--;
5152 } else
5153 cnt++;
5154 }
5155
5156 spin_lock(&shrink_list_lock);
5157 mutex_unlock(&dev->struct_mutex);
5158
5159 would_deadlock = 0;
5160 } 4047 }
5161 4048
5162 if (nr_to_scan) { 4049 if (nr_to_scan && i915_gpu_is_active(dev)) {
5163 int active = 0;
5164
5165 /* 4050 /*
5166 * We are desperate for pages, so as a last resort, wait 4051 * We are desperate for pages, so as a last resort, wait
5167 * for the GPU to finish and discard whatever we can. 4052 * for the GPU to finish and discard whatever we can.
5168 * This has a dramatic impact to reduce the number of 4053 * This has a dramatic impact to reduce the number of
5169 * OOM-killer events whilst running the GPU aggressively. 4054 * OOM-killer events whilst running the GPU aggressively.
5170 */ 4055 */
5171 list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) { 4056 if (i915_gpu_idle(dev) == 0)
5172 struct drm_device *dev = dev_priv->dev;
5173
5174 if (!mutex_trylock(&dev->struct_mutex))
5175 continue;
5176
5177 spin_unlock(&shrink_list_lock);
5178
5179 if (i915_gpu_is_active(dev)) {
5180 i915_gpu_idle(dev);
5181 active++;
5182 }
5183
5184 spin_lock(&shrink_list_lock);
5185 mutex_unlock(&dev->struct_mutex);
5186 }
5187
5188 if (active)
5189 goto rescan; 4057 goto rescan;
5190 } 4058 }
5191 4059 mutex_unlock(&dev->struct_mutex);
5192 spin_unlock(&shrink_list_lock); 4060 return cnt / 100 * sysctl_vfs_cache_pressure;
5193
5194 if (would_deadlock)
5195 return -1;
5196 else if (cnt > 0)
5197 return (cnt / 100) * sysctl_vfs_cache_pressure;
5198 else
5199 return 0;
5200}
5201
5202static struct shrinker shrinker = {
5203 .shrink = i915_gem_shrink,
5204 .seeks = DEFAULT_SEEKS,
5205};
5206
5207__init void
5208i915_gem_shrinker_init(void)
5209{
5210 register_shrinker(&shrinker);
5211}
5212
5213__exit void
5214i915_gem_shrinker_exit(void)
5215{
5216 unregister_shrinker(&shrinker);
5217} 4061}
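The shrinker rewrite that closes this file is worth pausing on: the old code kept one global shrinker walking a spinlock-protected list of all devices, while the new code embeds a struct shrinker in each device's private data and recovers the owner in the callback via container_of, eliminating the global list and its lock (the real callback also scales its count by sysctl_vfs_cache_pressure and honours the seeks field, which this omits). A self-contained sketch of the embed-and-recover idiom with stand-in types:

    #include <stddef.h>

    struct shrinker { int (*shrink)(struct shrinker *, int); };

    struct dev_priv {
        int inactive_objects;
        struct shrinker inactive_shrinker;   /* embedded, one per device */
    };

    /* Recover the enclosing struct from a pointer to its member. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static int inactive_shrink(struct shrinker *s, int nr_to_scan)
    {
        struct dev_priv *dp =
            container_of(s, struct dev_priv, inactive_shrinker);
        if (nr_to_scan == 0)                 /* fast path: just count */
            return dp->inactive_objects;
        /* ... trylock the device, evict up to nr_to_scan objects ... */
        return dp->inactive_objects;
    }

    int main(void)
    {
        struct dev_priv dp = { .inactive_objects = 7 };
        dp.inactive_shrinker.shrink = inactive_shrink;
        return dp.inactive_shrinker.shrink(&dp.inactive_shrinker, 0) == 7
               ? 0 : 1;
    }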
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 48644b840a8d..29d014c48ca2 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -152,13 +152,12 @@ i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
152} 152}
153 153
154void 154void
155i915_gem_dump_object(struct drm_gem_object *obj, int len, 155i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
156 const char *where, uint32_t mark) 156 const char *where, uint32_t mark)
157{ 157{
158 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
159 int page; 158 int page;
160 159
161 DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset); 160 DRM_INFO("%s: object at offset %08x\n", where, obj->gtt_offset);
162 for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) { 161 for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) {
163 int page_len, chunk, chunk_len; 162 int page_len, chunk, chunk_len;
164 163
@@ -170,9 +169,9 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len,
170 chunk_len = page_len - chunk; 169 chunk_len = page_len - chunk;
171 if (chunk_len > 128) 170 if (chunk_len > 128)
172 chunk_len = 128; 171 chunk_len = 128;
173 i915_gem_dump_page(obj_priv->pages[page], 172 i915_gem_dump_page(obj->pages[page],
174 chunk, chunk + chunk_len, 173 chunk, chunk + chunk_len,
175 obj_priv->gtt_offset + 174 obj->gtt_offset +
176 page * PAGE_SIZE, 175 page * PAGE_SIZE,
177 mark); 176 mark);
178 } 177 }
@@ -182,21 +181,19 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len,
182 181
183#if WATCH_COHERENCY 182#if WATCH_COHERENCY
184void 183void
185i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) 184i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
186{ 185{
187 struct drm_device *dev = obj->dev; 186 struct drm_device *dev = obj->base.dev;
188 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
189 int page; 187 int page;
190 uint32_t *gtt_mapping; 188 uint32_t *gtt_mapping;
191 uint32_t *backing_map = NULL; 189 uint32_t *backing_map = NULL;
192 int bad_count = 0; 190 int bad_count = 0;
193 191
194 DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n", 192 DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n",
195 __func__, obj, obj_priv->gtt_offset, handle, 193 __func__, obj, obj->gtt_offset, handle,
196 obj->size / 1024); 194 obj->size / 1024);
197 195
198 gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset, 196 gtt_mapping = ioremap(dev->agp->base + obj->gtt_offset, obj->base.size);
199 obj->size);
200 if (gtt_mapping == NULL) { 197 if (gtt_mapping == NULL) {
201 DRM_ERROR("failed to map GTT space\n"); 198 DRM_ERROR("failed to map GTT space\n");
202 return; 199 return;
@@ -205,7 +202,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
205 for (page = 0; page < obj->size / PAGE_SIZE; page++) { 202 for (page = 0; page < obj->size / PAGE_SIZE; page++) {
206 int i; 203 int i;
207 204
208 backing_map = kmap_atomic(obj_priv->pages[page], KM_USER0); 205 backing_map = kmap_atomic(obj->pages[page], KM_USER0);
209 206
210 if (backing_map == NULL) { 207 if (backing_map == NULL) {
211 DRM_ERROR("failed to map backing page\n"); 208 DRM_ERROR("failed to map backing page\n");
@@ -220,7 +217,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
220 if (cpuval != gttval) { 217 if (cpuval != gttval) {
221 DRM_INFO("incoherent CPU vs GPU at 0x%08x: " 218 DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
222 "0x%08x vs 0x%08x\n", 219 "0x%08x vs 0x%08x\n",
223 (int)(obj_priv->gtt_offset + 220 (int)(obj->gtt_offset +
224 page * PAGE_SIZE + i * 4), 221 page * PAGE_SIZE + i * 4),
225 cpuval, gttval); 222 cpuval, gttval);
226 if (bad_count++ >= 8) { 223 if (bad_count++ >= 8) {
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index d8ae7d1d0cc6..78b8cf90c922 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -32,28 +32,36 @@
32#include "i915_drm.h" 32#include "i915_drm.h"
33 33
34static bool 34static bool
35mark_free(struct drm_i915_gem_object *obj_priv, 35mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
36 struct list_head *unwind)
37{ 36{
38 list_add(&obj_priv->evict_list, unwind); 37 list_add(&obj->exec_list, unwind);
39 drm_gem_object_reference(&obj_priv->base); 38 drm_gem_object_reference(&obj->base);
40 return drm_mm_scan_add_block(obj_priv->gtt_space); 39 return drm_mm_scan_add_block(obj->gtt_space);
41} 40}
42 41
43int 42int
44i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment) 43i915_gem_evict_something(struct drm_device *dev, int min_size,
44 unsigned alignment, bool mappable)
45{ 45{
46 drm_i915_private_t *dev_priv = dev->dev_private; 46 drm_i915_private_t *dev_priv = dev->dev_private;
47 struct list_head eviction_list, unwind_list; 47 struct list_head eviction_list, unwind_list;
48 struct drm_i915_gem_object *obj_priv; 48 struct drm_i915_gem_object *obj;
49 int ret = 0; 49 int ret = 0;
50 50
51 i915_gem_retire_requests(dev); 51 i915_gem_retire_requests(dev);
52 52
53 /* Re-check for free space after retiring requests */ 53 /* Re-check for free space after retiring requests */
54 if (drm_mm_search_free(&dev_priv->mm.gtt_space, 54 if (mappable) {
55 min_size, alignment, 0)) 55 if (drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
56 return 0; 56 min_size, alignment, 0,
57 dev_priv->mm.gtt_mappable_end,
58 0))
59 return 0;
60 } else {
61 if (drm_mm_search_free(&dev_priv->mm.gtt_space,
62 min_size, alignment, 0))
63 return 0;
64 }
57 65
58 /* 66 /*
59 * The goal is to evict objects and amalgamate space in LRU order. 67 * The goal is to evict objects and amalgamate space in LRU order.
@@ -79,45 +87,50 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
79 */ 87 */
80 88
81 INIT_LIST_HEAD(&unwind_list); 89 INIT_LIST_HEAD(&unwind_list);
82 drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment); 90 if (mappable)
91 drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, min_size,
92 alignment, 0,
93 dev_priv->mm.gtt_mappable_end);
94 else
95 drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
83 96
84 /* First see if there is a large enough contiguous idle region... */ 97 /* First see if there is a large enough contiguous idle region... */
85 list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) { 98 list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
86 if (mark_free(obj_priv, &unwind_list)) 99 if (mark_free(obj, &unwind_list))
87 goto found; 100 goto found;
88 } 101 }
89 102
90 /* Now merge in the soon-to-be-expired objects... */ 103 /* Now merge in the soon-to-be-expired objects... */
91 list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { 104 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
92 /* Does the object require an outstanding flush? */ 105 /* Does the object require an outstanding flush? */
93 if (obj_priv->base.write_domain || obj_priv->pin_count) 106 if (obj->base.write_domain || obj->pin_count)
94 continue; 107 continue;
95 108
96 if (mark_free(obj_priv, &unwind_list)) 109 if (mark_free(obj, &unwind_list))
97 goto found; 110 goto found;
98 } 111 }
99 112
100 /* Finally add anything with a pending flush (in order of retirement) */ 113 /* Finally add anything with a pending flush (in order of retirement) */
101 list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) { 114 list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
102 if (obj_priv->pin_count) 115 if (obj->pin_count)
103 continue; 116 continue;
104 117
105 if (mark_free(obj_priv, &unwind_list)) 118 if (mark_free(obj, &unwind_list))
106 goto found; 119 goto found;
107 } 120 }
108 list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { 121 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
109 if (! obj_priv->base.write_domain || obj_priv->pin_count) 122 if (! obj->base.write_domain || obj->pin_count)
110 continue; 123 continue;
111 124
112 if (mark_free(obj_priv, &unwind_list)) 125 if (mark_free(obj, &unwind_list))
113 goto found; 126 goto found;
114 } 127 }
115 128
116 /* Nothing found, clean up and bail out! */ 129 /* Nothing found, clean up and bail out! */
117 list_for_each_entry(obj_priv, &unwind_list, evict_list) { 130 list_for_each_entry(obj, &unwind_list, exec_list) {
118 ret = drm_mm_scan_remove_block(obj_priv->gtt_space); 131 ret = drm_mm_scan_remove_block(obj->gtt_space);
119 BUG_ON(ret); 132 BUG_ON(ret);
120 drm_gem_object_unreference(&obj_priv->base); 133 drm_gem_object_unreference(&obj->base);
121 } 134 }
122 135
123 /* We expect the caller to unpin, evict all and try again, or give up. 136 /* We expect the caller to unpin, evict all and try again, or give up.
@@ -131,33 +144,33 @@ found:
131 * temporary list. */ 144 * temporary list. */
132 INIT_LIST_HEAD(&eviction_list); 145 INIT_LIST_HEAD(&eviction_list);
133 while (!list_empty(&unwind_list)) { 146 while (!list_empty(&unwind_list)) {
134 obj_priv = list_first_entry(&unwind_list, 147 obj = list_first_entry(&unwind_list,
135 struct drm_i915_gem_object, 148 struct drm_i915_gem_object,
136 evict_list); 149 exec_list);
137 if (drm_mm_scan_remove_block(obj_priv->gtt_space)) { 150 if (drm_mm_scan_remove_block(obj->gtt_space)) {
138 list_move(&obj_priv->evict_list, &eviction_list); 151 list_move(&obj->exec_list, &eviction_list);
139 continue; 152 continue;
140 } 153 }
141 list_del(&obj_priv->evict_list); 154 list_del_init(&obj->exec_list);
142 drm_gem_object_unreference(&obj_priv->base); 155 drm_gem_object_unreference(&obj->base);
143 } 156 }
144 157
145 /* Unbinding will emit any required flushes */ 158 /* Unbinding will emit any required flushes */
146 while (!list_empty(&eviction_list)) { 159 while (!list_empty(&eviction_list)) {
147 obj_priv = list_first_entry(&eviction_list, 160 obj = list_first_entry(&eviction_list,
148 struct drm_i915_gem_object, 161 struct drm_i915_gem_object,
149 evict_list); 162 exec_list);
150 if (ret == 0) 163 if (ret == 0)
151 ret = i915_gem_object_unbind(&obj_priv->base); 164 ret = i915_gem_object_unbind(obj);
152 list_del(&obj_priv->evict_list); 165 list_del_init(&obj->exec_list);
153 drm_gem_object_unreference(&obj_priv->base); 166 drm_gem_object_unreference(&obj->base);
154 } 167 }
155 168
156 return ret; 169 return ret;
157} 170}
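
For reference, a minimal user-space sketch of the eviction scan's overall shape: walk the LRU candidates, mark them until enough space would be freed, and unwind every mark on failure. This is heavily simplified — it ignores alignment, the mappable-range variant, and the hole adjacency the real drm_mm scan API enforces — and all names (toy_obj, scan_for_space) are illustrative.

#include <stdio.h>

struct toy_obj {
	unsigned int size;
	int marked;
};

static int scan_for_space(struct toy_obj *lru, int n, unsigned int min_size)
{
	unsigned int found = 0;
	int i;

	for (i = 0; i < n; i++) {
		lru[i].marked = 1;		/* the mark_free() step */
		found += lru[i].size;
		if (found >= min_size)
			return i + 1;		/* objects to evict */
	}

	/* Nothing found: unwind every mark and bail out. */
	for (i = 0; i < n; i++)
		lru[i].marked = 0;
	return -1;
}

int main(void)
{
	struct toy_obj lru[] = { { 4096, 0 }, { 8192, 0 }, { 16384, 0 } };

	/* 4096 + 8192 >= 12000, so the first two objects are evicted. */
	printf("evict %d objects\n", scan_for_space(lru, 3, 12000));
	return 0;
}

The unwind loop corresponds to the "Nothing found, clean up and bail out" path in the function above.
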
158 171
159int 172int
160i915_gem_evict_everything(struct drm_device *dev) 173i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
161{ 174{
162 drm_i915_private_t *dev_priv = dev->dev_private; 175 drm_i915_private_t *dev_priv = dev->dev_private;
163 int ret; 176 int ret;
@@ -176,36 +189,22 @@ i915_gem_evict_everything(struct drm_device *dev)
176 189
177 BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); 190 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
178 191
179 ret = i915_gem_evict_inactive(dev); 192 return i915_gem_evict_inactive(dev, purgeable_only);
180 if (ret)
181 return ret;
182
183 lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
184 list_empty(&dev_priv->mm.flushing_list) &&
185 list_empty(&dev_priv->mm.active_list));
186 BUG_ON(!lists_empty);
187
188 return 0;
189} 193}
190 194
191/** Unbinds all inactive objects. */ 195/** Unbinds all inactive objects. */
192int 196int
193i915_gem_evict_inactive(struct drm_device *dev) 197i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only)
194{ 198{
195 drm_i915_private_t *dev_priv = dev->dev_private; 199 drm_i915_private_t *dev_priv = dev->dev_private;
196 200 struct drm_i915_gem_object *obj, *next;
197 while (!list_empty(&dev_priv->mm.inactive_list)) { 201
198 struct drm_gem_object *obj; 202 list_for_each_entry_safe(obj, next,
199 int ret; 203 &dev_priv->mm.inactive_list, mm_list) {
200 204 if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) {
201 obj = &list_first_entry(&dev_priv->mm.inactive_list, 205 int ret = i915_gem_object_unbind(obj);
202 struct drm_i915_gem_object, 206 if (ret)
203 mm_list)->base; 207 return ret;
204
205 ret = i915_gem_object_unbind(obj);
206 if (ret != 0) {
207 DRM_ERROR("Error unbinding object: %d\n", ret);
208 return ret;
209 } 208 }
210 } 209 }
211 210
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
new file mode 100644
index 000000000000..61129e6759eb
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -0,0 +1,1343 @@
1/*
2 * Copyright © 2008,2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Chris Wilson <chris@chris-wilson.co.uk>
26 *
27 */
28
29#include "drmP.h"
30#include "drm.h"
31#include "i915_drm.h"
32#include "i915_drv.h"
33#include "i915_trace.h"
34#include "intel_drv.h"
35
36struct change_domains {
37 uint32_t invalidate_domains;
38 uint32_t flush_domains;
39 uint32_t flush_rings;
40};
41
42/*
43 * Set the next domain for the specified object. This
 44 * may not actually perform the necessary flushing/invalidating though,
45 * as that may want to be batched with other set_domain operations
46 *
47 * This is (we hope) the only really tricky part of gem. The goal
48 * is fairly simple -- track which caches hold bits of the object
49 * and make sure they remain coherent. A few concrete examples may
50 * help to explain how it works. For shorthand, we use the notation
 51 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
52 * a pair of read and write domain masks.
53 *
54 * Case 1: the batch buffer
55 *
56 * 1. Allocated
57 * 2. Written by CPU
58 * 3. Mapped to GTT
59 * 4. Read by GPU
60 * 5. Unmapped from GTT
61 * 6. Freed
62 *
63 * Let's take these a step at a time
64 *
65 * 1. Allocated
66 * Pages allocated from the kernel may still have
67 * cache contents, so we set them to (CPU, CPU) always.
68 * 2. Written by CPU (using pwrite)
69 * The pwrite function calls set_domain (CPU, CPU) and
70 * this function does nothing (as nothing changes)
 71 * 3. Mapped to GTT
72 * This function asserts that the object is not
73 * currently in any GPU-based read or write domains
74 * 4. Read by GPU
75 * i915_gem_execbuffer calls set_domain (COMMAND, 0).
76 * As write_domain is zero, this function adds in the
77 * current read domains (CPU+COMMAND, 0).
78 * flush_domains is set to CPU.
79 * invalidate_domains is set to COMMAND
80 * clflush is run to get data out of the CPU caches
81 * then i915_dev_set_domain calls i915_gem_flush to
82 * emit an MI_FLUSH and drm_agp_chipset_flush
83 * 5. Unmapped from GTT
84 * i915_gem_object_unbind calls set_domain (CPU, CPU)
85 * flush_domains and invalidate_domains end up both zero
86 * so no flushing/invalidating happens
87 * 6. Freed
88 * yay, done
89 *
90 * Case 2: The shared render buffer
91 *
92 * 1. Allocated
93 * 2. Mapped to GTT
94 * 3. Read/written by GPU
95 * 4. set_domain to (CPU,CPU)
96 * 5. Read/written by CPU
97 * 6. Read/written by GPU
98 *
99 * 1. Allocated
100 * Same as last example, (CPU, CPU)
101 * 2. Mapped to GTT
102 * Nothing changes (assertions find that it is not in the GPU)
103 * 3. Read/written by GPU
104 * execbuffer calls set_domain (RENDER, RENDER)
105 * flush_domains gets CPU
106 * invalidate_domains gets GPU
107 * clflush (obj)
108 * MI_FLUSH and drm_agp_chipset_flush
109 * 4. set_domain (CPU, CPU)
110 * flush_domains gets GPU
111 * invalidate_domains gets CPU
112 * wait_rendering (obj) to make sure all drawing is complete.
113 * This will include an MI_FLUSH to get the data from GPU
114 * to memory
115 * clflush (obj) to invalidate the CPU cache
116 * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
117 * 5. Read/written by CPU
118 * cache lines are loaded and dirtied
 119 * 6. Read/written by GPU
120 * Same as last GPU access
121 *
122 * Case 3: The constant buffer
123 *
124 * 1. Allocated
125 * 2. Written by CPU
126 * 3. Read by GPU
127 * 4. Updated (written) by CPU again
128 * 5. Read by GPU
129 *
130 * 1. Allocated
131 * (CPU, CPU)
132 * 2. Written by CPU
133 * (CPU, CPU)
134 * 3. Read by GPU
135 * (CPU+RENDER, 0)
136 * flush_domains = CPU
137 * invalidate_domains = RENDER
138 * clflush (obj)
139 * MI_FLUSH
140 * drm_agp_chipset_flush
141 * 4. Updated (written) by CPU again
142 * (CPU, CPU)
143 * flush_domains = 0 (no previous write domain)
144 * invalidate_domains = 0 (no new read domains)
145 * 5. Read by GPU
146 * (CPU+RENDER, 0)
147 * flush_domains = CPU
148 * invalidate_domains = RENDER
149 * clflush (obj)
150 * MI_FLUSH
151 * drm_agp_chipset_flush
152 */
153static void
154i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
155 struct intel_ring_buffer *ring,
156 struct change_domains *cd)
157{
158 uint32_t invalidate_domains = 0, flush_domains = 0;
159
160 /*
161 * If the object isn't moving to a new write domain,
162 * let the object stay in multiple read domains
163 */
164 if (obj->base.pending_write_domain == 0)
165 obj->base.pending_read_domains |= obj->base.read_domains;
166
167 /*
168 * Flush the current write domain if
169 * the new read domains don't match. Invalidate
170 * any read domains which differ from the old
171 * write domain
172 */
173 if (obj->base.write_domain &&
174 (((obj->base.write_domain != obj->base.pending_read_domains ||
175 obj->ring != ring)) ||
176 (obj->fenced_gpu_access && !obj->pending_fenced_gpu_access))) {
177 flush_domains |= obj->base.write_domain;
178 invalidate_domains |=
179 obj->base.pending_read_domains & ~obj->base.write_domain;
180 }
181 /*
182 * Invalidate any read caches which may have
183 * stale data. That is, any new read domains.
184 */
185 invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains;
186 if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
187 i915_gem_clflush_object(obj);
188
189 /* blow away mappings if mapped through GTT */
190 if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_GTT)
191 i915_gem_release_mmap(obj);
192
193 /* The actual obj->write_domain will be updated with
194 * pending_write_domain after we emit the accumulated flush for all
195 * of our domain changes in execbuffers (which clears objects'
196 * write_domains). So if we have a current write domain that we
197 * aren't changing, set pending_write_domain to that.
198 */
199 if (flush_domains == 0 && obj->base.pending_write_domain == 0)
200 obj->base.pending_write_domain = obj->base.write_domain;
201
202 cd->invalidate_domains |= invalidate_domains;
203 cd->flush_domains |= flush_domains;
204 if (flush_domains & I915_GEM_GPU_DOMAINS)
205 cd->flush_rings |= obj->ring->id;
206 if (invalidate_domains & I915_GEM_GPU_DOMAINS)
207 cd->flush_rings |= ring->id;
208}
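
A compilable sketch of the mask arithmetic in i915_gem_object_set_to_gpu_domain(), stripped of the ring and fence checks. The domain bits and the toy_obj structure are hypothetical stand-ins for the driver's types; the printed result reproduces Case 3, step 3 of the comment above (flush CPU, invalidate RENDER).

#include <stdint.h>
#include <stdio.h>

#define DOM_CPU    (1u << 0)
#define DOM_RENDER (1u << 1)

struct toy_obj {
	uint32_t read_domains, write_domain;
	uint32_t pending_read_domains, pending_write_domain;
};

static void compute_domains(struct toy_obj *obj,
			    uint32_t *flush, uint32_t *invalidate)
{
	/* No new write domain: keep accumulating read domains. */
	if (obj->pending_write_domain == 0)
		obj->pending_read_domains |= obj->read_domains;

	/* Flush a write domain the new readers do not match. */
	if (obj->write_domain &&
	    obj->write_domain != obj->pending_read_domains) {
		*flush |= obj->write_domain;
		*invalidate |= obj->pending_read_domains &
			       ~obj->write_domain;
	}

	/* Invalidate any brand-new read domains. */
	*invalidate |= obj->pending_read_domains & ~obj->read_domains;
}

int main(void)
{
	/* Case 3, step 3: a CPU-written buffer about to be read by RENDER. */
	struct toy_obj obj = { DOM_CPU, DOM_CPU, DOM_RENDER, 0 };
	uint32_t flush = 0, invalidate = 0;

	compute_domains(&obj, &flush, &invalidate);
	printf("flush=%#x invalidate=%#x\n",	/* flush=0x1 invalidate=0x2 */
	       (unsigned int)flush, (unsigned int)invalidate);
	return 0;
}
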
209
210struct eb_objects {
211 int and;
212 struct hlist_head buckets[0];
213};
214
215static struct eb_objects *
216eb_create(int size)
217{
218 struct eb_objects *eb;
219 int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
220 while (count > size)
221 count >>= 1;
222 eb = kzalloc(count*sizeof(struct hlist_head) +
223 sizeof(struct eb_objects),
224 GFP_KERNEL);
225 if (eb == NULL)
226 return eb;
227
228 eb->and = count - 1;
229 return eb;
230}
231
232static void
233eb_reset(struct eb_objects *eb)
234{
235 memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
236}
237
238static void
239eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj)
240{
241 hlist_add_head(&obj->exec_node,
242 &eb->buckets[obj->exec_handle & eb->and]);
243}
244
245static struct drm_i915_gem_object *
246eb_get_object(struct eb_objects *eb, unsigned long handle)
247{
248 struct hlist_head *head;
249 struct hlist_node *node;
250 struct drm_i915_gem_object *obj;
251
252 head = &eb->buckets[handle & eb->and];
253 hlist_for_each(node, head) {
254 obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
255 if (obj->exec_handle == handle)
256 return obj;
257 }
258
259 return NULL;
260}
261
262static void
263eb_destroy(struct eb_objects *eb)
264{
265 kfree(eb);
266}
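
The eb_objects helpers above implement a small power-of-two hash table keyed by execbuffer handle. Below is a self-contained user-space sketch of the same scheme; the toy_* names are illustrative, and plain singly linked buckets stand in for the kernel's hlist chains.

#include <stdio.h>
#include <stdlib.h>

#define TOY_PAGE_SIZE 4096

struct toy_node {
	unsigned long handle;
	struct toy_node *next;
};

struct toy_eb {
	int and;			/* bucket mask: count - 1 */
	struct toy_node *buckets[];
};

static struct toy_eb *toy_eb_create(int size)
{
	int count = TOY_PAGE_SIZE / sizeof(struct toy_node *) / 2;
	struct toy_eb *eb;

	while (count > size)		/* power of two no larger than size */
		count >>= 1;

	eb = calloc(1, sizeof(*eb) + count * sizeof(struct toy_node *));
	if (eb)
		eb->and = count - 1;
	return eb;
}

static void toy_eb_add(struct toy_eb *eb, struct toy_node *node)
{
	struct toy_node **head = &eb->buckets[node->handle & eb->and];

	node->next = *head;
	*head = node;
}

static struct toy_node *toy_eb_get(struct toy_eb *eb, unsigned long handle)
{
	struct toy_node *node = eb->buckets[handle & eb->and];

	while (node && node->handle != handle)
		node = node->next;
	return node;
}

int main(void)
{
	struct toy_eb *eb = toy_eb_create(100);	/* rounds down to 64 buckets */
	struct toy_node a = { 7, NULL }, b = { 71, NULL };

	if (!eb)
		return 1;
	toy_eb_add(eb, &a);
	toy_eb_add(eb, &b);			/* 71 & 63 == 7: same bucket */
	printf("found 71: %d\n", toy_eb_get(eb, 71) != NULL);
	free(eb);
	return 0;
}
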
267
268static int
269i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
270 struct eb_objects *eb,
271 struct drm_i915_gem_exec_object2 *entry,
272 struct drm_i915_gem_relocation_entry *reloc)
273{
274 struct drm_device *dev = obj->base.dev;
275 struct drm_gem_object *target_obj;
276 uint32_t target_offset;
277 int ret = -EINVAL;
278
 279 /* we already hold a reference to all valid objects */
280 target_obj = &eb_get_object(eb, reloc->target_handle)->base;
281 if (unlikely(target_obj == NULL))
282 return -ENOENT;
283
284 target_offset = to_intel_bo(target_obj)->gtt_offset;
285
286#if WATCH_RELOC
287 DRM_INFO("%s: obj %p offset %08x target %d "
288 "read %08x write %08x gtt %08x "
289 "presumed %08x delta %08x\n",
290 __func__,
291 obj,
292 (int) reloc->offset,
293 (int) reloc->target_handle,
294 (int) reloc->read_domains,
295 (int) reloc->write_domain,
296 (int) target_offset,
297 (int) reloc->presumed_offset,
298 reloc->delta);
299#endif
300
301 /* The target buffer should have appeared before us in the
302 * exec_object list, so it should have a GTT space bound by now.
303 */
304 if (unlikely(target_offset == 0)) {
305 DRM_ERROR("No GTT space found for object %d\n",
306 reloc->target_handle);
307 return ret;
308 }
309
310 /* Validate that the target is in a valid r/w GPU domain */
311 if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
312 DRM_ERROR("reloc with multiple write domains: "
313 "obj %p target %d offset %d "
314 "read %08x write %08x",
315 obj, reloc->target_handle,
316 (int) reloc->offset,
317 reloc->read_domains,
318 reloc->write_domain);
319 return ret;
320 }
321 if (unlikely((reloc->write_domain | reloc->read_domains) & I915_GEM_DOMAIN_CPU)) {
322 DRM_ERROR("reloc with read/write CPU domains: "
323 "obj %p target %d offset %d "
324 "read %08x write %08x",
325 obj, reloc->target_handle,
326 (int) reloc->offset,
327 reloc->read_domains,
328 reloc->write_domain);
329 return ret;
330 }
331 if (unlikely(reloc->write_domain && target_obj->pending_write_domain &&
332 reloc->write_domain != target_obj->pending_write_domain)) {
333 DRM_ERROR("Write domain conflict: "
334 "obj %p target %d offset %d "
335 "new %08x old %08x\n",
336 obj, reloc->target_handle,
337 (int) reloc->offset,
338 reloc->write_domain,
339 target_obj->pending_write_domain);
340 return ret;
341 }
342
343 target_obj->pending_read_domains |= reloc->read_domains;
344 target_obj->pending_write_domain |= reloc->write_domain;
345
346 /* If the relocation already has the right value in it, no
347 * more work needs to be done.
348 */
349 if (target_offset == reloc->presumed_offset)
350 return 0;
351
352 /* Check that the relocation address is valid... */
353 if (unlikely(reloc->offset > obj->base.size - 4)) {
354 DRM_ERROR("Relocation beyond object bounds: "
355 "obj %p target %d offset %d size %d.\n",
356 obj, reloc->target_handle,
357 (int) reloc->offset,
358 (int) obj->base.size);
359 return ret;
360 }
361 if (unlikely(reloc->offset & 3)) {
362 DRM_ERROR("Relocation not 4-byte aligned: "
363 "obj %p target %d offset %d.\n",
364 obj, reloc->target_handle,
365 (int) reloc->offset);
366 return ret;
367 }
368
369 /* and points to somewhere within the target object. */
370 if (unlikely(reloc->delta >= target_obj->size)) {
371 DRM_ERROR("Relocation beyond target object bounds: "
372 "obj %p target %d delta %d size %d.\n",
373 obj, reloc->target_handle,
374 (int) reloc->delta,
375 (int) target_obj->size);
376 return ret;
377 }
378
379 reloc->delta += target_offset;
380 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
381 uint32_t page_offset = reloc->offset & ~PAGE_MASK;
382 char *vaddr;
383
384 vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
385 *(uint32_t *)(vaddr + page_offset) = reloc->delta;
386 kunmap_atomic(vaddr);
387 } else {
388 struct drm_i915_private *dev_priv = dev->dev_private;
389 uint32_t __iomem *reloc_entry;
390 void __iomem *reloc_page;
391
392 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
393 if (ret)
394 return ret;
395
396 /* Map the page containing the relocation we're going to perform. */
397 reloc->offset += obj->gtt_offset;
398 reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
399 reloc->offset & PAGE_MASK);
400 reloc_entry = (uint32_t __iomem *)
401 (reloc_page + (reloc->offset & ~PAGE_MASK));
402 iowrite32(reloc->delta, reloc_entry);
403 io_mapping_unmap_atomic(reloc_page);
404 }
405
406 /* and update the user's relocation entry */
407 reloc->presumed_offset = target_offset;
408
409 return 0;
410}
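
The CPU path of i915_gem_execbuffer_relocate_entry() splits the relocation offset into a page index and an intra-page offset before storing the 32-bit value. A user-space sketch of that arithmetic, with byte arrays standing in for kmap_atomic()'d pages and all values hypothetical:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TOY_PAGE_SHIFT 12
#define TOY_PAGE_SIZE  (1u << TOY_PAGE_SHIFT)

static void patch_reloc(uint8_t **pages, uint64_t offset, uint32_t value)
{
	uint8_t *vaddr = pages[offset >> TOY_PAGE_SHIFT];

	/* offset & (PAGE_SIZE - 1) is the offset within the page */
	memcpy(vaddr + (offset & (TOY_PAGE_SIZE - 1)), &value, sizeof(value));
}

int main(void)
{
	uint8_t page0[TOY_PAGE_SIZE] = { 0 }, page1[TOY_PAGE_SIZE] = { 0 };
	uint8_t *pages[] = { page0, page1 };
	uint32_t readback;

	/* Patch target_offset + delta at byte 0x1008 (page 1, offset 8). */
	patch_reloc(pages, 0x1008, 0xdead0000 + 0x20);
	memcpy(&readback, page1 + 8, sizeof(readback));
	printf("%#x\n", (unsigned int)readback);	/* 0xdead0020 */
	return 0;
}
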
411
412static int
413i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
414 struct eb_objects *eb,
415 struct drm_i915_gem_exec_object2 *entry)
416{
417 struct drm_i915_gem_relocation_entry __user *user_relocs;
418 int i, ret;
419
420 user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
421 for (i = 0; i < entry->relocation_count; i++) {
422 struct drm_i915_gem_relocation_entry reloc;
423
424 if (__copy_from_user_inatomic(&reloc,
425 user_relocs+i,
426 sizeof(reloc)))
427 return -EFAULT;
428
429 ret = i915_gem_execbuffer_relocate_entry(obj, eb, entry, &reloc);
430 if (ret)
431 return ret;
432
433 if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
434 &reloc.presumed_offset,
435 sizeof(reloc.presumed_offset)))
436 return -EFAULT;
437 }
438
439 return 0;
440}
441
442static int
443i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
444 struct eb_objects *eb,
445 struct drm_i915_gem_exec_object2 *entry,
446 struct drm_i915_gem_relocation_entry *relocs)
447{
448 int i, ret;
449
450 for (i = 0; i < entry->relocation_count; i++) {
451 ret = i915_gem_execbuffer_relocate_entry(obj, eb, entry, &relocs[i]);
452 if (ret)
453 return ret;
454 }
455
456 return 0;
457}
458
459static int
460i915_gem_execbuffer_relocate(struct drm_device *dev,
461 struct eb_objects *eb,
462 struct list_head *objects,
463 struct drm_i915_gem_exec_object2 *exec)
464{
465 struct drm_i915_gem_object *obj;
466 int ret;
467
468 list_for_each_entry(obj, objects, exec_list) {
469 obj->base.pending_read_domains = 0;
470 obj->base.pending_write_domain = 0;
471 ret = i915_gem_execbuffer_relocate_object(obj, eb, exec++);
472 if (ret)
473 return ret;
474 }
475
476 return 0;
477}
478
479static int
480i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
481 struct drm_file *file,
482 struct list_head *objects,
483 struct drm_i915_gem_exec_object2 *exec)
484{
485 struct drm_i915_gem_object *obj;
486 struct drm_i915_gem_exec_object2 *entry;
487 int ret, retry;
488 bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
489
490 /* Attempt to pin all of the buffers into the GTT.
491 * This is done in 3 phases:
492 *
493 * 1a. Unbind all objects that do not match the GTT constraints for
494 * the execbuffer (fenceable, mappable, alignment etc).
495 * 1b. Increment pin count for already bound objects.
496 * 2. Bind new objects.
497 * 3. Decrement pin count.
498 *
 499 * This avoids unnecessary unbinding of later objects in order to make
500 * room for the earlier objects *unless* we need to defragment.
501 */
502 retry = 0;
503 do {
504 ret = 0;
505
506 /* Unbind any ill-fitting objects or pin. */
507 entry = exec;
508 list_for_each_entry(obj, objects, exec_list) {
509 bool need_fence, need_mappable;
510
511 if (!obj->gtt_space) {
512 entry++;
513 continue;
514 }
515
516 need_fence =
517 has_fenced_gpu_access &&
518 entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
519 obj->tiling_mode != I915_TILING_NONE;
520 need_mappable =
521 entry->relocation_count ? true : need_fence;
522
523 if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
524 (need_mappable && !obj->map_and_fenceable))
525 ret = i915_gem_object_unbind(obj);
526 else
527 ret = i915_gem_object_pin(obj,
528 entry->alignment,
529 need_mappable);
530 if (ret)
531 goto err;
532
533 entry++;
534 }
535
536 /* Bind fresh objects */
537 entry = exec;
538 list_for_each_entry(obj, objects, exec_list) {
539 bool need_fence;
540
541 need_fence =
542 has_fenced_gpu_access &&
543 entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
544 obj->tiling_mode != I915_TILING_NONE;
545
546 if (!obj->gtt_space) {
547 bool need_mappable =
548 entry->relocation_count ? true : need_fence;
549
550 ret = i915_gem_object_pin(obj,
551 entry->alignment,
552 need_mappable);
553 if (ret)
554 break;
555 }
556
557 if (has_fenced_gpu_access) {
558 if (need_fence) {
559 ret = i915_gem_object_get_fence(obj, ring, 1);
560 if (ret)
561 break;
562 } else if (entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
563 obj->tiling_mode == I915_TILING_NONE) {
564 /* XXX pipelined! */
565 ret = i915_gem_object_put_fence(obj);
566 if (ret)
567 break;
568 }
569 obj->pending_fenced_gpu_access = need_fence;
570 }
571
572 entry->offset = obj->gtt_offset;
573 entry++;
574 }
575
576 /* Decrement pin count for bound objects */
577 list_for_each_entry(obj, objects, exec_list) {
578 if (obj->gtt_space)
579 i915_gem_object_unpin(obj);
580 }
581
582 if (ret != -ENOSPC || retry > 1)
583 return ret;
584
585 /* First attempt, just clear anything that is purgeable.
586 * Second attempt, clear the entire GTT.
587 */
588 ret = i915_gem_evict_everything(ring->dev, retry == 0);
589 if (ret)
590 return ret;
591
592 retry++;
593 } while (1);
594
595err:
596 obj = list_entry(obj->exec_list.prev,
597 struct drm_i915_gem_object,
598 exec_list);
599 while (objects != &obj->exec_list) {
600 if (obj->gtt_space)
601 i915_gem_object_unpin(obj);
602
603 obj = list_entry(obj->exec_list.prev,
604 struct drm_i915_gem_object,
605 exec_list);
606 }
607
608 return ret;
609}
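
i915_gem_execbuffer_reserve() retries on -ENOSPC at most twice: first evicting only purgeable objects, then clearing the whole GTT. A stand-alone sketch of that ladder; try_pin_all() and evict() are hypothetical stubs for the pin loop and i915_gem_evict_everything().

#include <errno.h>
#include <stdio.h>

static int attempts;

/* Hypothetical stub: fail with -ENOSPC twice, then succeed. */
static int try_pin_all(void)
{
	return attempts++ < 2 ? -ENOSPC : 0;
}

/* Hypothetical stub for i915_gem_evict_everything(). */
static int evict(int purgeable_only)
{
	printf("evicting %s\n", purgeable_only ? "purgeable" : "everything");
	return 0;
}

static int reserve_with_retry(void)
{
	int retry = 0;

	for (;;) {
		int ret = try_pin_all();

		if (ret != -ENOSPC || retry > 1)
			return ret;

		/* Retry 0 drops purgeable objects; retry 1 drops it all. */
		ret = evict(retry == 0);
		if (ret)
			return ret;
		retry++;
	}
}

int main(void)
{
	return reserve_with_retry();
}
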
610
611static int
612i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
613 struct drm_file *file,
614 struct intel_ring_buffer *ring,
615 struct list_head *objects,
616 struct eb_objects *eb,
617 struct drm_i915_gem_exec_object2 *exec,
618 int count)
619{
620 struct drm_i915_gem_relocation_entry *reloc;
621 struct drm_i915_gem_object *obj;
622 int i, total, ret;
623
624 /* We may process another execbuffer during the unlock... */
 625 while (!list_empty(objects)) {
626 obj = list_first_entry(objects,
627 struct drm_i915_gem_object,
628 exec_list);
629 list_del_init(&obj->exec_list);
630 drm_gem_object_unreference(&obj->base);
631 }
632
633 mutex_unlock(&dev->struct_mutex);
634
635 total = 0;
636 for (i = 0; i < count; i++)
637 total += exec[i].relocation_count;
638
639 reloc = drm_malloc_ab(total, sizeof(*reloc));
640 if (reloc == NULL) {
641 mutex_lock(&dev->struct_mutex);
642 return -ENOMEM;
643 }
644
645 total = 0;
646 for (i = 0; i < count; i++) {
647 struct drm_i915_gem_relocation_entry __user *user_relocs;
648
649 user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;
650
651 if (copy_from_user(reloc+total, user_relocs,
652 exec[i].relocation_count * sizeof(*reloc))) {
653 ret = -EFAULT;
654 mutex_lock(&dev->struct_mutex);
655 goto err;
656 }
657
658 total += exec[i].relocation_count;
659 }
660
661 ret = i915_mutex_lock_interruptible(dev);
662 if (ret) {
663 mutex_lock(&dev->struct_mutex);
664 goto err;
665 }
666
667 /* reacquire the objects */
668 INIT_LIST_HEAD(objects);
669 eb_reset(eb);
670 for (i = 0; i < count; i++) {
671 struct drm_i915_gem_object *obj;
672
673 obj = to_intel_bo(drm_gem_object_lookup(dev, file,
674 exec[i].handle));
675 if (obj == NULL) {
676 DRM_ERROR("Invalid object handle %d at index %d\n",
677 exec[i].handle, i);
678 ret = -ENOENT;
679 goto err;
680 }
681
682 list_add_tail(&obj->exec_list, objects);
683 obj->exec_handle = exec[i].handle;
684 eb_add_object(eb, obj);
685 }
686
687 ret = i915_gem_execbuffer_reserve(ring, file, objects, exec);
688 if (ret)
689 goto err;
690
691 total = 0;
692 list_for_each_entry(obj, objects, exec_list) {
693 obj->base.pending_read_domains = 0;
694 obj->base.pending_write_domain = 0;
695 ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
696 exec,
697 reloc + total);
698 if (ret)
699 goto err;
700
701 total += exec->relocation_count;
702 exec++;
703 }
704
 705 /* Leave the user relocations as they are; this is the painfully slow path,
706 * and we want to avoid the complication of dropping the lock whilst
707 * having buffers reserved in the aperture and so causing spurious
708 * ENOSPC for random operations.
709 */
710
711err:
712 drm_free_large(reloc);
713 return ret;
714}
715
716static void
717i915_gem_execbuffer_flush(struct drm_device *dev,
718 uint32_t invalidate_domains,
719 uint32_t flush_domains,
720 uint32_t flush_rings)
721{
722 drm_i915_private_t *dev_priv = dev->dev_private;
723 int i;
724
725 if (flush_domains & I915_GEM_DOMAIN_CPU)
726 intel_gtt_chipset_flush();
727
728 if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
729 for (i = 0; i < I915_NUM_RINGS; i++)
730 if (flush_rings & (1 << i))
731 i915_gem_flush_ring(dev, &dev_priv->ring[i],
732 invalidate_domains,
733 flush_domains);
734 }
735}
736
737static int
738i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
739 struct intel_ring_buffer *to)
740{
741 struct intel_ring_buffer *from = obj->ring;
742 u32 seqno;
743 int ret, idx;
744
745 if (from == NULL || to == from)
746 return 0;
747
748 if (INTEL_INFO(obj->base.dev)->gen < 6)
749 return i915_gem_object_wait_rendering(obj, true);
750
751 idx = intel_ring_sync_index(from, to);
752
753 seqno = obj->last_rendering_seqno;
754 if (seqno <= from->sync_seqno[idx])
755 return 0;
756
757 if (seqno == from->outstanding_lazy_request) {
758 struct drm_i915_gem_request *request;
759
760 request = kzalloc(sizeof(*request), GFP_KERNEL);
761 if (request == NULL)
762 return -ENOMEM;
763
764 ret = i915_add_request(obj->base.dev, NULL, request, from);
765 if (ret) {
766 kfree(request);
767 return ret;
768 }
769
770 seqno = request->seqno;
771 }
772
773 from->sync_seqno[idx] = seqno;
774 return intel_ring_sync(to, from, seqno - 1);
775}
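
Reduced to its decision, the ring synchronisation above only emits a semaphore wait when the object last ran on a different ring and its seqno is newer than what the destination has already synchronised to. A sketch of that predicate (ignoring seqno wrap, which the execbuffer path handles separately by idling the GPU):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool needs_semaphore_wait(bool same_ring,
				 uint32_t obj_seqno,
				 uint32_t already_synced)	/* sync_seqno[idx] */
{
	if (same_ring)
		return false;	/* a single ring executes in order */
	return obj_seqno > already_synced;
}

int main(void)
{
	printf("%d %d\n",
	       needs_semaphore_wait(false, 100, 90),	/* 1: wait needed */
	       needs_semaphore_wait(false, 100, 100));	/* 0: already synced */
	return 0;
}
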
776
777static int
778i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
779 struct list_head *objects)
780{
781 struct drm_i915_gem_object *obj;
782 struct change_domains cd;
783 int ret;
784
785 cd.invalidate_domains = 0;
786 cd.flush_domains = 0;
787 cd.flush_rings = 0;
788 list_for_each_entry(obj, objects, exec_list)
789 i915_gem_object_set_to_gpu_domain(obj, ring, &cd);
790
791 if (cd.invalidate_domains | cd.flush_domains) {
792#if WATCH_EXEC
793 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
794 __func__,
795 cd.invalidate_domains,
796 cd.flush_domains);
797#endif
798 i915_gem_execbuffer_flush(ring->dev,
799 cd.invalidate_domains,
800 cd.flush_domains,
801 cd.flush_rings);
802 }
803
804 list_for_each_entry(obj, objects, exec_list) {
805 ret = i915_gem_execbuffer_sync_rings(obj, ring);
806 if (ret)
807 return ret;
808 }
809
810 return 0;
811}
812
813static bool
814i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
815{
816 return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
817}
818
819static int
820validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
821 int count)
822{
823 int i;
824
825 for (i = 0; i < count; i++) {
826 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
827 int length; /* limited by fault_in_pages_readable() */
828
829 /* First check for malicious input causing overflow */
830 if (exec[i].relocation_count >
831 INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
832 return -EINVAL;
833
834 length = exec[i].relocation_count *
835 sizeof(struct drm_i915_gem_relocation_entry);
836 if (!access_ok(VERIFY_READ, ptr, length))
837 return -EFAULT;
838
839 /* we may also need to update the presumed offsets */
840 if (!access_ok(VERIFY_WRITE, ptr, length))
841 return -EFAULT;
842
843 if (fault_in_pages_readable(ptr, length))
844 return -EFAULT;
845 }
846
847 return 0;
848}
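
validate_exec_list() guards the relocation size computation against integer overflow before calling access_ok(). The check in isolation, with a 32-byte entry size chosen to match the layout of struct drm_i915_gem_relocation_entry (treat the constant as illustrative):

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define ENTRY_SIZE 32u	/* assumed sizeof(struct drm_i915_gem_relocation_entry) */

static bool reloc_count_ok(unsigned int count)
{
	/* Reject counts whose byte length would not fit in an int. */
	return count <= INT_MAX / ENTRY_SIZE;
}

int main(void)
{
	printf("%d %d\n", reloc_count_ok(1024),		/* 1 */
	       reloc_count_ok(1u << 28));		/* 0 */
	return 0;
}
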
849
850static int
851i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring,
852 struct list_head *objects)
853{
854 struct drm_i915_gem_object *obj;
855 int flips;
856
857 /* Check for any pending flips. As we only maintain a flip queue depth
858 * of 1, we can simply insert a WAIT for the next display flip prior
859 * to executing the batch and avoid stalling the CPU.
860 */
861 flips = 0;
862 list_for_each_entry(obj, objects, exec_list) {
863 if (obj->base.write_domain)
864 flips |= atomic_read(&obj->pending_flip);
865 }
866 if (flips) {
867 int plane, flip_mask, ret;
868
869 for (plane = 0; flips >> plane; plane++) {
870 if (((flips >> plane) & 1) == 0)
871 continue;
872
873 if (plane)
874 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
875 else
876 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
877
878 ret = intel_ring_begin(ring, 2);
879 if (ret)
880 return ret;
881
882 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
883 intel_ring_emit(ring, MI_NOOP);
884 intel_ring_advance(ring);
885 }
886 }
887
888 return 0;
889}
890
891static void
892i915_gem_execbuffer_move_to_active(struct list_head *objects,
893 struct intel_ring_buffer *ring,
894 u32 seqno)
895{
896 struct drm_i915_gem_object *obj;
897
898 list_for_each_entry(obj, objects, exec_list) {
899 obj->base.read_domains = obj->base.pending_read_domains;
900 obj->base.write_domain = obj->base.pending_write_domain;
901 obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
902
903 i915_gem_object_move_to_active(obj, ring, seqno);
904 if (obj->base.write_domain) {
905 obj->dirty = 1;
906 obj->pending_gpu_write = true;
907 list_move_tail(&obj->gpu_write_list,
908 &ring->gpu_write_list);
909 intel_mark_busy(ring->dev, obj);
910 }
911
912 trace_i915_gem_object_change_domain(obj,
913 obj->base.read_domains,
914 obj->base.write_domain);
915 }
916}
917
918static void
919i915_gem_execbuffer_retire_commands(struct drm_device *dev,
920 struct drm_file *file,
921 struct intel_ring_buffer *ring)
922{
923 struct drm_i915_gem_request *request;
924 u32 flush_domains;
925
926 /*
927 * Ensure that the commands in the batch buffer are
928 * finished before the interrupt fires.
929 *
930 * The sampler always gets flushed on i965 (sigh).
931 */
932 flush_domains = 0;
933 if (INTEL_INFO(dev)->gen >= 4)
934 flush_domains |= I915_GEM_DOMAIN_SAMPLER;
935
936 ring->flush(ring, I915_GEM_DOMAIN_COMMAND, flush_domains);
937
938 /* Add a breadcrumb for the completion of the batch buffer */
939 request = kzalloc(sizeof(*request), GFP_KERNEL);
940 if (request == NULL || i915_add_request(dev, file, request, ring)) {
941 i915_gem_next_request_seqno(dev, ring);
942 kfree(request);
943 }
944}
945
946static int
947i915_gem_do_execbuffer(struct drm_device *dev, void *data,
948 struct drm_file *file,
949 struct drm_i915_gem_execbuffer2 *args,
950 struct drm_i915_gem_exec_object2 *exec)
951{
952 drm_i915_private_t *dev_priv = dev->dev_private;
953 struct list_head objects;
954 struct eb_objects *eb;
955 struct drm_i915_gem_object *batch_obj;
956 struct drm_clip_rect *cliprects = NULL;
957 struct intel_ring_buffer *ring;
958 u32 exec_start, exec_len;
959 u32 seqno;
960 int ret, mode, i;
961
962 if (!i915_gem_check_execbuffer(args)) {
963 DRM_ERROR("execbuf with invalid offset/length\n");
964 return -EINVAL;
965 }
966
967 ret = validate_exec_list(exec, args->buffer_count);
968 if (ret)
969 return ret;
970
971#if WATCH_EXEC
972 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
973 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
974#endif
975 switch (args->flags & I915_EXEC_RING_MASK) {
976 case I915_EXEC_DEFAULT:
977 case I915_EXEC_RENDER:
978 ring = &dev_priv->ring[RCS];
979 break;
980 case I915_EXEC_BSD:
981 if (!HAS_BSD(dev)) {
982 DRM_ERROR("execbuf with invalid ring (BSD)\n");
983 return -EINVAL;
984 }
985 ring = &dev_priv->ring[VCS];
986 break;
987 case I915_EXEC_BLT:
988 if (!HAS_BLT(dev)) {
989 DRM_ERROR("execbuf with invalid ring (BLT)\n");
990 return -EINVAL;
991 }
992 ring = &dev_priv->ring[BCS];
993 break;
994 default:
995 DRM_ERROR("execbuf with unknown ring: %d\n",
996 (int)(args->flags & I915_EXEC_RING_MASK));
997 return -EINVAL;
998 }
999
1000 mode = args->flags & I915_EXEC_CONSTANTS_MASK;
1001 switch (mode) {
1002 case I915_EXEC_CONSTANTS_REL_GENERAL:
1003 case I915_EXEC_CONSTANTS_ABSOLUTE:
1004 case I915_EXEC_CONSTANTS_REL_SURFACE:
1005 if (ring == &dev_priv->ring[RCS] &&
1006 mode != dev_priv->relative_constants_mode) {
1007 if (INTEL_INFO(dev)->gen < 4)
1008 return -EINVAL;
1009
1010 if (INTEL_INFO(dev)->gen > 5 &&
1011 mode == I915_EXEC_CONSTANTS_REL_SURFACE)
1012 return -EINVAL;
1013
1014 ret = intel_ring_begin(ring, 4);
1015 if (ret)
1016 return ret;
1017
1018 intel_ring_emit(ring, MI_NOOP);
1019 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
1020 intel_ring_emit(ring, INSTPM);
1021 intel_ring_emit(ring,
1022 I915_EXEC_CONSTANTS_MASK << 16 | mode);
1023 intel_ring_advance(ring);
1024
1025 dev_priv->relative_constants_mode = mode;
1026 }
1027 break;
1028 default:
1029 DRM_ERROR("execbuf with unknown constants: %d\n", mode);
1030 return -EINVAL;
1031 }
1032
1033 if (args->buffer_count < 1) {
1034 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
1035 return -EINVAL;
1036 }
1037
1038 if (args->num_cliprects != 0) {
1039 if (ring != &dev_priv->ring[RCS]) {
1040 DRM_ERROR("clip rectangles are only valid with the render ring\n");
1041 return -EINVAL;
1042 }
1043
1044 cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
1045 GFP_KERNEL);
1046 if (cliprects == NULL) {
1047 ret = -ENOMEM;
1048 goto pre_mutex_err;
1049 }
1050
1051 if (copy_from_user(cliprects,
1052 (struct drm_clip_rect __user *)(uintptr_t)
1053 args->cliprects_ptr,
1054 sizeof(*cliprects)*args->num_cliprects)) {
1055 ret = -EFAULT;
1056 goto pre_mutex_err;
1057 }
1058 }
1059
1060 ret = i915_mutex_lock_interruptible(dev);
1061 if (ret)
1062 goto pre_mutex_err;
1063
1064 if (dev_priv->mm.suspended) {
1065 mutex_unlock(&dev->struct_mutex);
1066 ret = -EBUSY;
1067 goto pre_mutex_err;
1068 }
1069
1070 eb = eb_create(args->buffer_count);
1071 if (eb == NULL) {
1072 mutex_unlock(&dev->struct_mutex);
1073 ret = -ENOMEM;
1074 goto pre_mutex_err;
1075 }
1076
1077 /* Look up object handles */
1078 INIT_LIST_HEAD(&objects);
1079 for (i = 0; i < args->buffer_count; i++) {
1080 struct drm_i915_gem_object *obj;
1081
1082 obj = to_intel_bo(drm_gem_object_lookup(dev, file,
1083 exec[i].handle));
1084 if (obj == NULL) {
1085 DRM_ERROR("Invalid object handle %d at index %d\n",
1086 exec[i].handle, i);
1087 /* prevent error path from reading uninitialized data */
1088 ret = -ENOENT;
1089 goto err;
1090 }
1091
1092 if (!list_empty(&obj->exec_list)) {
1093 DRM_ERROR("Object %p [handle %d, index %d] appears more than once in object list\n",
1094 obj, exec[i].handle, i);
1095 ret = -EINVAL;
1096 goto err;
1097 }
1098
1099 list_add_tail(&obj->exec_list, &objects);
1100 obj->exec_handle = exec[i].handle;
1101 eb_add_object(eb, obj);
1102 }
1103
1104 /* Move the objects en-masse into the GTT, evicting if necessary. */
1105 ret = i915_gem_execbuffer_reserve(ring, file, &objects, exec);
1106 if (ret)
1107 goto err;
1108
1109 /* The objects are in their final locations, apply the relocations. */
1110 ret = i915_gem_execbuffer_relocate(dev, eb, &objects, exec);
1111 if (ret) {
1112 if (ret == -EFAULT) {
1113 ret = i915_gem_execbuffer_relocate_slow(dev, file, ring,
1114 &objects, eb,
1115 exec,
1116 args->buffer_count);
1117 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1118 }
1119 if (ret)
1120 goto err;
1121 }
1122
1123 /* Set the pending read domains for the batch buffer to COMMAND */
1124 batch_obj = list_entry(objects.prev,
1125 struct drm_i915_gem_object,
1126 exec_list);
1127 if (batch_obj->base.pending_write_domain) {
1128 DRM_ERROR("Attempting to use self-modifying batch buffer\n");
1129 ret = -EINVAL;
1130 goto err;
1131 }
1132 batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
1133
1134 ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
1135 if (ret)
1136 goto err;
1137
1138 ret = i915_gem_execbuffer_wait_for_flips(ring, &objects);
1139 if (ret)
1140 goto err;
1141
1142 seqno = i915_gem_next_request_seqno(dev, ring);
1143 for (i = 0; i < I915_NUM_RINGS-1; i++) {
1144 if (seqno < ring->sync_seqno[i]) {
 1145 /* The GPU cannot handle its semaphore value wrapping,
1146 * so every billion or so execbuffers, we need to stall
1147 * the GPU in order to reset the counters.
1148 */
1149 ret = i915_gpu_idle(dev);
1150 if (ret)
1151 goto err;
1152
1153 BUG_ON(ring->sync_seqno[i]);
1154 }
1155 }
1156
1157 exec_start = batch_obj->gtt_offset + args->batch_start_offset;
1158 exec_len = args->batch_len;
1159 if (cliprects) {
1160 for (i = 0; i < args->num_cliprects; i++) {
1161 ret = i915_emit_box(dev, &cliprects[i],
1162 args->DR1, args->DR4);
1163 if (ret)
1164 goto err;
1165
1166 ret = ring->dispatch_execbuffer(ring,
1167 exec_start, exec_len);
1168 if (ret)
1169 goto err;
1170 }
1171 } else {
1172 ret = ring->dispatch_execbuffer(ring, exec_start, exec_len);
1173 if (ret)
1174 goto err;
1175 }
1176
1177 i915_gem_execbuffer_move_to_active(&objects, ring, seqno);
1178 i915_gem_execbuffer_retire_commands(dev, file, ring);
1179
1180err:
1181 eb_destroy(eb);
1182 while (!list_empty(&objects)) {
1183 struct drm_i915_gem_object *obj;
1184
1185 obj = list_first_entry(&objects,
1186 struct drm_i915_gem_object,
1187 exec_list);
1188 list_del_init(&obj->exec_list);
1189 drm_gem_object_unreference(&obj->base);
1190 }
1191
1192 mutex_unlock(&dev->struct_mutex);
1193
1194pre_mutex_err:
1195 kfree(cliprects);
1196 return ret;
1197}
1198
1199/*
1200 * Legacy execbuffer just creates an exec2 list from the original exec object
1201 * list array and passes it to the real function.
1202 */
1203int
1204i915_gem_execbuffer(struct drm_device *dev, void *data,
1205 struct drm_file *file)
1206{
1207 struct drm_i915_gem_execbuffer *args = data;
1208 struct drm_i915_gem_execbuffer2 exec2;
1209 struct drm_i915_gem_exec_object *exec_list = NULL;
1210 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1211 int ret, i;
1212
1213#if WATCH_EXEC
1214 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
1215 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
1216#endif
1217
1218 if (args->buffer_count < 1) {
1219 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
1220 return -EINVAL;
1221 }
1222
1223 /* Copy in the exec list from userland */
1224 exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
1225 exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
1226 if (exec_list == NULL || exec2_list == NULL) {
1227 DRM_ERROR("Failed to allocate exec list for %d buffers\n",
1228 args->buffer_count);
1229 drm_free_large(exec_list);
1230 drm_free_large(exec2_list);
1231 return -ENOMEM;
1232 }
1233 ret = copy_from_user(exec_list,
1234 (struct drm_i915_relocation_entry __user *)
1235 (uintptr_t) args->buffers_ptr,
1236 sizeof(*exec_list) * args->buffer_count);
1237 if (ret != 0) {
1238 DRM_ERROR("copy %d exec entries failed %d\n",
1239 args->buffer_count, ret);
1240 drm_free_large(exec_list);
1241 drm_free_large(exec2_list);
1242 return -EFAULT;
1243 }
1244
1245 for (i = 0; i < args->buffer_count; i++) {
1246 exec2_list[i].handle = exec_list[i].handle;
1247 exec2_list[i].relocation_count = exec_list[i].relocation_count;
1248 exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
1249 exec2_list[i].alignment = exec_list[i].alignment;
1250 exec2_list[i].offset = exec_list[i].offset;
1251 if (INTEL_INFO(dev)->gen < 4)
1252 exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
1253 else
1254 exec2_list[i].flags = 0;
1255 }
1256
1257 exec2.buffers_ptr = args->buffers_ptr;
1258 exec2.buffer_count = args->buffer_count;
1259 exec2.batch_start_offset = args->batch_start_offset;
1260 exec2.batch_len = args->batch_len;
1261 exec2.DR1 = args->DR1;
1262 exec2.DR4 = args->DR4;
1263 exec2.num_cliprects = args->num_cliprects;
1264 exec2.cliprects_ptr = args->cliprects_ptr;
1265 exec2.flags = I915_EXEC_RENDER;
1266
1267 ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
1268 if (!ret) {
1269 /* Copy the new buffer offsets back to the user's exec list. */
1270 for (i = 0; i < args->buffer_count; i++)
1271 exec_list[i].offset = exec2_list[i].offset;
1272 /* ... and back out to userspace */
1273 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
1274 (uintptr_t) args->buffers_ptr,
1275 exec_list,
1276 sizeof(*exec_list) * args->buffer_count);
1277 if (ret) {
1278 ret = -EFAULT;
1279 DRM_ERROR("failed to copy %d exec entries "
1280 "back to user (%d)\n",
1281 args->buffer_count, ret);
1282 }
1283 }
1284
1285 drm_free_large(exec_list);
1286 drm_free_large(exec2_list);
1287 return ret;
1288}
1289
1290int
1291i915_gem_execbuffer2(struct drm_device *dev, void *data,
1292 struct drm_file *file)
1293{
1294 struct drm_i915_gem_execbuffer2 *args = data;
1295 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1296 int ret;
1297
1298#if WATCH_EXEC
1299 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
1300 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
1301#endif
1302
1303 if (args->buffer_count < 1) {
1304 DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
1305 return -EINVAL;
1306 }
1307
1308 exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
1309 if (exec2_list == NULL) {
1310 DRM_ERROR("Failed to allocate exec list for %d buffers\n",
1311 args->buffer_count);
1312 return -ENOMEM;
1313 }
1314 ret = copy_from_user(exec2_list,
1315 (struct drm_i915_relocation_entry __user *)
1316 (uintptr_t) args->buffers_ptr,
1317 sizeof(*exec2_list) * args->buffer_count);
1318 if (ret != 0) {
1319 DRM_ERROR("copy %d exec entries failed %d\n",
1320 args->buffer_count, ret);
1321 drm_free_large(exec2_list);
1322 return -EFAULT;
1323 }
1324
1325 ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
1326 if (!ret) {
1327 /* Copy the new buffer offsets back to the user's exec list. */
1328 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
1329 (uintptr_t) args->buffers_ptr,
1330 exec2_list,
1331 sizeof(*exec2_list) * args->buffer_count);
1332 if (ret) {
1333 ret = -EFAULT;
1334 DRM_ERROR("failed to copy %d exec entries "
1335 "back to user (%d)\n",
1336 args->buffer_count, ret);
1337 }
1338 }
1339
1340 drm_free_large(exec2_list);
1341 return ret;
1342}
1343
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
new file mode 100644
index 000000000000..86673e77d7cb
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -0,0 +1,99 @@
1/*
2 * Copyright © 2010 Daniel Vetter
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 */
24
25#include "drmP.h"
26#include "drm.h"
27#include "i915_drm.h"
28#include "i915_drv.h"
29#include "i915_trace.h"
30#include "intel_drv.h"
31
32void i915_gem_restore_gtt_mappings(struct drm_device *dev)
33{
34 struct drm_i915_private *dev_priv = dev->dev_private;
35 struct drm_i915_gem_object *obj;
36
37 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
38 i915_gem_clflush_object(obj);
39
40 if (dev_priv->mm.gtt->needs_dmar) {
41 BUG_ON(!obj->sg_list);
42
43 intel_gtt_insert_sg_entries(obj->sg_list,
44 obj->num_sg,
45 obj->gtt_space->start
46 >> PAGE_SHIFT,
47 obj->agp_type);
48 } else
49 intel_gtt_insert_pages(obj->gtt_space->start
50 >> PAGE_SHIFT,
51 obj->base.size >> PAGE_SHIFT,
52 obj->pages,
53 obj->agp_type);
54 }
55
56 intel_gtt_chipset_flush();
57}
58
59int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
60{
61 struct drm_device *dev = obj->base.dev;
62 struct drm_i915_private *dev_priv = dev->dev_private;
63 int ret;
64
65 if (dev_priv->mm.gtt->needs_dmar) {
66 ret = intel_gtt_map_memory(obj->pages,
67 obj->base.size >> PAGE_SHIFT,
68 &obj->sg_list,
69 &obj->num_sg);
70 if (ret != 0)
71 return ret;
72
73 intel_gtt_insert_sg_entries(obj->sg_list,
74 obj->num_sg,
75 obj->gtt_space->start >> PAGE_SHIFT,
76 obj->agp_type);
77 } else
78 intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
79 obj->base.size >> PAGE_SHIFT,
80 obj->pages,
81 obj->agp_type);
82
83 return 0;
84}
85
86void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
87{
88 struct drm_device *dev = obj->base.dev;
89 struct drm_i915_private *dev_priv = dev->dev_private;
90
91 if (dev_priv->mm.gtt->needs_dmar) {
92 intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
93 obj->sg_list = NULL;
94 obj->num_sg = 0;
95 }
96
97 intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
98 obj->base.size >> PAGE_SHIFT);
99}
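
All three GTT helpers above convert byte quantities into page-granular entry ranges with the same shifts. A trivial sketch of that arithmetic, with hypothetical values:

#include <stdint.h>
#include <stdio.h>

#define GTT_PAGE_SHIFT 12

int main(void)
{
	uint64_t gtt_start = 0x100000;	/* hypothetical: 1 MiB into the GTT */
	uint64_t obj_size = 64 * 1024;	/* hypothetical: 64 KiB object */

	uint64_t first_entry = gtt_start >> GTT_PAGE_SHIFT;	/* 256 */
	uint64_t num_entries = obj_size >> GTT_PAGE_SHIFT;	/* 16 */

	printf("entries %llu..%llu\n",
	       (unsigned long long)first_entry,
	       (unsigned long long)(first_entry + num_entries - 1));
	return 0;
}
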
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index af352de70be1..22a32b9932c5 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -181,7 +181,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
181} 181}
182 182
 183/* Check pitch constraints for all chips & tiling formats */ 183/* Check pitch constraints for all chips & tiling formats */
184bool 184static bool
185i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) 185i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
186{ 186{
187 int tile_width; 187 int tile_width;
@@ -232,32 +232,44 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
232 return true; 232 return true;
233} 233}
234 234
235bool 235/* Is the current GTT allocation valid for the change in tiling? */
236i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode) 236static bool
237i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
237{ 238{
238 struct drm_device *dev = obj->dev; 239 u32 size;
239 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
240
241 if (obj_priv->gtt_space == NULL)
242 return true;
243 240
244 if (tiling_mode == I915_TILING_NONE) 241 if (tiling_mode == I915_TILING_NONE)
245 return true; 242 return true;
246 243
247 if (INTEL_INFO(dev)->gen >= 4) 244 if (INTEL_INFO(obj->base.dev)->gen >= 4)
248 return true; 245 return true;
249 246
250 if (obj_priv->gtt_offset & (obj->size - 1)) 247 if (INTEL_INFO(obj->base.dev)->gen == 3) {
251 return false; 248 if (obj->gtt_offset & ~I915_FENCE_START_MASK)
252
253 if (IS_GEN3(dev)) {
254 if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
255 return false; 249 return false;
256 } else { 250 } else {
257 if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK) 251 if (obj->gtt_offset & ~I830_FENCE_START_MASK)
258 return false; 252 return false;
259 } 253 }
260 254
255 /*
 256 * On older chips, the object must be aligned to the size of the
 257 * smallest fence register that can contain it.
258 */
259 if (INTEL_INFO(obj->base.dev)->gen == 3)
260 size = 1024*1024;
261 else
262 size = 512*1024;
263
264 while (size < obj->base.size)
265 size <<= 1;
266
267 if (obj->gtt_space->size != size)
268 return false;
269
270 if (obj->gtt_offset & (size - 1))
271 return false;
272
261 return true; 273 return true;
262} 274}
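
The new i915_gem_object_fence_ok() rounds the object size up to the smallest power-of-two fence region the generation supports before testing the allocation. That rounding, extracted into a sketch (gen is a plain parameter here rather than INTEL_INFO()):

#include <stdint.h>
#include <stdio.h>

static uint32_t fence_size_for(int gen, uint32_t obj_size)
{
	/* Smallest supported fence: 1 MiB on gen3, 512 KiB earlier. */
	uint32_t size = (gen == 3) ? 1024 * 1024 : 512 * 1024;

	while (size < obj_size)
		size <<= 1;
	return size;
}

int main(void)
{
	/* A 3 MiB object on gen3 needs a 4 MiB fence region. */
	printf("%u\n", (unsigned int)fence_size_for(3, 3u << 20));
	return 0;
}
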
263 275
@@ -267,30 +279,29 @@ i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode)
267 */ 279 */
268int 280int
269i915_gem_set_tiling(struct drm_device *dev, void *data, 281i915_gem_set_tiling(struct drm_device *dev, void *data,
270 struct drm_file *file_priv) 282 struct drm_file *file)
271{ 283{
272 struct drm_i915_gem_set_tiling *args = data; 284 struct drm_i915_gem_set_tiling *args = data;
273 drm_i915_private_t *dev_priv = dev->dev_private; 285 drm_i915_private_t *dev_priv = dev->dev_private;
274 struct drm_gem_object *obj; 286 struct drm_i915_gem_object *obj;
275 struct drm_i915_gem_object *obj_priv;
276 int ret; 287 int ret;
277 288
278 ret = i915_gem_check_is_wedged(dev); 289 ret = i915_gem_check_is_wedged(dev);
279 if (ret) 290 if (ret)
280 return ret; 291 return ret;
281 292
282 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 293 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
283 if (obj == NULL) 294 if (obj == NULL)
284 return -ENOENT; 295 return -ENOENT;
285 obj_priv = to_intel_bo(obj);
286 296
287 if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) { 297 if (!i915_tiling_ok(dev,
288 drm_gem_object_unreference_unlocked(obj); 298 args->stride, obj->base.size, args->tiling_mode)) {
299 drm_gem_object_unreference_unlocked(&obj->base);
289 return -EINVAL; 300 return -EINVAL;
290 } 301 }
291 302
292 if (obj_priv->pin_count) { 303 if (obj->pin_count) {
293 drm_gem_object_unreference_unlocked(obj); 304 drm_gem_object_unreference_unlocked(&obj->base);
294 return -EBUSY; 305 return -EBUSY;
295 } 306 }
296 307
@@ -324,34 +335,28 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
324 } 335 }
325 336
326 mutex_lock(&dev->struct_mutex); 337 mutex_lock(&dev->struct_mutex);
327 if (args->tiling_mode != obj_priv->tiling_mode || 338 if (args->tiling_mode != obj->tiling_mode ||
328 args->stride != obj_priv->stride) { 339 args->stride != obj->stride) {
329 /* We need to rebind the object if its current allocation 340 /* We need to rebind the object if its current allocation
330 * no longer meets the alignment restrictions for its new 341 * no longer meets the alignment restrictions for its new
331 * tiling mode. Otherwise we can just leave it alone, but 342 * tiling mode. Otherwise we can just leave it alone, but
332 * need to ensure that any fence register is cleared. 343 * need to ensure that any fence register is cleared.
333 */ 344 */
334 if (!i915_gem_object_fence_offset_ok(obj, args->tiling_mode)) 345 i915_gem_release_mmap(obj);
335 ret = i915_gem_object_unbind(obj);
336 else if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
337 ret = i915_gem_object_put_fence_reg(obj, true);
338 else
339 i915_gem_release_mmap(obj);
340 346
341 if (ret != 0) { 347 obj->map_and_fenceable =
342 args->tiling_mode = obj_priv->tiling_mode; 348 obj->gtt_space == NULL ||
343 args->stride = obj_priv->stride; 349 (obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end &&
344 goto err; 350 i915_gem_object_fence_ok(obj, args->tiling_mode));
345 }
346 351
347 obj_priv->tiling_mode = args->tiling_mode; 352 obj->tiling_changed = true;
348 obj_priv->stride = args->stride; 353 obj->tiling_mode = args->tiling_mode;
354 obj->stride = args->stride;
349 } 355 }
350err: 356 drm_gem_object_unreference(&obj->base);
351 drm_gem_object_unreference(obj);
352 mutex_unlock(&dev->struct_mutex); 357 mutex_unlock(&dev->struct_mutex);
353 358
354 return ret; 359 return 0;
355} 360}
356 361
357/** 362/**
@@ -359,22 +364,20 @@ err:
359 */ 364 */
360int 365int
361i915_gem_get_tiling(struct drm_device *dev, void *data, 366i915_gem_get_tiling(struct drm_device *dev, void *data,
362 struct drm_file *file_priv) 367 struct drm_file *file)
363{ 368{
364 struct drm_i915_gem_get_tiling *args = data; 369 struct drm_i915_gem_get_tiling *args = data;
365 drm_i915_private_t *dev_priv = dev->dev_private; 370 drm_i915_private_t *dev_priv = dev->dev_private;
366 struct drm_gem_object *obj; 371 struct drm_i915_gem_object *obj;
367 struct drm_i915_gem_object *obj_priv;
368 372
369 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 373 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
370 if (obj == NULL) 374 if (obj == NULL)
371 return -ENOENT; 375 return -ENOENT;
372 obj_priv = to_intel_bo(obj);
373 376
374 mutex_lock(&dev->struct_mutex); 377 mutex_lock(&dev->struct_mutex);
375 378
376 args->tiling_mode = obj_priv->tiling_mode; 379 args->tiling_mode = obj->tiling_mode;
377 switch (obj_priv->tiling_mode) { 380 switch (obj->tiling_mode) {
378 case I915_TILING_X: 381 case I915_TILING_X:
379 args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; 382 args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
380 break; 383 break;
@@ -394,7 +397,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
394 if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17) 397 if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
395 args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10; 398 args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
396 399
397 drm_gem_object_unreference(obj); 400 drm_gem_object_unreference(&obj->base);
398 mutex_unlock(&dev->struct_mutex); 401 mutex_unlock(&dev->struct_mutex);
399 402
400 return 0; 403 return 0;
@@ -424,46 +427,44 @@ i915_gem_swizzle_page(struct page *page)
424} 427}
425 428
426void 429void
427i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj) 430i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
428{ 431{
429 struct drm_device *dev = obj->dev; 432 struct drm_device *dev = obj->base.dev;
430 drm_i915_private_t *dev_priv = dev->dev_private; 433 drm_i915_private_t *dev_priv = dev->dev_private;
431 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 434 int page_count = obj->base.size >> PAGE_SHIFT;
432 int page_count = obj->size >> PAGE_SHIFT;
433 int i; 435 int i;
434 436
435 if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17) 437 if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
436 return; 438 return;
437 439
438 if (obj_priv->bit_17 == NULL) 440 if (obj->bit_17 == NULL)
439 return; 441 return;
440 442
441 for (i = 0; i < page_count; i++) { 443 for (i = 0; i < page_count; i++) {
442 char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17; 444 char new_bit_17 = page_to_phys(obj->pages[i]) >> 17;
443 if ((new_bit_17 & 0x1) != 445 if ((new_bit_17 & 0x1) !=
444 (test_bit(i, obj_priv->bit_17) != 0)) { 446 (test_bit(i, obj->bit_17) != 0)) {
445 i915_gem_swizzle_page(obj_priv->pages[i]); 447 i915_gem_swizzle_page(obj->pages[i]);
446 set_page_dirty(obj_priv->pages[i]); 448 set_page_dirty(obj->pages[i]);
447 } 449 }
448 } 450 }
449} 451}
450 452
451void 453void
452i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj) 454i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
453{ 455{
454 struct drm_device *dev = obj->dev; 456 struct drm_device *dev = obj->base.dev;
455 drm_i915_private_t *dev_priv = dev->dev_private; 457 drm_i915_private_t *dev_priv = dev->dev_private;
456 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 458 int page_count = obj->base.size >> PAGE_SHIFT;
457 int page_count = obj->size >> PAGE_SHIFT;
458 int i; 459 int i;
459 460
460 if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17) 461 if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
461 return; 462 return;
462 463
463 if (obj_priv->bit_17 == NULL) { 464 if (obj->bit_17 == NULL) {
464 obj_priv->bit_17 = kmalloc(BITS_TO_LONGS(page_count) * 465 obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
465 sizeof(long), GFP_KERNEL); 466 sizeof(long), GFP_KERNEL);
466 if (obj_priv->bit_17 == NULL) { 467 if (obj->bit_17 == NULL) {
467 DRM_ERROR("Failed to allocate memory for bit 17 " 468 DRM_ERROR("Failed to allocate memory for bit 17 "
468 "record\n"); 469 "record\n");
469 return; 470 return;
@@ -471,9 +472,9 @@ i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj)
471 } 472 }
472 473
473 for (i = 0; i < page_count; i++) { 474 for (i = 0; i < page_count; i++) {
474 if (page_to_phys(obj_priv->pages[i]) & (1 << 17)) 475 if (page_to_phys(obj->pages[i]) & (1 << 17))
475 __set_bit(i, obj_priv->bit_17); 476 __set_bit(i, obj->bit_17);
476 else 477 else
477 __clear_bit(i, obj_priv->bit_17); 478 __clear_bit(i, obj->bit_17);
478 } 479 }
479} 480}
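The two swizzle helpers above exist because, on machines with bit-6-9-10-17 swizzling, the memory controller folds physical address bit 17 into the tiling pattern; after a swap cycle a page may return at a different physical address, so the driver records each page's bit 17 on save and repacks any page whose bit has since flipped. A minimal standalone sketch of that per-page test (the names are illustrative, not the driver's):

#include <stdbool.h>
#include <stdint.h>

/* Sketch: must this page's data be re-swizzled after swap-in?
 * 'phys' is the page's current physical address; 'saved_bit17' is the
 * value recorded by the save path above. */
static bool page_needs_fixup(uint64_t phys, bool saved_bit17)
{
        bool now = (phys >> 17) & 1;     /* current bit 17 */
        return now != saved_bit17;       /* flipped: repack the page */
}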
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 729fd0c91d7b..0dadc025b77b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -67,20 +67,20 @@
67void 67void
68ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) 68ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
69{ 69{
70 if ((dev_priv->gt_irq_mask_reg & mask) != 0) { 70 if ((dev_priv->gt_irq_mask & mask) != 0) {
71 dev_priv->gt_irq_mask_reg &= ~mask; 71 dev_priv->gt_irq_mask &= ~mask;
72 I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg); 72 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
73 (void) I915_READ(GTIMR); 73 POSTING_READ(GTIMR);
74 } 74 }
75} 75}
76 76
77void 77void
78ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) 78ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
79{ 79{
80 if ((dev_priv->gt_irq_mask_reg & mask) != mask) { 80 if ((dev_priv->gt_irq_mask & mask) != mask) {
81 dev_priv->gt_irq_mask_reg |= mask; 81 dev_priv->gt_irq_mask |= mask;
82 I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg); 82 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
83 (void) I915_READ(GTIMR); 83 POSTING_READ(GTIMR);
84 } 84 }
85} 85}
86 86
@@ -88,40 +88,40 @@ ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
88static void 88static void
89ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) 89ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
90{ 90{
91 if ((dev_priv->irq_mask_reg & mask) != 0) { 91 if ((dev_priv->irq_mask & mask) != 0) {
92 dev_priv->irq_mask_reg &= ~mask; 92 dev_priv->irq_mask &= ~mask;
93 I915_WRITE(DEIMR, dev_priv->irq_mask_reg); 93 I915_WRITE(DEIMR, dev_priv->irq_mask);
94 (void) I915_READ(DEIMR); 94 POSTING_READ(DEIMR);
95 } 95 }
96} 96}
97 97
98static inline void 98static inline void
99ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) 99ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
100{ 100{
101 if ((dev_priv->irq_mask_reg & mask) != mask) { 101 if ((dev_priv->irq_mask & mask) != mask) {
102 dev_priv->irq_mask_reg |= mask; 102 dev_priv->irq_mask |= mask;
103 I915_WRITE(DEIMR, dev_priv->irq_mask_reg); 103 I915_WRITE(DEIMR, dev_priv->irq_mask);
104 (void) I915_READ(DEIMR); 104 POSTING_READ(DEIMR);
105 } 105 }
106} 106}
107 107
108void 108void
109i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask) 109i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
110{ 110{
111 if ((dev_priv->irq_mask_reg & mask) != 0) { 111 if ((dev_priv->irq_mask & mask) != 0) {
112 dev_priv->irq_mask_reg &= ~mask; 112 dev_priv->irq_mask &= ~mask;
113 I915_WRITE(IMR, dev_priv->irq_mask_reg); 113 I915_WRITE(IMR, dev_priv->irq_mask);
114 (void) I915_READ(IMR); 114 POSTING_READ(IMR);
115 } 115 }
116} 116}
117 117
118void 118void
119i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask) 119i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
120{ 120{
121 if ((dev_priv->irq_mask_reg & mask) != mask) { 121 if ((dev_priv->irq_mask & mask) != mask) {
122 dev_priv->irq_mask_reg |= mask; 122 dev_priv->irq_mask |= mask;
123 I915_WRITE(IMR, dev_priv->irq_mask_reg); 123 I915_WRITE(IMR, dev_priv->irq_mask);
124 (void) I915_READ(IMR); 124 POSTING_READ(IMR);
125 } 125 }
126} 126}
127 127
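The (void) I915_READ(reg) pattern replaced throughout these hunks and POSTING_READ(reg) do the same job: PCI MMIO writes are posted, so reading the register back forces the preceding write to reach the hardware before execution continues; the macro merely names the intent. A standalone sketch of the idiom (the accessor is illustrative):

#include <stdint.h>

/* Sketch: flush a posted MMIO write by reading the register back.
 * 'volatile' keeps the compiler from discarding the dummy read. */
static inline void write_and_flush(volatile uint32_t *reg, uint32_t val)
{
        *reg = val;      /* posted write */
        (void)*reg;      /* read-back: write lands before we proceed */
}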
@@ -144,7 +144,7 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
144 dev_priv->pipestat[pipe] |= mask; 144 dev_priv->pipestat[pipe] |= mask;
145 /* Enable the interrupt, clear any pending status */ 145 /* Enable the interrupt, clear any pending status */
146 I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16)); 146 I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
147 (void) I915_READ(reg); 147 POSTING_READ(reg);
148 } 148 }
149} 149}
150 150
@@ -156,16 +156,19 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
156 156
157 dev_priv->pipestat[pipe] &= ~mask; 157 dev_priv->pipestat[pipe] &= ~mask;
158 I915_WRITE(reg, dev_priv->pipestat[pipe]); 158 I915_WRITE(reg, dev_priv->pipestat[pipe]);
159 (void) I915_READ(reg); 159 POSTING_READ(reg);
160 } 160 }
161} 161}
162 162
163/** 163/**
164 * intel_enable_asle - enable ASLE interrupt for OpRegion 164 * intel_enable_asle - enable ASLE interrupt for OpRegion
165 */ 165 */
166void intel_enable_asle (struct drm_device *dev) 166void intel_enable_asle(struct drm_device *dev)
167{ 167{
168 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 168 drm_i915_private_t *dev_priv = dev->dev_private;
169 unsigned long irqflags;
170
171 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
169 172
170 if (HAS_PCH_SPLIT(dev)) 173 if (HAS_PCH_SPLIT(dev))
171 ironlake_enable_display_irq(dev_priv, DE_GSE); 174 ironlake_enable_display_irq(dev_priv, DE_GSE);
@@ -176,6 +179,8 @@ void intel_enable_asle (struct drm_device *dev)
176 i915_enable_pipestat(dev_priv, 0, 179 i915_enable_pipestat(dev_priv, 0,
177 PIPE_LEGACY_BLC_EVENT_ENABLE); 180 PIPE_LEGACY_BLC_EVENT_ENABLE);
178 } 181 }
182
183 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
179} 184}
180 185
181/** 186/**
@@ -243,6 +248,92 @@ u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
243 return I915_READ(reg); 248 return I915_READ(reg);
244} 249}
245 250
251int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
252 int *vpos, int *hpos)
253{
254 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
255 u32 vbl = 0, position = 0;
256 int vbl_start, vbl_end, htotal, vtotal;
257 bool in_vbl = true;
258 int ret = 0;
259
260 if (!i915_pipe_enabled(dev, pipe)) {
261 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
262 "pipe %d\n", pipe);
263 return 0;
264 }
265
266 /* Get vtotal. */
267 vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff);
268
269 if (INTEL_INFO(dev)->gen >= 4) {
270 /* No obvious pixelcount register. Only query vertical
271 * scanout position from Display scan line register.
272 */
273 position = I915_READ(PIPEDSL(pipe));
274
275 /* Decode into vertical scanout position. Don't have
276 * horizontal scanout position.
277 */
278 *vpos = position & 0x1fff;
279 *hpos = 0;
280 } else {
281 /* Have access to pixelcount since start of frame.
282 * We can split this into vertical and horizontal
283 * scanout position.
284 */
285 position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
286
287 htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff);
288 *vpos = position / htotal;
289 *hpos = position - (*vpos * htotal);
290 }
291
292 /* Query vblank area. */
293 vbl = I915_READ(VBLANK(pipe));
294
295 /* Test position against vblank region. */
296 vbl_start = vbl & 0x1fff;
297 vbl_end = (vbl >> 16) & 0x1fff;
298
299 if ((*vpos < vbl_start) || (*vpos > vbl_end))
300 in_vbl = false;
301
302 /* Inside "upper part" of vblank area? Apply corrective offset: */
303 if (in_vbl && (*vpos >= vbl_start))
304 *vpos = *vpos - vtotal;
305
306 /* Readouts valid? */
307 if (vbl > 0)
308 ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
309
310 /* In vblank? */
311 if (in_vbl)
312 ret |= DRM_SCANOUTPOS_INVBL;
313
314 return ret;
315}
316
317int i915_get_vblank_timestamp(struct drm_device *dev, int crtc,
318 int *max_error,
319 struct timeval *vblank_time,
320 unsigned flags)
321{
322 struct drm_crtc *drmcrtc;
323
324 if (crtc < 0 || crtc >= dev->num_crtcs) {
325 DRM_ERROR("Invalid crtc %d\n", crtc);
326 return -EINVAL;
327 }
328
329 /* Get drm_crtc to timestamp: */
330 drmcrtc = intel_get_crtc_for_pipe(dev, crtc);
331
332 /* Helper routine in DRM core does all the work: */
333 return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
334 vblank_time, flags, drmcrtc);
335}
336
246/* 337/*
247 * Handle hotplug events outside the interrupt handler proper. 338 * Handle hotplug events outside the interrupt handler proper.
248 */ 339 */
@@ -297,8 +388,8 @@ static void notify_ring(struct drm_device *dev,
297 struct intel_ring_buffer *ring) 388 struct intel_ring_buffer *ring)
298{ 389{
299 struct drm_i915_private *dev_priv = dev->dev_private; 390 struct drm_i915_private *dev_priv = dev->dev_private;
300 u32 seqno = ring->get_seqno(dev, ring); 391 u32 seqno = ring->get_seqno(ring);
301 ring->irq_gem_seqno = seqno; 392 ring->irq_seqno = seqno;
302 trace_i915_gem_request_complete(dev, seqno); 393 trace_i915_gem_request_complete(dev, seqno);
303 wake_up_all(&ring->irq_queue); 394 wake_up_all(&ring->irq_queue);
304 dev_priv->hangcheck_count = 0; 395 dev_priv->hangcheck_count = 0;
@@ -306,11 +397,49 @@ static void notify_ring(struct drm_device *dev,
306 jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); 397 jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
307} 398}
308 399
400static void gen6_pm_irq_handler(struct drm_device *dev)
401{
402 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
403 u8 new_delay = dev_priv->cur_delay;
404 u32 pm_iir;
405
406 pm_iir = I915_READ(GEN6_PMIIR);
407 if (!pm_iir)
408 return;
409
410 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
411 if (dev_priv->cur_delay != dev_priv->max_delay)
412 new_delay = dev_priv->cur_delay + 1;
413 if (new_delay > dev_priv->max_delay)
414 new_delay = dev_priv->max_delay;
415 } else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) {
416 if (dev_priv->cur_delay != dev_priv->min_delay)
417 new_delay = dev_priv->cur_delay - 1;
418 if (new_delay < dev_priv->min_delay) {
419 new_delay = dev_priv->min_delay;
420 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
421 I915_READ(GEN6_RP_INTERRUPT_LIMITS) |
422 ((new_delay << 16) & 0x3f0000));
423 } else {
 424 /* Make sure we continue to get down-threshold
 425 * interrupts until we hit the minimum frequency */

426 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
427 I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000);
428 }
429
430 }
431
432 gen6_set_rps(dev, new_delay);
433 dev_priv->cur_delay = new_delay;
434
435 I915_WRITE(GEN6_PMIIR, pm_iir);
436}
437
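gen6_pm_irq_handler() above implements a simple hysteresis: an up-threshold interrupt raises the frequency one step toward max_delay, a down-threshold or timeout lowers it toward min_delay, and on hitting the floor the interrupt-limits register is rewritten so the hardware stops requesting further down-steps (otherwise the limit field is cleared so down interrupts keep arriving). The stepping itself reduces to a clamped increment/decrement; a standalone sketch with illustrative names:

/* Sketch of the clamped one-step policy used above. */
static unsigned step_delay(unsigned cur, unsigned lo, unsigned hi, int up)
{
        if (up)
                return cur < hi ? cur + 1 : hi;   /* toward max_delay */
        return cur > lo ? cur - 1 : lo;           /* toward min_delay */
}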
309static irqreturn_t ironlake_irq_handler(struct drm_device *dev) 438static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
310{ 439{
311 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 440 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
312 int ret = IRQ_NONE; 441 int ret = IRQ_NONE;
313 u32 de_iir, gt_iir, de_ier, pch_iir; 442 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
314 u32 hotplug_mask; 443 u32 hotplug_mask;
315 struct drm_i915_master_private *master_priv; 444 struct drm_i915_master_private *master_priv;
316 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT; 445 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
@@ -321,13 +450,15 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
321 /* disable master interrupt before clearing iir */ 450 /* disable master interrupt before clearing iir */
322 de_ier = I915_READ(DEIER); 451 de_ier = I915_READ(DEIER);
323 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 452 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
324 (void)I915_READ(DEIER); 453 POSTING_READ(DEIER);
325 454
326 de_iir = I915_READ(DEIIR); 455 de_iir = I915_READ(DEIIR);
327 gt_iir = I915_READ(GTIIR); 456 gt_iir = I915_READ(GTIIR);
328 pch_iir = I915_READ(SDEIIR); 457 pch_iir = I915_READ(SDEIIR);
458 pm_iir = I915_READ(GEN6_PMIIR);
329 459
330 if (de_iir == 0 && gt_iir == 0 && pch_iir == 0) 460 if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
461 (!IS_GEN6(dev) || pm_iir == 0))
331 goto done; 462 goto done;
332 463
333 if (HAS_PCH_CPT(dev)) 464 if (HAS_PCH_CPT(dev))
@@ -344,12 +475,12 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
344 READ_BREADCRUMB(dev_priv); 475 READ_BREADCRUMB(dev_priv);
345 } 476 }
346 477
347 if (gt_iir & GT_PIPE_NOTIFY) 478 if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
348 notify_ring(dev, &dev_priv->render_ring); 479 notify_ring(dev, &dev_priv->ring[RCS]);
349 if (gt_iir & bsd_usr_interrupt) 480 if (gt_iir & bsd_usr_interrupt)
350 notify_ring(dev, &dev_priv->bsd_ring); 481 notify_ring(dev, &dev_priv->ring[VCS]);
351 if (HAS_BLT(dev) && gt_iir & GT_BLT_USER_INTERRUPT) 482 if (gt_iir & GT_BLT_USER_INTERRUPT)
352 notify_ring(dev, &dev_priv->blt_ring); 483 notify_ring(dev, &dev_priv->ring[BCS]);
353 484
354 if (de_iir & DE_GSE) 485 if (de_iir & DE_GSE)
355 intel_opregion_gse_intr(dev); 486 intel_opregion_gse_intr(dev);
@@ -379,6 +510,9 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
379 i915_handle_rps_change(dev); 510 i915_handle_rps_change(dev);
380 } 511 }
381 512
513 if (IS_GEN6(dev))
514 gen6_pm_irq_handler(dev);
515
382 /* should clear PCH hotplug event before clear CPU irq */ 516 /* should clear PCH hotplug event before clear CPU irq */
383 I915_WRITE(SDEIIR, pch_iir); 517 I915_WRITE(SDEIIR, pch_iir);
384 I915_WRITE(GTIIR, gt_iir); 518 I915_WRITE(GTIIR, gt_iir);
@@ -386,7 +520,7 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
386 520
387done: 521done:
388 I915_WRITE(DEIER, de_ier); 522 I915_WRITE(DEIER, de_ier);
389 (void)I915_READ(DEIER); 523 POSTING_READ(DEIER);
390 524
391 return ret; 525 return ret;
392} 526}
@@ -423,28 +557,23 @@ static void i915_error_work_func(struct work_struct *work)
423#ifdef CONFIG_DEBUG_FS 557#ifdef CONFIG_DEBUG_FS
424static struct drm_i915_error_object * 558static struct drm_i915_error_object *
425i915_error_object_create(struct drm_device *dev, 559i915_error_object_create(struct drm_device *dev,
426 struct drm_gem_object *src) 560 struct drm_i915_gem_object *src)
427{ 561{
428 drm_i915_private_t *dev_priv = dev->dev_private; 562 drm_i915_private_t *dev_priv = dev->dev_private;
429 struct drm_i915_error_object *dst; 563 struct drm_i915_error_object *dst;
430 struct drm_i915_gem_object *src_priv;
431 int page, page_count; 564 int page, page_count;
432 u32 reloc_offset; 565 u32 reloc_offset;
433 566
434 if (src == NULL) 567 if (src == NULL || src->pages == NULL)
435 return NULL;
436
437 src_priv = to_intel_bo(src);
438 if (src_priv->pages == NULL)
439 return NULL; 568 return NULL;
440 569
441 page_count = src->size / PAGE_SIZE; 570 page_count = src->base.size / PAGE_SIZE;
442 571
443 dst = kmalloc(sizeof(*dst) + page_count * sizeof (u32 *), GFP_ATOMIC); 572 dst = kmalloc(sizeof(*dst) + page_count * sizeof (u32 *), GFP_ATOMIC);
444 if (dst == NULL) 573 if (dst == NULL)
445 return NULL; 574 return NULL;
446 575
447 reloc_offset = src_priv->gtt_offset; 576 reloc_offset = src->gtt_offset;
448 for (page = 0; page < page_count; page++) { 577 for (page = 0; page < page_count; page++) {
449 unsigned long flags; 578 unsigned long flags;
450 void __iomem *s; 579 void __iomem *s;
@@ -466,7 +595,7 @@ i915_error_object_create(struct drm_device *dev,
466 reloc_offset += PAGE_SIZE; 595 reloc_offset += PAGE_SIZE;
467 } 596 }
468 dst->page_count = page_count; 597 dst->page_count = page_count;
469 dst->gtt_offset = src_priv->gtt_offset; 598 dst->gtt_offset = src->gtt_offset;
470 599
471 return dst; 600 return dst;
472 601
@@ -520,36 +649,96 @@ i915_get_bbaddr(struct drm_device *dev, u32 *ring)
520} 649}
521 650
522static u32 651static u32
523i915_ringbuffer_last_batch(struct drm_device *dev) 652i915_ringbuffer_last_batch(struct drm_device *dev,
653 struct intel_ring_buffer *ring)
524{ 654{
525 struct drm_i915_private *dev_priv = dev->dev_private; 655 struct drm_i915_private *dev_priv = dev->dev_private;
526 u32 head, bbaddr; 656 u32 head, bbaddr;
527 u32 *ring; 657 u32 *val;
528 658
529 /* Locate the current position in the ringbuffer and walk back 659 /* Locate the current position in the ringbuffer and walk back
530 * to find the most recently dispatched batch buffer. 660 * to find the most recently dispatched batch buffer.
531 */ 661 */
532 bbaddr = 0; 662 head = I915_READ_HEAD(ring) & HEAD_ADDR;
533 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
534 ring = (u32 *)(dev_priv->render_ring.virtual_start + head);
535 663
536 while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) { 664 val = (u32 *)(ring->virtual_start + head);
537 bbaddr = i915_get_bbaddr(dev, ring); 665 while (--val >= (u32 *)ring->virtual_start) {
666 bbaddr = i915_get_bbaddr(dev, val);
538 if (bbaddr) 667 if (bbaddr)
539 break; 668 return bbaddr;
540 } 669 }
541 670
542 if (bbaddr == 0) { 671 val = (u32 *)(ring->virtual_start + ring->size);
543 ring = (u32 *)(dev_priv->render_ring.virtual_start 672 while (--val >= (u32 *)ring->virtual_start) {
544 + dev_priv->render_ring.size); 673 bbaddr = i915_get_bbaddr(dev, val);
545 while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) { 674 if (bbaddr)
546 bbaddr = i915_get_bbaddr(dev, ring); 675 return bbaddr;
547 if (bbaddr)
548 break;
549 }
550 } 676 }
551 677
552 return bbaddr; 678 return 0;
679}
680
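The refactored i915_ringbuffer_last_batch() above scans backwards from the ring's HEAD looking for the most recently dispatched batch-buffer address, and if the first pass finds nothing it wraps and scans down from the end of the ring. The shape of that two-pass scan as a standalone sketch (offsets counted in dwords; match() stands in for i915_get_bbaddr()):

#include <stddef.h>
#include <stdint.h>

/* Sketch: backward scan from 'head' to the base, then from the end of
 * the buffer, returning the first nonzero match (as the driver does). */
static uint32_t scan_back(uint32_t *base, size_t head, size_t len,
                          uint32_t (*match)(const uint32_t *))
{
        uint32_t *p, addr;

        for (p = base + head; --p >= base; )       /* newest first */
                if ((addr = match(p)) != 0)
                        return addr;
        for (p = base + len; --p >= base; )        /* wrapped portion */
                if ((addr = match(p)) != 0)
                        return addr;
        return 0;
}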
681static u32 capture_bo_list(struct drm_i915_error_buffer *err,
682 int count,
683 struct list_head *head)
684{
685 struct drm_i915_gem_object *obj;
686 int i = 0;
687
688 list_for_each_entry(obj, head, mm_list) {
689 err->size = obj->base.size;
690 err->name = obj->base.name;
691 err->seqno = obj->last_rendering_seqno;
692 err->gtt_offset = obj->gtt_offset;
693 err->read_domains = obj->base.read_domains;
694 err->write_domain = obj->base.write_domain;
695 err->fence_reg = obj->fence_reg;
696 err->pinned = 0;
697 if (obj->pin_count > 0)
698 err->pinned = 1;
699 if (obj->user_pin_count > 0)
700 err->pinned = -1;
701 err->tiling = obj->tiling_mode;
702 err->dirty = obj->dirty;
703 err->purgeable = obj->madv != I915_MADV_WILLNEED;
704 err->ring = obj->ring ? obj->ring->id : 0;
705
706 if (++i == count)
707 break;
708
709 err++;
710 }
711
712 return i;
713}
714
715static void i915_gem_record_fences(struct drm_device *dev,
716 struct drm_i915_error_state *error)
717{
718 struct drm_i915_private *dev_priv = dev->dev_private;
719 int i;
720
721 /* Fences */
722 switch (INTEL_INFO(dev)->gen) {
723 case 6:
724 for (i = 0; i < 16; i++)
725 error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
726 break;
727 case 5:
728 case 4:
729 for (i = 0; i < 16; i++)
730 error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
731 break;
732 case 3:
733 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
734 for (i = 0; i < 8; i++)
735 error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
736 case 2:
737 for (i = 0; i < 8; i++)
738 error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
739 break;
740
741 }
553} 742}
554 743
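In i915_gem_record_fences() the fall-through from case 3 into case 2 is deliberate: 945-class and G33 parts carry eight extra fence registers at FENCE_REG_945_8 (recorded into slots 8..15) on top of the common eight at FENCE_REG_830_0, while gen4+ parts use sixteen 64-bit registers. Sketched as a reference comment, per the code above:

/* error->fence[] layout recorded above (for reference):
 *   gen6      : fence[0..15] <- FENCE_REG_SANDYBRIDGE_0, 64-bit reads
 *   gen4/gen5 : fence[0..15] <- FENCE_REG_965_0, 64-bit reads
 *   945/G33   : fence[8..15] <- FENCE_REG_945_8, then falls through to
 *   gen2/gen3 : fence[0..7]  <- FENCE_REG_830_0, 32-bit reads
 */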
555/** 744/**
@@ -564,9 +753,9 @@ i915_ringbuffer_last_batch(struct drm_device *dev)
564static void i915_capture_error_state(struct drm_device *dev) 753static void i915_capture_error_state(struct drm_device *dev)
565{ 754{
566 struct drm_i915_private *dev_priv = dev->dev_private; 755 struct drm_i915_private *dev_priv = dev->dev_private;
567 struct drm_i915_gem_object *obj_priv; 756 struct drm_i915_gem_object *obj;
568 struct drm_i915_error_state *error; 757 struct drm_i915_error_state *error;
569 struct drm_gem_object *batchbuffer[2]; 758 struct drm_i915_gem_object *batchbuffer[2];
570 unsigned long flags; 759 unsigned long flags;
571 u32 bbaddr; 760 u32 bbaddr;
572 int count; 761 int count;
@@ -585,20 +774,33 @@ static void i915_capture_error_state(struct drm_device *dev)
585 774
586 DRM_DEBUG_DRIVER("generating error event\n"); 775 DRM_DEBUG_DRIVER("generating error event\n");
587 776
588 error->seqno = 777 error->seqno = dev_priv->ring[RCS].get_seqno(&dev_priv->ring[RCS]);
589 dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring);
590 error->eir = I915_READ(EIR); 778 error->eir = I915_READ(EIR);
591 error->pgtbl_er = I915_READ(PGTBL_ER); 779 error->pgtbl_er = I915_READ(PGTBL_ER);
592 error->pipeastat = I915_READ(PIPEASTAT); 780 error->pipeastat = I915_READ(PIPEASTAT);
593 error->pipebstat = I915_READ(PIPEBSTAT); 781 error->pipebstat = I915_READ(PIPEBSTAT);
594 error->instpm = I915_READ(INSTPM); 782 error->instpm = I915_READ(INSTPM);
595 if (INTEL_INFO(dev)->gen < 4) { 783 error->error = 0;
596 error->ipeir = I915_READ(IPEIR); 784 if (INTEL_INFO(dev)->gen >= 6) {
597 error->ipehr = I915_READ(IPEHR); 785 error->error = I915_READ(ERROR_GEN6);
598 error->instdone = I915_READ(INSTDONE); 786
599 error->acthd = I915_READ(ACTHD); 787 error->bcs_acthd = I915_READ(BCS_ACTHD);
600 error->bbaddr = 0; 788 error->bcs_ipehr = I915_READ(BCS_IPEHR);
601 } else { 789 error->bcs_ipeir = I915_READ(BCS_IPEIR);
790 error->bcs_instdone = I915_READ(BCS_INSTDONE);
791 error->bcs_seqno = 0;
792 if (dev_priv->ring[BCS].get_seqno)
793 error->bcs_seqno = dev_priv->ring[BCS].get_seqno(&dev_priv->ring[BCS]);
794
795 error->vcs_acthd = I915_READ(VCS_ACTHD);
796 error->vcs_ipehr = I915_READ(VCS_IPEHR);
797 error->vcs_ipeir = I915_READ(VCS_IPEIR);
798 error->vcs_instdone = I915_READ(VCS_INSTDONE);
799 error->vcs_seqno = 0;
800 if (dev_priv->ring[VCS].get_seqno)
801 error->vcs_seqno = dev_priv->ring[VCS].get_seqno(&dev_priv->ring[VCS]);
802 }
803 if (INTEL_INFO(dev)->gen >= 4) {
602 error->ipeir = I915_READ(IPEIR_I965); 804 error->ipeir = I915_READ(IPEIR_I965);
603 error->ipehr = I915_READ(IPEHR_I965); 805 error->ipehr = I915_READ(IPEHR_I965);
604 error->instdone = I915_READ(INSTDONE_I965); 806 error->instdone = I915_READ(INSTDONE_I965);
@@ -606,42 +808,45 @@ static void i915_capture_error_state(struct drm_device *dev)
606 error->instdone1 = I915_READ(INSTDONE1); 808 error->instdone1 = I915_READ(INSTDONE1);
607 error->acthd = I915_READ(ACTHD_I965); 809 error->acthd = I915_READ(ACTHD_I965);
608 error->bbaddr = I915_READ64(BB_ADDR); 810 error->bbaddr = I915_READ64(BB_ADDR);
811 } else {
812 error->ipeir = I915_READ(IPEIR);
813 error->ipehr = I915_READ(IPEHR);
814 error->instdone = I915_READ(INSTDONE);
815 error->acthd = I915_READ(ACTHD);
816 error->bbaddr = 0;
609 } 817 }
818 i915_gem_record_fences(dev, error);
610 819
611 bbaddr = i915_ringbuffer_last_batch(dev); 820 bbaddr = i915_ringbuffer_last_batch(dev, &dev_priv->ring[RCS]);
612 821
613 /* Grab the current batchbuffer, most likely to have crashed. */ 822 /* Grab the current batchbuffer, most likely to have crashed. */
614 batchbuffer[0] = NULL; 823 batchbuffer[0] = NULL;
615 batchbuffer[1] = NULL; 824 batchbuffer[1] = NULL;
616 count = 0; 825 count = 0;
617 list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { 826 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
618 struct drm_gem_object *obj = &obj_priv->base;
619
620 if (batchbuffer[0] == NULL && 827 if (batchbuffer[0] == NULL &&
621 bbaddr >= obj_priv->gtt_offset && 828 bbaddr >= obj->gtt_offset &&
622 bbaddr < obj_priv->gtt_offset + obj->size) 829 bbaddr < obj->gtt_offset + obj->base.size)
623 batchbuffer[0] = obj; 830 batchbuffer[0] = obj;
624 831
625 if (batchbuffer[1] == NULL && 832 if (batchbuffer[1] == NULL &&
626 error->acthd >= obj_priv->gtt_offset && 833 error->acthd >= obj->gtt_offset &&
627 error->acthd < obj_priv->gtt_offset + obj->size) 834 error->acthd < obj->gtt_offset + obj->base.size)
628 batchbuffer[1] = obj; 835 batchbuffer[1] = obj;
629 836
630 count++; 837 count++;
631 } 838 }
 632 /* Scan the other lists, for completeness, to catch those bizarre errors. */ 839 /* Scan the other lists, for completeness, to catch those bizarre errors. */
633 if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) { 840 if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
634 list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) { 841 list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
635 struct drm_gem_object *obj = &obj_priv->base;
636
637 if (batchbuffer[0] == NULL && 842 if (batchbuffer[0] == NULL &&
638 bbaddr >= obj_priv->gtt_offset && 843 bbaddr >= obj->gtt_offset &&
639 bbaddr < obj_priv->gtt_offset + obj->size) 844 bbaddr < obj->gtt_offset + obj->base.size)
640 batchbuffer[0] = obj; 845 batchbuffer[0] = obj;
641 846
642 if (batchbuffer[1] == NULL && 847 if (batchbuffer[1] == NULL &&
643 error->acthd >= obj_priv->gtt_offset && 848 error->acthd >= obj->gtt_offset &&
644 error->acthd < obj_priv->gtt_offset + obj->size) 849 error->acthd < obj->gtt_offset + obj->base.size)
645 batchbuffer[1] = obj; 850 batchbuffer[1] = obj;
646 851
647 if (batchbuffer[0] && batchbuffer[1]) 852 if (batchbuffer[0] && batchbuffer[1])
@@ -649,17 +854,15 @@ static void i915_capture_error_state(struct drm_device *dev)
649 } 854 }
650 } 855 }
651 if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) { 856 if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
652 list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) { 857 list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
653 struct drm_gem_object *obj = &obj_priv->base;
654
655 if (batchbuffer[0] == NULL && 858 if (batchbuffer[0] == NULL &&
656 bbaddr >= obj_priv->gtt_offset && 859 bbaddr >= obj->gtt_offset &&
657 bbaddr < obj_priv->gtt_offset + obj->size) 860 bbaddr < obj->gtt_offset + obj->base.size)
658 batchbuffer[0] = obj; 861 batchbuffer[0] = obj;
659 862
660 if (batchbuffer[1] == NULL && 863 if (batchbuffer[1] == NULL &&
661 error->acthd >= obj_priv->gtt_offset && 864 error->acthd >= obj->gtt_offset &&
662 error->acthd < obj_priv->gtt_offset + obj->size) 865 error->acthd < obj->gtt_offset + obj->base.size)
663 batchbuffer[1] = obj; 866 batchbuffer[1] = obj;
664 867
665 if (batchbuffer[0] && batchbuffer[1]) 868 if (batchbuffer[0] && batchbuffer[1])
@@ -678,46 +881,41 @@ static void i915_capture_error_state(struct drm_device *dev)
678 881
679 /* Record the ringbuffer */ 882 /* Record the ringbuffer */
680 error->ringbuffer = i915_error_object_create(dev, 883 error->ringbuffer = i915_error_object_create(dev,
681 dev_priv->render_ring.gem_object); 884 dev_priv->ring[RCS].obj);
682 885
683 /* Record buffers on the active list. */ 886 /* Record buffers on the active and pinned lists. */
684 error->active_bo = NULL; 887 error->active_bo = NULL;
685 error->active_bo_count = 0; 888 error->pinned_bo = NULL;
686 889
687 if (count) 890 error->active_bo_count = count;
891 list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
892 count++;
893 error->pinned_bo_count = count - error->active_bo_count;
894
895 if (count) {
688 error->active_bo = kmalloc(sizeof(*error->active_bo)*count, 896 error->active_bo = kmalloc(sizeof(*error->active_bo)*count,
689 GFP_ATOMIC); 897 GFP_ATOMIC);
690 898 if (error->active_bo)
691 if (error->active_bo) { 899 error->pinned_bo =
692 int i = 0; 900 error->active_bo + error->active_bo_count;
693 list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
694 struct drm_gem_object *obj = &obj_priv->base;
695
696 error->active_bo[i].size = obj->size;
697 error->active_bo[i].name = obj->name;
698 error->active_bo[i].seqno = obj_priv->last_rendering_seqno;
699 error->active_bo[i].gtt_offset = obj_priv->gtt_offset;
700 error->active_bo[i].read_domains = obj->read_domains;
701 error->active_bo[i].write_domain = obj->write_domain;
702 error->active_bo[i].fence_reg = obj_priv->fence_reg;
703 error->active_bo[i].pinned = 0;
704 if (obj_priv->pin_count > 0)
705 error->active_bo[i].pinned = 1;
706 if (obj_priv->user_pin_count > 0)
707 error->active_bo[i].pinned = -1;
708 error->active_bo[i].tiling = obj_priv->tiling_mode;
709 error->active_bo[i].dirty = obj_priv->dirty;
710 error->active_bo[i].purgeable = obj_priv->madv != I915_MADV_WILLNEED;
711
712 if (++i == count)
713 break;
714 }
715 error->active_bo_count = i;
716 } 901 }
717 902
903 if (error->active_bo)
904 error->active_bo_count =
905 capture_bo_list(error->active_bo,
906 error->active_bo_count,
907 &dev_priv->mm.active_list);
908
909 if (error->pinned_bo)
910 error->pinned_bo_count =
911 capture_bo_list(error->pinned_bo,
912 error->pinned_bo_count,
913 &dev_priv->mm.pinned_list);
914
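The capture path above sizes one GFP_ATOMIC allocation for both buffer lists and points pinned_bo at the tail of active_bo, so a single kfree later releases everything. A standalone sketch of the split (types and names illustrative):

#include <stdlib.h>

struct err_buf { unsigned long size; unsigned name; };   /* illustrative */

/* Sketch: one allocation backs both arrays; pinned entries start where
 * the active entries end, so freeing 'active' frees both. */
static int alloc_lists(struct err_buf **active, struct err_buf **pinned,
                       size_t n_active, size_t n_pinned)
{
        struct err_buf *bo = malloc(sizeof(*bo) * (n_active + n_pinned));
        if (bo == NULL)
                return -1;
        *active = bo;                /* [0, n_active) */
        *pinned = bo + n_active;     /* [n_active, n_active + n_pinned) */
        return 0;
}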
718 do_gettimeofday(&error->time); 915 do_gettimeofday(&error->time);
719 916
720 error->overlay = intel_overlay_capture_error_state(dev); 917 error->overlay = intel_overlay_capture_error_state(dev);
918 error->display = intel_display_capture_error_state(dev);
721 919
722 spin_lock_irqsave(&dev_priv->error_lock, flags); 920 spin_lock_irqsave(&dev_priv->error_lock, flags);
723 if (dev_priv->first_error == NULL) { 921 if (dev_priv->first_error == NULL) {
@@ -775,7 +973,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
775 printk(KERN_ERR " ACTHD: 0x%08x\n", 973 printk(KERN_ERR " ACTHD: 0x%08x\n",
776 I915_READ(ACTHD_I965)); 974 I915_READ(ACTHD_I965));
777 I915_WRITE(IPEIR_I965, ipeir); 975 I915_WRITE(IPEIR_I965, ipeir);
778 (void)I915_READ(IPEIR_I965); 976 POSTING_READ(IPEIR_I965);
779 } 977 }
780 if (eir & GM45_ERROR_PAGE_TABLE) { 978 if (eir & GM45_ERROR_PAGE_TABLE) {
781 u32 pgtbl_err = I915_READ(PGTBL_ER); 979 u32 pgtbl_err = I915_READ(PGTBL_ER);
@@ -783,7 +981,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
783 printk(KERN_ERR " PGTBL_ER: 0x%08x\n", 981 printk(KERN_ERR " PGTBL_ER: 0x%08x\n",
784 pgtbl_err); 982 pgtbl_err);
785 I915_WRITE(PGTBL_ER, pgtbl_err); 983 I915_WRITE(PGTBL_ER, pgtbl_err);
786 (void)I915_READ(PGTBL_ER); 984 POSTING_READ(PGTBL_ER);
787 } 985 }
788 } 986 }
789 987
@@ -794,7 +992,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
794 printk(KERN_ERR " PGTBL_ER: 0x%08x\n", 992 printk(KERN_ERR " PGTBL_ER: 0x%08x\n",
795 pgtbl_err); 993 pgtbl_err);
796 I915_WRITE(PGTBL_ER, pgtbl_err); 994 I915_WRITE(PGTBL_ER, pgtbl_err);
797 (void)I915_READ(PGTBL_ER); 995 POSTING_READ(PGTBL_ER);
798 } 996 }
799 } 997 }
800 998
@@ -825,7 +1023,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
825 printk(KERN_ERR " ACTHD: 0x%08x\n", 1023 printk(KERN_ERR " ACTHD: 0x%08x\n",
826 I915_READ(ACTHD)); 1024 I915_READ(ACTHD));
827 I915_WRITE(IPEIR, ipeir); 1025 I915_WRITE(IPEIR, ipeir);
828 (void)I915_READ(IPEIR); 1026 POSTING_READ(IPEIR);
829 } else { 1027 } else {
830 u32 ipeir = I915_READ(IPEIR_I965); 1028 u32 ipeir = I915_READ(IPEIR_I965);
831 1029
@@ -842,12 +1040,12 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
842 printk(KERN_ERR " ACTHD: 0x%08x\n", 1040 printk(KERN_ERR " ACTHD: 0x%08x\n",
843 I915_READ(ACTHD_I965)); 1041 I915_READ(ACTHD_I965));
844 I915_WRITE(IPEIR_I965, ipeir); 1042 I915_WRITE(IPEIR_I965, ipeir);
845 (void)I915_READ(IPEIR_I965); 1043 POSTING_READ(IPEIR_I965);
846 } 1044 }
847 } 1045 }
848 1046
849 I915_WRITE(EIR, eir); 1047 I915_WRITE(EIR, eir);
850 (void)I915_READ(EIR); 1048 POSTING_READ(EIR);
851 eir = I915_READ(EIR); 1049 eir = I915_READ(EIR);
852 if (eir) { 1050 if (eir) {
853 /* 1051 /*
@@ -870,7 +1068,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
870 * so userspace knows something bad happened (should trigger collection 1068 * so userspace knows something bad happened (should trigger collection
871 * of a ring dump etc.). 1069 * of a ring dump etc.).
872 */ 1070 */
873static void i915_handle_error(struct drm_device *dev, bool wedged) 1071void i915_handle_error(struct drm_device *dev, bool wedged)
874{ 1072{
875 struct drm_i915_private *dev_priv = dev->dev_private; 1073 struct drm_i915_private *dev_priv = dev->dev_private;
876 1074
@@ -884,11 +1082,11 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
884 /* 1082 /*
885 * Wakeup waiting processes so they don't hang 1083 * Wakeup waiting processes so they don't hang
886 */ 1084 */
887 wake_up_all(&dev_priv->render_ring.irq_queue); 1085 wake_up_all(&dev_priv->ring[RCS].irq_queue);
888 if (HAS_BSD(dev)) 1086 if (HAS_BSD(dev))
889 wake_up_all(&dev_priv->bsd_ring.irq_queue); 1087 wake_up_all(&dev_priv->ring[VCS].irq_queue);
890 if (HAS_BLT(dev)) 1088 if (HAS_BLT(dev))
891 wake_up_all(&dev_priv->blt_ring.irq_queue); 1089 wake_up_all(&dev_priv->ring[BCS].irq_queue);
892 } 1090 }
893 1091
894 queue_work(dev_priv->wq, &dev_priv->error_work); 1092 queue_work(dev_priv->wq, &dev_priv->error_work);
@@ -899,7 +1097,7 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
899 drm_i915_private_t *dev_priv = dev->dev_private; 1097 drm_i915_private_t *dev_priv = dev->dev_private;
900 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 1098 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
901 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1099 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
902 struct drm_i915_gem_object *obj_priv; 1100 struct drm_i915_gem_object *obj;
903 struct intel_unpin_work *work; 1101 struct intel_unpin_work *work;
904 unsigned long flags; 1102 unsigned long flags;
905 bool stall_detected; 1103 bool stall_detected;
@@ -918,13 +1116,13 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
918 } 1116 }
919 1117
920 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ 1118 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
921 obj_priv = to_intel_bo(work->pending_flip_obj); 1119 obj = work->pending_flip_obj;
922 if (INTEL_INFO(dev)->gen >= 4) { 1120 if (INTEL_INFO(dev)->gen >= 4) {
923 int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF; 1121 int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF;
924 stall_detected = I915_READ(dspsurf) == obj_priv->gtt_offset; 1122 stall_detected = I915_READ(dspsurf) == obj->gtt_offset;
925 } else { 1123 } else {
926 int dspaddr = intel_crtc->plane == 0 ? DSPAADDR : DSPBADDR; 1124 int dspaddr = intel_crtc->plane == 0 ? DSPAADDR : DSPBADDR;
927 stall_detected = I915_READ(dspaddr) == (obj_priv->gtt_offset + 1125 stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
928 crtc->y * crtc->fb->pitch + 1126 crtc->y * crtc->fb->pitch +
929 crtc->x * crtc->fb->bits_per_pixel/8); 1127 crtc->x * crtc->fb->bits_per_pixel/8);
930 } 1128 }
@@ -970,7 +1168,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
970 * It doesn't set the bit in iir again, but it still produces 1168 * It doesn't set the bit in iir again, but it still produces
971 * interrupts (for non-MSI). 1169 * interrupts (for non-MSI).
972 */ 1170 */
973 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 1171 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
974 pipea_stats = I915_READ(PIPEASTAT); 1172 pipea_stats = I915_READ(PIPEASTAT);
975 pipeb_stats = I915_READ(PIPEBSTAT); 1173 pipeb_stats = I915_READ(PIPEBSTAT);
976 1174
@@ -993,7 +1191,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
993 I915_WRITE(PIPEBSTAT, pipeb_stats); 1191 I915_WRITE(PIPEBSTAT, pipeb_stats);
994 irq_received = 1; 1192 irq_received = 1;
995 } 1193 }
996 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); 1194 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
997 1195
998 if (!irq_received) 1196 if (!irq_received)
999 break; 1197 break;
@@ -1026,9 +1224,9 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
1026 } 1224 }
1027 1225
1028 if (iir & I915_USER_INTERRUPT) 1226 if (iir & I915_USER_INTERRUPT)
1029 notify_ring(dev, &dev_priv->render_ring); 1227 notify_ring(dev, &dev_priv->ring[RCS]);
1030 if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT)) 1228 if (iir & I915_BSD_USER_INTERRUPT)
1031 notify_ring(dev, &dev_priv->bsd_ring); 1229 notify_ring(dev, &dev_priv->ring[VCS]);
1032 1230
1033 if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) { 1231 if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
1034 intel_prepare_page_flip(dev, 0); 1232 intel_prepare_page_flip(dev, 0);
@@ -1101,12 +1299,13 @@ static int i915_emit_irq(struct drm_device * dev)
1101 if (master_priv->sarea_priv) 1299 if (master_priv->sarea_priv)
1102 master_priv->sarea_priv->last_enqueue = dev_priv->counter; 1300 master_priv->sarea_priv->last_enqueue = dev_priv->counter;
1103 1301
1104 BEGIN_LP_RING(4); 1302 if (BEGIN_LP_RING(4) == 0) {
1105 OUT_RING(MI_STORE_DWORD_INDEX); 1303 OUT_RING(MI_STORE_DWORD_INDEX);
1106 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 1304 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1107 OUT_RING(dev_priv->counter); 1305 OUT_RING(dev_priv->counter);
1108 OUT_RING(MI_USER_INTERRUPT); 1306 OUT_RING(MI_USER_INTERRUPT);
1109 ADVANCE_LP_RING(); 1307 ADVANCE_LP_RING();
1308 }
1110 1309
1111 return dev_priv->counter; 1310 return dev_priv->counter;
1112} 1311}
@@ -1114,12 +1313,11 @@ static int i915_emit_irq(struct drm_device * dev)
1114void i915_trace_irq_get(struct drm_device *dev, u32 seqno) 1313void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
1115{ 1314{
1116 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1315 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1117 struct intel_ring_buffer *render_ring = &dev_priv->render_ring; 1316 struct intel_ring_buffer *ring = LP_RING(dev_priv);
1118 1317
1119 if (dev_priv->trace_irq_seqno == 0) 1318 if (dev_priv->trace_irq_seqno == 0 &&
1120 render_ring->user_irq_get(dev, render_ring); 1319 ring->irq_get(ring))
1121 1320 dev_priv->trace_irq_seqno = seqno;
1122 dev_priv->trace_irq_seqno = seqno;
1123} 1321}
1124 1322
1125static int i915_wait_irq(struct drm_device * dev, int irq_nr) 1323static int i915_wait_irq(struct drm_device * dev, int irq_nr)
@@ -1127,7 +1325,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
1127 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1325 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1128 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 1326 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
1129 int ret = 0; 1327 int ret = 0;
1130 struct intel_ring_buffer *render_ring = &dev_priv->render_ring; 1328 struct intel_ring_buffer *ring = LP_RING(dev_priv);
1131 1329
1132 DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr, 1330 DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
1133 READ_BREADCRUMB(dev_priv)); 1331 READ_BREADCRUMB(dev_priv));
@@ -1141,10 +1339,12 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
1141 if (master_priv->sarea_priv) 1339 if (master_priv->sarea_priv)
1142 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; 1340 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
1143 1341
1144 render_ring->user_irq_get(dev, render_ring); 1342 ret = -ENODEV;
1145 DRM_WAIT_ON(ret, dev_priv->render_ring.irq_queue, 3 * DRM_HZ, 1343 if (ring->irq_get(ring)) {
1146 READ_BREADCRUMB(dev_priv) >= irq_nr); 1344 DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
1147 render_ring->user_irq_put(dev, render_ring); 1345 READ_BREADCRUMB(dev_priv) >= irq_nr);
1346 ring->irq_put(ring);
1347 }
1148 1348
1149 if (ret == -EBUSY) { 1349 if (ret == -EBUSY) {
1150 DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", 1350 DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
@@ -1163,7 +1363,7 @@ int i915_irq_emit(struct drm_device *dev, void *data,
1163 drm_i915_irq_emit_t *emit = data; 1363 drm_i915_irq_emit_t *emit = data;
1164 int result; 1364 int result;
1165 1365
1166 if (!dev_priv || !dev_priv->render_ring.virtual_start) { 1366 if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
1167 DRM_ERROR("called with no initialization\n"); 1367 DRM_ERROR("called with no initialization\n");
1168 return -EINVAL; 1368 return -EINVAL;
1169 } 1369 }
@@ -1209,9 +1409,9 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
1209 if (!i915_pipe_enabled(dev, pipe)) 1409 if (!i915_pipe_enabled(dev, pipe))
1210 return -EINVAL; 1410 return -EINVAL;
1211 1411
1212 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 1412 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1213 if (HAS_PCH_SPLIT(dev)) 1413 if (HAS_PCH_SPLIT(dev))
1214 ironlake_enable_display_irq(dev_priv, (pipe == 0) ? 1414 ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
1215 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); 1415 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
1216 else if (INTEL_INFO(dev)->gen >= 4) 1416 else if (INTEL_INFO(dev)->gen >= 4)
1217 i915_enable_pipestat(dev_priv, pipe, 1417 i915_enable_pipestat(dev_priv, pipe,
@@ -1219,7 +1419,7 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
1219 else 1419 else
1220 i915_enable_pipestat(dev_priv, pipe, 1420 i915_enable_pipestat(dev_priv, pipe,
1221 PIPE_VBLANK_INTERRUPT_ENABLE); 1421 PIPE_VBLANK_INTERRUPT_ENABLE);
1222 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); 1422 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1223 return 0; 1423 return 0;
1224} 1424}
1225 1425
@@ -1231,15 +1431,15 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
1231 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1431 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1232 unsigned long irqflags; 1432 unsigned long irqflags;
1233 1433
1234 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 1434 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1235 if (HAS_PCH_SPLIT(dev)) 1435 if (HAS_PCH_SPLIT(dev))
1236 ironlake_disable_display_irq(dev_priv, (pipe == 0) ? 1436 ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
1237 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); 1437 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
1238 else 1438 else
1239 i915_disable_pipestat(dev_priv, pipe, 1439 i915_disable_pipestat(dev_priv, pipe,
1240 PIPE_VBLANK_INTERRUPT_ENABLE | 1440 PIPE_VBLANK_INTERRUPT_ENABLE |
1241 PIPE_START_VBLANK_INTERRUPT_ENABLE); 1441 PIPE_START_VBLANK_INTERRUPT_ENABLE);
1242 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); 1442 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1243} 1443}
1244 1444
1245void i915_enable_interrupt (struct drm_device *dev) 1445void i915_enable_interrupt (struct drm_device *dev)
@@ -1306,12 +1506,50 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
1306 return -EINVAL; 1506 return -EINVAL;
1307} 1507}
1308 1508
1309static struct drm_i915_gem_request * 1509static u32
1310i915_get_tail_request(struct drm_device *dev) 1510ring_last_seqno(struct intel_ring_buffer *ring)
1311{ 1511{
1312 drm_i915_private_t *dev_priv = dev->dev_private; 1512 return list_entry(ring->request_list.prev,
1313 return list_entry(dev_priv->render_ring.request_list.prev, 1513 struct drm_i915_gem_request, list)->seqno;
1314 struct drm_i915_gem_request, list); 1514}
1515
1516static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
1517{
1518 if (list_empty(&ring->request_list) ||
1519 i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) {
1520 /* Issue a wake-up to catch stuck h/w. */
1521 if (ring->waiting_seqno && waitqueue_active(&ring->irq_queue)) {
1522 DRM_ERROR("Hangcheck timer elapsed... %s idle [waiting on %d, at %d], missed IRQ?\n",
1523 ring->name,
1524 ring->waiting_seqno,
1525 ring->get_seqno(ring));
1526 wake_up_all(&ring->irq_queue);
1527 *err = true;
1528 }
1529 return true;
1530 }
1531 return false;
1532}
1533
1534static bool kick_ring(struct intel_ring_buffer *ring)
1535{
1536 struct drm_device *dev = ring->dev;
1537 struct drm_i915_private *dev_priv = dev->dev_private;
1538 u32 tmp = I915_READ_CTL(ring);
1539 if (tmp & RING_WAIT) {
1540 DRM_ERROR("Kicking stuck wait on %s\n",
1541 ring->name);
1542 I915_WRITE_CTL(ring, tmp);
1543 return true;
1544 }
1545 if (IS_GEN6(dev) &&
1546 (tmp & RING_WAIT_SEMAPHORE)) {
1547 DRM_ERROR("Kicking stuck semaphore on %s\n",
1548 ring->name);
1549 I915_WRITE_CTL(ring, tmp);
1550 return true;
1551 }
1552 return false;
1315} 1553}
1316 1554
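kick_ring() above recovers a ring stuck in MI_WAIT: if the ring's CTL register shows the wait bit (or, on gen6, the semaphore-wait bit), writing the register back clears the wait and lets the ring advance, which is far cheaper than declaring a full hang. The core of the check as a standalone sketch (accessors illustrative):

#include <stdbool.h>
#include <stdint.h>

#define WAIT_BIT (1u << 11)    /* RING_WAIT, per the register header */

/* Sketch: a CTL write-back while the wait bit is set kicks the ring. */
static bool kick_if_waiting(uint32_t (*read_ctl)(void),
                            void (*write_ctl)(uint32_t))
{
        uint32_t tmp = read_ctl();
        if (tmp & WAIT_BIT) {
                write_ctl(tmp);    /* clears the stuck MI_WAIT */
                return true;
        }
        return false;
}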
1317/** 1555/**
@@ -1325,6 +1563,17 @@ void i915_hangcheck_elapsed(unsigned long data)
1325 struct drm_device *dev = (struct drm_device *)data; 1563 struct drm_device *dev = (struct drm_device *)data;
1326 drm_i915_private_t *dev_priv = dev->dev_private; 1564 drm_i915_private_t *dev_priv = dev->dev_private;
1327 uint32_t acthd, instdone, instdone1; 1565 uint32_t acthd, instdone, instdone1;
1566 bool err = false;
1567
1568 /* If all work is done then ACTHD clearly hasn't advanced. */
1569 if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) &&
1570 i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) &&
1571 i915_hangcheck_ring_idle(&dev_priv->ring[BCS], &err)) {
1572 dev_priv->hangcheck_count = 0;
1573 if (err)
1574 goto repeat;
1575 return;
1576 }
1328 1577
1329 if (INTEL_INFO(dev)->gen < 4) { 1578 if (INTEL_INFO(dev)->gen < 4) {
1330 acthd = I915_READ(ACTHD); 1579 acthd = I915_READ(ACTHD);
@@ -1336,38 +1585,6 @@ void i915_hangcheck_elapsed(unsigned long data)
1336 instdone1 = I915_READ(INSTDONE1); 1585 instdone1 = I915_READ(INSTDONE1);
1337 } 1586 }
1338 1587
1339 /* If all work is done then ACTHD clearly hasn't advanced. */
1340 if (list_empty(&dev_priv->render_ring.request_list) ||
1341 i915_seqno_passed(dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring),
1342 i915_get_tail_request(dev)->seqno)) {
1343 bool missed_wakeup = false;
1344
1345 dev_priv->hangcheck_count = 0;
1346
1347 /* Issue a wake-up to catch stuck h/w. */
1348 if (dev_priv->render_ring.waiting_gem_seqno &&
1349 waitqueue_active(&dev_priv->render_ring.irq_queue)) {
1350 wake_up_all(&dev_priv->render_ring.irq_queue);
1351 missed_wakeup = true;
1352 }
1353
1354 if (dev_priv->bsd_ring.waiting_gem_seqno &&
1355 waitqueue_active(&dev_priv->bsd_ring.irq_queue)) {
1356 wake_up_all(&dev_priv->bsd_ring.irq_queue);
1357 missed_wakeup = true;
1358 }
1359
1360 if (dev_priv->blt_ring.waiting_gem_seqno &&
1361 waitqueue_active(&dev_priv->blt_ring.irq_queue)) {
1362 wake_up_all(&dev_priv->blt_ring.irq_queue);
1363 missed_wakeup = true;
1364 }
1365
1366 if (missed_wakeup)
1367 DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n");
1368 return;
1369 }
1370
1371 if (dev_priv->last_acthd == acthd && 1588 if (dev_priv->last_acthd == acthd &&
1372 dev_priv->last_instdone == instdone && 1589 dev_priv->last_instdone == instdone &&
1373 dev_priv->last_instdone1 == instdone1) { 1590 dev_priv->last_instdone1 == instdone1) {
@@ -1380,12 +1597,17 @@ void i915_hangcheck_elapsed(unsigned long data)
1380 * and break the hang. This should work on 1597 * and break the hang. This should work on
1381 * all but the second generation chipsets. 1598 * all but the second generation chipsets.
1382 */ 1599 */
1383 u32 tmp = I915_READ(PRB0_CTL); 1600
1384 if (tmp & RING_WAIT) { 1601 if (kick_ring(&dev_priv->ring[RCS]))
1385 I915_WRITE(PRB0_CTL, tmp); 1602 goto repeat;
1386 POSTING_READ(PRB0_CTL); 1603
1387 goto out; 1604 if (HAS_BSD(dev) &&
1388 } 1605 kick_ring(&dev_priv->ring[VCS]))
1606 goto repeat;
1607
1608 if (HAS_BLT(dev) &&
1609 kick_ring(&dev_priv->ring[BCS]))
1610 goto repeat;
1389 } 1611 }
1390 1612
1391 i915_handle_error(dev, true); 1613 i915_handle_error(dev, true);
@@ -1399,7 +1621,7 @@ void i915_hangcheck_elapsed(unsigned long data)
1399 dev_priv->last_instdone1 = instdone1; 1621 dev_priv->last_instdone1 = instdone1;
1400 } 1622 }
1401 1623
1402out: 1624repeat:
1403 /* Reset timer in case the chip hangs without another request being added */ 1625 /* Reset timer in case the chip hangs without another request being added */
1404 mod_timer(&dev_priv->hangcheck_timer, 1626 mod_timer(&dev_priv->hangcheck_timer,
1405 jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); 1627 jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
@@ -1417,17 +1639,17 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
1417 1639
1418 I915_WRITE(DEIMR, 0xffffffff); 1640 I915_WRITE(DEIMR, 0xffffffff);
1419 I915_WRITE(DEIER, 0x0); 1641 I915_WRITE(DEIER, 0x0);
1420 (void) I915_READ(DEIER); 1642 POSTING_READ(DEIER);
1421 1643
1422 /* and GT */ 1644 /* and GT */
1423 I915_WRITE(GTIMR, 0xffffffff); 1645 I915_WRITE(GTIMR, 0xffffffff);
1424 I915_WRITE(GTIER, 0x0); 1646 I915_WRITE(GTIER, 0x0);
1425 (void) I915_READ(GTIER); 1647 POSTING_READ(GTIER);
1426 1648
1427 /* south display irq */ 1649 /* south display irq */
1428 I915_WRITE(SDEIMR, 0xffffffff); 1650 I915_WRITE(SDEIMR, 0xffffffff);
1429 I915_WRITE(SDEIER, 0x0); 1651 I915_WRITE(SDEIER, 0x0);
1430 (void) I915_READ(SDEIER); 1652 POSTING_READ(SDEIER);
1431} 1653}
1432 1654
1433static int ironlake_irq_postinstall(struct drm_device *dev) 1655static int ironlake_irq_postinstall(struct drm_device *dev)
@@ -1436,38 +1658,39 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
1436 /* enable the kinds of interrupts that are always enabled */ 1658 /* enable the kinds of interrupts that are always enabled */
1437 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 1659 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
1438 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; 1660 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
1439 u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT; 1661 u32 render_irqs;
1440 u32 hotplug_mask; 1662 u32 hotplug_mask;
1441 1663
1442 dev_priv->irq_mask_reg = ~display_mask; 1664 dev_priv->irq_mask = ~display_mask;
1443 dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;
1444 1665
1445 /* should always be able to generate irqs */ 1666 /* should always be able to generate irqs */
1446 I915_WRITE(DEIIR, I915_READ(DEIIR)); 1667 I915_WRITE(DEIIR, I915_READ(DEIIR));
1447 I915_WRITE(DEIMR, dev_priv->irq_mask_reg); 1668 I915_WRITE(DEIMR, dev_priv->irq_mask);
1448 I915_WRITE(DEIER, dev_priv->de_irq_enable_reg); 1669 I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
1449 (void) I915_READ(DEIER); 1670 POSTING_READ(DEIER);
1450 1671
1451 if (IS_GEN6(dev)) { 1672 dev_priv->gt_irq_mask = ~0;
1452 render_mask =
1453 GT_PIPE_NOTIFY |
1454 GT_GEN6_BSD_USER_INTERRUPT |
1455 GT_BLT_USER_INTERRUPT;
1456 }
1457
1458 dev_priv->gt_irq_mask_reg = ~render_mask;
1459 dev_priv->gt_irq_enable_reg = render_mask;
1460 1673
1461 I915_WRITE(GTIIR, I915_READ(GTIIR)); 1674 I915_WRITE(GTIIR, I915_READ(GTIIR));
1462 I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg); 1675 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
1463 if (IS_GEN6(dev)) { 1676 if (IS_GEN6(dev)) {
1464 I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT); 1677 I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_USER_INTERRUPT);
1465 I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_IMR_USER_INTERRUPT); 1678 I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_USER_INTERRUPT);
1466 I915_WRITE(GEN6_BLITTER_IMR, ~GEN6_BLITTER_USER_INTERRUPT); 1679 I915_WRITE(GEN6_BLITTER_IMR, ~GEN6_BLITTER_USER_INTERRUPT);
1467 } 1680 }
1468 1681
1469 I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg); 1682 if (IS_GEN6(dev))
1470 (void) I915_READ(GTIER); 1683 render_irqs =
1684 GT_USER_INTERRUPT |
1685 GT_GEN6_BSD_USER_INTERRUPT |
1686 GT_BLT_USER_INTERRUPT;
1687 else
1688 render_irqs =
1689 GT_USER_INTERRUPT |
1690 GT_PIPE_NOTIFY |
1691 GT_BSD_USER_INTERRUPT;
1692 I915_WRITE(GTIER, render_irqs);
1693 POSTING_READ(GTIER);
1471 1694
1472 if (HAS_PCH_CPT(dev)) { 1695 if (HAS_PCH_CPT(dev)) {
1473 hotplug_mask = SDE_CRT_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT | 1696 hotplug_mask = SDE_CRT_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT |
@@ -1477,13 +1700,12 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
1477 SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; 1700 SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
1478 } 1701 }
1479 1702
1480 dev_priv->pch_irq_mask_reg = ~hotplug_mask; 1703 dev_priv->pch_irq_mask = ~hotplug_mask;
1481 dev_priv->pch_irq_enable_reg = hotplug_mask;
1482 1704
1483 I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 1705 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
1484 I915_WRITE(SDEIMR, dev_priv->pch_irq_mask_reg); 1706 I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
1485 I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg); 1707 I915_WRITE(SDEIER, hotplug_mask);
1486 (void) I915_READ(SDEIER); 1708 POSTING_READ(SDEIER);
1487 1709
1488 if (IS_IRONLAKE_M(dev)) { 1710 if (IS_IRONLAKE_M(dev)) {
1489 /* Clear & enable PCU event interrupts */ 1711 /* Clear & enable PCU event interrupts */
@@ -1519,7 +1741,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
1519 I915_WRITE(PIPEBSTAT, 0); 1741 I915_WRITE(PIPEBSTAT, 0);
1520 I915_WRITE(IMR, 0xffffffff); 1742 I915_WRITE(IMR, 0xffffffff);
1521 I915_WRITE(IER, 0x0); 1743 I915_WRITE(IER, 0x0);
1522 (void) I915_READ(IER); 1744 POSTING_READ(IER);
1523} 1745}
1524 1746
1525/* 1747/*
@@ -1532,11 +1754,11 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
1532 u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR; 1754 u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
1533 u32 error_mask; 1755 u32 error_mask;
1534 1756
1535 DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue); 1757 DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue);
1536 if (HAS_BSD(dev)) 1758 if (HAS_BSD(dev))
1537 DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue); 1759 DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue);
1538 if (HAS_BLT(dev)) 1760 if (HAS_BLT(dev))
1539 DRM_INIT_WAITQUEUE(&dev_priv->blt_ring.irq_queue); 1761 DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue);
1540 1762
1541 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; 1763 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
1542 1764
@@ -1544,7 +1766,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
1544 return ironlake_irq_postinstall(dev); 1766 return ironlake_irq_postinstall(dev);
1545 1767
1546 /* Unmask the interrupts that we always want on. */ 1768 /* Unmask the interrupts that we always want on. */
1547 dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX; 1769 dev_priv->irq_mask = ~I915_INTERRUPT_ENABLE_FIX;
1548 1770
1549 dev_priv->pipestat[0] = 0; 1771 dev_priv->pipestat[0] = 0;
1550 dev_priv->pipestat[1] = 0; 1772 dev_priv->pipestat[1] = 0;
@@ -1553,7 +1775,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
1553 /* Enable in IER... */ 1775 /* Enable in IER... */
1554 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 1776 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
1555 /* and unmask in IMR */ 1777 /* and unmask in IMR */
1556 dev_priv->irq_mask_reg &= ~I915_DISPLAY_PORT_INTERRUPT; 1778 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
1557 } 1779 }
1558 1780
1559 /* 1781 /*
@@ -1571,9 +1793,9 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
1571 } 1793 }
1572 I915_WRITE(EMR, error_mask); 1794 I915_WRITE(EMR, error_mask);
1573 1795
1574 I915_WRITE(IMR, dev_priv->irq_mask_reg); 1796 I915_WRITE(IMR, dev_priv->irq_mask);
1575 I915_WRITE(IER, enable_mask); 1797 I915_WRITE(IER, enable_mask);
1576 (void) I915_READ(IER); 1798 POSTING_READ(IER);
1577 1799
1578 if (I915_HAS_HOTPLUG(dev)) { 1800 if (I915_HAS_HOTPLUG(dev)) {
1579 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 1801 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index cb8f43429279..8f948a6fbc1c 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -78,6 +78,12 @@
78#define GRDOM_RENDER (1<<2) 78#define GRDOM_RENDER (1<<2)
79#define GRDOM_MEDIA (3<<2) 79#define GRDOM_MEDIA (3<<2)
80 80
81#define GEN6_GDRST 0x941c
82#define GEN6_GRDOM_FULL (1 << 0)
83#define GEN6_GRDOM_RENDER (1 << 1)
84#define GEN6_GRDOM_MEDIA (1 << 2)
85#define GEN6_GRDOM_BLT (1 << 3)
86
81/* VGA stuff */ 87/* VGA stuff */
82 88
83#define VGA_ST01_MDA 0x3ba 89#define VGA_ST01_MDA 0x3ba
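The new GEN6_GDRST bits select a reset domain; a gen6 soft reset would write one of the GRDOM bits and poll for the hardware to clear it. A hedged standalone sketch of that sequence (the MMIO accessors are illustrative and the poll budget arbitrary):

#include <stdint.h>

#define GDRST_FULL (1u << 0)    /* GEN6_GRDOM_FULL, per the header above */

/* Sketch: request a full reset and wait for the bit to self-clear. */
static int gen6_soft_reset(void (*wr)(uint32_t off, uint32_t val),
                           uint32_t (*rd)(uint32_t off))
{
        int budget = 500;                      /* arbitrary poll budget */

        wr(0x941c, GDRST_FULL);                /* GEN6_GDRST */
        while ((rd(0x941c) & GDRST_FULL) && --budget)
                ;                              /* hardware clears on done */
        return budget ? 0 : -1;                /* -1: reset timed out */
}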
@@ -158,12 +164,23 @@
158#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */ 164#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
159#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1) 165#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
160#define MI_STORE_DWORD_INDEX_SHIFT 2 166#define MI_STORE_DWORD_INDEX_SHIFT 2
161#define MI_LOAD_REGISTER_IMM MI_INSTR(0x22, 1) 167/* Official intel docs are somewhat sloppy concerning MI_LOAD_REGISTER_IMM:
168 * - Always issue a MI_NOOP _before_ the MI_LOAD_REGISTER_IMM - otherwise hw
169 * simply ignores the register load under certain conditions.
 170 * - One can actually load arbitrarily many registers: simply issue x
 171 * address/value pairs. Don't overdo it, though: x <= 2^4 must hold!
172 */
173#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1)
162#define MI_FLUSH_DW MI_INSTR(0x26, 2) /* for GEN6 */ 174#define MI_FLUSH_DW MI_INSTR(0x26, 2) /* for GEN6 */
163#define MI_BATCH_BUFFER MI_INSTR(0x30, 1) 175#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
164#define MI_BATCH_NON_SECURE (1) 176#define MI_BATCH_NON_SECURE (1)
165#define MI_BATCH_NON_SECURE_I965 (1<<8) 177#define MI_BATCH_NON_SECURE_I965 (1<<8)
166#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0) 178#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
179#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */
180#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
181#define MI_SEMAPHORE_UPDATE (1<<21)
182#define MI_SEMAPHORE_COMPARE (1<<20)
183#define MI_SEMAPHORE_REGISTER (1<<18)
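Per the MI_LOAD_REGISTER_IMM comment earlier in this hunk, a load of n registers is one header dword followed by n offset/value pairs, preceded by an MI_NOOP. A sketch of emitting a single-register load (emit() stands in for the driver's ring-emit helper; MI_NOOP is defined elsewhere in this header):

/* Sketch: load one register via LRI, observing the MI_NOOP rule above. */
static void emit_lri_one(void (*emit)(uint32_t), uint32_t reg, uint32_t val)
{
        emit(MI_NOOP);                     /* hw may ignore LRI otherwise */
        emit(MI_LOAD_REGISTER_IMM(1));     /* header: one offset/value pair */
        emit(reg);                         /* register offset */
        emit(val);                         /* immediate value */
}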
167/* 184/*
168 * 3D instructions used by the kernel 185 * 3D instructions used by the kernel
169 */ 186 */
@@ -256,10 +273,6 @@
256 * Instruction and interrupt control regs 273 * Instruction and interrupt control regs
257 */ 274 */
258#define PGTBL_ER 0x02024 275#define PGTBL_ER 0x02024
259#define PRB0_TAIL 0x02030
260#define PRB0_HEAD 0x02034
261#define PRB0_START 0x02038
262#define PRB0_CTL 0x0203c
263#define RENDER_RING_BASE 0x02000 276#define RENDER_RING_BASE 0x02000
264#define BSD_RING_BASE 0x04000 277#define BSD_RING_BASE 0x04000
265#define GEN6_BSD_RING_BASE 0x12000 278#define GEN6_BSD_RING_BASE 0x12000
@@ -268,9 +281,13 @@
268#define RING_HEAD(base) ((base)+0x34) 281#define RING_HEAD(base) ((base)+0x34)
269#define RING_START(base) ((base)+0x38) 282#define RING_START(base) ((base)+0x38)
270#define RING_CTL(base) ((base)+0x3c) 283#define RING_CTL(base) ((base)+0x3c)
284#define RING_SYNC_0(base) ((base)+0x40)
285#define RING_SYNC_1(base) ((base)+0x44)
286#define RING_MAX_IDLE(base) ((base)+0x54)
271#define RING_HWS_PGA(base) ((base)+0x80) 287#define RING_HWS_PGA(base) ((base)+0x80)
272#define RING_HWS_PGA_GEN6(base) ((base)+0x2080) 288#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
273#define RING_ACTHD(base) ((base)+0x74) 289#define RING_ACTHD(base) ((base)+0x74)
290#define RING_NOPID(base) ((base)+0x94)
274#define TAIL_ADDR 0x001FFFF8 291#define TAIL_ADDR 0x001FFFF8
275#define HEAD_WRAP_COUNT 0xFFE00000 292#define HEAD_WRAP_COUNT 0xFFE00000
276#define HEAD_WRAP_ONE 0x00200000 293#define HEAD_WRAP_ONE 0x00200000
@@ -285,10 +302,17 @@
285#define RING_INVALID 0x00000000 302#define RING_INVALID 0x00000000
286#define RING_WAIT_I8XX (1<<0) /* gen2, PRBx_HEAD */ 303#define RING_WAIT_I8XX (1<<0) /* gen2, PRBx_HEAD */
287#define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */ 304#define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */
305#define RING_WAIT_SEMAPHORE (1<<10) /* gen6+ */
306#if 0
307#define PRB0_TAIL 0x02030
308#define PRB0_HEAD 0x02034
309#define PRB0_START 0x02038
310#define PRB0_CTL 0x0203c
288#define PRB1_TAIL 0x02040 /* 915+ only */ 311#define PRB1_TAIL 0x02040 /* 915+ only */
289#define PRB1_HEAD 0x02044 /* 915+ only */ 312#define PRB1_HEAD 0x02044 /* 915+ only */
290#define PRB1_START 0x02048 /* 915+ only */ 313#define PRB1_START 0x02048 /* 915+ only */
291#define PRB1_CTL 0x0204c /* 915+ only */ 314#define PRB1_CTL 0x0204c /* 915+ only */
315#endif
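The PRB0 block can be parked under #if 0 because the generic ring macros above reproduce the same offsets: with RENDER_RING_BASE at 0x02000, RING_HEAD() gives 0x02034 (the old PRB0_HEAD), RING_START() gives 0x02038 (PRB0_START) and RING_CTL() gives 0x0203c (PRB0_CTL). A compile-time sanity check, placed inside any function, could assert as much:

/* Sketch: the per-ring macros cover the legacy PRB0 register block. */
BUILD_BUG_ON(RING_HEAD(RENDER_RING_BASE) != 0x02034);  /* old PRB0_HEAD */
BUILD_BUG_ON(RING_START(RENDER_RING_BASE) != 0x02038); /* old PRB0_START */
BUILD_BUG_ON(RING_CTL(RENDER_RING_BASE) != 0x0203c);   /* old PRB0_CTL */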
292#define IPEIR_I965 0x02064 316#define IPEIR_I965 0x02064
293#define IPEHR_I965 0x02068 317#define IPEHR_I965 0x02068
294#define INSTDONE_I965 0x0206c 318#define INSTDONE_I965 0x0206c
@@ -305,11 +329,42 @@
305#define INSTDONE 0x02090 329#define INSTDONE 0x02090
306#define NOPID 0x02094 330#define NOPID 0x02094
307#define HWSTAM 0x02098 331#define HWSTAM 0x02098
332#define VCS_INSTDONE 0x1206C
333#define VCS_IPEIR 0x12064
334#define VCS_IPEHR 0x12068
335#define VCS_ACTHD 0x12074
336#define BCS_INSTDONE 0x2206C
337#define BCS_IPEIR 0x22064
338#define BCS_IPEHR 0x22068
339#define BCS_ACTHD 0x22074
340
341#define ERROR_GEN6 0x040a0
342
343/* GM45+ chicken bits -- debug workaround bits that may be required
344 * for various sorts of correct behavior. The top 16 bits of each are
345 * the enables for writing to the corresponding low bit.
346 */
347#define _3D_CHICKEN 0x02084
348#define _3D_CHICKEN2 0x0208c
349/* Disables pipelining of read flushes past the SF-WIZ interface.
350 * Required on all Ironlake steppings according to the B-Spec, but the
351 * particular danger of not doing so is not specified.
352 */
353# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
354#define _3D_CHICKEN3 0x02090
308 355
309#define MI_MODE 0x0209c 356#define MI_MODE 0x0209c
310# define VS_TIMER_DISPATCH (1 << 6) 357# define VS_TIMER_DISPATCH (1 << 6)
311# define MI_FLUSH_ENABLE (1 << 11) 358# define MI_FLUSH_ENABLE (1 << 11)
312 359
360#define GFX_MODE 0x02520
361#define GFX_RUN_LIST_ENABLE (1<<15)
362#define GFX_TLB_INVALIDATE_ALWAYS (1<<13)
363#define GFX_SURFACE_FAULT_ENABLE (1<<12)
364#define GFX_REPLAY_MODE (1<<11)
365#define GFX_PSMI_GRANULARITY (1<<10)
366#define GFX_PPGTT_ENABLE (1<<9)
367
313#define SCPD0 0x0209c /* 915+ only */ 368#define SCPD0 0x0209c /* 915+ only */
314#define IER 0x020a0 369#define IER 0x020a0
315#define IIR 0x020a4 370#define IIR 0x020a4
@@ -461,7 +516,7 @@
461#define GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR (1 << 3) 516#define GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR (1 << 3)
462 517
463#define GEN6_BSD_IMR 0x120a8 518#define GEN6_BSD_IMR 0x120a8
464#define GEN6_BSD_IMR_USER_INTERRUPT (1 << 12) 519#define GEN6_BSD_USER_INTERRUPT (1 << 12)
465 520
466#define GEN6_BSD_RNCID 0x12198 521#define GEN6_BSD_RNCID 0x12198
467 522
@@ -541,6 +596,18 @@
541 596
542#define ILK_DISPLAY_CHICKEN1 0x42000 597#define ILK_DISPLAY_CHICKEN1 0x42000
543#define ILK_FBCQ_DIS (1<<22) 598#define ILK_FBCQ_DIS (1<<22)
599#define ILK_PABSTRETCH_DIS (1<<21)
600
601
602/*
603 * Framebuffer compression for Sandybridge
604 *
605 * The following two registers are of type GTTMMADR
606 */
607#define SNB_DPFC_CTL_SA 0x100100
608#define SNB_CPU_FENCE_ENABLE (1<<29)
609#define DPFC_CPU_FENCE_OFFSET 0x100104
610
544 611
545/* 612/*
546 * GPIO regs 613 * GPIO regs
@@ -900,6 +967,8 @@
900 */ 967 */
901#define MCHBAR_MIRROR_BASE 0x10000 968#define MCHBAR_MIRROR_BASE 0x10000
902 969
970#define MCHBAR_MIRROR_BASE_SNB 0x140000
971
903/** 915-945 and GM965 MCH register controlling DRAM channel access */ 972/** 915-945 and GM965 MCH register controlling DRAM channel access */
904#define DCC 0x10200 973#define DCC 0x10200
905#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0) 974#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0)
@@ -1119,6 +1188,10 @@
1119#define DDRMPLL1 0X12c20 1188#define DDRMPLL1 0X12c20
1120#define PEG_BAND_GAP_DATA 0x14d68 1189#define PEG_BAND_GAP_DATA 0x14d68
1121 1190
1191#define GEN6_GT_PERF_STATUS 0x145948
1192#define GEN6_RP_STATE_LIMITS 0x145994
1193#define GEN6_RP_STATE_CAP 0x145998
1194
1122/* 1195/*
1123 * Logical Context regs 1196 * Logical Context regs
1124 */ 1197 */
@@ -1168,7 +1241,6 @@
1168#define VTOTAL(pipe) _PIPE(pipe, VTOTAL_A, VTOTAL_B) 1241#define VTOTAL(pipe) _PIPE(pipe, VTOTAL_A, VTOTAL_B)
1169#define VBLANK(pipe) _PIPE(pipe, VBLANK_A, VBLANK_B) 1242#define VBLANK(pipe) _PIPE(pipe, VBLANK_A, VBLANK_B)
1170#define VSYNC(pipe) _PIPE(pipe, VSYNC_A, VSYNC_B) 1243#define VSYNC(pipe) _PIPE(pipe, VSYNC_A, VSYNC_B)
1171#define PIPESRC(pipe) _PIPE(pipe, PIPEASRC, PIPEBSRC)
1172#define BCLRPAT(pipe) _PIPE(pipe, BCLRPAT_A, BCLRPAT_B) 1244#define BCLRPAT(pipe) _PIPE(pipe, BCLRPAT_A, BCLRPAT_B)
1173 1245
1174/* VGA port control */ 1246/* VGA port control */
@@ -2182,8 +2254,10 @@
2182#define PIPE_6BPC (2 << 5) 2254#define PIPE_6BPC (2 << 5)
2183#define PIPE_12BPC (3 << 5) 2255#define PIPE_12BPC (3 << 5)
2184 2256
2257#define PIPESRC(pipe) _PIPE(pipe, PIPEASRC, PIPEBSRC)
2185#define PIPECONF(pipe) _PIPE(pipe, PIPEACONF, PIPEBCONF) 2258#define PIPECONF(pipe) _PIPE(pipe, PIPEACONF, PIPEBCONF)
2186#define PIPEDSL(pipe) _PIPE(pipe, PIPEADSL, PIPEBDSL) 2259#define PIPEDSL(pipe) _PIPE(pipe, PIPEADSL, PIPEBDSL)
2260#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, PIPEAFRAMEPIXEL, PIPEBFRAMEPIXEL)
2187 2261
2188#define DSPARB 0x70030 2262#define DSPARB 0x70030
2189#define DSPARB_CSTART_MASK (0x7f << 7) 2263#define DSPARB_CSTART_MASK (0x7f << 7)
@@ -2291,6 +2365,40 @@
2291 2365
2292#define ILK_FIFO_LINE_SIZE 64 2366#define ILK_FIFO_LINE_SIZE 64
2293 2367
2368/* Watermark (WM) parameters for Sandybridge */
2369#define SNB_DISPLAY_FIFO 128
2370#define SNB_DISPLAY_MAXWM 0x7f /* bit 16:22 */
2371#define SNB_DISPLAY_DFTWM 8
2372#define SNB_CURSOR_FIFO 32
2373#define SNB_CURSOR_MAXWM 0x1f /* bit 4:0 */
2374#define SNB_CURSOR_DFTWM 8
2375
2376#define SNB_DISPLAY_SR_FIFO 512
2377#define SNB_DISPLAY_MAX_SRWM 0x1ff /* bit 16:8 */
2378#define SNB_DISPLAY_DFT_SRWM 0x3f
2379#define SNB_CURSOR_SR_FIFO 64
2380#define SNB_CURSOR_MAX_SRWM 0x3f /* bit 5:0 */
2381#define SNB_CURSOR_DFT_SRWM 8
2382
2383#define SNB_FBC_MAX_SRWM 0xf /* bit 23:20 */
2384
2385#define SNB_FIFO_LINE_SIZE 64
2386
2387
2388/* the address from which we read all kinds of latency values */
2389#define SSKPD 0x5d10
2390#define SSKPD_WM_MASK 0x3f
2391#define SSKPD_WM0_SHIFT 0
2392#define SSKPD_WM1_SHIFT 8
2393#define SSKPD_WM2_SHIFT 16
2394#define SSKPD_WM3_SHIFT 24
2395
2396#define SNB_LATENCY(shift) (I915_READ(MCHBAR_MIRROR_BASE_SNB + SSKPD) >> (shift) & SSKPD_WM_MASK)
2397#define SNB_READ_WM0_LATENCY() SNB_LATENCY(SSKPD_WM0_SHIFT)
2398#define SNB_READ_WM1_LATENCY() SNB_LATENCY(SSKPD_WM1_SHIFT)
2399#define SNB_READ_WM2_LATENCY() SNB_LATENCY(SSKPD_WM2_SHIFT)
2400#define SNB_READ_WM3_LATENCY() SNB_LATENCY(SSKPD_WM3_SHIFT)
2401
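As a worked decode under an assumed raw value: if the MCHBAR-mirrored SSKPD register read back as 0x20181008, the macros above would yield

	SNB_READ_WM0_LATENCY() == (0x20181008 >> 0) & 0x3f == 0x08
	SNB_READ_WM1_LATENCY() == (0x20181008 >> 8) & 0x3f == 0x10
	SNB_READ_WM2_LATENCY() == (0x20181008 >> 16) & 0x3f == 0x18
	SNB_READ_WM3_LATENCY() == (0x20181008 >> 24) & 0x3f == 0x20

i.e. each watermark level carries its own 6-bit latency field within the one register.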
2294/* 2402/*
2295 * The two pipe frame counter registers are not synchronized, so 2403 * The two pipe frame counter registers are not synchronized, so
2296 * reading a stable value is somewhat tricky. The following code 2404 * reading a stable value is somewhat tricky. The following code
@@ -2351,6 +2459,10 @@
2351#define CURBBASE 0x700c4 2459#define CURBBASE 0x700c4
2352#define CURBPOS 0x700c8 2460#define CURBPOS 0x700c8
2353 2461
2462#define CURCNTR(pipe) _PIPE(pipe, CURACNTR, CURBCNTR)
2463#define CURBASE(pipe) _PIPE(pipe, CURABASE, CURBBASE)
2464#define CURPOS(pipe) _PIPE(pipe, CURAPOS, CURBPOS)
2465
2354/* Display A control */ 2466/* Display A control */
2355#define DSPACNTR 0x70180 2467#define DSPACNTR 0x70180
2356#define DISPLAY_PLANE_ENABLE (1<<31) 2468#define DISPLAY_PLANE_ENABLE (1<<31)
@@ -2589,6 +2701,8 @@
2589#define GTIER 0x4401c 2701#define GTIER 0x4401c
2590 2702
2591#define ILK_DISPLAY_CHICKEN2 0x42004 2703#define ILK_DISPLAY_CHICKEN2 0x42004
2704/* Required on all Ironlake and Sandybridge according to the B-Spec. */
2705#define ILK_ELPIN_409_SELECT (1 << 25)
2592#define ILK_DPARB_GATE (1<<22) 2706#define ILK_DPARB_GATE (1<<22)
2593#define ILK_VSDPFD_FULL (1<<21) 2707#define ILK_VSDPFD_FULL (1<<21)
2594#define ILK_DISPLAY_CHICKEN_FUSES 0x42014 2708#define ILK_DISPLAY_CHICKEN_FUSES 0x42014
@@ -2600,6 +2714,8 @@
2600#define ILK_DESKTOP (1<<23) 2714#define ILK_DESKTOP (1<<23)
2601#define ILK_DSPCLK_GATE 0x42020 2715#define ILK_DSPCLK_GATE 0x42020
2602#define ILK_DPARB_CLK_GATE (1<<5) 2716#define ILK_DPARB_CLK_GATE (1<<5)
2717#define ILK_DPFD_CLK_GATE (1<<7)
2718
2603/* According to spec this bit 7/8/9 of 0x42020 should be set to enable FBC */ 2719/* According to spec this bit 7/8/9 of 0x42020 should be set to enable FBC */
2604#define ILK_CLK_FBC (1<<7) 2720#define ILK_CLK_FBC (1<<7)
2605#define ILK_DPFC_DIS1 (1<<8) 2721#define ILK_DPFC_DIS1 (1<<8)
@@ -2679,6 +2795,7 @@
2679#define PCH_DPLL(pipe) _PIPE(pipe, PCH_DPLL_A, PCH_DPLL_B) 2795#define PCH_DPLL(pipe) _PIPE(pipe, PCH_DPLL_A, PCH_DPLL_B)
2680 2796
2681#define PCH_FPA0 0xc6040 2797#define PCH_FPA0 0xc6040
2798#define FP_CB_TUNE (0x3<<22)
2682#define PCH_FPA1 0xc6044 2799#define PCH_FPA1 0xc6044
2683#define PCH_FPB0 0xc6048 2800#define PCH_FPB0 0xc6048
2684#define PCH_FPB1 0xc604c 2801#define PCH_FPB1 0xc604c
@@ -3063,4 +3180,66 @@
3063#define EDP_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22) 3180#define EDP_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22)
3064#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22) 3181#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22)
3065 3182
3183#define FORCEWAKE 0xA18C
3184#define FORCEWAKE_ACK 0x130090
3185
3186#define GEN6_RPNSWREQ 0xA008
3187#define GEN6_TURBO_DISABLE (1<<31)
3188#define GEN6_FREQUENCY(x) ((x)<<25)
3189#define GEN6_OFFSET(x) ((x)<<19)
3190#define GEN6_AGGRESSIVE_TURBO (0<<15)
3191#define GEN6_RC_VIDEO_FREQ 0xA00C
3192#define GEN6_RC_CONTROL 0xA090
3193#define GEN6_RC_CTL_RC6pp_ENABLE (1<<16)
3194#define GEN6_RC_CTL_RC6p_ENABLE (1<<17)
3195#define GEN6_RC_CTL_RC6_ENABLE (1<<18)
3196#define GEN6_RC_CTL_RC1e_ENABLE (1<<20)
3197#define GEN6_RC_CTL_RC7_ENABLE (1<<22)
3198#define GEN6_RC_CTL_EI_MODE(x) ((x)<<27)
3199#define GEN6_RC_CTL_HW_ENABLE (1<<31)
3200#define GEN6_RP_DOWN_TIMEOUT 0xA010
3201#define GEN6_RP_INTERRUPT_LIMITS 0xA014
3202#define GEN6_RPSTAT1 0xA01C
3203#define GEN6_RP_CONTROL 0xA024
3204#define GEN6_RP_MEDIA_TURBO (1<<11)
3205#define GEN6_RP_USE_NORMAL_FREQ (1<<9)
3206#define GEN6_RP_MEDIA_IS_GFX (1<<8)
3207#define GEN6_RP_ENABLE (1<<7)
3208#define GEN6_RP_UP_BUSY_MAX (0x2<<3)
3209#define GEN6_RP_DOWN_BUSY_MIN (0x2<<0)
3210#define GEN6_RP_UP_THRESHOLD 0xA02C
3211#define GEN6_RP_DOWN_THRESHOLD 0xA030
3212#define GEN6_RP_UP_EI 0xA068
3213#define GEN6_RP_DOWN_EI 0xA06C
3214#define GEN6_RP_IDLE_HYSTERSIS 0xA070
3215#define GEN6_RC_STATE 0xA094
3216#define GEN6_RC1_WAKE_RATE_LIMIT 0xA098
3217#define GEN6_RC6_WAKE_RATE_LIMIT 0xA09C
3218#define GEN6_RC6pp_WAKE_RATE_LIMIT 0xA0A0
3219#define GEN6_RC_EVALUATION_INTERVAL 0xA0A8
3220#define GEN6_RC_IDLE_HYSTERSIS 0xA0AC
3221#define GEN6_RC_SLEEP 0xA0B0
3222#define GEN6_RC1e_THRESHOLD 0xA0B4
3223#define GEN6_RC6_THRESHOLD 0xA0B8
3224#define GEN6_RC6p_THRESHOLD 0xA0BC
3225#define GEN6_RC6pp_THRESHOLD 0xA0C0
3226#define GEN6_PMINTRMSK 0xA168
3227
3228#define GEN6_PMISR 0x44020
3229#define GEN6_PMIMR 0x44024
3230#define GEN6_PMIIR 0x44028
3231#define GEN6_PMIER 0x4402C
3232#define GEN6_PM_MBOX_EVENT (1<<25)
3233#define GEN6_PM_THERMAL_EVENT (1<<24)
3234#define GEN6_PM_RP_DOWN_TIMEOUT (1<<6)
3235#define GEN6_PM_RP_UP_THRESHOLD (1<<5)
3236#define GEN6_PM_RP_DOWN_THRESHOLD (1<<4)
3237#define GEN6_PM_RP_UP_EI_EXPIRED (1<<2)
3238#define GEN6_PM_RP_DOWN_EI_EXPIRED (1<<1)
3239
3240#define GEN6_PCODE_MAILBOX 0x138124
3241#define GEN6_PCODE_READY (1<<31)
3242#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x9
3243#define GEN6_PCODE_DATA 0x138128
3244
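The mailbox defines above imply a simple request/poll handshake: put the payload in GEN6_PCODE_DATA, write the command with GEN6_PCODE_READY set to GEN6_PCODE_MAILBOX, then wait for hardware to clear the ready bit. A hedged sketch, where wait_for() stands in for whatever poll-with-timeout helper the driver uses:

/* Sketch of the implied pcode handshake; not the driver's exact routine. */
static int pcode_write(struct drm_i915_private *dev_priv, u32 cmd, u32 data)
{
	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY)
		return -EAGAIN;        /* a previous request is still pending */

	I915_WRITE(GEN6_PCODE_DATA, data);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | cmd);

	/* hardware clears READY once it has consumed the request */
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500))
		return -ETIMEDOUT;
	return 0;
}

A caller would use it as, e.g., pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, val).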
3066#endif /* _I915_REG_H_ */ 3245#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 42729d25da58..410772466fa7 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -235,6 +235,7 @@ static void i915_restore_vga(struct drm_device *dev)
235static void i915_save_modeset_reg(struct drm_device *dev) 235static void i915_save_modeset_reg(struct drm_device *dev)
236{ 236{
237 struct drm_i915_private *dev_priv = dev->dev_private; 237 struct drm_i915_private *dev_priv = dev->dev_private;
238 int i;
238 239
239 if (drm_core_check_feature(dev, DRIVER_MODESET)) 240 if (drm_core_check_feature(dev, DRIVER_MODESET))
240 return; 241 return;
@@ -367,6 +368,28 @@ static void i915_save_modeset_reg(struct drm_device *dev)
367 } 368 }
368 i915_save_palette(dev, PIPE_B); 369 i915_save_palette(dev, PIPE_B);
369 dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT); 370 dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT);
371
372 /* Fences */
373 switch (INTEL_INFO(dev)->gen) {
374 case 6:
375 for (i = 0; i < 16; i++)
376 dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
377 break;
378 case 5:
379 case 4:
380 for (i = 0; i < 16; i++)
381 dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
382 break;
383 case 3:
384 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
385 for (i = 0; i < 8; i++)
386 dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
387 case 2:
388 for (i = 0; i < 8; i++)
389 dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
390 break;
391 }
392
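The switch encodes the per-generation fence layout: gen 4 and up have sixteen 64-bit fence registers, gen 3 parts of the 945/G33 class add eight extra 32-bit fences at FENCE_REG_945_8 on top of the eight at FENCE_REG_830_0 (hence the deliberate fallthrough from case 3 to case 2), and the remaining gen 2/3 parts have only the base eight. A small illustrative helper, not taken from the driver:

/* Sketch: how many fence registers the save/restore loops above touch. */
static int fence_reg_count(int gen, bool is_945_class)
{
	if (gen >= 4)
		return 16;                    /* 64-bit fence registers */
	return is_945_class ? 16 : 8;         /* 32-bit fence registers */
}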
370 return; 393 return;
371} 394}
372 395
@@ -375,10 +398,33 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
375 struct drm_i915_private *dev_priv = dev->dev_private; 398 struct drm_i915_private *dev_priv = dev->dev_private;
376 int dpll_a_reg, fpa0_reg, fpa1_reg; 399 int dpll_a_reg, fpa0_reg, fpa1_reg;
377 int dpll_b_reg, fpb0_reg, fpb1_reg; 400 int dpll_b_reg, fpb0_reg, fpb1_reg;
401 int i;
378 402
379 if (drm_core_check_feature(dev, DRIVER_MODESET)) 403 if (drm_core_check_feature(dev, DRIVER_MODESET))
380 return; 404 return;
381 405
406 /* Fences */
407 switch (INTEL_INFO(dev)->gen) {
408 case 6:
409 for (i = 0; i < 16; i++)
410 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
411 break;
412 case 5:
413 case 4:
414 for (i = 0; i < 16; i++)
415 I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]);
416 break;
417 case 3:
418 case 2:
419 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
420 for (i = 0; i < 8; i++)
421 I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
422 for (i = 0; i < 8; i++)
423 I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
424 break;
425 }
426
427
382 if (HAS_PCH_SPLIT(dev)) { 428 if (HAS_PCH_SPLIT(dev)) {
383 dpll_a_reg = PCH_DPLL_A; 429 dpll_a_reg = PCH_DPLL_A;
384 dpll_b_reg = PCH_DPLL_B; 430 dpll_b_reg = PCH_DPLL_B;
@@ -771,8 +817,14 @@ int i915_save_state(struct drm_device *dev)
771 dev_priv->saveIMR = I915_READ(IMR); 817 dev_priv->saveIMR = I915_READ(IMR);
772 } 818 }
773 819
774 if (HAS_PCH_SPLIT(dev)) 820 if (IS_IRONLAKE_M(dev))
775 ironlake_disable_drps(dev); 821 ironlake_disable_drps(dev);
822 if (IS_GEN6(dev))
823 gen6_disable_rps(dev);
824
825 /* XXX disabling the clock gating breaks suspend on gm45
826 intel_disable_clock_gating(dev);
827 */
776 828
777 /* Cache mode state */ 829 /* Cache mode state */
778 dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); 830 dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
@@ -788,28 +840,6 @@ int i915_save_state(struct drm_device *dev)
788 for (i = 0; i < 3; i++) 840 for (i = 0; i < 3; i++)
789 dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2)); 841 dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
790 842
791 /* Fences */
792 switch (INTEL_INFO(dev)->gen) {
793 case 6:
794 for (i = 0; i < 16; i++)
795 dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
796 break;
797 case 5:
798 case 4:
799 for (i = 0; i < 16; i++)
800 dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
801 break;
802 case 3:
803 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
804 for (i = 0; i < 8; i++)
805 dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
806 case 2:
807 for (i = 0; i < 8; i++)
808 dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
809 break;
810
811 }
812
813 return 0; 843 return 0;
814} 844}
815 845
@@ -823,27 +853,6 @@ int i915_restore_state(struct drm_device *dev)
823 /* Hardware status page */ 853 /* Hardware status page */
824 I915_WRITE(HWS_PGA, dev_priv->saveHWS); 854 I915_WRITE(HWS_PGA, dev_priv->saveHWS);
825 855
826 /* Fences */
827 switch (INTEL_INFO(dev)->gen) {
828 case 6:
829 for (i = 0; i < 16; i++)
830 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
831 break;
832 case 5:
833 case 4:
834 for (i = 0; i < 16; i++)
835 I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]);
836 break;
837 case 3:
838 case 2:
839 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
840 for (i = 0; i < 8; i++)
841 I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
842 for (i = 0; i < 8; i++)
843 I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
844 break;
845 }
846
847 i915_restore_display(dev); 856 i915_restore_display(dev);
848 857
849 /* Interrupt state */ 858 /* Interrupt state */
@@ -860,13 +869,16 @@ int i915_restore_state(struct drm_device *dev)
860 } 869 }
861 870
862 /* Clock gating state */ 871 /* Clock gating state */
863 intel_init_clock_gating(dev); 872 intel_enable_clock_gating(dev);
864 873
865 if (HAS_PCH_SPLIT(dev)) { 874 if (IS_IRONLAKE_M(dev)) {
866 ironlake_enable_drps(dev); 875 ironlake_enable_drps(dev);
867 intel_init_emon(dev); 876 intel_init_emon(dev);
868 } 877 }
869 878
879 if (IS_GEN6(dev))
880 gen6_enable_rps(dev_priv);
881
870 /* Cache mode state */ 882 /* Cache mode state */
871 I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000); 883 I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
872 884
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index fea97a21cc14..7f0fc3ed61aa 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -6,6 +6,7 @@
6#include <linux/tracepoint.h> 6#include <linux/tracepoint.h>
7 7
8#include <drm/drmP.h> 8#include <drm/drmP.h>
9#include "i915_drv.h"
9 10
10#undef TRACE_SYSTEM 11#undef TRACE_SYSTEM
11#define TRACE_SYSTEM i915 12#define TRACE_SYSTEM i915
@@ -16,18 +17,18 @@
16 17
17TRACE_EVENT(i915_gem_object_create, 18TRACE_EVENT(i915_gem_object_create,
18 19
19 TP_PROTO(struct drm_gem_object *obj), 20 TP_PROTO(struct drm_i915_gem_object *obj),
20 21
21 TP_ARGS(obj), 22 TP_ARGS(obj),
22 23
23 TP_STRUCT__entry( 24 TP_STRUCT__entry(
24 __field(struct drm_gem_object *, obj) 25 __field(struct drm_i915_gem_object *, obj)
25 __field(u32, size) 26 __field(u32, size)
26 ), 27 ),
27 28
28 TP_fast_assign( 29 TP_fast_assign(
29 __entry->obj = obj; 30 __entry->obj = obj;
30 __entry->size = obj->size; 31 __entry->size = obj->base.size;
31 ), 32 ),
32 33
33 TP_printk("obj=%p, size=%u", __entry->obj, __entry->size) 34 TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
@@ -35,40 +36,43 @@ TRACE_EVENT(i915_gem_object_create,
35 36
36TRACE_EVENT(i915_gem_object_bind, 37TRACE_EVENT(i915_gem_object_bind,
37 38
38 TP_PROTO(struct drm_gem_object *obj, u32 gtt_offset), 39 TP_PROTO(struct drm_i915_gem_object *obj, u32 gtt_offset, bool mappable),
39 40
40 TP_ARGS(obj, gtt_offset), 41 TP_ARGS(obj, gtt_offset, mappable),
41 42
42 TP_STRUCT__entry( 43 TP_STRUCT__entry(
43 __field(struct drm_gem_object *, obj) 44 __field(struct drm_i915_gem_object *, obj)
44 __field(u32, gtt_offset) 45 __field(u32, gtt_offset)
46 __field(bool, mappable)
45 ), 47 ),
46 48
47 TP_fast_assign( 49 TP_fast_assign(
48 __entry->obj = obj; 50 __entry->obj = obj;
49 __entry->gtt_offset = gtt_offset; 51 __entry->gtt_offset = gtt_offset;
52 __entry->mappable = mappable;
50 ), 53 ),
51 54
52 TP_printk("obj=%p, gtt_offset=%08x", 55 TP_printk("obj=%p, gtt_offset=%08x%s",
53 __entry->obj, __entry->gtt_offset) 56 __entry->obj, __entry->gtt_offset,
57 __entry->mappable ? ", mappable" : "")
54); 58);
55 59
56TRACE_EVENT(i915_gem_object_change_domain, 60TRACE_EVENT(i915_gem_object_change_domain,
57 61
58 TP_PROTO(struct drm_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain), 62 TP_PROTO(struct drm_i915_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain),
59 63
60 TP_ARGS(obj, old_read_domains, old_write_domain), 64 TP_ARGS(obj, old_read_domains, old_write_domain),
61 65
62 TP_STRUCT__entry( 66 TP_STRUCT__entry(
63 __field(struct drm_gem_object *, obj) 67 __field(struct drm_i915_gem_object *, obj)
64 __field(u32, read_domains) 68 __field(u32, read_domains)
65 __field(u32, write_domain) 69 __field(u32, write_domain)
66 ), 70 ),
67 71
68 TP_fast_assign( 72 TP_fast_assign(
69 __entry->obj = obj; 73 __entry->obj = obj;
70 __entry->read_domains = obj->read_domains | (old_read_domains << 16); 74 __entry->read_domains = obj->base.read_domains | (old_read_domains << 16);
71 __entry->write_domain = obj->write_domain | (old_write_domain << 16); 75 __entry->write_domain = obj->base.write_domain | (old_write_domain << 16);
72 ), 76 ),
73 77
74 TP_printk("obj=%p, read=%04x, write=%04x", 78 TP_printk("obj=%p, read=%04x, write=%04x",
@@ -76,36 +80,14 @@ TRACE_EVENT(i915_gem_object_change_domain,
76 __entry->read_domains, __entry->write_domain) 80 __entry->read_domains, __entry->write_domain)
77); 81);
78 82
79TRACE_EVENT(i915_gem_object_get_fence,
80
81 TP_PROTO(struct drm_gem_object *obj, int fence, int tiling_mode),
82
83 TP_ARGS(obj, fence, tiling_mode),
84
85 TP_STRUCT__entry(
86 __field(struct drm_gem_object *, obj)
87 __field(int, fence)
88 __field(int, tiling_mode)
89 ),
90
91 TP_fast_assign(
92 __entry->obj = obj;
93 __entry->fence = fence;
94 __entry->tiling_mode = tiling_mode;
95 ),
96
97 TP_printk("obj=%p, fence=%d, tiling=%d",
98 __entry->obj, __entry->fence, __entry->tiling_mode)
99);
100
101DECLARE_EVENT_CLASS(i915_gem_object, 83DECLARE_EVENT_CLASS(i915_gem_object,
102 84
103 TP_PROTO(struct drm_gem_object *obj), 85 TP_PROTO(struct drm_i915_gem_object *obj),
104 86
105 TP_ARGS(obj), 87 TP_ARGS(obj),
106 88
107 TP_STRUCT__entry( 89 TP_STRUCT__entry(
108 __field(struct drm_gem_object *, obj) 90 __field(struct drm_i915_gem_object *, obj)
109 ), 91 ),
110 92
111 TP_fast_assign( 93 TP_fast_assign(
@@ -117,21 +99,21 @@ DECLARE_EVENT_CLASS(i915_gem_object,
117 99
118DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush, 100DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
119 101
120 TP_PROTO(struct drm_gem_object *obj), 102 TP_PROTO(struct drm_i915_gem_object *obj),
121 103
122 TP_ARGS(obj) 104 TP_ARGS(obj)
123); 105);
124 106
125DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind, 107DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind,
126 108
127 TP_PROTO(struct drm_gem_object *obj), 109 TP_PROTO(struct drm_i915_gem_object *obj),
128 110
129 TP_ARGS(obj) 111 TP_ARGS(obj)
130); 112);
131 113
132DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy, 114DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
133 115
134 TP_PROTO(struct drm_gem_object *obj), 116 TP_PROTO(struct drm_i915_gem_object *obj),
135 117
136 TP_ARGS(obj) 118 TP_ARGS(obj)
137); 119);
@@ -263,13 +245,13 @@ DEFINE_EVENT(i915_ring, i915_ring_wait_end,
263); 245);
264 246
265TRACE_EVENT(i915_flip_request, 247TRACE_EVENT(i915_flip_request,
266 TP_PROTO(int plane, struct drm_gem_object *obj), 248 TP_PROTO(int plane, struct drm_i915_gem_object *obj),
267 249
268 TP_ARGS(plane, obj), 250 TP_ARGS(plane, obj),
269 251
270 TP_STRUCT__entry( 252 TP_STRUCT__entry(
271 __field(int, plane) 253 __field(int, plane)
272 __field(struct drm_gem_object *, obj) 254 __field(struct drm_i915_gem_object *, obj)
273 ), 255 ),
274 256
275 TP_fast_assign( 257 TP_fast_assign(
@@ -281,13 +263,13 @@ TRACE_EVENT(i915_flip_request,
281); 263);
282 264
283TRACE_EVENT(i915_flip_complete, 265TRACE_EVENT(i915_flip_complete,
284 TP_PROTO(int plane, struct drm_gem_object *obj), 266 TP_PROTO(int plane, struct drm_i915_gem_object *obj),
285 267
286 TP_ARGS(plane, obj), 268 TP_ARGS(plane, obj),
287 269
288 TP_STRUCT__entry( 270 TP_STRUCT__entry(
289 __field(int, plane) 271 __field(int, plane)
290 __field(struct drm_gem_object *, obj) 272 __field(struct drm_i915_gem_object *, obj)
291 ), 273 ),
292 274
293 TP_fast_assign( 275 TP_fast_assign(
@@ -298,6 +280,29 @@ TRACE_EVENT(i915_flip_complete,
298 TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj) 280 TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
299); 281);
300 282
283TRACE_EVENT(i915_reg_rw,
284 TP_PROTO(int cmd, uint32_t reg, uint64_t val, int len),
285
286 TP_ARGS(cmd, reg, val, len),
287
288 TP_STRUCT__entry(
289 __field(int, cmd)
290 __field(uint32_t, reg)
291 __field(uint64_t, val)
292 __field(int, len)
293 ),
294
295 TP_fast_assign(
296 __entry->cmd = cmd;
297 __entry->reg = reg;
298 __entry->val = (uint64_t)val;
299 __entry->len = len;
300 ),
301
302 TP_printk("cmd=%c, reg=0x%x, val=0x%llx, len=%d",
303 __entry->cmd, __entry->reg, __entry->val, __entry->len)
304);
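The new i915_reg_rw tracepoint is presumably fed from the MMIO accessors; a hedged sketch of a write path (the 'W' command character and the helper name are illustrative, not confirmed by this diff):

/* Illustrative only: a 4-byte register write that feeds i915_reg_rw. */
static inline void traced_write32(struct drm_i915_private *dev_priv,
				  u32 reg, u32 val)
{
	trace_i915_reg_rw('W', reg, val, sizeof(val)); /* cmd, reg, val, len */
	writel(val, dev_priv->regs + reg);
}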
305
301#endif /* _I915_TRACE_H_ */ 306#endif /* _I915_TRACE_H_ */
302 307
303/* This part must be outside protection */ 308/* This part must be outside protection */
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index fca523288aca..0abe79fb6385 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -642,26 +642,23 @@ static const intel_limit_t intel_limits_ironlake_display_port = {
642 .find_pll = intel_find_pll_ironlake_dp, 642 .find_pll = intel_find_pll_ironlake_dp,
643}; 643};
644 644
645static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc) 645static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
646 int refclk)
646{ 647{
647 struct drm_device *dev = crtc->dev; 648 struct drm_device *dev = crtc->dev;
648 struct drm_i915_private *dev_priv = dev->dev_private; 649 struct drm_i915_private *dev_priv = dev->dev_private;
649 const intel_limit_t *limit; 650 const intel_limit_t *limit;
650 int refclk = 120;
651 651
652 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 652 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
653 if (dev_priv->lvds_use_ssc && dev_priv->lvds_ssc_freq == 100)
654 refclk = 100;
655
656 if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == 653 if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
657 LVDS_CLKB_POWER_UP) { 654 LVDS_CLKB_POWER_UP) {
658 /* LVDS dual channel */ 655 /* LVDS dual channel */
659 if (refclk == 100) 656 if (refclk == 100000)
660 limit = &intel_limits_ironlake_dual_lvds_100m; 657 limit = &intel_limits_ironlake_dual_lvds_100m;
661 else 658 else
662 limit = &intel_limits_ironlake_dual_lvds; 659 limit = &intel_limits_ironlake_dual_lvds;
663 } else { 660 } else {
664 if (refclk == 100) 661 if (refclk == 100000)
665 limit = &intel_limits_ironlake_single_lvds_100m; 662 limit = &intel_limits_ironlake_single_lvds_100m;
666 else 663 else
667 limit = &intel_limits_ironlake_single_lvds; 664 limit = &intel_limits_ironlake_single_lvds;
@@ -702,13 +699,13 @@ static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
702 return limit; 699 return limit;
703} 700}
704 701
705static const intel_limit_t *intel_limit(struct drm_crtc *crtc) 702static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
706{ 703{
707 struct drm_device *dev = crtc->dev; 704 struct drm_device *dev = crtc->dev;
708 const intel_limit_t *limit; 705 const intel_limit_t *limit;
709 706
710 if (HAS_PCH_SPLIT(dev)) 707 if (HAS_PCH_SPLIT(dev))
711 limit = intel_ironlake_limit(crtc); 708 limit = intel_ironlake_limit(crtc, refclk);
712 else if (IS_G4X(dev)) { 709 else if (IS_G4X(dev)) {
713 limit = intel_g4x_limit(crtc); 710 limit = intel_g4x_limit(crtc);
714 } else if (IS_PINEVIEW(dev)) { 711 } else if (IS_PINEVIEW(dev)) {
@@ -773,11 +770,10 @@ bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
773 * the given connectors. 770 * the given connectors.
774 */ 771 */
775 772
776static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock) 773static bool intel_PLL_is_valid(struct drm_device *dev,
774 const intel_limit_t *limit,
775 const intel_clock_t *clock)
777{ 776{
778 const intel_limit_t *limit = intel_limit (crtc);
779 struct drm_device *dev = crtc->dev;
780
781 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) 777 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
782 INTELPllInvalid ("p1 out of range\n"); 778 INTELPllInvalid ("p1 out of range\n");
783 if (clock->p < limit->p.min || limit->p.max < clock->p) 779 if (clock->p < limit->p.min || limit->p.max < clock->p)
@@ -849,8 +845,8 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
849 int this_err; 845 int this_err;
850 846
851 intel_clock(dev, refclk, &clock); 847 intel_clock(dev, refclk, &clock);
852 848 if (!intel_PLL_is_valid(dev, limit,
853 if (!intel_PLL_is_valid(crtc, &clock)) 849 &clock))
854 continue; 850 continue;
855 851
856 this_err = abs(clock.dot - target); 852 this_err = abs(clock.dot - target);
@@ -912,9 +908,11 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
912 int this_err; 908 int this_err;
913 909
914 intel_clock(dev, refclk, &clock); 910 intel_clock(dev, refclk, &clock);
915 if (!intel_PLL_is_valid(crtc, &clock)) 911 if (!intel_PLL_is_valid(dev, limit,
912 &clock))
916 continue; 913 continue;
917 this_err = abs(clock.dot - target) ; 914
915 this_err = abs(clock.dot - target);
918 if (this_err < err_most) { 916 if (this_err < err_most) {
919 *best_clock = clock; 917 *best_clock = clock;
920 err_most = this_err; 918 err_most = this_err;
@@ -1066,13 +1064,13 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1066 struct drm_i915_private *dev_priv = dev->dev_private; 1064 struct drm_i915_private *dev_priv = dev->dev_private;
1067 struct drm_framebuffer *fb = crtc->fb; 1065 struct drm_framebuffer *fb = crtc->fb;
1068 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 1066 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1069 struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); 1067 struct drm_i915_gem_object *obj = intel_fb->obj;
1070 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1068 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1071 int plane, i; 1069 int plane, i;
1072 u32 fbc_ctl, fbc_ctl2; 1070 u32 fbc_ctl, fbc_ctl2;
1073 1071
1074 if (fb->pitch == dev_priv->cfb_pitch && 1072 if (fb->pitch == dev_priv->cfb_pitch &&
1075 obj_priv->fence_reg == dev_priv->cfb_fence && 1073 obj->fence_reg == dev_priv->cfb_fence &&
1076 intel_crtc->plane == dev_priv->cfb_plane && 1074 intel_crtc->plane == dev_priv->cfb_plane &&
1077 I915_READ(FBC_CONTROL) & FBC_CTL_EN) 1075 I915_READ(FBC_CONTROL) & FBC_CTL_EN)
1078 return; 1076 return;
@@ -1086,7 +1084,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1086 1084
1087 /* FBC_CTL wants 64B units */ 1085 /* FBC_CTL wants 64B units */
1088 dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; 1086 dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
1089 dev_priv->cfb_fence = obj_priv->fence_reg; 1087 dev_priv->cfb_fence = obj->fence_reg;
1090 dev_priv->cfb_plane = intel_crtc->plane; 1088 dev_priv->cfb_plane = intel_crtc->plane;
1091 plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB; 1089 plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
1092 1090
@@ -1096,7 +1094,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1096 1094
1097 /* Set it up... */ 1095 /* Set it up... */
1098 fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane; 1096 fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane;
1099 if (obj_priv->tiling_mode != I915_TILING_NONE) 1097 if (obj->tiling_mode != I915_TILING_NONE)
1100 fbc_ctl2 |= FBC_CTL_CPU_FENCE; 1098 fbc_ctl2 |= FBC_CTL_CPU_FENCE;
1101 I915_WRITE(FBC_CONTROL2, fbc_ctl2); 1099 I915_WRITE(FBC_CONTROL2, fbc_ctl2);
1102 I915_WRITE(FBC_FENCE_OFF, crtc->y); 1100 I915_WRITE(FBC_FENCE_OFF, crtc->y);
@@ -1107,7 +1105,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1107 fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ 1105 fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
1108 fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; 1106 fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
1109 fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; 1107 fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
1110 if (obj_priv->tiling_mode != I915_TILING_NONE) 1108 if (obj->tiling_mode != I915_TILING_NONE)
1111 fbc_ctl |= dev_priv->cfb_fence; 1109 fbc_ctl |= dev_priv->cfb_fence;
1112 I915_WRITE(FBC_CONTROL, fbc_ctl); 1110 I915_WRITE(FBC_CONTROL, fbc_ctl);
1113 1111
@@ -1150,7 +1148,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1150 struct drm_i915_private *dev_priv = dev->dev_private; 1148 struct drm_i915_private *dev_priv = dev->dev_private;
1151 struct drm_framebuffer *fb = crtc->fb; 1149 struct drm_framebuffer *fb = crtc->fb;
1152 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 1150 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1153 struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); 1151 struct drm_i915_gem_object *obj = intel_fb->obj;
1154 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1152 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1155 int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; 1153 int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
1156 unsigned long stall_watermark = 200; 1154 unsigned long stall_watermark = 200;
@@ -1159,7 +1157,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1159 dpfc_ctl = I915_READ(DPFC_CONTROL); 1157 dpfc_ctl = I915_READ(DPFC_CONTROL);
1160 if (dpfc_ctl & DPFC_CTL_EN) { 1158 if (dpfc_ctl & DPFC_CTL_EN) {
1161 if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 && 1159 if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
1162 dev_priv->cfb_fence == obj_priv->fence_reg && 1160 dev_priv->cfb_fence == obj->fence_reg &&
1163 dev_priv->cfb_plane == intel_crtc->plane && 1161 dev_priv->cfb_plane == intel_crtc->plane &&
1164 dev_priv->cfb_y == crtc->y) 1162 dev_priv->cfb_y == crtc->y)
1165 return; 1163 return;
@@ -1170,12 +1168,12 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1170 } 1168 }
1171 1169
1172 dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; 1170 dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
1173 dev_priv->cfb_fence = obj_priv->fence_reg; 1171 dev_priv->cfb_fence = obj->fence_reg;
1174 dev_priv->cfb_plane = intel_crtc->plane; 1172 dev_priv->cfb_plane = intel_crtc->plane;
1175 dev_priv->cfb_y = crtc->y; 1173 dev_priv->cfb_y = crtc->y;
1176 1174
1177 dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X; 1175 dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
1178 if (obj_priv->tiling_mode != I915_TILING_NONE) { 1176 if (obj->tiling_mode != I915_TILING_NONE) {
1179 dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence; 1177 dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence;
1180 I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY); 1178 I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
1181 } else { 1179 } else {
@@ -1221,7 +1219,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1221 struct drm_i915_private *dev_priv = dev->dev_private; 1219 struct drm_i915_private *dev_priv = dev->dev_private;
1222 struct drm_framebuffer *fb = crtc->fb; 1220 struct drm_framebuffer *fb = crtc->fb;
1223 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 1221 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1224 struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); 1222 struct drm_i915_gem_object *obj = intel_fb->obj;
1225 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1223 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1226 int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; 1224 int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
1227 unsigned long stall_watermark = 200; 1225 unsigned long stall_watermark = 200;
@@ -1230,9 +1228,9 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1230 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); 1228 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
1231 if (dpfc_ctl & DPFC_CTL_EN) { 1229 if (dpfc_ctl & DPFC_CTL_EN) {
1232 if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 && 1230 if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
1233 dev_priv->cfb_fence == obj_priv->fence_reg && 1231 dev_priv->cfb_fence == obj->fence_reg &&
1234 dev_priv->cfb_plane == intel_crtc->plane && 1232 dev_priv->cfb_plane == intel_crtc->plane &&
1235 dev_priv->cfb_offset == obj_priv->gtt_offset && 1233 dev_priv->cfb_offset == obj->gtt_offset &&
1236 dev_priv->cfb_y == crtc->y) 1234 dev_priv->cfb_y == crtc->y)
1237 return; 1235 return;
1238 1236
@@ -1242,14 +1240,14 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1242 } 1240 }
1243 1241
1244 dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; 1242 dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
1245 dev_priv->cfb_fence = obj_priv->fence_reg; 1243 dev_priv->cfb_fence = obj->fence_reg;
1246 dev_priv->cfb_plane = intel_crtc->plane; 1244 dev_priv->cfb_plane = intel_crtc->plane;
1247 dev_priv->cfb_offset = obj_priv->gtt_offset; 1245 dev_priv->cfb_offset = obj->gtt_offset;
1248 dev_priv->cfb_y = crtc->y; 1246 dev_priv->cfb_y = crtc->y;
1249 1247
1250 dpfc_ctl &= DPFC_RESERVED; 1248 dpfc_ctl &= DPFC_RESERVED;
1251 dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X); 1249 dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
1252 if (obj_priv->tiling_mode != I915_TILING_NONE) { 1250 if (obj->tiling_mode != I915_TILING_NONE) {
1253 dpfc_ctl |= (DPFC_CTL_FENCE_EN | dev_priv->cfb_fence); 1251 dpfc_ctl |= (DPFC_CTL_FENCE_EN | dev_priv->cfb_fence);
1254 I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY); 1252 I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
1255 } else { 1253 } else {
@@ -1260,10 +1258,16 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1260 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | 1258 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
1261 (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); 1259 (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
1262 I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y); 1260 I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
1263 I915_WRITE(ILK_FBC_RT_BASE, obj_priv->gtt_offset | ILK_FBC_RT_VALID); 1261 I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
1264 /* enable it... */ 1262 /* enable it... */
1265 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); 1263 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
1266 1264
1265 if (IS_GEN6(dev)) {
1266 I915_WRITE(SNB_DPFC_CTL_SA,
1267 SNB_CPU_FENCE_ENABLE | dev_priv->cfb_fence);
1268 I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
1269 }
1270
1267 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); 1271 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
1268} 1272}
1269 1273
@@ -1345,7 +1349,7 @@ static void intel_update_fbc(struct drm_device *dev)
1345 struct intel_crtc *intel_crtc; 1349 struct intel_crtc *intel_crtc;
1346 struct drm_framebuffer *fb; 1350 struct drm_framebuffer *fb;
1347 struct intel_framebuffer *intel_fb; 1351 struct intel_framebuffer *intel_fb;
1348 struct drm_i915_gem_object *obj_priv; 1352 struct drm_i915_gem_object *obj;
1349 1353
1350 DRM_DEBUG_KMS("\n"); 1354 DRM_DEBUG_KMS("\n");
1351 1355
@@ -1384,9 +1388,9 @@ static void intel_update_fbc(struct drm_device *dev)
1384 intel_crtc = to_intel_crtc(crtc); 1388 intel_crtc = to_intel_crtc(crtc);
1385 fb = crtc->fb; 1389 fb = crtc->fb;
1386 intel_fb = to_intel_framebuffer(fb); 1390 intel_fb = to_intel_framebuffer(fb);
1387 obj_priv = to_intel_bo(intel_fb->obj); 1391 obj = intel_fb->obj;
1388 1392
1389 if (intel_fb->obj->size > dev_priv->cfb_size) { 1393 if (intel_fb->obj->base.size > dev_priv->cfb_size) {
1390 DRM_DEBUG_KMS("framebuffer too large, disabling " 1394 DRM_DEBUG_KMS("framebuffer too large, disabling "
1391 "compression\n"); 1395 "compression\n");
1392 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; 1396 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
@@ -1410,7 +1414,7 @@ static void intel_update_fbc(struct drm_device *dev)
1410 dev_priv->no_fbc_reason = FBC_BAD_PLANE; 1414 dev_priv->no_fbc_reason = FBC_BAD_PLANE;
1411 goto out_disable; 1415 goto out_disable;
1412 } 1416 }
1413 if (obj_priv->tiling_mode != I915_TILING_X) { 1417 if (obj->tiling_mode != I915_TILING_X) {
1414 DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n"); 1418 DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n");
1415 dev_priv->no_fbc_reason = FBC_NOT_TILED; 1419 dev_priv->no_fbc_reason = FBC_NOT_TILED;
1416 goto out_disable; 1420 goto out_disable;
@@ -1433,14 +1437,13 @@ out_disable:
1433 1437
1434int 1438int
1435intel_pin_and_fence_fb_obj(struct drm_device *dev, 1439intel_pin_and_fence_fb_obj(struct drm_device *dev,
1436 struct drm_gem_object *obj, 1440 struct drm_i915_gem_object *obj,
1437 bool pipelined) 1441 struct intel_ring_buffer *pipelined)
1438{ 1442{
1439 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1440 u32 alignment; 1443 u32 alignment;
1441 int ret; 1444 int ret;
1442 1445
1443 switch (obj_priv->tiling_mode) { 1446 switch (obj->tiling_mode) {
1444 case I915_TILING_NONE: 1447 case I915_TILING_NONE:
1445 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) 1448 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
1446 alignment = 128 * 1024; 1449 alignment = 128 * 1024;
@@ -1461,7 +1464,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
1461 BUG(); 1464 BUG();
1462 } 1465 }
1463 1466
1464 ret = i915_gem_object_pin(obj, alignment); 1467 ret = i915_gem_object_pin(obj, alignment, true);
1465 if (ret) 1468 if (ret)
1466 return ret; 1469 return ret;
1467 1470
@@ -1474,9 +1477,8 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
1474 * framebuffer compression. For simplicity, we always install 1477 * framebuffer compression. For simplicity, we always install
1475 * a fence as the cost is not that onerous. 1478 * a fence as the cost is not that onerous.
1476 */ 1479 */
1477 if (obj_priv->fence_reg == I915_FENCE_REG_NONE && 1480 if (obj->tiling_mode != I915_TILING_NONE) {
1478 obj_priv->tiling_mode != I915_TILING_NONE) { 1481 ret = i915_gem_object_get_fence(obj, pipelined, false);
1479 ret = i915_gem_object_get_fence_reg(obj, false);
1480 if (ret) 1482 if (ret)
1481 goto err_unpin; 1483 goto err_unpin;
1482 } 1484 }
@@ -1497,8 +1499,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1497 struct drm_i915_private *dev_priv = dev->dev_private; 1499 struct drm_i915_private *dev_priv = dev->dev_private;
1498 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1500 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1499 struct intel_framebuffer *intel_fb; 1501 struct intel_framebuffer *intel_fb;
1500 struct drm_i915_gem_object *obj_priv; 1502 struct drm_i915_gem_object *obj;
1501 struct drm_gem_object *obj;
1502 int plane = intel_crtc->plane; 1503 int plane = intel_crtc->plane;
1503 unsigned long Start, Offset; 1504 unsigned long Start, Offset;
1504 u32 dspcntr; 1505 u32 dspcntr;
@@ -1515,7 +1516,6 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1515 1516
1516 intel_fb = to_intel_framebuffer(fb); 1517 intel_fb = to_intel_framebuffer(fb);
1517 obj = intel_fb->obj; 1518 obj = intel_fb->obj;
1518 obj_priv = to_intel_bo(obj);
1519 1519
1520 reg = DSPCNTR(plane); 1520 reg = DSPCNTR(plane);
1521 dspcntr = I915_READ(reg); 1521 dspcntr = I915_READ(reg);
@@ -1540,7 +1540,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1540 return -EINVAL; 1540 return -EINVAL;
1541 } 1541 }
1542 if (INTEL_INFO(dev)->gen >= 4) { 1542 if (INTEL_INFO(dev)->gen >= 4) {
1543 if (obj_priv->tiling_mode != I915_TILING_NONE) 1543 if (obj->tiling_mode != I915_TILING_NONE)
1544 dspcntr |= DISPPLANE_TILED; 1544 dspcntr |= DISPPLANE_TILED;
1545 else 1545 else
1546 dspcntr &= ~DISPPLANE_TILED; 1546 dspcntr &= ~DISPPLANE_TILED;
@@ -1552,7 +1552,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1552 1552
1553 I915_WRITE(reg, dspcntr); 1553 I915_WRITE(reg, dspcntr);
1554 1554
1555 Start = obj_priv->gtt_offset; 1555 Start = obj->gtt_offset;
1556 Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8); 1556 Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
1557 1557
1558 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", 1558 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
@@ -1598,7 +1598,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1598 mutex_lock(&dev->struct_mutex); 1598 mutex_lock(&dev->struct_mutex);
1599 ret = intel_pin_and_fence_fb_obj(dev, 1599 ret = intel_pin_and_fence_fb_obj(dev,
1600 to_intel_framebuffer(crtc->fb)->obj, 1600 to_intel_framebuffer(crtc->fb)->obj,
1601 false); 1601 NULL);
1602 if (ret != 0) { 1602 if (ret != 0) {
1603 mutex_unlock(&dev->struct_mutex); 1603 mutex_unlock(&dev->struct_mutex);
1604 return ret; 1604 return ret;
@@ -1606,18 +1606,17 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1606 1606
1607 if (old_fb) { 1607 if (old_fb) {
1608 struct drm_i915_private *dev_priv = dev->dev_private; 1608 struct drm_i915_private *dev_priv = dev->dev_private;
1609 struct drm_gem_object *obj = to_intel_framebuffer(old_fb)->obj; 1609 struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
1610 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1611 1610
1612 wait_event(dev_priv->pending_flip_queue, 1611 wait_event(dev_priv->pending_flip_queue,
1613 atomic_read(&obj_priv->pending_flip) == 0); 1612 atomic_read(&obj->pending_flip) == 0);
1614 1613
1615 /* Big Hammer, we also need to ensure that any pending 1614 /* Big Hammer, we also need to ensure that any pending
1616 * MI_WAIT_FOR_EVENT inside a user batch buffer on the 1615 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
1617 * current scanout is retired before unpinning the old 1616 * current scanout is retired before unpinning the old
1618 * framebuffer. 1617 * framebuffer.
1619 */ 1618 */
1620 ret = i915_gem_object_flush_gpu(obj_priv, false); 1619 ret = i915_gem_object_flush_gpu(obj, false);
1621 if (ret) { 1620 if (ret) {
1622 i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); 1621 i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
1623 mutex_unlock(&dev->struct_mutex); 1622 mutex_unlock(&dev->struct_mutex);
@@ -1633,8 +1632,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1633 return ret; 1632 return ret;
1634 } 1633 }
1635 1634
1636 if (old_fb) 1635 if (old_fb) {
1636 intel_wait_for_vblank(dev, intel_crtc->pipe);
1637 i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj); 1637 i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj);
1638 }
1638 1639
1639 mutex_unlock(&dev->struct_mutex); 1640 mutex_unlock(&dev->struct_mutex);
1640 1641
@@ -1996,31 +1997,31 @@ static void intel_flush_display_plane(struct drm_device *dev,
1996static void intel_clear_scanline_wait(struct drm_device *dev) 1997static void intel_clear_scanline_wait(struct drm_device *dev)
1997{ 1998{
1998 struct drm_i915_private *dev_priv = dev->dev_private; 1999 struct drm_i915_private *dev_priv = dev->dev_private;
2000 struct intel_ring_buffer *ring;
1999 u32 tmp; 2001 u32 tmp;
2000 2002
2001 if (IS_GEN2(dev)) 2003 if (IS_GEN2(dev))
2002 /* Can't break the hang on i8xx */ 2004 /* Can't break the hang on i8xx */
2003 return; 2005 return;
2004 2006
2005 tmp = I915_READ(PRB0_CTL); 2007 ring = LP_RING(dev_priv);
2006 if (tmp & RING_WAIT) { 2008 tmp = I915_READ_CTL(ring);
2007 I915_WRITE(PRB0_CTL, tmp); 2009 if (tmp & RING_WAIT)
2008 POSTING_READ(PRB0_CTL); 2010 I915_WRITE_CTL(ring, tmp);
2009 }
2010} 2011}
2011 2012
2012static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) 2013static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
2013{ 2014{
2014 struct drm_i915_gem_object *obj_priv; 2015 struct drm_i915_gem_object *obj;
2015 struct drm_i915_private *dev_priv; 2016 struct drm_i915_private *dev_priv;
2016 2017
2017 if (crtc->fb == NULL) 2018 if (crtc->fb == NULL)
2018 return; 2019 return;
2019 2020
2020 obj_priv = to_intel_bo(to_intel_framebuffer(crtc->fb)->obj); 2021 obj = to_intel_framebuffer(crtc->fb)->obj;
2021 dev_priv = crtc->dev->dev_private; 2022 dev_priv = crtc->dev->dev_private;
2022 wait_event(dev_priv->pending_flip_queue, 2023 wait_event(dev_priv->pending_flip_queue,
2023 atomic_read(&obj_priv->pending_flip) == 0); 2024 atomic_read(&obj->pending_flip) == 0);
2024} 2025}
2025 2026
2026static void ironlake_crtc_enable(struct drm_crtc *crtc) 2027static void ironlake_crtc_enable(struct drm_crtc *crtc)
@@ -2850,6 +2851,39 @@ static struct intel_watermark_params ironlake_cursor_srwm_info = {
2850 ILK_FIFO_LINE_SIZE 2851 ILK_FIFO_LINE_SIZE
2851}; 2852};
2852 2853
2854static struct intel_watermark_params sandybridge_display_wm_info = {
2855 SNB_DISPLAY_FIFO,
2856 SNB_DISPLAY_MAXWM,
2857 SNB_DISPLAY_DFTWM,
2858 2,
2859 SNB_FIFO_LINE_SIZE
2860};
2861
2862static struct intel_watermark_params sandybridge_cursor_wm_info = {
2863 SNB_CURSOR_FIFO,
2864 SNB_CURSOR_MAXWM,
2865 SNB_CURSOR_DFTWM,
2866 2,
2867 SNB_FIFO_LINE_SIZE
2868};
2869
2870static struct intel_watermark_params sandybridge_display_srwm_info = {
2871 SNB_DISPLAY_SR_FIFO,
2872 SNB_DISPLAY_MAX_SRWM,
2873 SNB_DISPLAY_DFT_SRWM,
2874 2,
2875 SNB_FIFO_LINE_SIZE
2876};
2877
2878static struct intel_watermark_params sandybridge_cursor_srwm_info = {
2879 SNB_CURSOR_SR_FIFO,
2880 SNB_CURSOR_MAX_SRWM,
2881 SNB_CURSOR_DFT_SRWM,
2882 2,
2883 SNB_FIFO_LINE_SIZE
2884};
2885
2886
2853/** 2887/**
2854 * intel_calculate_wm - calculate watermark level 2888 * intel_calculate_wm - calculate watermark level
2855 * @clock_in_khz: pixel clock 2889 * @clock_in_khz: pixel clock
@@ -3383,6 +3417,10 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
3383 3417
3384static bool ironlake_compute_wm0(struct drm_device *dev, 3418static bool ironlake_compute_wm0(struct drm_device *dev,
3385 int pipe, 3419 int pipe,
3420 const struct intel_watermark_params *display,
3421 int display_latency,
3422 const struct intel_watermark_params *cursor,
3423 int cursor_latency,
3386 int *plane_wm, 3424 int *plane_wm,
3387 int *cursor_wm) 3425 int *cursor_wm)
3388{ 3426{
@@ -3400,22 +3438,20 @@ static bool ironlake_compute_wm0(struct drm_device *dev,
3400 pixel_size = crtc->fb->bits_per_pixel / 8; 3438 pixel_size = crtc->fb->bits_per_pixel / 8;
3401 3439
3402 /* Use the small buffer method to calculate plane watermark */ 3440 /* Use the small buffer method to calculate plane watermark */
3403 entries = ((clock * pixel_size / 1000) * ILK_LP0_PLANE_LATENCY) / 1000; 3441 entries = ((clock * pixel_size / 1000) * display_latency * 100) / 1000;
3404 entries = DIV_ROUND_UP(entries, 3442 entries = DIV_ROUND_UP(entries, display->cacheline_size);
3405 ironlake_display_wm_info.cacheline_size); 3443 *plane_wm = entries + display->guard_size;
3406 *plane_wm = entries + ironlake_display_wm_info.guard_size; 3444 if (*plane_wm > (int)display->max_wm)
3407 if (*plane_wm > (int)ironlake_display_wm_info.max_wm) 3445 *plane_wm = display->max_wm;
3408 *plane_wm = ironlake_display_wm_info.max_wm;
3409 3446
3410 /* Use the large buffer method to calculate cursor watermark */ 3447 /* Use the large buffer method to calculate cursor watermark */
3411 line_time_us = ((htotal * 1000) / clock); 3448 line_time_us = ((htotal * 1000) / clock);
3412 line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000; 3449 line_count = (cursor_latency * 100 / line_time_us + 1000) / 1000;
3413 entries = line_count * 64 * pixel_size; 3450 entries = line_count * 64 * pixel_size;
3414 entries = DIV_ROUND_UP(entries, 3451 entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
3415 ironlake_cursor_wm_info.cacheline_size); 3452 *cursor_wm = entries + cursor->guard_size;
3416 *cursor_wm = entries + ironlake_cursor_wm_info.guard_size; 3453 if (*cursor_wm > (int)cursor->max_wm)
3417 if (*cursor_wm > ironlake_cursor_wm_info.max_wm) 3454 *cursor_wm = (int)cursor->max_wm;
3418 *cursor_wm = ironlake_cursor_wm_info.max_wm;
3419 3455
3420 return true; 3456 return true;
3421} 3457}
@@ -3430,7 +3466,12 @@ static void ironlake_update_wm(struct drm_device *dev,
3430 int tmp; 3466 int tmp;
3431 3467
3432 enabled = 0; 3468 enabled = 0;
3433 if (ironlake_compute_wm0(dev, 0, &plane_wm, &cursor_wm)) { 3469 if (ironlake_compute_wm0(dev, 0,
3470 &ironlake_display_wm_info,
3471 ILK_LP0_PLANE_LATENCY,
3472 &ironlake_cursor_wm_info,
3473 ILK_LP0_CURSOR_LATENCY,
3474 &plane_wm, &cursor_wm)) {
3434 I915_WRITE(WM0_PIPEA_ILK, 3475 I915_WRITE(WM0_PIPEA_ILK,
3435 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); 3476 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
3436 DRM_DEBUG_KMS("FIFO watermarks For pipe A -" 3477 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
@@ -3439,7 +3480,12 @@ static void ironlake_update_wm(struct drm_device *dev,
3439 enabled++; 3480 enabled++;
3440 } 3481 }
3441 3482
3442 if (ironlake_compute_wm0(dev, 1, &plane_wm, &cursor_wm)) { 3483 if (ironlake_compute_wm0(dev, 1,
3484 &ironlake_display_wm_info,
3485 ILK_LP0_PLANE_LATENCY,
3486 &ironlake_cursor_wm_info,
3487 ILK_LP0_CURSOR_LATENCY,
3488 &plane_wm, &cursor_wm)) {
3443 I915_WRITE(WM0_PIPEB_ILK, 3489 I915_WRITE(WM0_PIPEB_ILK,
3444 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); 3490 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
3445 DRM_DEBUG_KMS("FIFO watermarks For pipe B -" 3491 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
@@ -3453,7 +3499,7 @@ static void ironlake_update_wm(struct drm_device *dev,
3453 * display plane is used. 3499 * display plane is used.
3454 */ 3500 */
3455 tmp = 0; 3501 tmp = 0;
3456 if (enabled == 1 && /* XXX disabled due to buggy implementation? */ 0) { 3502 if (enabled == 1) {
3457 unsigned long line_time_us; 3503 unsigned long line_time_us;
3458 int small, large, plane_fbc; 3504 int small, large, plane_fbc;
3459 int sr_clock, entries; 3505 int sr_clock, entries;
@@ -3505,6 +3551,197 @@ static void ironlake_update_wm(struct drm_device *dev,
3505 /* XXX setup WM2 and WM3 */ 3551 /* XXX setup WM2 and WM3 */
3506} 3552}
3507 3553
3554/*
3555 * Check the wm result.
3556 *
3557 * If any calculated watermark value is larger than the maximum value that
3558 * can be programmed into the associated watermark register, that watermark
3559 * must be disabled.
3560 *
3561 * Also returns false if all of those watermark values are 0, which is set by
3562 * sandybridge_compute_srwm to indicate that the latency is zero.
3563 */
3564static bool sandybridge_check_srwm(struct drm_device *dev, int level,
3565 int fbc_wm, int display_wm, int cursor_wm)
3566{
3567 struct drm_i915_private *dev_priv = dev->dev_private;
3568
3569 DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
3570 " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
3571
3572 if (fbc_wm > SNB_FBC_MAX_SRWM) {
3573 DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
3574 fbc_wm, SNB_FBC_MAX_SRWM, level);
3575
3576 /* FBC has its own way to disable the FBC WM */
3577 I915_WRITE(DISP_ARB_CTL,
3578 I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
3579 return false;
3580 }
3581
3582 if (display_wm > SNB_DISPLAY_MAX_SRWM) {
3583 DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
3584 display_wm, SNB_DISPLAY_MAX_SRWM, level);
3585 return false;
3586 }
3587
3588 if (cursor_wm > SNB_CURSOR_MAX_SRWM) {
3589 DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
3590 cursor_wm, SNB_CURSOR_MAX_SRWM, level);
3591 return false;
3592 }
3593
3594 if (!(fbc_wm || display_wm || cursor_wm)) {
3595 DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
3596 return false;
3597 }
3598
3599 return true;
3600}
3601
3602/*
 3603 * Compute the watermark values for WM[1-3].
3604 */
3605static bool sandybridge_compute_srwm(struct drm_device *dev, int level,
3606 int hdisplay, int htotal, int pixel_size,
3607 int clock, int latency_ns, int *fbc_wm,
3608 int *display_wm, int *cursor_wm)
3609{
3610
3611 unsigned long line_time_us;
3612 int small, large;
3613 int entries;
3614 int line_count, line_size;
3615
3616 if (!latency_ns) {
3617 *fbc_wm = *display_wm = *cursor_wm = 0;
3618 return false;
3619 }
3620
3621 line_time_us = (htotal * 1000) / clock;
3622 line_count = (latency_ns / line_time_us + 1000) / 1000;
3623 line_size = hdisplay * pixel_size;
3624
3625 /* Use the minimum of the small and large buffer method for primary */
3626 small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
3627 large = line_count * line_size;
3628
3629 entries = DIV_ROUND_UP(min(small, large),
3630 sandybridge_display_srwm_info.cacheline_size);
3631 *display_wm = entries + sandybridge_display_srwm_info.guard_size;
3632
3633 /*
3634 * Spec said:
3635 * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
3636 */
3637 *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
3638
3639 /* calculate the self-refresh watermark for display cursor */
3640 entries = line_count * pixel_size * 64;
3641 entries = DIV_ROUND_UP(entries,
3642 sandybridge_cursor_srwm_info.cacheline_size);
3643 *cursor_wm = entries + sandybridge_cursor_srwm_info.guard_size;
3644
3645 return sandybridge_check_srwm(dev, level,
3646 *fbc_wm, *display_wm, *cursor_wm);
3647}
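For intuition, the WM[1-3] arithmetic above can be replayed standalone; a minimal hedged sketch with invented 1080p numbers (the timing, latency, cacheline and guard values are illustrative assumptions, not taken from this patch):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int hdisplay = 1920, htotal = 2200, clock = 148500; /* kHz */
	int pixel_size = 4;            /* bytes per pixel */
	int latency_ns = 4000;         /* hypothetical WM1 latency */
	int cacheline = 64, guard = 2; /* assumed srwm_info values */

	long line_time_us = (htotal * 1000) / clock;        /* 14 us */
	int line_count = (int)((latency_ns / line_time_us + 1000) / 1000);
	int line_size = hdisplay * pixel_size;              /* 7680 */

	/* minimum of the small- and large-buffer methods, as above */
	int small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	int large = line_count * line_size;
	int entries = DIV_ROUND_UP(small < large ? small : large, cacheline);

	int display_wm = entries + guard;                   /* 38 + 2 = 40 */
	int fbc_wm = DIV_ROUND_UP(display_wm * 64, line_size) + 2;  /* 3 */
	int cursor_wm = DIV_ROUND_UP(line_count * pixel_size * 64,
				     cacheline) + guard;    /* 4 + 2 = 6 */

	printf("display=%d fbc=%d cursor=%d\n", display_wm, fbc_wm, cursor_wm);
	return 0;
}
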
3648
3649static void sandybridge_update_wm(struct drm_device *dev,
3650 int planea_clock, int planeb_clock,
3651 int hdisplay, int htotal,
3652 int pixel_size)
3653{
3654 struct drm_i915_private *dev_priv = dev->dev_private;
3655 int latency = SNB_READ_WM0_LATENCY();
3656 int fbc_wm, plane_wm, cursor_wm, enabled;
3657 int clock;
3658
3659 enabled = 0;
3660 if (ironlake_compute_wm0(dev, 0,
3661 &sandybridge_display_wm_info, latency,
3662 &sandybridge_cursor_wm_info, latency,
3663 &plane_wm, &cursor_wm)) {
3664 I915_WRITE(WM0_PIPEA_ILK,
3665 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
3666 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
3667 " plane %d, " "cursor: %d\n",
3668 plane_wm, cursor_wm);
3669 enabled++;
3670 }
3671
3672 if (ironlake_compute_wm0(dev, 1,
3673 &sandybridge_display_wm_info, latency,
3674 &sandybridge_cursor_wm_info, latency,
3675 &plane_wm, &cursor_wm)) {
3676 I915_WRITE(WM0_PIPEB_ILK,
3677 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
3678 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
3679 " plane %d, cursor: %d\n",
3680 plane_wm, cursor_wm);
3681 enabled++;
3682 }
3683
3684 /*
3685 * Calculate and update the self-refresh watermark only when one
3686 * display plane is used.
3687 *
 3688 * SNB supports 3 levels of watermarks.
3689 *
 3690 * The WM1/WM2/WM3 watermarks have to be enabled in ascending order
 3691 * and disabled in descending order.
3692 *
3693 */
3694 I915_WRITE(WM3_LP_ILK, 0);
3695 I915_WRITE(WM2_LP_ILK, 0);
3696 I915_WRITE(WM1_LP_ILK, 0);
3697
3698 if (enabled != 1)
3699 return;
3700
3701 clock = planea_clock ? planea_clock : planeb_clock;
3702
3703 /* WM1 */
3704 if (!sandybridge_compute_srwm(dev, 1, hdisplay, htotal, pixel_size,
3705 clock, SNB_READ_WM1_LATENCY() * 500,
3706 &fbc_wm, &plane_wm, &cursor_wm))
3707 return;
3708
3709 I915_WRITE(WM1_LP_ILK,
3710 WM1_LP_SR_EN |
3711 (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
3712 (fbc_wm << WM1_LP_FBC_SHIFT) |
3713 (plane_wm << WM1_LP_SR_SHIFT) |
3714 cursor_wm);
3715
3716 /* WM2 */
3717 if (!sandybridge_compute_srwm(dev, 2,
3718 hdisplay, htotal, pixel_size,
3719 clock, SNB_READ_WM2_LATENCY() * 500,
3720 &fbc_wm, &plane_wm, &cursor_wm))
3721 return;
3722
3723 I915_WRITE(WM2_LP_ILK,
3724 WM2_LP_EN |
3725 (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
3726 (fbc_wm << WM1_LP_FBC_SHIFT) |
3727 (plane_wm << WM1_LP_SR_SHIFT) |
3728 cursor_wm);
3729
3730 /* WM3 */
3731 if (!sandybridge_compute_srwm(dev, 3,
3732 hdisplay, htotal, pixel_size,
3733 clock, SNB_READ_WM3_LATENCY() * 500,
3734 &fbc_wm, &plane_wm, &cursor_wm))
3735 return;
3736
3737 I915_WRITE(WM3_LP_ILK,
3738 WM3_LP_EN |
3739 (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
3740 (fbc_wm << WM1_LP_FBC_SHIFT) |
3741 (plane_wm << WM1_LP_SR_SHIFT) |
3742 cursor_wm);
3743}
3744
3508/** 3745/**
3509 * intel_update_watermarks - update FIFO watermark values based on current modes 3746 * intel_update_watermarks - update FIFO watermark values based on current modes
3510 * 3747 *
@@ -3660,7 +3897,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3660 * refclk, or FALSE. The returned values represent the clock equation: 3897 * refclk, or FALSE. The returned values represent the clock equation:
 3661 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 3898 refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
3662 */ 3899 */
3663 limit = intel_limit(crtc); 3900 limit = intel_limit(crtc, refclk);
3664 ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock); 3901 ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
3665 if (!ok) { 3902 if (!ok) {
3666 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 3903 DRM_ERROR("Couldn't find PLL settings for mode!\n");
@@ -3857,6 +4094,22 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3857 reduced_clock.m2; 4094 reduced_clock.m2;
3858 } 4095 }
3859 4096
4097 /* Enable autotuning of the PLL clock (if permissible) */
4098 if (HAS_PCH_SPLIT(dev)) {
4099 int factor = 21;
4100
4101 if (is_lvds) {
4102 if ((dev_priv->lvds_use_ssc &&
4103 dev_priv->lvds_ssc_freq == 100) ||
4104 (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
4105 factor = 25;
4106 } else if (is_sdvo && is_tv)
4107 factor = 20;
4108
4109 if (clock.m1 < factor * clock.n)
4110 fp |= FP_CB_TUNE;
4111 }
4112
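The FP_CB_TUNE decision just added is an M-versus-N ratio test; a hedged standalone sketch with invented divider values (the bit position used for FP_CB_TUNE here is a placeholder):

#include <stdio.h>

int main(void)
{
	/* Invented dividers; factor 21 is the default used above. */
	int m1 = 10, n = 1, factor = 21;
	unsigned int fp = 0;

	if (m1 < factor * n)    /* 10 < 21: ratio low enough to tune */
		fp |= 1u << 22; /* placeholder bit for FP_CB_TUNE */

	printf("fp=%#x\n", fp);
	return 0;
}
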
3860 dpll = 0; 4113 dpll = 0;
3861 if (!HAS_PCH_SPLIT(dev)) 4114 if (!HAS_PCH_SPLIT(dev))
3862 dpll = DPLL_VGA_MODE_DIS; 4115 dpll = DPLL_VGA_MODE_DIS;
@@ -4071,7 +4324,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
4071 } 4324 }
4072 4325
4073 if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { 4326 if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
4074 I915_WRITE(fp_reg, fp);
4075 I915_WRITE(dpll_reg, dpll); 4327 I915_WRITE(dpll_reg, dpll);
4076 4328
4077 /* Wait for the clocks to stabilize. */ 4329 /* Wait for the clocks to stabilize. */
@@ -4089,13 +4341,13 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
4089 } 4341 }
4090 I915_WRITE(DPLL_MD(pipe), temp); 4342 I915_WRITE(DPLL_MD(pipe), temp);
4091 } else { 4343 } else {
4092 /* write it again -- the BIOS does, after all */ 4344 /* The pixel multiplier can only be updated once the
4345 * DPLL is enabled and the clocks are stable.
4346 *
4347 * So write it again.
4348 */
4093 I915_WRITE(dpll_reg, dpll); 4349 I915_WRITE(dpll_reg, dpll);
4094 } 4350 }
4095
4096 /* Wait for the clocks to stabilize. */
4097 POSTING_READ(dpll_reg);
4098 udelay(150);
4099 } 4351 }
4100 4352
4101 intel_crtc->lowfreq_avail = false; 4353 intel_crtc->lowfreq_avail = false;
@@ -4331,15 +4583,14 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
4331} 4583}
4332 4584
4333static int intel_crtc_cursor_set(struct drm_crtc *crtc, 4585static int intel_crtc_cursor_set(struct drm_crtc *crtc,
4334 struct drm_file *file_priv, 4586 struct drm_file *file,
4335 uint32_t handle, 4587 uint32_t handle,
4336 uint32_t width, uint32_t height) 4588 uint32_t width, uint32_t height)
4337{ 4589{
4338 struct drm_device *dev = crtc->dev; 4590 struct drm_device *dev = crtc->dev;
4339 struct drm_i915_private *dev_priv = dev->dev_private; 4591 struct drm_i915_private *dev_priv = dev->dev_private;
4340 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4592 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4341 struct drm_gem_object *bo; 4593 struct drm_i915_gem_object *obj;
4342 struct drm_i915_gem_object *obj_priv;
4343 uint32_t addr; 4594 uint32_t addr;
4344 int ret; 4595 int ret;
4345 4596
@@ -4349,7 +4600,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
4349 if (!handle) { 4600 if (!handle) {
4350 DRM_DEBUG_KMS("cursor off\n"); 4601 DRM_DEBUG_KMS("cursor off\n");
4351 addr = 0; 4602 addr = 0;
4352 bo = NULL; 4603 obj = NULL;
4353 mutex_lock(&dev->struct_mutex); 4604 mutex_lock(&dev->struct_mutex);
4354 goto finish; 4605 goto finish;
4355 } 4606 }
@@ -4360,13 +4611,11 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
4360 return -EINVAL; 4611 return -EINVAL;
4361 } 4612 }
4362 4613
4363 bo = drm_gem_object_lookup(dev, file_priv, handle); 4614 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
4364 if (!bo) 4615 if (!obj)
4365 return -ENOENT; 4616 return -ENOENT;
4366 4617
4367 obj_priv = to_intel_bo(bo); 4618 if (obj->base.size < width * height * 4) {
4368
4369 if (bo->size < width * height * 4) {
4370 DRM_ERROR("buffer is to small\n"); 4619 DRM_ERROR("buffer is to small\n");
4371 ret = -ENOMEM; 4620 ret = -ENOMEM;
4372 goto fail; 4621 goto fail;
@@ -4375,29 +4624,41 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
4375 /* we only need to pin inside GTT if cursor is non-phy */ 4624 /* we only need to pin inside GTT if cursor is non-phy */
4376 mutex_lock(&dev->struct_mutex); 4625 mutex_lock(&dev->struct_mutex);
4377 if (!dev_priv->info->cursor_needs_physical) { 4626 if (!dev_priv->info->cursor_needs_physical) {
4378 ret = i915_gem_object_pin(bo, PAGE_SIZE); 4627 if (obj->tiling_mode) {
4628 DRM_ERROR("cursor cannot be tiled\n");
4629 ret = -EINVAL;
4630 goto fail_locked;
4631 }
4632
4633 ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
4379 if (ret) { 4634 if (ret) {
4380 DRM_ERROR("failed to pin cursor bo\n"); 4635 DRM_ERROR("failed to pin cursor bo\n");
4381 goto fail_locked; 4636 goto fail_locked;
4382 } 4637 }
4383 4638
4384 ret = i915_gem_object_set_to_gtt_domain(bo, 0); 4639 ret = i915_gem_object_set_to_gtt_domain(obj, 0);
4385 if (ret) { 4640 if (ret) {
4386 DRM_ERROR("failed to move cursor bo into the GTT\n"); 4641 DRM_ERROR("failed to move cursor bo into the GTT\n");
4387 goto fail_unpin; 4642 goto fail_unpin;
4388 } 4643 }
4389 4644
4390 addr = obj_priv->gtt_offset; 4645 ret = i915_gem_object_put_fence(obj);
4646 if (ret) {
4647 DRM_ERROR("failed to move cursor bo into the GTT\n");
4648 goto fail_unpin;
4649 }
4650
4651 addr = obj->gtt_offset;
4391 } else { 4652 } else {
4392 int align = IS_I830(dev) ? 16 * 1024 : 256; 4653 int align = IS_I830(dev) ? 16 * 1024 : 256;
4393 ret = i915_gem_attach_phys_object(dev, bo, 4654 ret = i915_gem_attach_phys_object(dev, obj,
4394 (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1, 4655 (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
4395 align); 4656 align);
4396 if (ret) { 4657 if (ret) {
4397 DRM_ERROR("failed to attach phys object\n"); 4658 DRM_ERROR("failed to attach phys object\n");
4398 goto fail_locked; 4659 goto fail_locked;
4399 } 4660 }
4400 addr = obj_priv->phys_obj->handle->busaddr; 4661 addr = obj->phys_obj->handle->busaddr;
4401 } 4662 }
4402 4663
4403 if (IS_GEN2(dev)) 4664 if (IS_GEN2(dev))
@@ -4406,17 +4667,17 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
4406 finish: 4667 finish:
4407 if (intel_crtc->cursor_bo) { 4668 if (intel_crtc->cursor_bo) {
4408 if (dev_priv->info->cursor_needs_physical) { 4669 if (dev_priv->info->cursor_needs_physical) {
4409 if (intel_crtc->cursor_bo != bo) 4670 if (intel_crtc->cursor_bo != obj)
4410 i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); 4671 i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
4411 } else 4672 } else
4412 i915_gem_object_unpin(intel_crtc->cursor_bo); 4673 i915_gem_object_unpin(intel_crtc->cursor_bo);
4413 drm_gem_object_unreference(intel_crtc->cursor_bo); 4674 drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
4414 } 4675 }
4415 4676
4416 mutex_unlock(&dev->struct_mutex); 4677 mutex_unlock(&dev->struct_mutex);
4417 4678
4418 intel_crtc->cursor_addr = addr; 4679 intel_crtc->cursor_addr = addr;
4419 intel_crtc->cursor_bo = bo; 4680 intel_crtc->cursor_bo = obj;
4420 intel_crtc->cursor_width = width; 4681 intel_crtc->cursor_width = width;
4421 intel_crtc->cursor_height = height; 4682 intel_crtc->cursor_height = height;
4422 4683
@@ -4424,11 +4685,11 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
4424 4685
4425 return 0; 4686 return 0;
4426fail_unpin: 4687fail_unpin:
4427 i915_gem_object_unpin(bo); 4688 i915_gem_object_unpin(obj);
4428fail_locked: 4689fail_locked:
4429 mutex_unlock(&dev->struct_mutex); 4690 mutex_unlock(&dev->struct_mutex);
4430fail: 4691fail:
4431 drm_gem_object_unreference_unlocked(bo); 4692 drm_gem_object_unreference_unlocked(&obj->base);
4432 return ret; 4693 return ret;
4433} 4694}
4434 4695
@@ -4739,8 +5000,14 @@ static void intel_gpu_idle_timer(unsigned long arg)
4739 struct drm_device *dev = (struct drm_device *)arg; 5000 struct drm_device *dev = (struct drm_device *)arg;
4740 drm_i915_private_t *dev_priv = dev->dev_private; 5001 drm_i915_private_t *dev_priv = dev->dev_private;
4741 5002
4742 dev_priv->busy = false; 5003 if (!list_empty(&dev_priv->mm.active_list)) {
5004 /* Still processing requests, so just re-arm the timer. */
5005 mod_timer(&dev_priv->idle_timer, jiffies +
5006 msecs_to_jiffies(GPU_IDLE_TIMEOUT));
5007 return;
5008 }
4743 5009
5010 dev_priv->busy = false;
4744 queue_work(dev_priv->wq, &dev_priv->idle_work); 5011 queue_work(dev_priv->wq, &dev_priv->idle_work);
4745} 5012}
4746 5013
@@ -4751,9 +5018,17 @@ static void intel_crtc_idle_timer(unsigned long arg)
4751 struct intel_crtc *intel_crtc = (struct intel_crtc *)arg; 5018 struct intel_crtc *intel_crtc = (struct intel_crtc *)arg;
4752 struct drm_crtc *crtc = &intel_crtc->base; 5019 struct drm_crtc *crtc = &intel_crtc->base;
4753 drm_i915_private_t *dev_priv = crtc->dev->dev_private; 5020 drm_i915_private_t *dev_priv = crtc->dev->dev_private;
5021 struct intel_framebuffer *intel_fb;
4754 5022
4755 intel_crtc->busy = false; 5023 intel_fb = to_intel_framebuffer(crtc->fb);
5024 if (intel_fb && intel_fb->obj->active) {
5025 /* The framebuffer is still being accessed by the GPU. */
5026 mod_timer(&intel_crtc->idle_timer, jiffies +
5027 msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
5028 return;
5029 }
4756 5030
5031 intel_crtc->busy = false;
4757 queue_work(dev_priv->wq, &dev_priv->idle_work); 5032 queue_work(dev_priv->wq, &dev_priv->idle_work);
4758} 5033}
4759 5034
@@ -4888,7 +5163,7 @@ static void intel_idle_update(struct work_struct *work)
4888 * buffer), we'll also mark the display as busy, so we know to increase its 5163 * buffer), we'll also mark the display as busy, so we know to increase its
4889 * clock frequency. 5164 * clock frequency.
4890 */ 5165 */
4891void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj) 5166void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
4892{ 5167{
4893 drm_i915_private_t *dev_priv = dev->dev_private; 5168 drm_i915_private_t *dev_priv = dev->dev_private;
4894 struct drm_crtc *crtc = NULL; 5169 struct drm_crtc *crtc = NULL;
@@ -4969,8 +5244,9 @@ static void intel_unpin_work_fn(struct work_struct *__work)
4969 5244
4970 mutex_lock(&work->dev->struct_mutex); 5245 mutex_lock(&work->dev->struct_mutex);
4971 i915_gem_object_unpin(work->old_fb_obj); 5246 i915_gem_object_unpin(work->old_fb_obj);
4972 drm_gem_object_unreference(work->pending_flip_obj); 5247 drm_gem_object_unreference(&work->pending_flip_obj->base);
4973 drm_gem_object_unreference(work->old_fb_obj); 5248 drm_gem_object_unreference(&work->old_fb_obj->base);
5249
4974 mutex_unlock(&work->dev->struct_mutex); 5250 mutex_unlock(&work->dev->struct_mutex);
4975 kfree(work); 5251 kfree(work);
4976} 5252}
@@ -4981,15 +5257,17 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
4981 drm_i915_private_t *dev_priv = dev->dev_private; 5257 drm_i915_private_t *dev_priv = dev->dev_private;
4982 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5258 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4983 struct intel_unpin_work *work; 5259 struct intel_unpin_work *work;
4984 struct drm_i915_gem_object *obj_priv; 5260 struct drm_i915_gem_object *obj;
4985 struct drm_pending_vblank_event *e; 5261 struct drm_pending_vblank_event *e;
4986 struct timeval now; 5262 struct timeval tnow, tvbl;
4987 unsigned long flags; 5263 unsigned long flags;
4988 5264
4989 /* Ignore early vblank irqs */ 5265 /* Ignore early vblank irqs */
4990 if (intel_crtc == NULL) 5266 if (intel_crtc == NULL)
4991 return; 5267 return;
4992 5268
5269 do_gettimeofday(&tnow);
5270
4993 spin_lock_irqsave(&dev->event_lock, flags); 5271 spin_lock_irqsave(&dev->event_lock, flags);
4994 work = intel_crtc->unpin_work; 5272 work = intel_crtc->unpin_work;
4995 if (work == NULL || !work->pending) { 5273 if (work == NULL || !work->pending) {
@@ -4998,26 +5276,49 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
4998 } 5276 }
4999 5277
5000 intel_crtc->unpin_work = NULL; 5278 intel_crtc->unpin_work = NULL;
5001 drm_vblank_put(dev, intel_crtc->pipe);
5002 5279
5003 if (work->event) { 5280 if (work->event) {
5004 e = work->event; 5281 e = work->event;
5005 do_gettimeofday(&now); 5282 e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl);
5006 e->event.sequence = drm_vblank_count(dev, intel_crtc->pipe); 5283
5007 e->event.tv_sec = now.tv_sec; 5284 /* Called before vblank count and timestamps have
5008 e->event.tv_usec = now.tv_usec; 5285 * been updated for the vblank interval of flip
 5286 * completion? If so, we need to increment the vblank
 5287 * count and add one video refresh duration to the
 5288 * returned timestamp to account for this. We assume
 5289 * this happened if we get called over 0.9 frame
 5290 * durations after the last timestamped vblank.
 5291 *
 5292 * This calculation cannot be used with vrefresh rates
 5293 * below 5Hz (10Hz to be on the safe side) without
 5294 * promoting to 64-bit integers.
5295 */
5296 if (10 * (timeval_to_ns(&tnow) - timeval_to_ns(&tvbl)) >
5297 9 * crtc->framedur_ns) {
5298 e->event.sequence++;
5299 tvbl = ns_to_timeval(timeval_to_ns(&tvbl) +
5300 crtc->framedur_ns);
5301 }
5302
5303 e->event.tv_sec = tvbl.tv_sec;
5304 e->event.tv_usec = tvbl.tv_usec;
5305
5009 list_add_tail(&e->base.link, 5306 list_add_tail(&e->base.link,
5010 &e->base.file_priv->event_list); 5307 &e->base.file_priv->event_list);
5011 wake_up_interruptible(&e->base.file_priv->event_wait); 5308 wake_up_interruptible(&e->base.file_priv->event_wait);
5012 } 5309 }
5013 5310
5311 drm_vblank_put(dev, intel_crtc->pipe);
5312
5014 spin_unlock_irqrestore(&dev->event_lock, flags); 5313 spin_unlock_irqrestore(&dev->event_lock, flags);
5015 5314
5016 obj_priv = to_intel_bo(work->old_fb_obj); 5315 obj = work->old_fb_obj;
5316
5017 atomic_clear_mask(1 << intel_crtc->plane, 5317 atomic_clear_mask(1 << intel_crtc->plane,
5018 &obj_priv->pending_flip.counter); 5318 &obj->pending_flip.counter);
5019 if (atomic_read(&obj_priv->pending_flip) == 0) 5319 if (atomic_read(&obj->pending_flip) == 0)
5020 wake_up(&dev_priv->pending_flip_queue); 5320 wake_up(&dev_priv->pending_flip_queue);
5321
5021 schedule_work(&work->work); 5322 schedule_work(&work->work);
5022 5323
5023 trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj); 5324 trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
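The 0.9-frame fixup above is plain integer arithmetic; a minimal hedged sketch at an assumed 60 Hz refresh (all numbers invented):

#include <stdio.h>

int main(void)
{
	long long framedur_ns = 16666667;  /* one frame at ~60 Hz */
	long long tvbl_ns = 984400000;     /* last timestamped vblank */
	long long tnow_ns = 1000000000;    /* completion seen 15.6 ms later */
	long long seq = 100;

	/* Same test as the driver: completed > 0.9 frames after vblank? */
	if (10 * (tnow_ns - tvbl_ns) > 9 * framedur_ns) {
		seq++;                     /* account for the missed vblank */
		tvbl_ns += framedur_ns;    /* push the timestamp forward */
	}
	printf("seq=%lld tvbl=%lld\n", seq, tvbl_ns);
	return 0;
}
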
@@ -5063,8 +5364,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
5063 struct drm_device *dev = crtc->dev; 5364 struct drm_device *dev = crtc->dev;
5064 struct drm_i915_private *dev_priv = dev->dev_private; 5365 struct drm_i915_private *dev_priv = dev->dev_private;
5065 struct intel_framebuffer *intel_fb; 5366 struct intel_framebuffer *intel_fb;
5066 struct drm_i915_gem_object *obj_priv; 5367 struct drm_i915_gem_object *obj;
5067 struct drm_gem_object *obj;
5068 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5368 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5069 struct intel_unpin_work *work; 5369 struct intel_unpin_work *work;
5070 unsigned long flags, offset; 5370 unsigned long flags, offset;
@@ -5098,13 +5398,13 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
5098 obj = intel_fb->obj; 5398 obj = intel_fb->obj;
5099 5399
5100 mutex_lock(&dev->struct_mutex); 5400 mutex_lock(&dev->struct_mutex);
5101 ret = intel_pin_and_fence_fb_obj(dev, obj, true); 5401 ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
5102 if (ret) 5402 if (ret)
5103 goto cleanup_work; 5403 goto cleanup_work;
5104 5404
5105 /* Reference the objects for the scheduled work. */ 5405 /* Reference the objects for the scheduled work. */
5106 drm_gem_object_reference(work->old_fb_obj); 5406 drm_gem_object_reference(&work->old_fb_obj->base);
5107 drm_gem_object_reference(obj); 5407 drm_gem_object_reference(&obj->base);
5108 5408
5109 crtc->fb = fb; 5409 crtc->fb = fb;
5110 5410
@@ -5112,22 +5412,16 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
5112 if (ret) 5412 if (ret)
5113 goto cleanup_objs; 5413 goto cleanup_objs;
5114 5414
5115 /* Block clients from rendering to the new back buffer until
5116 * the flip occurs and the object is no longer visible.
5117 */
5118 atomic_add(1 << intel_crtc->plane,
5119 &to_intel_bo(work->old_fb_obj)->pending_flip);
5120
5121 work->pending_flip_obj = obj;
5122 obj_priv = to_intel_bo(obj);
5123
5124 if (IS_GEN3(dev) || IS_GEN2(dev)) { 5415 if (IS_GEN3(dev) || IS_GEN2(dev)) {
5125 u32 flip_mask; 5416 u32 flip_mask;
5126 5417
5127 /* Can't queue multiple flips, so wait for the previous 5418 /* Can't queue multiple flips, so wait for the previous
5128 * one to finish before executing the next. 5419 * one to finish before executing the next.
5129 */ 5420 */
5130 BEGIN_LP_RING(2); 5421 ret = BEGIN_LP_RING(2);
5422 if (ret)
5423 goto cleanup_objs;
5424
5131 if (intel_crtc->plane) 5425 if (intel_crtc->plane)
5132 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; 5426 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
5133 else 5427 else
@@ -5137,18 +5431,28 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
5137 ADVANCE_LP_RING(); 5431 ADVANCE_LP_RING();
5138 } 5432 }
5139 5433
5434 work->pending_flip_obj = obj;
5435
5140 work->enable_stall_check = true; 5436 work->enable_stall_check = true;
5141 5437
5142 /* Offset into the new buffer for cases of shared fbs between CRTCs */ 5438 /* Offset into the new buffer for cases of shared fbs between CRTCs */
5143 offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8; 5439 offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
5144 5440
5145 BEGIN_LP_RING(4); 5441 ret = BEGIN_LP_RING(4);
5146 switch(INTEL_INFO(dev)->gen) { 5442 if (ret)
5443 goto cleanup_objs;
5444
5445 /* Block clients from rendering to the new back buffer until
5446 * the flip occurs and the object is no longer visible.
5447 */
5448 atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
5449
5450 switch (INTEL_INFO(dev)->gen) {
5147 case 2: 5451 case 2:
5148 OUT_RING(MI_DISPLAY_FLIP | 5452 OUT_RING(MI_DISPLAY_FLIP |
5149 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 5453 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
5150 OUT_RING(fb->pitch); 5454 OUT_RING(fb->pitch);
5151 OUT_RING(obj_priv->gtt_offset + offset); 5455 OUT_RING(obj->gtt_offset + offset);
5152 OUT_RING(MI_NOOP); 5456 OUT_RING(MI_NOOP);
5153 break; 5457 break;
5154 5458
@@ -5156,7 +5460,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
5156 OUT_RING(MI_DISPLAY_FLIP_I915 | 5460 OUT_RING(MI_DISPLAY_FLIP_I915 |
5157 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 5461 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
5158 OUT_RING(fb->pitch); 5462 OUT_RING(fb->pitch);
5159 OUT_RING(obj_priv->gtt_offset + offset); 5463 OUT_RING(obj->gtt_offset + offset);
5160 OUT_RING(MI_NOOP); 5464 OUT_RING(MI_NOOP);
5161 break; 5465 break;
5162 5466
@@ -5169,7 +5473,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
5169 OUT_RING(MI_DISPLAY_FLIP | 5473 OUT_RING(MI_DISPLAY_FLIP |
5170 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 5474 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
5171 OUT_RING(fb->pitch); 5475 OUT_RING(fb->pitch);
5172 OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode); 5476 OUT_RING(obj->gtt_offset | obj->tiling_mode);
5173 5477
5174 /* XXX Enabling the panel-fitter across page-flip is so far 5478 /* XXX Enabling the panel-fitter across page-flip is so far
5175 * untested on non-native modes, so ignore it for now. 5479 * untested on non-native modes, so ignore it for now.
@@ -5183,8 +5487,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
5183 case 6: 5487 case 6:
5184 OUT_RING(MI_DISPLAY_FLIP | 5488 OUT_RING(MI_DISPLAY_FLIP |
5185 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 5489 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
5186 OUT_RING(fb->pitch | obj_priv->tiling_mode); 5490 OUT_RING(fb->pitch | obj->tiling_mode);
5187 OUT_RING(obj_priv->gtt_offset); 5491 OUT_RING(obj->gtt_offset);
5188 5492
5189 pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE; 5493 pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
5190 pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff; 5494 pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff;
@@ -5200,8 +5504,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
5200 return 0; 5504 return 0;
5201 5505
5202cleanup_objs: 5506cleanup_objs:
5203 drm_gem_object_unreference(work->old_fb_obj); 5507 drm_gem_object_unreference(&work->old_fb_obj->base);
5204 drm_gem_object_unreference(obj); 5508 drm_gem_object_unreference(&obj->base);
5205cleanup_work: 5509cleanup_work:
5206 mutex_unlock(&dev->struct_mutex); 5510 mutex_unlock(&dev->struct_mutex);
5207 5511
@@ -5338,7 +5642,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
5338} 5642}
5339 5643
5340int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 5644int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
5341 struct drm_file *file_priv) 5645 struct drm_file *file)
5342{ 5646{
5343 drm_i915_private_t *dev_priv = dev->dev_private; 5647 drm_i915_private_t *dev_priv = dev->dev_private;
5344 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; 5648 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
@@ -5505,19 +5809,19 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
5505 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 5809 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
5506 5810
5507 drm_framebuffer_cleanup(fb); 5811 drm_framebuffer_cleanup(fb);
5508 drm_gem_object_unreference_unlocked(intel_fb->obj); 5812 drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
5509 5813
5510 kfree(intel_fb); 5814 kfree(intel_fb);
5511} 5815}
5512 5816
5513static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, 5817static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
5514 struct drm_file *file_priv, 5818 struct drm_file *file,
5515 unsigned int *handle) 5819 unsigned int *handle)
5516{ 5820{
5517 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 5821 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
5518 struct drm_gem_object *object = intel_fb->obj; 5822 struct drm_i915_gem_object *obj = intel_fb->obj;
5519 5823
5520 return drm_gem_handle_create(file_priv, object, handle); 5824 return drm_gem_handle_create(file, &obj->base, handle);
5521} 5825}
5522 5826
5523static const struct drm_framebuffer_funcs intel_fb_funcs = { 5827static const struct drm_framebuffer_funcs intel_fb_funcs = {
@@ -5528,12 +5832,11 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
5528int intel_framebuffer_init(struct drm_device *dev, 5832int intel_framebuffer_init(struct drm_device *dev,
5529 struct intel_framebuffer *intel_fb, 5833 struct intel_framebuffer *intel_fb,
5530 struct drm_mode_fb_cmd *mode_cmd, 5834 struct drm_mode_fb_cmd *mode_cmd,
5531 struct drm_gem_object *obj) 5835 struct drm_i915_gem_object *obj)
5532{ 5836{
5533 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
5534 int ret; 5837 int ret;
5535 5838
5536 if (obj_priv->tiling_mode == I915_TILING_Y) 5839 if (obj->tiling_mode == I915_TILING_Y)
5537 return -EINVAL; 5840 return -EINVAL;
5538 5841
5539 if (mode_cmd->pitch & 63) 5842 if (mode_cmd->pitch & 63)
@@ -5565,11 +5868,11 @@ intel_user_framebuffer_create(struct drm_device *dev,
5565 struct drm_file *filp, 5868 struct drm_file *filp,
5566 struct drm_mode_fb_cmd *mode_cmd) 5869 struct drm_mode_fb_cmd *mode_cmd)
5567{ 5870{
5568 struct drm_gem_object *obj; 5871 struct drm_i915_gem_object *obj;
5569 struct intel_framebuffer *intel_fb; 5872 struct intel_framebuffer *intel_fb;
5570 int ret; 5873 int ret;
5571 5874
5572 obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle); 5875 obj = to_intel_bo(drm_gem_object_lookup(dev, filp, mode_cmd->handle));
5573 if (!obj) 5876 if (!obj)
5574 return ERR_PTR(-ENOENT); 5877 return ERR_PTR(-ENOENT);
5575 5878
@@ -5577,10 +5880,9 @@ intel_user_framebuffer_create(struct drm_device *dev,
5577 if (!intel_fb) 5880 if (!intel_fb)
5578 return ERR_PTR(-ENOMEM); 5881 return ERR_PTR(-ENOMEM);
5579 5882
5580 ret = intel_framebuffer_init(dev, intel_fb, 5883 ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
5581 mode_cmd, obj);
5582 if (ret) { 5884 if (ret) {
5583 drm_gem_object_unreference_unlocked(obj); 5885 drm_gem_object_unreference_unlocked(&obj->base);
5584 kfree(intel_fb); 5886 kfree(intel_fb);
5585 return ERR_PTR(ret); 5887 return ERR_PTR(ret);
5586 } 5888 }
@@ -5593,10 +5895,10 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
5593 .output_poll_changed = intel_fb_output_poll_changed, 5895 .output_poll_changed = intel_fb_output_poll_changed,
5594}; 5896};
5595 5897
5596static struct drm_gem_object * 5898static struct drm_i915_gem_object *
5597intel_alloc_context_page(struct drm_device *dev) 5899intel_alloc_context_page(struct drm_device *dev)
5598{ 5900{
5599 struct drm_gem_object *ctx; 5901 struct drm_i915_gem_object *ctx;
5600 int ret; 5902 int ret;
5601 5903
5602 ctx = i915_gem_alloc_object(dev, 4096); 5904 ctx = i915_gem_alloc_object(dev, 4096);
@@ -5606,7 +5908,7 @@ intel_alloc_context_page(struct drm_device *dev)
5606 } 5908 }
5607 5909
5608 mutex_lock(&dev->struct_mutex); 5910 mutex_lock(&dev->struct_mutex);
5609 ret = i915_gem_object_pin(ctx, 4096); 5911 ret = i915_gem_object_pin(ctx, 4096, true);
5610 if (ret) { 5912 if (ret) {
5611 DRM_ERROR("failed to pin power context: %d\n", ret); 5913 DRM_ERROR("failed to pin power context: %d\n", ret);
5612 goto err_unref; 5914 goto err_unref;
@@ -5624,7 +5926,7 @@ intel_alloc_context_page(struct drm_device *dev)
5624err_unpin: 5926err_unpin:
5625 i915_gem_object_unpin(ctx); 5927 i915_gem_object_unpin(ctx);
5626err_unref: 5928err_unref:
5627 drm_gem_object_unreference(ctx); 5929 drm_gem_object_unreference(&ctx->base);
5628 mutex_unlock(&dev->struct_mutex); 5930 mutex_unlock(&dev->struct_mutex);
5629 return NULL; 5931 return NULL;
5630} 5932}
@@ -5736,6 +6038,25 @@ void ironlake_disable_drps(struct drm_device *dev)
5736 6038
5737} 6039}
5738 6040
6041void gen6_set_rps(struct drm_device *dev, u8 val)
6042{
6043 struct drm_i915_private *dev_priv = dev->dev_private;
6044 u32 swreq;
6045
6046 swreq = (val & 0x3ff) << 25;
6047 I915_WRITE(GEN6_RPNSWREQ, swreq);
6048}
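gen6_set_rps above packs the requested ratio into the top bits of GEN6_RPNSWREQ; a hedged sketch of that encoding (the 50 MHz-per-unit scaling is an assumption for illustration, not from this patch):

#include <stdio.h>

/* Mirrors the mask and shift in gen6_set_rps above. */
static unsigned int rps_swreq(unsigned char val)
{
	return ((unsigned int)val & 0x3ff) << 25;
}

int main(void)
{
	/* A ratio of 20 would request ~1000 MHz if one unit is 50 MHz. */
	printf("swreq=%#010x\n", rps_swreq(20));
	return 0;
}
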
6049
6050void gen6_disable_rps(struct drm_device *dev)
6051{
6052 struct drm_i915_private *dev_priv = dev->dev_private;
6053
6054 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
6055 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
6056 I915_WRITE(GEN6_PMIER, 0);
6057 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
6058}
6059
5739static unsigned long intel_pxfreq(u32 vidfreq) 6060static unsigned long intel_pxfreq(u32 vidfreq)
5740{ 6061{
5741 unsigned long freq; 6062 unsigned long freq;
@@ -5822,7 +6143,96 @@ void intel_init_emon(struct drm_device *dev)
5822 dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK); 6143 dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
5823} 6144}
5824 6145
5825void intel_init_clock_gating(struct drm_device *dev) 6146void gen6_enable_rps(struct drm_i915_private *dev_priv)
6147{
6148 int i;
6149
6150 /* Here begins a magic sequence of register writes to enable
6151 * auto-downclocking.
6152 *
 6153 * There may be some value in exposing these to
6154 * userspace...
6155 */
6156 I915_WRITE(GEN6_RC_STATE, 0);
6157 __gen6_force_wake_get(dev_priv);
6158
6159 /* disable the counters and set deterministic thresholds */
6160 I915_WRITE(GEN6_RC_CONTROL, 0);
6161
6162 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
6163 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
6164 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
6165 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
6166 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
6167
6168 for (i = 0; i < I915_NUM_RINGS; i++)
6169 I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10);
6170
6171 I915_WRITE(GEN6_RC_SLEEP, 0);
6172 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
6173 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
6174 I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
6175 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
6176
6177 I915_WRITE(GEN6_RC_CONTROL,
6178 GEN6_RC_CTL_RC6p_ENABLE |
6179 GEN6_RC_CTL_RC6_ENABLE |
6180 GEN6_RC_CTL_EI_MODE(1) |
6181 GEN6_RC_CTL_HW_ENABLE);
6182
6183 I915_WRITE(GEN6_RPNSWREQ,
6184 GEN6_FREQUENCY(10) |
6185 GEN6_OFFSET(0) |
6186 GEN6_AGGRESSIVE_TURBO);
6187 I915_WRITE(GEN6_RC_VIDEO_FREQ,
6188 GEN6_FREQUENCY(12));
6189
6190 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
6191 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
6192 18 << 24 |
6193 6 << 16);
6194 I915_WRITE(GEN6_RP_UP_THRESHOLD, 90000);
6195 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 100000);
6196 I915_WRITE(GEN6_RP_UP_EI, 100000);
6197 I915_WRITE(GEN6_RP_DOWN_EI, 300000);
6198 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
6199 I915_WRITE(GEN6_RP_CONTROL,
6200 GEN6_RP_MEDIA_TURBO |
6201 GEN6_RP_USE_NORMAL_FREQ |
6202 GEN6_RP_MEDIA_IS_GFX |
6203 GEN6_RP_ENABLE |
6204 GEN6_RP_UP_BUSY_MAX |
6205 GEN6_RP_DOWN_BUSY_MIN);
6206
6207 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
6208 500))
6209 DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
6210
6211 I915_WRITE(GEN6_PCODE_DATA, 0);
6212 I915_WRITE(GEN6_PCODE_MAILBOX,
6213 GEN6_PCODE_READY |
6214 GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
6215 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
6216 500))
6217 DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
6218
6219 /* requires MSI enabled */
6220 I915_WRITE(GEN6_PMIER,
6221 GEN6_PM_MBOX_EVENT |
6222 GEN6_PM_THERMAL_EVENT |
6223 GEN6_PM_RP_DOWN_TIMEOUT |
6224 GEN6_PM_RP_UP_THRESHOLD |
6225 GEN6_PM_RP_DOWN_THRESHOLD |
6226 GEN6_PM_RP_UP_EI_EXPIRED |
6227 GEN6_PM_RP_DOWN_EI_EXPIRED);
6228 I915_WRITE(GEN6_PMIMR, 0);
6229 /* enable all PM interrupts */
6230 I915_WRITE(GEN6_PMINTRMSK, 0);
6231
6232 __gen6_force_wake_put(dev_priv);
6233}
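The two pcode mailbox waits at the end of gen6_enable_rps follow a ready/command/ready handshake; a hedged model of the pattern (registers are stand-in variables, the command id is hypothetical; in hardware the READY bit is cleared by the firmware):

#include <stdio.h>

#define PCODE_READY (1u << 31)  /* placeholder for GEN6_PCODE_READY */

static unsigned int mbox, data; /* stand-ins for the mmio registers */

static int pcode_write(unsigned int cmd, unsigned int val)
{
	int timeout;

	for (timeout = 500; (mbox & PCODE_READY) && timeout; timeout--)
		;                 /* 1. wait for the mailbox to go idle */
	if (!timeout)
		return -1;

	data = val;               /* 2. stage the payload */
	mbox = PCODE_READY | cmd; /* 3. kick the command */

	for (timeout = 500; (mbox & PCODE_READY) && timeout; timeout--)
		;                 /* 4. wait until firmware clears READY */
	return timeout ? 0 : -1;
}

int main(void)
{
	/* 0x8 is a hypothetical command id, not the real one. */
	printf("%d\n", pcode_write(0x8, 0));
	return 0;
}
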
6234
6235void intel_enable_clock_gating(struct drm_device *dev)
5826{ 6236{
5827 struct drm_i915_private *dev_priv = dev->dev_private; 6237 struct drm_i915_private *dev_priv = dev->dev_private;
5828 6238
@@ -5872,9 +6282,9 @@ void intel_init_clock_gating(struct drm_device *dev)
5872 I915_WRITE(DISP_ARB_CTL, 6282 I915_WRITE(DISP_ARB_CTL,
5873 (I915_READ(DISP_ARB_CTL) | 6283 (I915_READ(DISP_ARB_CTL) |
5874 DISP_FBC_WM_DIS)); 6284 DISP_FBC_WM_DIS));
5875 I915_WRITE(WM3_LP_ILK, 0); 6285 I915_WRITE(WM3_LP_ILK, 0);
5876 I915_WRITE(WM2_LP_ILK, 0); 6286 I915_WRITE(WM2_LP_ILK, 0);
5877 I915_WRITE(WM1_LP_ILK, 0); 6287 I915_WRITE(WM1_LP_ILK, 0);
5878 } 6288 }
5879 /* 6289 /*
5880 * Based on the document from hardware guys the following bits 6290 * Based on the document from hardware guys the following bits
@@ -5896,7 +6306,49 @@ void intel_init_clock_gating(struct drm_device *dev)
5896 ILK_DPFC_DIS2 | 6306 ILK_DPFC_DIS2 |
5897 ILK_CLK_FBC); 6307 ILK_CLK_FBC);
5898 } 6308 }
5899 return; 6309
6310 I915_WRITE(ILK_DISPLAY_CHICKEN2,
6311 I915_READ(ILK_DISPLAY_CHICKEN2) |
6312 ILK_ELPIN_409_SELECT);
6313
6314 if (IS_GEN5(dev)) {
6315 I915_WRITE(_3D_CHICKEN2,
6316 _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
6317 _3D_CHICKEN2_WM_READ_PIPELINED);
6318 }
6319
6320 if (IS_GEN6(dev)) {
6321 I915_WRITE(WM3_LP_ILK, 0);
6322 I915_WRITE(WM2_LP_ILK, 0);
6323 I915_WRITE(WM1_LP_ILK, 0);
6324
6325 /*
 6326 * According to the spec, the following bits should be
 6327 * set in order to enable memory self-refresh and fbc:
 6328 * Bits 21 and 22 of 0x42000
 6329 * Bits 21 and 22 of 0x42004
 6330 * Bits 5 and 7 of 0x42020
 6331 * Bit 14 of 0x70180
 6332 * Bit 14 of 0x71180
6333 */
6334 I915_WRITE(ILK_DISPLAY_CHICKEN1,
6335 I915_READ(ILK_DISPLAY_CHICKEN1) |
6336 ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
6337 I915_WRITE(ILK_DISPLAY_CHICKEN2,
6338 I915_READ(ILK_DISPLAY_CHICKEN2) |
6339 ILK_DPARB_GATE | ILK_VSDPFD_FULL);
6340 I915_WRITE(ILK_DSPCLK_GATE,
6341 I915_READ(ILK_DSPCLK_GATE) |
6342 ILK_DPARB_CLK_GATE |
6343 ILK_DPFD_CLK_GATE);
6344
6345 I915_WRITE(DSPACNTR,
6346 I915_READ(DSPACNTR) |
6347 DISPPLANE_TRICKLE_FEED_DISABLE);
6348 I915_WRITE(DSPBCNTR,
6349 I915_READ(DSPBCNTR) |
6350 DISPPLANE_TRICKLE_FEED_DISABLE);
6351 }
5900 } else if (IS_G4X(dev)) { 6352 } else if (IS_G4X(dev)) {
5901 uint32_t dspclk_gate; 6353 uint32_t dspclk_gate;
5902 I915_WRITE(RENCLK_GATE_D1, 0); 6354 I915_WRITE(RENCLK_GATE_D1, 0);
@@ -5939,20 +6391,18 @@ void intel_init_clock_gating(struct drm_device *dev)
5939 * GPU can automatically power down the render unit if given a page 6391 * GPU can automatically power down the render unit if given a page
5940 * to save state. 6392 * to save state.
5941 */ 6393 */
5942 if (IS_IRONLAKE_M(dev)) { 6394 if (IS_IRONLAKE_M(dev) && 0) { /* XXX causes a failure during suspend */
5943 if (dev_priv->renderctx == NULL) 6395 if (dev_priv->renderctx == NULL)
5944 dev_priv->renderctx = intel_alloc_context_page(dev); 6396 dev_priv->renderctx = intel_alloc_context_page(dev);
5945 if (dev_priv->renderctx) { 6397 if (dev_priv->renderctx) {
5946 struct drm_i915_gem_object *obj_priv; 6398 struct drm_i915_gem_object *obj = dev_priv->renderctx;
5947 obj_priv = to_intel_bo(dev_priv->renderctx); 6399 if (BEGIN_LP_RING(4) == 0) {
5948 if (obj_priv) {
5949 BEGIN_LP_RING(4);
5950 OUT_RING(MI_SET_CONTEXT); 6400 OUT_RING(MI_SET_CONTEXT);
5951 OUT_RING(obj_priv->gtt_offset | 6401 OUT_RING(obj->gtt_offset |
5952 MI_MM_SPACE_GTT | 6402 MI_MM_SPACE_GTT |
5953 MI_SAVE_EXT_STATE_EN | 6403 MI_SAVE_EXT_STATE_EN |
5954 MI_RESTORE_EXT_STATE_EN | 6404 MI_RESTORE_EXT_STATE_EN |
5955 MI_RESTORE_INHIBIT); 6405 MI_RESTORE_INHIBIT);
5956 OUT_RING(MI_NOOP); 6406 OUT_RING(MI_NOOP);
5957 OUT_RING(MI_FLUSH); 6407 OUT_RING(MI_FLUSH);
5958 ADVANCE_LP_RING(); 6408 ADVANCE_LP_RING();
@@ -5962,29 +6412,45 @@ void intel_init_clock_gating(struct drm_device *dev)
5962 "Disable RC6\n"); 6412 "Disable RC6\n");
5963 } 6413 }
5964 6414
5965 if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) { 6415 if (IS_GEN4(dev) && IS_MOBILE(dev)) {
5966 struct drm_i915_gem_object *obj_priv = NULL; 6416 if (dev_priv->pwrctx == NULL)
5967 6417 dev_priv->pwrctx = intel_alloc_context_page(dev);
5968 if (dev_priv->pwrctx) { 6418 if (dev_priv->pwrctx) {
5969 obj_priv = to_intel_bo(dev_priv->pwrctx); 6419 struct drm_i915_gem_object *obj = dev_priv->pwrctx;
5970 } else { 6420 I915_WRITE(PWRCTXA, obj->gtt_offset | PWRCTX_EN);
5971 struct drm_gem_object *pwrctx;
5972
5973 pwrctx = intel_alloc_context_page(dev);
5974 if (pwrctx) {
5975 dev_priv->pwrctx = pwrctx;
5976 obj_priv = to_intel_bo(pwrctx);
5977 }
5978 }
5979
5980 if (obj_priv) {
5981 I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN);
5982 I915_WRITE(MCHBAR_RENDER_STANDBY, 6421 I915_WRITE(MCHBAR_RENDER_STANDBY,
5983 I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT); 6422 I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT);
5984 } 6423 }
5985 } 6424 }
5986} 6425}
5987 6426
6427void intel_disable_clock_gating(struct drm_device *dev)
6428{
6429 struct drm_i915_private *dev_priv = dev->dev_private;
6430
6431 if (dev_priv->renderctx) {
6432 struct drm_i915_gem_object *obj = dev_priv->renderctx;
6433
6434 I915_WRITE(CCID, 0);
6435 POSTING_READ(CCID);
6436
6437 i915_gem_object_unpin(obj);
6438 drm_gem_object_unreference(&obj->base);
6439 dev_priv->renderctx = NULL;
6440 }
6441
6442 if (dev_priv->pwrctx) {
6443 struct drm_i915_gem_object *obj = dev_priv->pwrctx;
6444
6445 I915_WRITE(PWRCTXA, 0);
6446 POSTING_READ(PWRCTXA);
6447
6448 i915_gem_object_unpin(obj);
6449 drm_gem_object_unreference(&obj->base);
6450 dev_priv->pwrctx = NULL;
6451 }
6452}
6453
5988/* Set up chip specific display functions */ 6454/* Set up chip specific display functions */
5989static void intel_init_display(struct drm_device *dev) 6455static void intel_init_display(struct drm_device *dev)
5990{ 6456{
@@ -5997,7 +6463,7 @@ static void intel_init_display(struct drm_device *dev)
5997 dev_priv->display.dpms = i9xx_crtc_dpms; 6463 dev_priv->display.dpms = i9xx_crtc_dpms;
5998 6464
5999 if (I915_HAS_FBC(dev)) { 6465 if (I915_HAS_FBC(dev)) {
6000 if (IS_IRONLAKE_M(dev)) { 6466 if (HAS_PCH_SPLIT(dev)) {
6001 dev_priv->display.fbc_enabled = ironlake_fbc_enabled; 6467 dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
6002 dev_priv->display.enable_fbc = ironlake_enable_fbc; 6468 dev_priv->display.enable_fbc = ironlake_enable_fbc;
6003 dev_priv->display.disable_fbc = ironlake_disable_fbc; 6469 dev_priv->display.disable_fbc = ironlake_disable_fbc;
@@ -6046,6 +6512,14 @@ static void intel_init_display(struct drm_device *dev)
6046 "Disable CxSR\n"); 6512 "Disable CxSR\n");
6047 dev_priv->display.update_wm = NULL; 6513 dev_priv->display.update_wm = NULL;
6048 } 6514 }
6515 } else if (IS_GEN6(dev)) {
6516 if (SNB_READ_WM0_LATENCY()) {
6517 dev_priv->display.update_wm = sandybridge_update_wm;
6518 } else {
6519 DRM_DEBUG_KMS("Failed to read display plane latency. "
6520 "Disable CxSR\n");
6521 dev_priv->display.update_wm = NULL;
6522 }
6049 } else 6523 } else
6050 dev_priv->display.update_wm = NULL; 6524 dev_priv->display.update_wm = NULL;
6051 } else if (IS_PINEVIEW(dev)) { 6525 } else if (IS_PINEVIEW(dev)) {
@@ -6211,7 +6685,7 @@ void intel_modeset_init(struct drm_device *dev)
6211 6685
6212 intel_setup_outputs(dev); 6686 intel_setup_outputs(dev);
6213 6687
6214 intel_init_clock_gating(dev); 6688 intel_enable_clock_gating(dev);
6215 6689
6216 /* Just disable it once at startup */ 6690 /* Just disable it once at startup */
6217 i915_disable_vga(dev); 6691 i915_disable_vga(dev);
@@ -6221,6 +6695,9 @@ void intel_modeset_init(struct drm_device *dev)
6221 intel_init_emon(dev); 6695 intel_init_emon(dev);
6222 } 6696 }
6223 6697
6698 if (IS_GEN6(dev))
6699 gen6_enable_rps(dev_priv);
6700
6224 INIT_WORK(&dev_priv->idle_work, intel_idle_update); 6701 INIT_WORK(&dev_priv->idle_work, intel_idle_update);
6225 setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, 6702 setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
6226 (unsigned long)dev); 6703 (unsigned long)dev);
@@ -6252,28 +6729,12 @@ void intel_modeset_cleanup(struct drm_device *dev)
6252 if (dev_priv->display.disable_fbc) 6729 if (dev_priv->display.disable_fbc)
6253 dev_priv->display.disable_fbc(dev); 6730 dev_priv->display.disable_fbc(dev);
6254 6731
6255 if (dev_priv->renderctx) {
6256 struct drm_i915_gem_object *obj_priv;
6257
6258 obj_priv = to_intel_bo(dev_priv->renderctx);
6259 I915_WRITE(CCID, obj_priv->gtt_offset &~ CCID_EN);
6260 I915_READ(CCID);
6261 i915_gem_object_unpin(dev_priv->renderctx);
6262 drm_gem_object_unreference(dev_priv->renderctx);
6263 }
6264
6265 if (dev_priv->pwrctx) {
6266 struct drm_i915_gem_object *obj_priv;
6267
6268 obj_priv = to_intel_bo(dev_priv->pwrctx);
6269 I915_WRITE(PWRCTXA, obj_priv->gtt_offset &~ PWRCTX_EN);
6270 I915_READ(PWRCTXA);
6271 i915_gem_object_unpin(dev_priv->pwrctx);
6272 drm_gem_object_unreference(dev_priv->pwrctx);
6273 }
6274
6275 if (IS_IRONLAKE_M(dev)) 6732 if (IS_IRONLAKE_M(dev))
6276 ironlake_disable_drps(dev); 6733 ironlake_disable_drps(dev);
6734 if (IS_GEN6(dev))
6735 gen6_disable_rps(dev);
6736
6737 intel_disable_clock_gating(dev);
6277 6738
6278 mutex_unlock(&dev->struct_mutex); 6739 mutex_unlock(&dev->struct_mutex);
6279 6740
@@ -6325,3 +6786,113 @@ int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
6325 pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl); 6786 pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
6326 return 0; 6787 return 0;
6327} 6788}
6789
6790#ifdef CONFIG_DEBUG_FS
6791#include <linux/seq_file.h>
6792
6793struct intel_display_error_state {
6794 struct intel_cursor_error_state {
6795 u32 control;
6796 u32 position;
6797 u32 base;
6798 u32 size;
6799 } cursor[2];
6800
6801 struct intel_pipe_error_state {
6802 u32 conf;
6803 u32 source;
6804
6805 u32 htotal;
6806 u32 hblank;
6807 u32 hsync;
6808 u32 vtotal;
6809 u32 vblank;
6810 u32 vsync;
6811 } pipe[2];
6812
6813 struct intel_plane_error_state {
6814 u32 control;
6815 u32 stride;
6816 u32 size;
6817 u32 pos;
6818 u32 addr;
6819 u32 surface;
6820 u32 tile_offset;
6821 } plane[2];
6822};
6823
6824struct intel_display_error_state *
6825intel_display_capture_error_state(struct drm_device *dev)
6826{
6827 drm_i915_private_t *dev_priv = dev->dev_private;
6828 struct intel_display_error_state *error;
6829 int i;
6830
6831 error = kmalloc(sizeof(*error), GFP_ATOMIC);
6832 if (error == NULL)
6833 return NULL;
6834
6835 for (i = 0; i < 2; i++) {
6836 error->cursor[i].control = I915_READ(CURCNTR(i));
6837 error->cursor[i].position = I915_READ(CURPOS(i));
6838 error->cursor[i].base = I915_READ(CURBASE(i));
6839
6840 error->plane[i].control = I915_READ(DSPCNTR(i));
6841 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
6842 error->plane[i].size = I915_READ(DSPSIZE(i));
 6843 error->plane[i].pos = I915_READ(DSPPOS(i));
6844 error->plane[i].addr = I915_READ(DSPADDR(i));
6845 if (INTEL_INFO(dev)->gen >= 4) {
6846 error->plane[i].surface = I915_READ(DSPSURF(i));
6847 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
6848 }
6849
6850 error->pipe[i].conf = I915_READ(PIPECONF(i));
6851 error->pipe[i].source = I915_READ(PIPESRC(i));
6852 error->pipe[i].htotal = I915_READ(HTOTAL(i));
6853 error->pipe[i].hblank = I915_READ(HBLANK(i));
6854 error->pipe[i].hsync = I915_READ(HSYNC(i));
6855 error->pipe[i].vtotal = I915_READ(VTOTAL(i));
6856 error->pipe[i].vblank = I915_READ(VBLANK(i));
6857 error->pipe[i].vsync = I915_READ(VSYNC(i));
6858 }
6859
6860 return error;
6861}
6862
6863void
6864intel_display_print_error_state(struct seq_file *m,
6865 struct drm_device *dev,
6866 struct intel_display_error_state *error)
6867{
6868 int i;
6869
6870 for (i = 0; i < 2; i++) {
6871 seq_printf(m, "Pipe [%d]:\n", i);
6872 seq_printf(m, " CONF: %08x\n", error->pipe[i].conf);
6873 seq_printf(m, " SRC: %08x\n", error->pipe[i].source);
6874 seq_printf(m, " HTOTAL: %08x\n", error->pipe[i].htotal);
6875 seq_printf(m, " HBLANK: %08x\n", error->pipe[i].hblank);
6876 seq_printf(m, " HSYNC: %08x\n", error->pipe[i].hsync);
6877 seq_printf(m, " VTOTAL: %08x\n", error->pipe[i].vtotal);
6878 seq_printf(m, " VBLANK: %08x\n", error->pipe[i].vblank);
6879 seq_printf(m, " VSYNC: %08x\n", error->pipe[i].vsync);
6880
6881 seq_printf(m, "Plane [%d]:\n", i);
6882 seq_printf(m, " CNTR: %08x\n", error->plane[i].control);
6883 seq_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
6884 seq_printf(m, " SIZE: %08x\n", error->plane[i].size);
6885 seq_printf(m, " POS: %08x\n", error->plane[i].pos);
6886 seq_printf(m, " ADDR: %08x\n", error->plane[i].addr);
6887 if (INTEL_INFO(dev)->gen >= 4) {
6888 seq_printf(m, " SURF: %08x\n", error->plane[i].surface);
6889 seq_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
6890 }
6891
6892 seq_printf(m, "Cursor [%d]:\n", i);
6893 seq_printf(m, " CNTR: %08x\n", error->cursor[i].control);
6894 seq_printf(m, " POS: %08x\n", error->cursor[i].position);
6895 seq_printf(m, " BASE: %08x\n", error->cursor[i].base);
6896 }
6897}
6898#endif
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 864417cffe9a..1dc60408d5b8 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1442,8 +1442,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
1442 /* Changes to enable or select take place the vblank 1442 /* Changes to enable or select take place the vblank
1443 * after being written. 1443 * after being written.
1444 */ 1444 */
1445 intel_wait_for_vblank(intel_dp->base.base.dev, 1445 intel_wait_for_vblank(dev, intel_crtc->pipe);
1446 intel_crtc->pipe);
1447 } 1446 }
1448 1447
1449 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); 1448 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index e52c6125bb1f..d782ad9fd6db 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -127,7 +127,7 @@ intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode)
127 127
128struct intel_framebuffer { 128struct intel_framebuffer {
129 struct drm_framebuffer base; 129 struct drm_framebuffer base;
130 struct drm_gem_object *obj; 130 struct drm_i915_gem_object *obj;
131}; 131};
132 132
133struct intel_fbdev { 133struct intel_fbdev {
@@ -166,7 +166,7 @@ struct intel_crtc {
166 struct intel_unpin_work *unpin_work; 166 struct intel_unpin_work *unpin_work;
167 int fdi_lanes; 167 int fdi_lanes;
168 168
169 struct drm_gem_object *cursor_bo; 169 struct drm_i915_gem_object *cursor_bo;
170 uint32_t cursor_addr; 170 uint32_t cursor_addr;
171 int16_t cursor_x, cursor_y; 171 int16_t cursor_x, cursor_y;
172 int16_t cursor_width, cursor_height; 172 int16_t cursor_width, cursor_height;
@@ -220,8 +220,8 @@ intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
220struct intel_unpin_work { 220struct intel_unpin_work {
221 struct work_struct work; 221 struct work_struct work;
222 struct drm_device *dev; 222 struct drm_device *dev;
223 struct drm_gem_object *old_fb_obj; 223 struct drm_i915_gem_object *old_fb_obj;
224 struct drm_gem_object *pending_flip_obj; 224 struct drm_i915_gem_object *pending_flip_obj;
225 struct drm_pending_vblank_event *event; 225 struct drm_pending_vblank_event *event;
226 int pending; 226 int pending;
227 bool enable_stall_check; 227 bool enable_stall_check;
@@ -236,7 +236,8 @@ void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
236extern bool intel_sdvo_init(struct drm_device *dev, int output_device); 236extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
237extern void intel_dvo_init(struct drm_device *dev); 237extern void intel_dvo_init(struct drm_device *dev);
238extern void intel_tv_init(struct drm_device *dev); 238extern void intel_tv_init(struct drm_device *dev);
239extern void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj); 239extern void intel_mark_busy(struct drm_device *dev,
240 struct drm_i915_gem_object *obj);
240extern bool intel_lvds_init(struct drm_device *dev); 241extern bool intel_lvds_init(struct drm_device *dev);
241extern void intel_dp_init(struct drm_device *dev, int dp_reg); 242extern void intel_dp_init(struct drm_device *dev, int dp_reg);
242void 243void
@@ -293,19 +294,22 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
293 u16 blue, int regno); 294 u16 blue, int regno);
294extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, 295extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
295 u16 *blue, int regno); 296 u16 *blue, int regno);
296extern void intel_init_clock_gating(struct drm_device *dev); 297extern void intel_enable_clock_gating(struct drm_device *dev);
298extern void intel_disable_clock_gating(struct drm_device *dev);
297extern void ironlake_enable_drps(struct drm_device *dev); 299extern void ironlake_enable_drps(struct drm_device *dev);
298extern void ironlake_disable_drps(struct drm_device *dev); 300extern void ironlake_disable_drps(struct drm_device *dev);
301extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
302extern void gen6_disable_rps(struct drm_device *dev);
299extern void intel_init_emon(struct drm_device *dev); 303extern void intel_init_emon(struct drm_device *dev);
300 304
301extern int intel_pin_and_fence_fb_obj(struct drm_device *dev, 305extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
302 struct drm_gem_object *obj, 306 struct drm_i915_gem_object *obj,
303 bool pipelined); 307 struct intel_ring_buffer *pipelined);
304 308
305extern int intel_framebuffer_init(struct drm_device *dev, 309extern int intel_framebuffer_init(struct drm_device *dev,
306 struct intel_framebuffer *ifb, 310 struct intel_framebuffer *ifb,
307 struct drm_mode_fb_cmd *mode_cmd, 311 struct drm_mode_fb_cmd *mode_cmd,
308 struct drm_gem_object *obj); 312 struct drm_i915_gem_object *obj);
309extern int intel_fbdev_init(struct drm_device *dev); 313extern int intel_fbdev_init(struct drm_device *dev);
310extern void intel_fbdev_fini(struct drm_device *dev); 314extern void intel_fbdev_fini(struct drm_device *dev);
311 315
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index af2a1dddc28e..701e830d0012 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -65,10 +65,9 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
65 struct fb_info *info; 65 struct fb_info *info;
66 struct drm_framebuffer *fb; 66 struct drm_framebuffer *fb;
67 struct drm_mode_fb_cmd mode_cmd; 67 struct drm_mode_fb_cmd mode_cmd;
68 struct drm_gem_object *fbo = NULL; 68 struct drm_i915_gem_object *obj;
69 struct drm_i915_gem_object *obj_priv;
70 struct device *device = &dev->pdev->dev; 69 struct device *device = &dev->pdev->dev;
71 int size, ret, mmio_bar = IS_GEN2(dev) ? 1 : 0; 70 int size, ret;
72 71
73 /* we don't do packed 24bpp */ 72 /* we don't do packed 24bpp */
74 if (sizes->surface_bpp == 24) 73 if (sizes->surface_bpp == 24)
@@ -83,18 +82,17 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
83 82
84 size = mode_cmd.pitch * mode_cmd.height; 83 size = mode_cmd.pitch * mode_cmd.height;
85 size = ALIGN(size, PAGE_SIZE); 84 size = ALIGN(size, PAGE_SIZE);
86 fbo = i915_gem_alloc_object(dev, size); 85 obj = i915_gem_alloc_object(dev, size);
87 if (!fbo) { 86 if (!obj) {
88 DRM_ERROR("failed to allocate framebuffer\n"); 87 DRM_ERROR("failed to allocate framebuffer\n");
89 ret = -ENOMEM; 88 ret = -ENOMEM;
90 goto out; 89 goto out;
91 } 90 }
92 obj_priv = to_intel_bo(fbo);
93 91
94 mutex_lock(&dev->struct_mutex); 92 mutex_lock(&dev->struct_mutex);
95 93
96 /* Flush everything out, we'll be doing GTT only from now on */ 94 /* Flush everything out, we'll be doing GTT only from now on */
 97 ret = intel_pin_and_fence_fb_obj(dev, fbo, false); 95 ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
98 if (ret) { 96 if (ret) {
99 DRM_ERROR("failed to pin fb: %d\n", ret); 97 DRM_ERROR("failed to pin fb: %d\n", ret);
100 goto out_unref; 98 goto out_unref;
@@ -108,7 +106,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
108 106
109 info->par = ifbdev; 107 info->par = ifbdev;
110 108
111 ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, fbo); 109 ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
112 if (ret) 110 if (ret)
113 goto out_unpin; 111 goto out_unpin;
114 112
@@ -134,11 +132,10 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
134 else 132 else
135 info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0); 133 info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
136 134
137 info->fix.smem_start = dev->mode_config.fb_base + obj_priv->gtt_offset; 135 info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
138 info->fix.smem_len = size; 136 info->fix.smem_len = size;
139 137
140 info->screen_base = ioremap_wc(dev->agp->base + obj_priv->gtt_offset, 138 info->screen_base = ioremap_wc(dev->agp->base + obj->gtt_offset, size);
141 size);
142 if (!info->screen_base) { 139 if (!info->screen_base) {
143 ret = -ENOSPC; 140 ret = -ENOSPC;
144 goto out_unpin; 141 goto out_unpin;
@@ -153,13 +150,8 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
 
 //	memset(info->screen_base, 0, size);
 
-	drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
 	drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
 
-	/* FIXME: we really shouldn't expose mmio space at all */
-	info->fix.mmio_start = pci_resource_start(dev->pdev, mmio_bar);
-	info->fix.mmio_len = pci_resource_len(dev->pdev, mmio_bar);
-
 	info->pixmap.size = 64*1024;
 	info->pixmap.buf_align = 8;
 	info->pixmap.access_align = 32;
@@ -168,7 +160,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
 
 	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
 		      fb->width, fb->height,
-		      obj_priv->gtt_offset, fbo);
+		      obj->gtt_offset, obj);
 
 
 	mutex_unlock(&dev->struct_mutex);
@@ -176,9 +168,9 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
 	return 0;
 
 out_unpin:
-	i915_gem_object_unpin(fbo);
+	i915_gem_object_unpin(obj);
 out_unref:
-	drm_gem_object_unreference(fbo);
+	drm_gem_object_unreference(&obj->base);
 	mutex_unlock(&dev->struct_mutex);
 out:
 	return ret;
@@ -225,7 +217,7 @@ static void intel_fbdev_destroy(struct drm_device *dev,
 
 	drm_framebuffer_cleanup(&ifb->base);
 	if (ifb->obj) {
-		drm_gem_object_unreference_unlocked(ifb->obj);
+		drm_gem_object_unreference_unlocked(&ifb->obj->base);
 		ifb->obj = NULL;
 	}
 }
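
Note: the intel_fb.c hunks above are part of a wider i915 cleanup that passes the driver-private struct drm_i915_gem_object around directly instead of the core struct drm_gem_object. The private object embeds the core one, which is why reference-counting calls become drm_gem_object_unreference(&obj->base). A minimal standalone sketch of that embedding pattern (the names here are illustrative stand-ins, not the real i915 definitions):

#include <stddef.h>

/* The driver-private object embeds the core object as a member, so
 * &obj->base converts outward, and an offsetof()-based cast (what
 * to_intel_bo() does via container_of() in the driver) converts back. */
struct gem_object { size_t size; };

struct i915_gem_object {
	struct gem_object base;		/* embedded core GEM object */
	unsigned int gtt_offset;	/* driver-private state */
};

#define to_bo(p) ((struct i915_gem_object *) \
	((char *)(p) - offsetof(struct i915_gem_object, base)))
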
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 3dba086e7eea..58040f68ed7a 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -85,8 +85,9 @@ static u32 get_reserved(struct intel_gpio *gpio)
 
 	/* On most chips, these bits must be preserved in software. */
 	if (!IS_I830(dev) && !IS_845G(dev))
-		reserved = I915_READ(gpio->reg) & (GPIO_DATA_PULLUP_DISABLE |
-						   GPIO_CLOCK_PULLUP_DISABLE);
+		reserved = I915_READ_NOTRACE(gpio->reg) &
+			(GPIO_DATA_PULLUP_DISABLE |
+			 GPIO_CLOCK_PULLUP_DISABLE);
 
 	return reserved;
 }
@@ -96,9 +97,9 @@ static int get_clock(void *data)
 	struct intel_gpio *gpio = data;
 	struct drm_i915_private *dev_priv = gpio->dev_priv;
 	u32 reserved = get_reserved(gpio);
-	I915_WRITE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
-	I915_WRITE(gpio->reg, reserved);
-	return (I915_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
+	I915_WRITE_NOTRACE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
+	I915_WRITE_NOTRACE(gpio->reg, reserved);
+	return (I915_READ_NOTRACE(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
 }
 
 static int get_data(void *data)
@@ -106,9 +107,9 @@ static int get_data(void *data)
 	struct intel_gpio *gpio = data;
 	struct drm_i915_private *dev_priv = gpio->dev_priv;
 	u32 reserved = get_reserved(gpio);
-	I915_WRITE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
-	I915_WRITE(gpio->reg, reserved);
-	return (I915_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
+	I915_WRITE_NOTRACE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
+	I915_WRITE_NOTRACE(gpio->reg, reserved);
+	return (I915_READ_NOTRACE(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
 }
 
 static void set_clock(void *data, int state_high)
@@ -124,7 +125,7 @@ static void set_clock(void *data, int state_high)
 		clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
 			GPIO_CLOCK_VAL_MASK;
 
-	I915_WRITE(gpio->reg, reserved | clock_bits);
+	I915_WRITE_NOTRACE(gpio->reg, reserved | clock_bits);
 	POSTING_READ(gpio->reg);
 }
 
@@ -141,7 +142,7 @@ static void set_data(void *data, int state_high)
 		data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
 			GPIO_DATA_VAL_MASK;
 
-	I915_WRITE(gpio->reg, reserved | data_bits);
+	I915_WRITE_NOTRACE(gpio->reg, reserved | data_bits);
 	POSTING_READ(gpio->reg);
 }
 
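
Note: the intel_i2c.c hunks switch the bit-banged GPIO path to the _NOTRACE register accessors; a bit-banged I2C transfer issues thousands of MMIO reads and writes, so skipping the per-access debug tracing keeps the trace log usable. The sketch below shows the read-modify-write shape of get_clock() against a simulated register; the bit layout and helpers are placeholders, not the real GPIO register:

#include <stdint.h>

#define PULLUP_DISABLE	(1u << 2)	/* must be preserved by software */
#define CLOCK_DIR_MASK	(1u << 0)	/* direction select */
#define CLOCK_VAL_IN	(1u << 1)	/* sampled line state */

static uint32_t fake_reg = CLOCK_VAL_IN;	/* line idles high */

static uint32_t read_reg(void) { return fake_reg; }
/* the simulation keeps the input bit so the later sample still works */
static void write_reg(uint32_t v) { fake_reg = v | (fake_reg & CLOCK_VAL_IN); }

static int get_clock(void)
{
	uint32_t reserved = read_reg() & PULLUP_DISABLE; /* get_reserved() */

	write_reg(reserved | CLOCK_DIR_MASK);	/* stop driving SCL */
	write_reg(reserved);			/* let the line float */
	return (read_reg() & CLOCK_VAL_IN) != 0;	/* sample it */
}

int main(void) { return get_clock() ? 0 : 1; }
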
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 25bcedf386fd..aa2307080be2 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -304,14 +304,13 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
 		u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
 		u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
 
-		pfit_control |= PFIT_ENABLE;
 		/* 965+ is easy, it does everything in hw */
 		if (scaled_width > scaled_height)
-			pfit_control |= PFIT_SCALING_PILLAR;
+			pfit_control |= PFIT_ENABLE | PFIT_SCALING_PILLAR;
 		else if (scaled_width < scaled_height)
-			pfit_control |= PFIT_SCALING_LETTER;
-		else
-			pfit_control |= PFIT_SCALING_AUTO;
+			pfit_control |= PFIT_ENABLE | PFIT_SCALING_LETTER;
+		else if (adjusted_mode->hdisplay != mode->hdisplay)
+			pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
 	} else {
 		u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
 		u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
@@ -358,13 +357,17 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
 		 * Full scaling, even if it changes the aspect ratio.
 		 * Fortunately this is all done for us in hw.
 		 */
-		pfit_control |= PFIT_ENABLE;
-		if (INTEL_INFO(dev)->gen >= 4)
-			pfit_control |= PFIT_SCALING_AUTO;
-		else
-			pfit_control |= (VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
-					 VERT_INTERP_BILINEAR |
-					 HORIZ_INTERP_BILINEAR);
+		if (mode->vdisplay != adjusted_mode->vdisplay ||
+		    mode->hdisplay != adjusted_mode->hdisplay) {
+			pfit_control |= PFIT_ENABLE;
+			if (INTEL_INFO(dev)->gen >= 4)
+				pfit_control |= PFIT_SCALING_AUTO;
+			else
+				pfit_control |= (VERT_AUTO_SCALE |
						 VERT_INTERP_BILINEAR |
						 HORIZ_AUTO_SCALE |
						 HORIZ_INTERP_BILINEAR);
+		}
 		break;
 
 	default:
@@ -914,6 +917,8 @@ bool intel_lvds_init(struct drm_device *dev)
 
 	intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
 	intel_encoder->crtc_mask = (1 << 1);
+	if (INTEL_INFO(dev)->gen >= 5)
+		intel_encoder->crtc_mask |= (1 << 0);
 	drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
 	drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
 	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
@@ -1019,10 +1024,18 @@ bool intel_lvds_init(struct drm_device *dev)
 out:
 	if (HAS_PCH_SPLIT(dev)) {
 		u32 pwm;
-		/* make sure PWM is enabled */
+
+		pipe = (I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT) ? 1 : 0;
+
+		/* make sure PWM is enabled and locked to the LVDS pipe */
 		pwm = I915_READ(BLC_PWM_CPU_CTL2);
-		pwm |= (PWM_ENABLE | PWM_PIPE_B);
-		I915_WRITE(BLC_PWM_CPU_CTL2, pwm);
+		if (pipe == 0 && (pwm & PWM_PIPE_B))
+			I915_WRITE(BLC_PWM_CPU_CTL2, pwm & ~PWM_ENABLE);
+		if (pipe)
+			pwm |= PWM_PIPE_B;
+		else
+			pwm &= ~PWM_PIPE_B;
+		I915_WRITE(BLC_PWM_CPU_CTL2, pwm | PWM_ENABLE);
 
 		pwm = I915_READ(BLC_PWM_PCH_CTL1);
 		pwm |= PWM_PCH_ENABLE;
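
Note: the pfit hunks above decide between pillarbox and letterbox by cross-multiplying the two aspect ratios, which avoids any division, and they now OR in PFIT_ENABLE only on the branches that actually scale. A standalone sketch of that comparison (illustrative names, not the driver's):

#include <stdio.h>

static const char *pfit_mode(unsigned mode_w, unsigned mode_h,
			     unsigned adj_w, unsigned adj_h)
{
	/* adj_w/adj_h vs mode_w/mode_h compared without dividing */
	unsigned long scaled_width  = (unsigned long)adj_w * mode_h;
	unsigned long scaled_height = (unsigned long)mode_w * adj_h;

	if (scaled_width > scaled_height)
		return "pillarbox";		/* panel wider than mode */
	if (scaled_width < scaled_height)
		return "letterbox";		/* panel taller than mode */
	if (adj_w != mode_w)
		return "auto scale";		/* same ratio, different size */
	return "no scaling";			/* identical: leave pfit off */
}

int main(void)
{
	/* 4:3 content on a 16:10 panel -> pillarbox */
	printf("%s\n", pfit_mode(1024, 768, 1280, 800));
	return 0;
}
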
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 9b0d9a867aea..f295a7aaadf9 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -273,14 +273,8 @@ void intel_opregion_enable_asle(struct drm_device *dev)
 	struct opregion_asle *asle = dev_priv->opregion.asle;
 
 	if (asle) {
-		if (IS_MOBILE(dev)) {
-			unsigned long irqflags;
-
-			spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+		if (IS_MOBILE(dev))
 			intel_enable_asle(dev);
-			spin_unlock_irqrestore(&dev_priv->user_irq_lock,
-					       irqflags);
-		}
 
 		asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
 			ASLE_PFMB_EN;
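
Note: the opregion hunk drops the user_irq_lock dance around intel_enable_asle(); in this series the interrupt-enable helper appears to take the (renamed) irq spinlock itself, so call sites collapse to one line. A hedged, userspace-flavoured sketch of moving a lock into the callee (stand-in code, not the i915 functions):

#include <pthread.h>

static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;
static int asle_irq_enabled;

/* Before: every caller locked around the call.  After: the helper owns
 * the locking, so no call site can get it wrong. */
static void enable_asle(void)
{
	pthread_mutex_lock(&irq_lock);
	asle_irq_enabled = 1;
	pthread_mutex_unlock(&irq_lock);
}

int main(void) { enable_asle(); return !asle_irq_enabled; }
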
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 02ff0a481f47..3fbb98b948d6 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -221,15 +221,16 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
 	int ret;
 
 	BUG_ON(overlay->last_flip_req);
-	overlay->last_flip_req =
-		i915_add_request(dev, NULL, request, &dev_priv->render_ring);
-	if (overlay->last_flip_req == 0)
-		return -ENOMEM;
-
+	ret = i915_add_request(dev, NULL, request, LP_RING(dev_priv));
+	if (ret) {
+		kfree(request);
+		return ret;
+	}
+	overlay->last_flip_req = request->seqno;
 	overlay->flip_tail = tail;
 	ret = i915_do_wait_request(dev,
 				   overlay->last_flip_req, true,
-				   &dev_priv->render_ring);
+				   LP_RING(dev_priv));
 	if (ret)
 		return ret;
 
@@ -289,6 +290,7 @@ i830_deactivate_pipe_a(struct drm_device *dev)
 static int intel_overlay_on(struct intel_overlay *overlay)
 {
 	struct drm_device *dev = overlay->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_request *request;
 	int pipe_a_quirk = 0;
 	int ret;
@@ -308,7 +310,12 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 		goto out;
 	}
 
-	BEGIN_LP_RING(4);
+	ret = BEGIN_LP_RING(4);
+	if (ret) {
+		kfree(request);
+		goto out;
+	}
+
 	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
 	OUT_RING(overlay->flip_addr | OFC_UPDATE);
 	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
@@ -332,6 +339,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 	struct drm_i915_gem_request *request;
 	u32 flip_addr = overlay->flip_addr;
 	u32 tmp;
+	int ret;
 
 	BUG_ON(!overlay->active);
 
@@ -347,36 +355,44 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 	if (tmp & (1 << 17))
 		DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
 
-	BEGIN_LP_RING(2);
+	ret = BEGIN_LP_RING(2);
+	if (ret) {
+		kfree(request);
+		return ret;
+	}
 	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
 	OUT_RING(flip_addr);
 	ADVANCE_LP_RING();
 
-	overlay->last_flip_req =
-		i915_add_request(dev, NULL, request, &dev_priv->render_ring);
+	ret = i915_add_request(dev, NULL, request, LP_RING(dev_priv));
+	if (ret) {
+		kfree(request);
+		return ret;
+	}
+
+	overlay->last_flip_req = request->seqno;
 	return 0;
 }
 
 static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
 {
-	struct drm_gem_object *obj = &overlay->old_vid_bo->base;
+	struct drm_i915_gem_object *obj = overlay->old_vid_bo;
 
 	i915_gem_object_unpin(obj);
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference(&obj->base);
 
 	overlay->old_vid_bo = NULL;
 }
 
 static void intel_overlay_off_tail(struct intel_overlay *overlay)
 {
-	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj = overlay->vid_bo;
 
 	/* never have the overlay hw on without showing a frame */
 	BUG_ON(!overlay->vid_bo);
-	obj = &overlay->vid_bo->base;
 
 	i915_gem_object_unpin(obj);
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference(&obj->base);
 	overlay->vid_bo = NULL;
 
 	overlay->crtc->overlay = NULL;
@@ -389,8 +405,10 @@ static int intel_overlay_off(struct intel_overlay *overlay,
 			     bool interruptible)
 {
 	struct drm_device *dev = overlay->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 flip_addr = overlay->flip_addr;
 	struct drm_i915_gem_request *request;
+	int ret;
 
 	BUG_ON(!overlay->active);
 
@@ -404,7 +422,11 @@ static int intel_overlay_off(struct intel_overlay *overlay,
 	 * of the hw. Do it in both cases */
 	flip_addr |= OFC_UPDATE;
 
-	BEGIN_LP_RING(6);
+	ret = BEGIN_LP_RING(6);
+	if (ret) {
+		kfree(request);
+		return ret;
+	}
 	/* wait for overlay to go idle */
 	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
 	OUT_RING(flip_addr);
@@ -432,7 +454,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
 		return 0;
 
 	ret = i915_do_wait_request(dev, overlay->last_flip_req,
-				   interruptible, &dev_priv->render_ring);
+				   interruptible, LP_RING(dev_priv));
 	if (ret)
 		return ret;
 
@@ -467,7 +489,12 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 	if (request == NULL)
 		return -ENOMEM;
 
-	BEGIN_LP_RING(2);
+	ret = BEGIN_LP_RING(2);
+	if (ret) {
+		kfree(request);
+		return ret;
+	}
+
 	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
 	OUT_RING(MI_NOOP);
 	ADVANCE_LP_RING();
@@ -736,13 +763,12 @@ static u32 overlay_cmd_reg(struct put_image_params *params)
 }
 
 static int intel_overlay_do_put_image(struct intel_overlay *overlay,
-				      struct drm_gem_object *new_bo,
+				      struct drm_i915_gem_object *new_bo,
 				      struct put_image_params *params)
 {
 	int ret, tmp_width;
 	struct overlay_registers *regs;
 	bool scale_changed = false;
-	struct drm_i915_gem_object *bo_priv = to_intel_bo(new_bo);
 	struct drm_device *dev = overlay->dev;
 
 	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -753,7 +779,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 	if (ret != 0)
 		return ret;
 
-	ret = i915_gem_object_pin(new_bo, PAGE_SIZE);
+	ret = i915_gem_object_pin(new_bo, PAGE_SIZE, true);
 	if (ret != 0)
 		return ret;
 
@@ -761,6 +787,10 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 	if (ret != 0)
 		goto out_unpin;
 
+	ret = i915_gem_object_put_fence(new_bo);
+	if (ret)
+		goto out_unpin;
+
 	if (!overlay->active) {
 		regs = intel_overlay_map_regs(overlay);
 		if (!regs) {
@@ -797,7 +827,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 	regs->SWIDTHSW = calc_swidthsw(overlay->dev,
 				       params->offset_Y, tmp_width);
 	regs->SHEIGHT = params->src_h;
-	regs->OBUF_0Y = bo_priv->gtt_offset + params-> offset_Y;
+	regs->OBUF_0Y = new_bo->gtt_offset + params-> offset_Y;
 	regs->OSTRIDE = params->stride_Y;
 
 	if (params->format & I915_OVERLAY_YUV_PLANAR) {
@@ -811,8 +841,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 				 params->src_w/uv_hscale);
 		regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16;
 		regs->SHEIGHT |= (params->src_h/uv_vscale) << 16;
-		regs->OBUF_0U = bo_priv->gtt_offset + params->offset_U;
-		regs->OBUF_0V = bo_priv->gtt_offset + params->offset_V;
+		regs->OBUF_0U = new_bo->gtt_offset + params->offset_U;
+		regs->OBUF_0V = new_bo->gtt_offset + params->offset_V;
 		regs->OSTRIDE |= params->stride_UV << 16;
 	}
 
@@ -829,7 +859,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 		goto out_unpin;
 
 	overlay->old_vid_bo = overlay->vid_bo;
-	overlay->vid_bo = to_intel_bo(new_bo);
+	overlay->vid_bo = new_bo;
 
 	return 0;
 
@@ -942,7 +972,7 @@ static int check_overlay_scaling(struct put_image_params *rec)
 
 static int check_overlay_src(struct drm_device *dev,
 			     struct drm_intel_overlay_put_image *rec,
-			     struct drm_gem_object *new_bo)
+			     struct drm_i915_gem_object *new_bo)
 {
 	int uv_hscale = uv_hsubsampling(rec->flags);
 	int uv_vscale = uv_vsubsampling(rec->flags);
@@ -1027,7 +1057,7 @@ static int check_overlay_src(struct drm_device *dev,
 			return -EINVAL;
 
 		tmp = rec->stride_Y*rec->src_height;
-		if (rec->offset_Y + tmp > new_bo->size)
+		if (rec->offset_Y + tmp > new_bo->base.size)
 			return -EINVAL;
 		break;
 
@@ -1038,12 +1068,12 @@ static int check_overlay_src(struct drm_device *dev,
 			return -EINVAL;
 
 		tmp = rec->stride_Y * rec->src_height;
-		if (rec->offset_Y + tmp > new_bo->size)
+		if (rec->offset_Y + tmp > new_bo->base.size)
 			return -EINVAL;
 
 		tmp = rec->stride_UV * (rec->src_height / uv_vscale);
-		if (rec->offset_U + tmp > new_bo->size ||
-		    rec->offset_V + tmp > new_bo->size)
+		if (rec->offset_U + tmp > new_bo->base.size ||
+		    rec->offset_V + tmp > new_bo->base.size)
 			return -EINVAL;
 		break;
 	}
@@ -1086,7 +1116,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
 	struct intel_overlay *overlay;
 	struct drm_mode_object *drmmode_obj;
 	struct intel_crtc *crtc;
-	struct drm_gem_object *new_bo;
+	struct drm_i915_gem_object *new_bo;
 	struct put_image_params *params;
 	int ret;
 
@@ -1125,8 +1155,8 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
 	}
 	crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
 
-	new_bo = drm_gem_object_lookup(dev, file_priv,
-				       put_image_rec->bo_handle);
+	new_bo = to_intel_bo(drm_gem_object_lookup(dev, file_priv,
+						   put_image_rec->bo_handle));
 	if (!new_bo) {
 		ret = -ENOENT;
 		goto out_free;
@@ -1135,6 +1165,12 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
 	mutex_lock(&dev->mode_config.mutex);
 	mutex_lock(&dev->struct_mutex);
 
+	if (new_bo->tiling_mode) {
+		DRM_ERROR("buffer used for overlay image can not be tiled\n");
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
 	ret = intel_overlay_recover_from_interrupt(overlay, true);
 	if (ret != 0)
 		goto out_unlock;
@@ -1217,7 +1253,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
 out_unlock:
 	mutex_unlock(&dev->struct_mutex);
 	mutex_unlock(&dev->mode_config.mutex);
-	drm_gem_object_unreference_unlocked(new_bo);
+	drm_gem_object_unreference_unlocked(&new_bo->base);
 out_free:
 	kfree(params);
 
@@ -1370,7 +1406,7 @@ void intel_setup_overlay(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_overlay *overlay;
-	struct drm_gem_object *reg_bo;
+	struct drm_i915_gem_object *reg_bo;
 	struct overlay_registers *regs;
 	int ret;
 
@@ -1385,7 +1421,7 @@ void intel_setup_overlay(struct drm_device *dev)
 	reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
 	if (!reg_bo)
 		goto out_free;
-	overlay->reg_bo = to_intel_bo(reg_bo);
+	overlay->reg_bo = reg_bo;
 
 	if (OVERLAY_NEEDS_PHYSICAL(dev)) {
 		ret = i915_gem_attach_phys_object(dev, reg_bo,
@@ -1395,14 +1431,14 @@ void intel_setup_overlay(struct drm_device *dev)
 			DRM_ERROR("failed to attach phys overlay regs\n");
 			goto out_free_bo;
 		}
-		overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr;
+		overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
 	} else {
-		ret = i915_gem_object_pin(reg_bo, PAGE_SIZE);
+		ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true);
 		if (ret) {
 			DRM_ERROR("failed to pin overlay register bo\n");
 			goto out_free_bo;
 		}
-		overlay->flip_addr = overlay->reg_bo->gtt_offset;
+		overlay->flip_addr = reg_bo->gtt_offset;
 
 		ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
 		if (ret) {
@@ -1434,7 +1470,7 @@ void intel_setup_overlay(struct drm_device *dev)
 out_unpin_bo:
 	i915_gem_object_unpin(reg_bo);
 out_free_bo:
-	drm_gem_object_unreference(reg_bo);
+	drm_gem_object_unreference(&reg_bo->base);
 out_free:
 	kfree(overlay);
 	return;
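
Note: a recurring shape in the intel_overlay.c hunks is that ring operations (BEGIN_LP_RING, i915_add_request) can now fail, so the pre-allocated request must be freed on every early exit and the sequence number is read from the request on success instead of being a u32 return value. A standalone sketch of that ownership pattern (stub helpers, not the driver's):

#include <stdlib.h>

struct request { unsigned seqno; };

/* Stubs standing in for the ring/request helpers. */
static int ring_begin(int dwords) { (void)dwords; return 0; }
static int add_request(struct request *rq) { rq->seqno = 1; return 0; }

static int queue_flip(struct request *rq, unsigned *last_flip)
{
	int ret = ring_begin(4);
	if (ret) {
		free(rq);		/* ownership stays here on failure */
		return ret;
	}
	ret = add_request(rq);
	if (ret) {
		free(rq);
		return ret;
	}
	*last_flip = rq->seqno;		/* publish only on success; the
					 * ring owns rq from here on */
	return 0;
}

int main(void)
{
	unsigned last = 0;
	struct request *rq = malloc(sizeof(*rq));
	return rq ? queue_flip(rq, &last) : 1;
}
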
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 92ff8f385278..7350ec2515c6 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -125,15 +125,55 @@ static int is_backlight_combination_mode(struct drm_device *dev)
 	return 0;
 }
 
+static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
+{
+	u32 val;
+
+	/* Restore the CTL value if it lost, e.g. GPU reset */
+
+	if (HAS_PCH_SPLIT(dev_priv->dev)) {
+		val = I915_READ(BLC_PWM_PCH_CTL2);
+		if (dev_priv->saveBLC_PWM_CTL2 == 0) {
+			dev_priv->saveBLC_PWM_CTL2 = val;
+		} else if (val == 0) {
+			I915_WRITE(BLC_PWM_PCH_CTL2,
+				   dev_priv->saveBLC_PWM_CTL);
+			val = dev_priv->saveBLC_PWM_CTL;
+		}
+	} else {
+		val = I915_READ(BLC_PWM_CTL);
+		if (dev_priv->saveBLC_PWM_CTL == 0) {
+			dev_priv->saveBLC_PWM_CTL = val;
+			dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+		} else if (val == 0) {
+			I915_WRITE(BLC_PWM_CTL,
+				   dev_priv->saveBLC_PWM_CTL);
+			I915_WRITE(BLC_PWM_CTL2,
+				   dev_priv->saveBLC_PWM_CTL2);
+			val = dev_priv->saveBLC_PWM_CTL;
+		}
+	}
+
+	return val;
+}
+
 u32 intel_panel_get_max_backlight(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 max;
 
+	max = i915_read_blc_pwm_ctl(dev_priv);
+	if (max == 0) {
+		/* XXX add code here to query mode clock or hardware clock
+		 * and program max PWM appropriately.
+		 */
+		printk_once(KERN_WARNING "fixme: max PWM is zero.\n");
+		return 1;
+	}
+
 	if (HAS_PCH_SPLIT(dev)) {
-		max = I915_READ(BLC_PWM_PCH_CTL2) >> 16;
+		max >>= 16;
 	} else {
-		max = I915_READ(BLC_PWM_CTL);
 		if (IS_PINEVIEW(dev)) {
 			max >>= 17;
 		} else {
@@ -146,14 +186,6 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
 			max *= 0xff;
 	}
 
-	if (max == 0) {
-		/* XXX add code here to query mode clock or hardware clock
-		 * and program max PWM appropriately.
-		 */
-		DRM_ERROR("fixme: max PWM is zero.\n");
-		max = 1;
-	}
-
 	DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
 	return max;
 }
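
Note: the new i915_read_blc_pwm_ctl() helper caches the backlight control register the first time it reads a value, and writes the cached value back if the register later reads zero (as it can after a GPU reset); the max-backlight math then proceeds from that value (on PCH platforms the maximum is its top 16 bits, hence max >>= 16). A standalone sketch of the save/restore idea against a simulated register:

#include <stdint.h>

static uint32_t pwm_reg;	/* simulated BLC_PWM control register */
static uint32_t saved_ctl;	/* analogue of dev_priv->saveBLC_PWM_CTL */

static uint32_t read_blc_pwm_ctl(void)
{
	uint32_t val = pwm_reg;

	if (saved_ctl == 0)
		saved_ctl = val;		/* first read: remember it */
	else if (val == 0)
		pwm_reg = val = saved_ctl;	/* reset wiped it: restore */

	return val;
}

int main(void)
{
	pwm_reg = 0x1e001e00;			/* firmware-programmed value */
	(void)read_blc_pwm_ctl();		/* caches it */
	pwm_reg = 0;				/* simulate a GPU reset */
	return read_blc_pwm_ctl() == 0x1e001e00 ? 0 : 1;
}
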
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 31cd7e33e820..56bc95c056dd 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -49,11 +49,11 @@ static u32 i915_gem_get_seqno(struct drm_device *dev)
 }
 
 static void
-render_ring_flush(struct drm_device *dev,
-		  struct intel_ring_buffer *ring,
+render_ring_flush(struct intel_ring_buffer *ring,
 		  u32 invalidate_domains,
 		  u32 flush_domains)
 {
+	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 cmd;
 
@@ -109,49 +109,50 @@ render_ring_flush(struct drm_device *dev,
 		if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
 			cmd |= MI_EXE_FLUSH;
 
+		if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
+		    (IS_G4X(dev) || IS_GEN5(dev)))
+			cmd |= MI_INVALIDATE_ISP;
+
 #if WATCH_EXEC
 		DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
 #endif
-		intel_ring_begin(dev, ring, 2);
-		intel_ring_emit(dev, ring, cmd);
-		intel_ring_emit(dev, ring, MI_NOOP);
-		intel_ring_advance(dev, ring);
+		if (intel_ring_begin(ring, 2) == 0) {
+			intel_ring_emit(ring, cmd);
+			intel_ring_emit(ring, MI_NOOP);
+			intel_ring_advance(ring);
+		}
 	}
 }
 
-static void ring_write_tail(struct drm_device *dev,
-			    struct intel_ring_buffer *ring,
+static void ring_write_tail(struct intel_ring_buffer *ring,
 			    u32 value)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 	I915_WRITE_TAIL(ring, value);
 }
 
-u32 intel_ring_get_active_head(struct drm_device *dev,
-			       struct intel_ring_buffer *ring)
+u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	u32 acthd_reg = INTEL_INFO(dev)->gen >= 4 ?
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
+	u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
 		RING_ACTHD(ring->mmio_base) : ACTHD;
 
 	return I915_READ(acthd_reg);
 }
 
-static int init_ring_common(struct drm_device *dev,
-			    struct intel_ring_buffer *ring)
+static int init_ring_common(struct intel_ring_buffer *ring)
 {
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
+	struct drm_i915_gem_object *obj = ring->obj;
 	u32 head;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv;
-	obj_priv = to_intel_bo(ring->gem_object);
 
 	/* Stop the ring if it's running. */
 	I915_WRITE_CTL(ring, 0);
 	I915_WRITE_HEAD(ring, 0);
-	ring->write_tail(dev, ring, 0);
+	ring->write_tail(ring, 0);
 
 	/* Initialize the ring. */
-	I915_WRITE_START(ring, obj_priv->gtt_offset);
+	I915_WRITE_START(ring, obj->gtt_offset);
 	head = I915_READ_HEAD(ring) & HEAD_ADDR;
 
 	/* G45 ring initialization fails to reset head to zero */
@@ -178,12 +179,13 @@ static int init_ring_common(struct drm_device *dev,
 	}
 
 	I915_WRITE_CTL(ring,
-			((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
+			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
 			| RING_REPORT_64K | RING_VALID);
 
-	head = I915_READ_HEAD(ring) & HEAD_ADDR;
 	/* If the head is still not zero, the ring is dead */
-	if (head != 0) {
+	if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
+	    I915_READ_START(ring) != obj->gtt_offset ||
+	    (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
 		DRM_ERROR("%s initialization failed "
 				"ctl %08x head %08x tail %08x start %08x\n",
 				ring->name,
@@ -194,8 +196,8 @@ static int init_ring_common(struct drm_device *dev,
 		return -EIO;
 	}
 
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		i915_kernel_lost_context(dev);
+	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
+		i915_kernel_lost_context(ring->dev);
 	else {
 		ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
 		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
@@ -203,335 +205,500 @@ static int init_ring_common(struct drm_device *dev,
 		if (ring->space < 0)
 			ring->space += ring->size;
 	}
+
 	return 0;
 }
 
-static int init_render_ring(struct drm_device *dev,
-			    struct intel_ring_buffer *ring)
+/*
+ * 965+ support PIPE_CONTROL commands, which provide finer grained control
+ * over cache flushing.
+ */
+struct pipe_control {
+	struct drm_i915_gem_object *obj;
+	volatile u32 *cpu_page;
+	u32 gtt_offset;
+};
+
+static int
+init_pipe_control(struct intel_ring_buffer *ring)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	int ret = init_ring_common(dev, ring);
-	int mode;
+	struct pipe_control *pc;
+	struct drm_i915_gem_object *obj;
+	int ret;
+
+	if (ring->private)
+		return 0;
+
+	pc = kmalloc(sizeof(*pc), GFP_KERNEL);
+	if (!pc)
+		return -ENOMEM;
+
+	obj = i915_gem_alloc_object(ring->dev, 4096);
+	if (obj == NULL) {
+		DRM_ERROR("Failed to allocate seqno page\n");
+		ret = -ENOMEM;
+		goto err;
+	}
+	obj->agp_type = AGP_USER_CACHED_MEMORY;
+
+	ret = i915_gem_object_pin(obj, 4096, true);
+	if (ret)
+		goto err_unref;
+
+	pc->gtt_offset = obj->gtt_offset;
+	pc->cpu_page = kmap(obj->pages[0]);
+	if (pc->cpu_page == NULL)
+		goto err_unpin;
+
+	pc->obj = obj;
+	ring->private = pc;
+	return 0;
+
+err_unpin:
+	i915_gem_object_unpin(obj);
+err_unref:
+	drm_gem_object_unreference(&obj->base);
+err:
+	kfree(pc);
+	return ret;
+}
+
+static void
+cleanup_pipe_control(struct intel_ring_buffer *ring)
+{
+	struct pipe_control *pc = ring->private;
+	struct drm_i915_gem_object *obj;
+
+	if (!ring->private)
+		return;
+
+	obj = pc->obj;
+	kunmap(obj->pages[0]);
+	i915_gem_object_unpin(obj);
+	drm_gem_object_unreference(&obj->base);
+
+	kfree(pc);
+	ring->private = NULL;
+}
+
+static int init_render_ring(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret = init_ring_common(ring);
 
 	if (INTEL_INFO(dev)->gen > 3) {
-		mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
+		int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
 		if (IS_GEN6(dev))
 			mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
 		I915_WRITE(MI_MODE, mode);
 	}
+
+	if (INTEL_INFO(dev)->gen >= 6) {
+	} else if (IS_GEN5(dev)) {
+		ret = init_pipe_control(ring);
+		if (ret)
+			return ret;
+	}
+
 	return ret;
 }
 
-#define PIPE_CONTROL_FLUSH(addr) \
+static void render_ring_cleanup(struct intel_ring_buffer *ring)
+{
+	if (!ring->private)
+		return;
+
+	cleanup_pipe_control(ring);
+}
+
+static void
+update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int id;
+
+	/*
+	 * cs -> 1 = vcs, 0 = bcs
+	 * vcs -> 1 = bcs, 0 = cs,
+	 * bcs -> 1 = cs, 0 = vcs.
+	 */
+	id = ring - dev_priv->ring;
+	id += 2 - i;
+	id %= 3;
+
+	intel_ring_emit(ring,
+			MI_SEMAPHORE_MBOX |
+			MI_SEMAPHORE_REGISTER |
+			MI_SEMAPHORE_UPDATE);
+	intel_ring_emit(ring, seqno);
+	intel_ring_emit(ring,
+			RING_SYNC_0(dev_priv->ring[id].mmio_base) + 4*i);
+}
+
+static int
+gen6_add_request(struct intel_ring_buffer *ring,
+		 u32 *result)
+{
+	u32 seqno;
+	int ret;
+
+	ret = intel_ring_begin(ring, 10);
+	if (ret)
+		return ret;
+
+	seqno = i915_gem_get_seqno(ring->dev);
+	update_semaphore(ring, 0, seqno);
+	update_semaphore(ring, 1, seqno);
+
+	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+	intel_ring_emit(ring, seqno);
+	intel_ring_emit(ring, MI_USER_INTERRUPT);
+	intel_ring_advance(ring);
+
+	*result = seqno;
+	return 0;
+}
+
+int
+intel_ring_sync(struct intel_ring_buffer *ring,
+		struct intel_ring_buffer *to,
+		u32 seqno)
+{
+	int ret;
+
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring,
+			MI_SEMAPHORE_MBOX |
+			MI_SEMAPHORE_REGISTER |
+			intel_ring_sync_index(ring, to) << 17 |
+			MI_SEMAPHORE_COMPARE);
+	intel_ring_emit(ring, seqno);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+#define PIPE_CONTROL_FLUSH(ring__, addr__)				\
 do {									\
-	OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |		\
+	intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
 		PIPE_CONTROL_DEPTH_STALL | 2);				\
-	OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT);			\
-	OUT_RING(0);							\
-	OUT_RING(0);							\
+	intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);	\
+	intel_ring_emit(ring__, 0);					\
+	intel_ring_emit(ring__, 0);					\
 } while (0)
 
-/**
- * Creates a new sequence number, emitting a write of it to the status page
- * plus an interrupt, which will trigger i915_user_interrupt_handler.
- *
- * Must be called with struct_lock held.
- *
- * Returned sequence numbers are nonzero on success.
- */
-static u32
-render_ring_add_request(struct drm_device *dev,
-			struct intel_ring_buffer *ring,
-			u32 flush_domains)
+static int
+pc_render_add_request(struct intel_ring_buffer *ring,
+		      u32 *result)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	u32 seqno;
-
-	seqno = i915_gem_get_seqno(dev);
-
-	if (IS_GEN6(dev)) {
-		BEGIN_LP_RING(6);
-		OUT_RING(GFX_OP_PIPE_CONTROL | 3);
-		OUT_RING(PIPE_CONTROL_QW_WRITE |
-			 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH |
-			 PIPE_CONTROL_NOTIFY);
-		OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
-		OUT_RING(seqno);
-		OUT_RING(0);
-		OUT_RING(0);
-		ADVANCE_LP_RING();
-	} else if (HAS_PIPE_CONTROL(dev)) {
-		u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;
+	struct drm_device *dev = ring->dev;
+	u32 seqno = i915_gem_get_seqno(dev);
+	struct pipe_control *pc = ring->private;
+	u32 scratch_addr = pc->gtt_offset + 128;
+	int ret;
 
-		/*
-		 * Workaround qword write incoherence by flushing the
-		 * PIPE_NOTIFY buffers out to memory before requesting
-		 * an interrupt.
-		 */
-		BEGIN_LP_RING(32);
-		OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
-			 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
-		OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
-		OUT_RING(seqno);
-		OUT_RING(0);
-		PIPE_CONTROL_FLUSH(scratch_addr);
-		scratch_addr += 128; /* write to separate cachelines */
-		PIPE_CONTROL_FLUSH(scratch_addr);
-		scratch_addr += 128;
-		PIPE_CONTROL_FLUSH(scratch_addr);
-		scratch_addr += 128;
-		PIPE_CONTROL_FLUSH(scratch_addr);
-		scratch_addr += 128;
-		PIPE_CONTROL_FLUSH(scratch_addr);
-		scratch_addr += 128;
-		PIPE_CONTROL_FLUSH(scratch_addr);
-		OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
-			 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
-			 PIPE_CONTROL_NOTIFY);
-		OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
-		OUT_RING(seqno);
-		OUT_RING(0);
-		ADVANCE_LP_RING();
-	} else {
-		BEGIN_LP_RING(4);
-		OUT_RING(MI_STORE_DWORD_INDEX);
-		OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-		OUT_RING(seqno);
-
-		OUT_RING(MI_USER_INTERRUPT);
-		ADVANCE_LP_RING();
-	}
-	return seqno;
+	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
+	 * incoherent with writes to memory, i.e. completely fubar,
+	 * so we need to use PIPE_NOTIFY instead.
+	 *
+	 * However, we also need to workaround the qword write
+	 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
+	 * memory before requesting an interrupt.
+	 */
+	ret = intel_ring_begin(ring, 32);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+			PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
+	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(ring, seqno);
+	intel_ring_emit(ring, 0);
+	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	scratch_addr += 128; /* write to separate cachelines */
+	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	scratch_addr += 128;
+	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	scratch_addr += 128;
+	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	scratch_addr += 128;
+	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	scratch_addr += 128;
+	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+			PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
+			PIPE_CONTROL_NOTIFY);
+	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(ring, seqno);
+	intel_ring_emit(ring, 0);
+	intel_ring_advance(ring);
+
+	*result = seqno;
+	return 0;
+}
+
+static int
+render_ring_add_request(struct intel_ring_buffer *ring,
+			u32 *result)
+{
+	struct drm_device *dev = ring->dev;
+	u32 seqno = i915_gem_get_seqno(dev);
+	int ret;
+
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+	intel_ring_emit(ring, seqno);
+	intel_ring_emit(ring, MI_USER_INTERRUPT);
+	intel_ring_advance(ring);
+
+	*result = seqno;
+	return 0;
 }
 
 static u32
-render_ring_get_seqno(struct drm_device *dev,
-		      struct intel_ring_buffer *ring)
+ring_get_seqno(struct intel_ring_buffer *ring)
 {
-	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	if (HAS_PIPE_CONTROL(dev))
-		return ((volatile u32 *)(dev_priv->seqno_page))[0];
-	else
-		return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 }
 
-static void
-render_ring_get_user_irq(struct drm_device *dev,
-			 struct intel_ring_buffer *ring)
+static u32
+pc_render_get_seqno(struct intel_ring_buffer *ring)
 {
-	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	unsigned long irqflags;
+	struct pipe_control *pc = ring->private;
+	return pc->cpu_page[0];
+}
+
+static bool
+render_ring_get_irq(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+
+	if (!dev->irq_enabled)
+		return false;
+
+	if (atomic_inc_return(&ring->irq_refcount) == 1) {
+		drm_i915_private_t *dev_priv = dev->dev_private;
+		unsigned long irqflags;
 
-	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-	if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) {
+		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 		if (HAS_PCH_SPLIT(dev))
-			ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
+			ironlake_enable_graphics_irq(dev_priv,
+						     GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
 		else
 			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
+		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 	}
-	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+
+	return true;
 }
 
 static void
-render_ring_put_user_irq(struct drm_device *dev,
-			 struct intel_ring_buffer *ring)
+render_ring_put_irq(struct intel_ring_buffer *ring)
 {
-	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	unsigned long irqflags;
+	struct drm_device *dev = ring->dev;
+
+	if (atomic_dec_and_test(&ring->irq_refcount)) {
+		drm_i915_private_t *dev_priv = dev->dev_private;
+		unsigned long irqflags;
 
-	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-	BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0);
-	if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) {
+		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 		if (HAS_PCH_SPLIT(dev))
-			ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
+			ironlake_disable_graphics_irq(dev_priv,
+						      GT_USER_INTERRUPT |
+						      GT_PIPE_NOTIFY);
 		else
 			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
+		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 	}
-	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
 }
 
-void intel_ring_setup_status_page(struct drm_device *dev,
-				  struct intel_ring_buffer *ring)
+void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	if (IS_GEN6(dev)) {
-		I915_WRITE(RING_HWS_PGA_GEN6(ring->mmio_base),
-			   ring->status_page.gfx_addr);
-		I915_READ(RING_HWS_PGA_GEN6(ring->mmio_base)); /* posting read */
-	} else {
-		I915_WRITE(RING_HWS_PGA(ring->mmio_base),
-			   ring->status_page.gfx_addr);
-		I915_READ(RING_HWS_PGA(ring->mmio_base)); /* posting read */
-	}
-
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
+	u32 mmio = IS_GEN6(ring->dev) ?
+		RING_HWS_PGA_GEN6(ring->mmio_base) :
+		RING_HWS_PGA(ring->mmio_base);
+	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
+	POSTING_READ(mmio);
 }
 
 static void
-bsd_ring_flush(struct drm_device *dev,
-	       struct intel_ring_buffer *ring,
+bsd_ring_flush(struct intel_ring_buffer *ring,
 	       u32 invalidate_domains,
 	       u32 flush_domains)
 {
-	intel_ring_begin(dev, ring, 2);
-	intel_ring_emit(dev, ring, MI_FLUSH);
-	intel_ring_emit(dev, ring, MI_NOOP);
-	intel_ring_advance(dev, ring);
-}
-
-static int init_bsd_ring(struct drm_device *dev,
-			 struct intel_ring_buffer *ring)
-{
-	return init_ring_common(dev, ring);
+	if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
+		return;
+
+	if (intel_ring_begin(ring, 2) == 0) {
+		intel_ring_emit(ring, MI_FLUSH);
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_advance(ring);
+	}
 }
 
-static u32
-ring_add_request(struct drm_device *dev,
-		 struct intel_ring_buffer *ring,
-		 u32 flush_domains)
+static int
+ring_add_request(struct intel_ring_buffer *ring,
+		 u32 *result)
 {
 	u32 seqno;
+	int ret;
+
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		return ret;
 
-	seqno = i915_gem_get_seqno(dev);
+	seqno = i915_gem_get_seqno(ring->dev);
 
-	intel_ring_begin(dev, ring, 4);
-	intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
-	intel_ring_emit(dev, ring,
-			I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(dev, ring, seqno);
-	intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
-	intel_ring_advance(dev, ring);
+	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+	intel_ring_emit(ring, seqno);
+	intel_ring_emit(ring, MI_USER_INTERRUPT);
+	intel_ring_advance(ring);
 
 	DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
-
-	return seqno;
+	*result = seqno;
+	return 0;
 }
 
-static void
-bsd_ring_get_user_irq(struct drm_device *dev,
-		      struct intel_ring_buffer *ring)
+static bool
+ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
 {
-	/* do nothing */
+	struct drm_device *dev = ring->dev;
+
+	if (!dev->irq_enabled)
+		return false;
+
+	if (atomic_inc_return(&ring->irq_refcount) == 1) {
+		drm_i915_private_t *dev_priv = dev->dev_private;
+		unsigned long irqflags;
+
+		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+		ironlake_enable_graphics_irq(dev_priv, flag);
+		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+	}
+
+	return true;
 }
+
 static void
-bsd_ring_put_user_irq(struct drm_device *dev,
-		      struct intel_ring_buffer *ring)
+ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
 {
-	/* do nothing */
+	struct drm_device *dev = ring->dev;
+
+	if (atomic_dec_and_test(&ring->irq_refcount)) {
+		drm_i915_private_t *dev_priv = dev->dev_private;
+		unsigned long irqflags;
+
+		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+		ironlake_disable_graphics_irq(dev_priv, flag);
+		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+	}
 }
 
-static u32
-ring_status_page_get_seqno(struct drm_device *dev,
-			   struct intel_ring_buffer *ring)
+static bool
+bsd_ring_get_irq(struct intel_ring_buffer *ring)
 {
-	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+	return ring_get_irq(ring, GT_BSD_USER_INTERRUPT);
+}
+static void
+bsd_ring_put_irq(struct intel_ring_buffer *ring)
+{
+	ring_put_irq(ring, GT_BSD_USER_INTERRUPT);
 }
 
 static int
-ring_dispatch_gem_execbuffer(struct drm_device *dev,
-			     struct intel_ring_buffer *ring,
-			     struct drm_i915_gem_execbuffer2 *exec,
-			     struct drm_clip_rect *cliprects,
-			     uint64_t exec_offset)
+ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
 {
-	uint32_t exec_start;
-	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
-	intel_ring_begin(dev, ring, 2);
-	intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START |
-			(2 << 6) | MI_BATCH_NON_SECURE_I965);
-	intel_ring_emit(dev, ring, exec_start);
-	intel_ring_advance(dev, ring);
+	int ret;
+
+	ret = intel_ring_begin(ring, 2);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring,
+			MI_BATCH_BUFFER_START | (2 << 6) |
+			MI_BATCH_NON_SECURE_I965);
+	intel_ring_emit(ring, offset);
+	intel_ring_advance(ring);
+
 	return 0;
 }
 
 static int
-render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
-				    struct intel_ring_buffer *ring,
-				    struct drm_i915_gem_execbuffer2 *exec,
-				    struct drm_clip_rect *cliprects,
-				    uint64_t exec_offset)
+render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+				u32 offset, u32 len)
 {
+	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int nbox = exec->num_cliprects;
-	int i = 0, count;
-	uint32_t exec_start, exec_len;
-	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
-	exec_len = (uint32_t) exec->batch_len;
+	int ret;
 
 	trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1);
 
-	count = nbox ? nbox : 1;
-
-	for (i = 0; i < count; i++) {
-		if (i < nbox) {
-			int ret = i915_emit_box(dev, cliprects, i,
-						exec->DR1, exec->DR4);
-			if (ret)
-				return ret;
-		}
+	if (IS_I830(dev) || IS_845G(dev)) {
+		ret = intel_ring_begin(ring, 4);
+		if (ret)
+			return ret;
 
-		if (IS_I830(dev) || IS_845G(dev)) {
-			intel_ring_begin(dev, ring, 4);
-			intel_ring_emit(dev, ring, MI_BATCH_BUFFER);
-			intel_ring_emit(dev, ring,
-					exec_start | MI_BATCH_NON_SECURE);
-			intel_ring_emit(dev, ring, exec_start + exec_len - 4);
-			intel_ring_emit(dev, ring, 0);
+		intel_ring_emit(ring, MI_BATCH_BUFFER);
+		intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
+		intel_ring_emit(ring, offset + len - 8);
+		intel_ring_emit(ring, 0);
+	} else {
+		ret = intel_ring_begin(ring, 2);
+		if (ret)
+			return ret;
+
+		if (INTEL_INFO(dev)->gen >= 4) {
+			intel_ring_emit(ring,
+					MI_BATCH_BUFFER_START | (2 << 6) |
+					MI_BATCH_NON_SECURE_I965);
+			intel_ring_emit(ring, offset);
 		} else {
-			intel_ring_begin(dev, ring, 2);
-			if (INTEL_INFO(dev)->gen >= 4) {
-				intel_ring_emit(dev, ring,
-						MI_BATCH_BUFFER_START | (2 << 6)
-						| MI_BATCH_NON_SECURE_I965);
-				intel_ring_emit(dev, ring, exec_start);
-			} else {
-				intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START
-						| (2 << 6));
-				intel_ring_emit(dev, ring, exec_start |
-						MI_BATCH_NON_SECURE);
-			}
+			intel_ring_emit(ring,
+					MI_BATCH_BUFFER_START | (2 << 6));
+			intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
 		}
-		intel_ring_advance(dev, ring);
 	}
-
-	if (IS_G4X(dev) || IS_GEN5(dev)) {
-		intel_ring_begin(dev, ring, 2);
-		intel_ring_emit(dev, ring, MI_FLUSH |
-				MI_NO_WRITE_FLUSH |
-				MI_INVALIDATE_ISP );
-		intel_ring_emit(dev, ring, MI_NOOP);
-		intel_ring_advance(dev, ring);
-	}
-	/* XXX breadcrumb */
+	intel_ring_advance(ring);
 
 	return 0;
 }
 
-static void cleanup_status_page(struct drm_device *dev,
-				struct intel_ring_buffer *ring)
+static void cleanup_status_page(struct intel_ring_buffer *ring)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
+	struct drm_i915_gem_object *obj;
 
 	obj = ring->status_page.obj;
 	if (obj == NULL)
 		return;
-	obj_priv = to_intel_bo(obj);
 
-	kunmap(obj_priv->pages[0]);
+	kunmap(obj->pages[0]);
 	i915_gem_object_unpin(obj);
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference(&obj->base);
 	ring->status_page.obj = NULL;
 
 	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
 }
 
-static int init_status_page(struct drm_device *dev,
-			    struct intel_ring_buffer *ring)
+static int init_status_page(struct intel_ring_buffer *ring)
 {
+	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	int ret;
 
 	obj = i915_gem_alloc_object(dev, 4096);
@@ -540,16 +707,15 @@ static int init_status_page(struct drm_device *dev,
 		ret = -ENOMEM;
 		goto err;
 	}
-	obj_priv = to_intel_bo(obj);
-	obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
+	obj->agp_type = AGP_USER_CACHED_MEMORY;
 
-	ret = i915_gem_object_pin(obj, 4096);
+	ret = i915_gem_object_pin(obj, 4096, true);
 	if (ret != 0) {
 		goto err_unref;
 	}
 
-	ring->status_page.gfx_addr = obj_priv->gtt_offset;
-	ring->status_page.page_addr = kmap(obj_priv->pages[0]);
+	ring->status_page.gfx_addr = obj->gtt_offset;
+	ring->status_page.page_addr = kmap(obj->pages[0]);
 	if (ring->status_page.page_addr == NULL) {
 		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
 		goto err_unpin;
@@ -557,7 +723,7 @@ static int init_status_page(struct drm_device *dev,
557 ring->status_page.obj = obj; 723 ring->status_page.obj = obj;
558 memset(ring->status_page.page_addr, 0, PAGE_SIZE); 724 memset(ring->status_page.page_addr, 0, PAGE_SIZE);
559 725
560 intel_ring_setup_status_page(dev, ring); 726 intel_ring_setup_status_page(ring);
561 DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", 727 DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
562 ring->name, ring->status_page.gfx_addr); 728 ring->name, ring->status_page.gfx_addr);
563 729
@@ -566,7 +732,7 @@ static int init_status_page(struct drm_device *dev,
566err_unpin: 732err_unpin:
567 i915_gem_object_unpin(obj); 733 i915_gem_object_unpin(obj);
568err_unref: 734err_unref:
569 drm_gem_object_unreference(obj); 735 drm_gem_object_unreference(&obj->base);
570err: 736err:
571 return ret; 737 return ret;
572} 738}
@@ -574,9 +740,7 @@ err:
574int intel_init_ring_buffer(struct drm_device *dev, 740int intel_init_ring_buffer(struct drm_device *dev,
575 struct intel_ring_buffer *ring) 741 struct intel_ring_buffer *ring)
576{ 742{
577 struct drm_i915_private *dev_priv = dev->dev_private; 743 struct drm_i915_gem_object *obj;
578 struct drm_i915_gem_object *obj_priv;
579 struct drm_gem_object *obj;
580 int ret; 744 int ret;
581 745
582 ring->dev = dev; 746 ring->dev = dev;
@@ -585,7 +749,7 @@ int intel_init_ring_buffer(struct drm_device *dev,
585 INIT_LIST_HEAD(&ring->gpu_write_list); 749 INIT_LIST_HEAD(&ring->gpu_write_list);
586 750
587 if (I915_NEED_GFX_HWS(dev)) { 751 if (I915_NEED_GFX_HWS(dev)) {
588 ret = init_status_page(dev, ring); 752 ret = init_status_page(ring);
589 if (ret) 753 if (ret)
590 return ret; 754 return ret;
591 } 755 }
@@ -597,15 +761,14 @@ int intel_init_ring_buffer(struct drm_device *dev,
597 goto err_hws; 761 goto err_hws;
598 } 762 }
599 763
600 ring->gem_object = obj; 764 ring->obj = obj;
601 765
602 ret = i915_gem_object_pin(obj, PAGE_SIZE); 766 ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
603 if (ret) 767 if (ret)
604 goto err_unref; 768 goto err_unref;
605 769
606 obj_priv = to_intel_bo(obj);
607 ring->map.size = ring->size; 770 ring->map.size = ring->size;
608 ring->map.offset = dev->agp->base + obj_priv->gtt_offset; 771 ring->map.offset = dev->agp->base + obj->gtt_offset;
609 ring->map.type = 0; 772 ring->map.type = 0;
610 ring->map.flags = 0; 773 ring->map.flags = 0;
611 ring->map.mtrr = 0; 774 ring->map.mtrr = 0;
@@ -618,60 +781,57 @@ int intel_init_ring_buffer(struct drm_device *dev,
618 } 781 }
619 782
620 ring->virtual_start = ring->map.handle; 783 ring->virtual_start = ring->map.handle;
621 ret = ring->init(dev, ring); 784 ret = ring->init(ring);
622 if (ret) 785 if (ret)
623 goto err_unmap; 786 goto err_unmap;
624 787
625 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 788 return 0;
626 i915_kernel_lost_context(dev);
627 else {
628 ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
629 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
630 ring->space = ring->head - (ring->tail + 8);
631 if (ring->space < 0)
632 ring->space += ring->size;
633 }
634 return ret;
635 789
636err_unmap: 790err_unmap:
637 drm_core_ioremapfree(&ring->map, dev); 791 drm_core_ioremapfree(&ring->map, dev);
638err_unpin: 792err_unpin:
639 i915_gem_object_unpin(obj); 793 i915_gem_object_unpin(obj);
640err_unref: 794err_unref:
641 drm_gem_object_unreference(obj); 795 drm_gem_object_unreference(&obj->base);
642 ring->gem_object = NULL; 796 ring->obj = NULL;
643err_hws: 797err_hws:
644 cleanup_status_page(dev, ring); 798 cleanup_status_page(ring);
645 return ret; 799 return ret;
646} 800}
647 801
648void intel_cleanup_ring_buffer(struct drm_device *dev, 802void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
649 struct intel_ring_buffer *ring)
650{ 803{
651 if (ring->gem_object == NULL) 804 struct drm_i915_private *dev_priv;
805 int ret;
806
807 if (ring->obj == NULL)
652 return; 808 return;
653 809
654 drm_core_ioremapfree(&ring->map, dev); 810 /* Disable the ring buffer. The ring must be idle at this point */
811 dev_priv = ring->dev->dev_private;
812 ret = intel_wait_ring_buffer(ring, ring->size - 8);
813 I915_WRITE_CTL(ring, 0);
655 814
656 i915_gem_object_unpin(ring->gem_object); 815 drm_core_ioremapfree(&ring->map, ring->dev);
657 drm_gem_object_unreference(ring->gem_object); 816
658 ring->gem_object = NULL; 817 i915_gem_object_unpin(ring->obj);
818 drm_gem_object_unreference(&ring->obj->base);
819 ring->obj = NULL;
659 820
660 if (ring->cleanup) 821 if (ring->cleanup)
661 ring->cleanup(ring); 822 ring->cleanup(ring);
662 823
663 cleanup_status_page(dev, ring); 824 cleanup_status_page(ring);
664} 825}
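
A note on the new idle-wait above: ring->space is defined as head - (tail + 8), wrapped modulo ring->size, so an empty ring (head == tail) reports exactly size - 8 free bytes. Waiting for ring->size - 8 is therefore equivalent to waiting for the ring to drain completely before CTL is cleared. A minimal sketch of the arithmetic, assuming a 16 KiB ring with head == tail == 0x40:

	space = head - (tail + 8);	/* 0x40 - 0x48 = -8 */
	if (space < 0)
		space += ring->size;	/* -8 + 16384 = 16376 == ring->size - 8 */
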
665 826
666static int intel_wrap_ring_buffer(struct drm_device *dev, 827static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
667 struct intel_ring_buffer *ring)
668{ 828{
669 unsigned int *virt; 829 unsigned int *virt;
670 int rem; 830 int rem;
671 rem = ring->size - ring->tail; 831 rem = ring->size - ring->tail;
672 832
673 if (ring->space < rem) { 833 if (ring->space < rem) {
674 int ret = intel_wait_ring_buffer(dev, ring, rem); 834 int ret = intel_wait_ring_buffer(ring, rem);
675 if (ret) 835 if (ret)
676 return ret; 836 return ret;
677 } 837 }
@@ -689,11 +849,11 @@ static int intel_wrap_ring_buffer(struct drm_device *dev,
689 return 0; 849 return 0;
690} 850}
691 851
692int intel_wait_ring_buffer(struct drm_device *dev, 852int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
693 struct intel_ring_buffer *ring, int n)
694{ 853{
854 struct drm_device *dev = ring->dev;
855 struct drm_i915_private *dev_priv = dev->dev_private;
695 unsigned long end; 856 unsigned long end;
696 drm_i915_private_t *dev_priv = dev->dev_private;
697 u32 head; 857 u32 head;
698 858
699 trace_i915_ring_wait_begin (dev); 859 trace_i915_ring_wait_begin (dev);
@@ -711,7 +871,7 @@ int intel_wait_ring_buffer(struct drm_device *dev,
711 if (ring->space < 0) 871 if (ring->space < 0)
712 ring->space += ring->size; 872 ring->space += ring->size;
713 if (ring->space >= n) { 873 if (ring->space >= n) {
714 trace_i915_ring_wait_end (dev); 874 trace_i915_ring_wait_end(dev);
715 return 0; 875 return 0;
716 } 876 }
717 877
@@ -722,29 +882,39 @@ int intel_wait_ring_buffer(struct drm_device *dev,
722 } 882 }
723 883
724 msleep(1); 884 msleep(1);
885 if (atomic_read(&dev_priv->mm.wedged))
886 return -EAGAIN;
725 } while (!time_after(jiffies, end)); 887 } while (!time_after(jiffies, end));
726 trace_i915_ring_wait_end (dev); 888 trace_i915_ring_wait_end (dev);
727 return -EBUSY; 889 return -EBUSY;
728} 890}
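
The loop above also learns to give up when the GPU is wedged, so callers now see -EAGAIN as well as -EBUSY. A hedged sketch of what that enables on the caller side (the retry policy here is an assumption, not part of the patch):

	ret = intel_wait_ring_buffer(ring, n);
	if (ret == -EAGAIN)
		return ret;	/* GPU wedged: let the reset path run before retrying */
	if (ret == -EBUSY)
		return ret;	/* polling timed out with the ring still full */
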
729 891
730void intel_ring_begin(struct drm_device *dev, 892int intel_ring_begin(struct intel_ring_buffer *ring,
731 struct intel_ring_buffer *ring, 893 int num_dwords)
732 int num_dwords)
733{ 894{
734 int n = 4*num_dwords; 895 int n = 4*num_dwords;
735 if (unlikely(ring->tail + n > ring->size)) 896 int ret;
736 intel_wrap_ring_buffer(dev, ring); 897
737 if (unlikely(ring->space < n)) 898 if (unlikely(ring->tail + n > ring->size)) {
738 intel_wait_ring_buffer(dev, ring, n); 899 ret = intel_wrap_ring_buffer(ring);
900 if (unlikely(ret))
901 return ret;
902 }
903
904 if (unlikely(ring->space < n)) {
905 ret = intel_wait_ring_buffer(ring, n);
906 if (unlikely(ret))
907 return ret;
908 }
739 909
740 ring->space -= n; 910 ring->space -= n;
911 return 0;
741} 912}
742 913
743void intel_ring_advance(struct drm_device *dev, 914void intel_ring_advance(struct intel_ring_buffer *ring)
744 struct intel_ring_buffer *ring)
745{ 915{
746 ring->tail &= ring->size - 1; 916 ring->tail &= ring->size - 1;
747 ring->write_tail(dev, ring, ring->tail); 917 ring->write_tail(ring, ring->tail);
748} 918}
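
Since intel_ring_begin() can now fail, every emitter is expected to follow a check-emit-advance pattern. A minimal sketch using only functions from this patch (the surrounding helper is hypothetical):

	static int example_emit_noops(struct intel_ring_buffer *ring)
	{
		int ret = intel_ring_begin(ring, 2);	/* reserve 2 dwords */
		if (ret)
			return ret;			/* wrap/wait failed */

		intel_ring_emit(ring, MI_NOOP);
		intel_ring_emit(ring, MI_NOOP);
		intel_ring_advance(ring);		/* publish the new tail */
		return 0;
	}
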
749 919
750static const struct intel_ring_buffer render_ring = { 920static const struct intel_ring_buffer render_ring = {
@@ -756,10 +926,11 @@ static const struct intel_ring_buffer render_ring = {
756 .write_tail = ring_write_tail, 926 .write_tail = ring_write_tail,
757 .flush = render_ring_flush, 927 .flush = render_ring_flush,
758 .add_request = render_ring_add_request, 928 .add_request = render_ring_add_request,
759 .get_seqno = render_ring_get_seqno, 929 .get_seqno = ring_get_seqno,
760 .user_irq_get = render_ring_get_user_irq, 930 .irq_get = render_ring_get_irq,
761 .user_irq_put = render_ring_put_user_irq, 931 .irq_put = render_ring_put_irq,
762 .dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer, 932 .dispatch_execbuffer = render_ring_dispatch_execbuffer,
933 .cleanup = render_ring_cleanup,
763}; 934};
764 935
765/* ring buffer for bit-stream decoder */ 936/* ring buffer for bit-stream decoder */
@@ -769,22 +940,21 @@ static const struct intel_ring_buffer bsd_ring = {
769 .id = RING_BSD, 940 .id = RING_BSD,
770 .mmio_base = BSD_RING_BASE, 941 .mmio_base = BSD_RING_BASE,
771 .size = 32 * PAGE_SIZE, 942 .size = 32 * PAGE_SIZE,
772 .init = init_bsd_ring, 943 .init = init_ring_common,
773 .write_tail = ring_write_tail, 944 .write_tail = ring_write_tail,
774 .flush = bsd_ring_flush, 945 .flush = bsd_ring_flush,
775 .add_request = ring_add_request, 946 .add_request = ring_add_request,
776 .get_seqno = ring_status_page_get_seqno, 947 .get_seqno = ring_get_seqno,
777 .user_irq_get = bsd_ring_get_user_irq, 948 .irq_get = bsd_ring_get_irq,
778 .user_irq_put = bsd_ring_put_user_irq, 949 .irq_put = bsd_ring_put_irq,
779 .dispatch_gem_execbuffer = ring_dispatch_gem_execbuffer, 950 .dispatch_execbuffer = ring_dispatch_execbuffer,
780}; 951};
781 952
782 953
783static void gen6_bsd_ring_write_tail(struct drm_device *dev, 954static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
784 struct intel_ring_buffer *ring,
785 u32 value) 955 u32 value)
786{ 956{
787 drm_i915_private_t *dev_priv = dev->dev_private; 957 drm_i915_private_t *dev_priv = ring->dev->dev_private;
788 958
789 /* Every tail move must follow the sequence below */ 959 /* Every tail move must follow the sequence below */
790 I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, 960 I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
@@ -803,69 +973,80 @@ static void gen6_bsd_ring_write_tail(struct drm_device *dev,
803 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE); 973 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
804} 974}
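
The "sequence below" in the comment brackets every tail move between a pair of PSMI control writes; only the closing write is visible in this hunk, so the middle steps in this sketch are assumptions, using only names that appear in the patch:

	/* 1. write GEN6_BSD_SLEEP_PSMI_CONTROL to forbid the ring from sleeping */
	/* 2. poll the same register until the ring acknowledges it is awake */
	I915_WRITE_TAIL(ring, value);			/* 3. the actual tail move */
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,		/* 4. re-allow sleep */
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
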
805 975
806static void gen6_ring_flush(struct drm_device *dev, 976static void gen6_ring_flush(struct intel_ring_buffer *ring,
807 struct intel_ring_buffer *ring,
808 u32 invalidate_domains, 977 u32 invalidate_domains,
809 u32 flush_domains) 978 u32 flush_domains)
810{ 979{
811 intel_ring_begin(dev, ring, 4); 980 if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
812 intel_ring_emit(dev, ring, MI_FLUSH_DW); 981 return;
813 intel_ring_emit(dev, ring, 0); 982
814 intel_ring_emit(dev, ring, 0); 983 if (intel_ring_begin(ring, 4) == 0) {
815 intel_ring_emit(dev, ring, 0); 984 intel_ring_emit(ring, MI_FLUSH_DW);
816 intel_ring_advance(dev, ring); 985 intel_ring_emit(ring, 0);
986 intel_ring_emit(ring, 0);
987 intel_ring_emit(ring, 0);
988 intel_ring_advance(ring);
989 }
817} 990}
818 991
819static int 992static int
820gen6_ring_dispatch_gem_execbuffer(struct drm_device *dev, 993gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
821 struct intel_ring_buffer *ring, 994 u32 offset, u32 len)
822 struct drm_i915_gem_execbuffer2 *exec,
823 struct drm_clip_rect *cliprects,
824 uint64_t exec_offset)
825{ 995{
826 uint32_t exec_start; 996 int ret;
827 997
828 exec_start = (uint32_t) exec_offset + exec->batch_start_offset; 998 ret = intel_ring_begin(ring, 2);
999 if (ret)
1000 return ret;
829 1001
830 intel_ring_begin(dev, ring, 2); 1002 intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
831 intel_ring_emit(dev, ring,
832 MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
833 /* bit0-7 is the length on GEN6+ */ 1003 /* bit0-7 is the length on GEN6+ */
834 intel_ring_emit(dev, ring, exec_start); 1004 intel_ring_emit(ring, offset);
835 intel_ring_advance(dev, ring); 1005 intel_ring_advance(ring);
836 1006
837 return 0; 1007 return 0;
838} 1008}
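
With the execbuffer2/cliprect plumbing dropped from the signature, a dispatch now needs only a GTT offset and a length. A minimal caller sketch, where batch_offset and batch_len are assumed to come from the execbuffer path:

	ret = ring->dispatch_execbuffer(ring, batch_offset, batch_len);
	if (ret)
		return ret;	/* intel_ring_begin() failed inside the dispatch */
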
839 1009
1010static bool
1011gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
1012{
1013 return ring_get_irq(ring, GT_GEN6_BSD_USER_INTERRUPT);
1014}
1015
1016static void
1017gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
1018{
1019 ring_put_irq(ring, GT_GEN6_BSD_USER_INTERRUPT);
1020}
1021
840/* ring buffer for Video Codec for Gen6+ */ 1022/* ring buffer for Video Codec for Gen6+ */
841static const struct intel_ring_buffer gen6_bsd_ring = { 1023static const struct intel_ring_buffer gen6_bsd_ring = {
842 .name = "gen6 bsd ring", 1024 .name = "gen6 bsd ring",
843 .id = RING_BSD, 1025 .id = RING_BSD,
844 .mmio_base = GEN6_BSD_RING_BASE, 1026 .mmio_base = GEN6_BSD_RING_BASE,
845 .size = 32 * PAGE_SIZE, 1027 .size = 32 * PAGE_SIZE,
846 .init = init_bsd_ring, 1028 .init = init_ring_common,
847 .write_tail = gen6_bsd_ring_write_tail, 1029 .write_tail = gen6_bsd_ring_write_tail,
848 .flush = gen6_ring_flush, 1030 .flush = gen6_ring_flush,
849 .add_request = ring_add_request, 1031 .add_request = gen6_add_request,
850 .get_seqno = ring_status_page_get_seqno, 1032 .get_seqno = ring_get_seqno,
851 .user_irq_get = bsd_ring_get_user_irq, 1033 .irq_get = gen6_bsd_ring_get_irq,
852 .user_irq_put = bsd_ring_put_user_irq, 1034 .irq_put = gen6_bsd_ring_put_irq,
853 .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer, 1035 .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
854}; 1036};
855 1037
856/* Blitter support (SandyBridge+) */ 1038/* Blitter support (SandyBridge+) */
857 1039
858static void 1040static bool
859blt_ring_get_user_irq(struct drm_device *dev, 1041blt_ring_get_irq(struct intel_ring_buffer *ring)
860 struct intel_ring_buffer *ring)
861{ 1042{
862 /* do nothing */ 1043 return ring_get_irq(ring, GT_BLT_USER_INTERRUPT);
863} 1044}
1045
864static void 1046static void
865blt_ring_put_user_irq(struct drm_device *dev, 1047blt_ring_put_irq(struct intel_ring_buffer *ring)
866 struct intel_ring_buffer *ring)
867{ 1048{
868 /* do nothing */ 1049 ring_put_irq(ring, GT_BLT_USER_INTERRUPT);
869} 1050}
870 1051
871 1052
@@ -883,32 +1064,31 @@ to_blt_workaround(struct intel_ring_buffer *ring)
883 return ring->private; 1064 return ring->private;
884} 1065}
885 1066
886static int blt_ring_init(struct drm_device *dev, 1067static int blt_ring_init(struct intel_ring_buffer *ring)
887 struct intel_ring_buffer *ring)
888{ 1068{
889 if (NEED_BLT_WORKAROUND(dev)) { 1069 if (NEED_BLT_WORKAROUND(ring->dev)) {
890 struct drm_i915_gem_object *obj; 1070 struct drm_i915_gem_object *obj;
891 u32 __iomem *ptr; 1071 u32 *ptr;
892 int ret; 1072 int ret;
893 1073
894 obj = to_intel_bo(i915_gem_alloc_object(dev, 4096)); 1074 obj = i915_gem_alloc_object(ring->dev, 4096);
895 if (obj == NULL) 1075 if (obj == NULL)
896 return -ENOMEM; 1076 return -ENOMEM;
897 1077
898 ret = i915_gem_object_pin(&obj->base, 4096); 1078 ret = i915_gem_object_pin(obj, 4096, true);
899 if (ret) { 1079 if (ret) {
900 drm_gem_object_unreference(&obj->base); 1080 drm_gem_object_unreference(&obj->base);
901 return ret; 1081 return ret;
902 } 1082 }
903 1083
904 ptr = kmap(obj->pages[0]); 1084 ptr = kmap(obj->pages[0]);
905 iowrite32(MI_BATCH_BUFFER_END, ptr); 1085 *ptr++ = MI_BATCH_BUFFER_END;
906 iowrite32(MI_NOOP, ptr+1); 1086 *ptr++ = MI_NOOP;
907 kunmap(obj->pages[0]); 1087 kunmap(obj->pages[0]);
908 1088
909 ret = i915_gem_object_set_to_gtt_domain(&obj->base, false); 1089 ret = i915_gem_object_set_to_gtt_domain(obj, false);
910 if (ret) { 1090 if (ret) {
911 i915_gem_object_unpin(&obj->base); 1091 i915_gem_object_unpin(obj);
912 drm_gem_object_unreference(&obj->base); 1092 drm_gem_object_unreference(&obj->base);
913 return ret; 1093 return ret;
914 } 1094 }
@@ -916,51 +1096,39 @@ static int blt_ring_init(struct drm_device *dev,
916 ring->private = obj; 1096 ring->private = obj;
917 } 1097 }
918 1098
919 return init_ring_common(dev, ring); 1099 return init_ring_common(ring);
920} 1100}
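
The private object built above is consumed by blt_ring_begin() below: whenever ring->private is set, each BLT submission is prefixed with an MI_BATCH_BUFFER_START pointing at the pinned page, so the hardware runs the two-dword MI_BATCH_BUFFER_END/MI_NOOP batch before the real commands. The page is now filled with plain stores instead of iowrite32() because kmap() of a GEM page yields an ordinary kernel mapping, not __iomem space, which is also why the local ptr loses its __iomem annotation.
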
921 1101
922static void blt_ring_begin(struct drm_device *dev, 1102static int blt_ring_begin(struct intel_ring_buffer *ring,
923 struct intel_ring_buffer *ring,
924 int num_dwords) 1103 int num_dwords)
925{ 1104{
926 if (ring->private) { 1105 if (ring->private) {
927 intel_ring_begin(dev, ring, num_dwords+2); 1106 int ret = intel_ring_begin(ring, num_dwords+2);
928 intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START); 1107 if (ret)
929 intel_ring_emit(dev, ring, to_blt_workaround(ring)->gtt_offset); 1108 return ret;
1109
1110 intel_ring_emit(ring, MI_BATCH_BUFFER_START);
1111 intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);
1112
1113 return 0;
930 } else 1114 } else
931 intel_ring_begin(dev, ring, 4); 1115 return intel_ring_begin(ring, 4);
932} 1116}
933 1117
934static void blt_ring_flush(struct drm_device *dev, 1118static void blt_ring_flush(struct intel_ring_buffer *ring,
935 struct intel_ring_buffer *ring,
936 u32 invalidate_domains, 1119 u32 invalidate_domains,
937 u32 flush_domains) 1120 u32 flush_domains)
938{ 1121{
939 blt_ring_begin(dev, ring, 4); 1122 if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
940 intel_ring_emit(dev, ring, MI_FLUSH_DW); 1123 return;
941 intel_ring_emit(dev, ring, 0);
942 intel_ring_emit(dev, ring, 0);
943 intel_ring_emit(dev, ring, 0);
944 intel_ring_advance(dev, ring);
945}
946
947static u32
948blt_ring_add_request(struct drm_device *dev,
949 struct intel_ring_buffer *ring,
950 u32 flush_domains)
951{
952 u32 seqno = i915_gem_get_seqno(dev);
953
954 blt_ring_begin(dev, ring, 4);
955 intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
956 intel_ring_emit(dev, ring,
957 I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
958 intel_ring_emit(dev, ring, seqno);
959 intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
960 intel_ring_advance(dev, ring);
961 1124
962 DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno); 1125 if (blt_ring_begin(ring, 4) == 0) {
963 return seqno; 1126 intel_ring_emit(ring, MI_FLUSH_DW);
1127 intel_ring_emit(ring, 0);
1128 intel_ring_emit(ring, 0);
1129 intel_ring_emit(ring, 0);
1130 intel_ring_advance(ring);
1131 }
964} 1132}
965 1133
966static void blt_ring_cleanup(struct intel_ring_buffer *ring) 1134static void blt_ring_cleanup(struct intel_ring_buffer *ring)
@@ -981,47 +1149,54 @@ static const struct intel_ring_buffer gen6_blt_ring = {
981 .init = blt_ring_init, 1149 .init = blt_ring_init,
982 .write_tail = ring_write_tail, 1150 .write_tail = ring_write_tail,
983 .flush = blt_ring_flush, 1151 .flush = blt_ring_flush,
984 .add_request = blt_ring_add_request, 1152 .add_request = gen6_add_request,
985 .get_seqno = ring_status_page_get_seqno, 1153 .get_seqno = ring_get_seqno,
986 .user_irq_get = blt_ring_get_user_irq, 1154 .irq_get = blt_ring_get_irq,
987 .user_irq_put = blt_ring_put_user_irq, 1155 .irq_put = blt_ring_put_irq,
988 .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer, 1156 .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
989 .cleanup = blt_ring_cleanup, 1157 .cleanup = blt_ring_cleanup,
990}; 1158};
991 1159
992int intel_init_render_ring_buffer(struct drm_device *dev) 1160int intel_init_render_ring_buffer(struct drm_device *dev)
993{ 1161{
994 drm_i915_private_t *dev_priv = dev->dev_private; 1162 drm_i915_private_t *dev_priv = dev->dev_private;
995 1163 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
996 dev_priv->render_ring = render_ring; 1164
1165 *ring = render_ring;
1166 if (INTEL_INFO(dev)->gen >= 6) {
1167 ring->add_request = gen6_add_request;
1168 } else if (IS_GEN5(dev)) {
1169 ring->add_request = pc_render_add_request;
1170 ring->get_seqno = pc_render_get_seqno;
1171 }
997 1172
998 if (!I915_NEED_GFX_HWS(dev)) { 1173 if (!I915_NEED_GFX_HWS(dev)) {
999 dev_priv->render_ring.status_page.page_addr 1174 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1000 = dev_priv->status_page_dmah->vaddr; 1175 memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1001 memset(dev_priv->render_ring.status_page.page_addr,
1002 0, PAGE_SIZE);
1003 } 1176 }
1004 1177
1005 return intel_init_ring_buffer(dev, &dev_priv->render_ring); 1178 return intel_init_ring_buffer(dev, ring);
1006} 1179}
1007 1180
1008int intel_init_bsd_ring_buffer(struct drm_device *dev) 1181int intel_init_bsd_ring_buffer(struct drm_device *dev)
1009{ 1182{
1010 drm_i915_private_t *dev_priv = dev->dev_private; 1183 drm_i915_private_t *dev_priv = dev->dev_private;
1184 struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
1011 1185
1012 if (IS_GEN6(dev)) 1186 if (IS_GEN6(dev))
1013 dev_priv->bsd_ring = gen6_bsd_ring; 1187 *ring = gen6_bsd_ring;
1014 else 1188 else
1015 dev_priv->bsd_ring = bsd_ring; 1189 *ring = bsd_ring;
1016 1190
1017 return intel_init_ring_buffer(dev, &dev_priv->bsd_ring); 1191 return intel_init_ring_buffer(dev, ring);
1018} 1192}
1019 1193
1020int intel_init_blt_ring_buffer(struct drm_device *dev) 1194int intel_init_blt_ring_buffer(struct drm_device *dev)
1021{ 1195{
1022 drm_i915_private_t *dev_priv = dev->dev_private; 1196 drm_i915_private_t *dev_priv = dev->dev_private;
1197 struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
1023 1198
1024 dev_priv->blt_ring = gen6_blt_ring; 1199 *ring = gen6_blt_ring;
1025 1200
1026 return intel_init_ring_buffer(dev, &dev_priv->blt_ring); 1201 return intel_init_ring_buffer(dev, ring);
1027} 1202}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index d2cd0f1efeed..8e2e357ad6ee 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -1,22 +1,37 @@
1#ifndef _INTEL_RINGBUFFER_H_ 1#ifndef _INTEL_RINGBUFFER_H_
2#define _INTEL_RINGBUFFER_H_ 2#define _INTEL_RINGBUFFER_H_
3 3
4enum {
5 RCS = 0x0,
6 VCS,
7 BCS,
8 I915_NUM_RINGS,
9};
10
4struct intel_hw_status_page { 11struct intel_hw_status_page {
5 void *page_addr; 12 u32 __iomem *page_addr;
6 unsigned int gfx_addr; 13 unsigned int gfx_addr;
7 struct drm_gem_object *obj; 14 struct drm_i915_gem_object *obj;
8}; 15};
9 16
10#define I915_READ_TAIL(ring) I915_READ(RING_TAIL(ring->mmio_base)) 17#define I915_RING_READ(reg) i915_safe_read(dev_priv, reg)
18
19#define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL(ring->mmio_base))
11#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL(ring->mmio_base), val) 20#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL(ring->mmio_base), val)
12#define I915_READ_START(ring) I915_READ(RING_START(ring->mmio_base)) 21
22#define I915_READ_START(ring) I915_RING_READ(RING_START(ring->mmio_base))
13#define I915_WRITE_START(ring, val) I915_WRITE(RING_START(ring->mmio_base), val) 23#define I915_WRITE_START(ring, val) I915_WRITE(RING_START(ring->mmio_base), val)
14#define I915_READ_HEAD(ring) I915_READ(RING_HEAD(ring->mmio_base)) 24
25#define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD(ring->mmio_base))
15#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD(ring->mmio_base), val) 26#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD(ring->mmio_base), val)
16#define I915_READ_CTL(ring) I915_READ(RING_CTL(ring->mmio_base)) 27
28#define I915_READ_CTL(ring) I915_RING_READ(RING_CTL(ring->mmio_base))
17#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL(ring->mmio_base), val) 29#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL(ring->mmio_base), val)
18 30
19struct drm_i915_gem_execbuffer2; 31#define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID(ring->mmio_base))
32#define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0(ring->mmio_base))
33#define I915_READ_SYNC_1(ring) I915_RING_READ(RING_SYNC_1(ring->mmio_base))
34
20struct intel_ring_buffer { 35struct intel_ring_buffer {
21 const char *name; 36 const char *name;
22 enum intel_ring_id { 37 enum intel_ring_id {
@@ -25,45 +40,36 @@ struct intel_ring_buffer {
25 RING_BLT = 0x4, 40 RING_BLT = 0x4,
26 } id; 41 } id;
27 u32 mmio_base; 42 u32 mmio_base;
28 unsigned long size;
29 void *virtual_start; 43 void *virtual_start;
30 struct drm_device *dev; 44 struct drm_device *dev;
31 struct drm_gem_object *gem_object; 45 struct drm_i915_gem_object *obj;
32 46
33 u32 actual_head; 47 u32 actual_head;
34 u32 head; 48 u32 head;
35 u32 tail; 49 u32 tail;
36 int space; 50 int space;
51 int size;
37 struct intel_hw_status_page status_page; 52 struct intel_hw_status_page status_page;
38 53
39 u32 irq_gem_seqno; /* last seq seen at irq time */ 54 u32 irq_seqno; /* last seq seen at irq time */
40 u32 waiting_gem_seqno; 55 u32 waiting_seqno;
41 int user_irq_refcount; 56 u32 sync_seqno[I915_NUM_RINGS-1];
42 void (*user_irq_get)(struct drm_device *dev, 57 atomic_t irq_refcount;
43 struct intel_ring_buffer *ring); 58 bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
44 void (*user_irq_put)(struct drm_device *dev, 59 void (*irq_put)(struct intel_ring_buffer *ring);
45 struct intel_ring_buffer *ring);
46 60
47 int (*init)(struct drm_device *dev, 61 int (*init)(struct intel_ring_buffer *ring);
48 struct intel_ring_buffer *ring);
49 62
50 void (*write_tail)(struct drm_device *dev, 63 void (*write_tail)(struct intel_ring_buffer *ring,
51 struct intel_ring_buffer *ring,
52 u32 value); 64 u32 value);
53 void (*flush)(struct drm_device *dev, 65 void (*flush)(struct intel_ring_buffer *ring,
54 struct intel_ring_buffer *ring, 66 u32 invalidate_domains,
55 u32 invalidate_domains, 67 u32 flush_domains);
56 u32 flush_domains); 68 int (*add_request)(struct intel_ring_buffer *ring,
57 u32 (*add_request)(struct drm_device *dev, 69 u32 *seqno);
58 struct intel_ring_buffer *ring, 70 u32 (*get_seqno)(struct intel_ring_buffer *ring);
59 u32 flush_domains); 71 int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
60 u32 (*get_seqno)(struct drm_device *dev, 72 u32 offset, u32 length);
61 struct intel_ring_buffer *ring);
62 int (*dispatch_gem_execbuffer)(struct drm_device *dev,
63 struct intel_ring_buffer *ring,
64 struct drm_i915_gem_execbuffer2 *exec,
65 struct drm_clip_rect *cliprects,
66 uint64_t exec_offset);
67 void (*cleanup)(struct intel_ring_buffer *ring); 73 void (*cleanup)(struct intel_ring_buffer *ring);
68 74
69 /** 75 /**
@@ -96,7 +102,7 @@ struct intel_ring_buffer {
96 /** 102 /**
97 * Do we have some not yet emitted requests outstanding? 103 * Do we have some not yet emitted requests outstanding?
98 */ 104 */
99 bool outstanding_lazy_request; 105 u32 outstanding_lazy_request;
100 106
101 wait_queue_head_t irq_queue; 107 wait_queue_head_t irq_queue;
102 drm_local_map_t map; 108 drm_local_map_t map;
@@ -105,44 +111,54 @@ struct intel_ring_buffer {
105}; 111};
106 112
107static inline u32 113static inline u32
114intel_ring_sync_index(struct intel_ring_buffer *ring,
115 struct intel_ring_buffer *other)
116{
117 int idx;
118
119 /*
120 * cs -> 0 = vcs, 1 = bcs
121 * vcs -> 0 = bcs, 1 = cs,
122 * bcs -> 0 = cs, 1 = vcs.
123 */
124
125 idx = (other - ring) - 1;
126 if (idx < 0)
127 idx += I915_NUM_RINGS;
128
129 return idx;
130}
131
132static inline u32
108intel_read_status_page(struct intel_ring_buffer *ring, 133intel_read_status_page(struct intel_ring_buffer *ring,
109 int reg) 134 int reg)
110{ 135{
111 u32 *regs = ring->status_page.page_addr; 136 return ioread32(ring->status_page.page_addr + reg);
112 return regs[reg];
113} 137}
114 138
115int intel_init_ring_buffer(struct drm_device *dev, 139void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
116 struct intel_ring_buffer *ring); 140int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
117void intel_cleanup_ring_buffer(struct drm_device *dev, 141int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
118 struct intel_ring_buffer *ring); 142
119int intel_wait_ring_buffer(struct drm_device *dev, 143static inline void intel_ring_emit(struct intel_ring_buffer *ring,
120 struct intel_ring_buffer *ring, int n); 144 u32 data)
121void intel_ring_begin(struct drm_device *dev,
122 struct intel_ring_buffer *ring, int n);
123
124static inline void intel_ring_emit(struct drm_device *dev,
125 struct intel_ring_buffer *ring,
126 unsigned int data)
127{ 145{
128 unsigned int *virt = ring->virtual_start + ring->tail; 146 iowrite32(data, ring->virtual_start + ring->tail);
129 *virt = data;
130 ring->tail += 4; 147 ring->tail += 4;
131} 148}
132 149
133void intel_ring_advance(struct drm_device *dev, 150void intel_ring_advance(struct intel_ring_buffer *ring);
134 struct intel_ring_buffer *ring);
135 151
136u32 intel_ring_get_seqno(struct drm_device *dev, 152u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);
137 struct intel_ring_buffer *ring); 153int intel_ring_sync(struct intel_ring_buffer *ring,
154 struct intel_ring_buffer *to,
155 u32 seqno);
138 156
139int intel_init_render_ring_buffer(struct drm_device *dev); 157int intel_init_render_ring_buffer(struct drm_device *dev);
140int intel_init_bsd_ring_buffer(struct drm_device *dev); 158int intel_init_bsd_ring_buffer(struct drm_device *dev);
141int intel_init_blt_ring_buffer(struct drm_device *dev); 159int intel_init_blt_ring_buffer(struct drm_device *dev);
142 160
143u32 intel_ring_get_active_head(struct drm_device *dev, 161u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
144 struct intel_ring_buffer *ring); 162void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
145void intel_ring_setup_status_page(struct drm_device *dev,
146 struct intel_ring_buffer *ring);
147 163
148#endif /* _INTEL_RINGBUFFER_H_ */ 164#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 6bc42fa2a6ec..9d0af36a13ec 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1045,7 +1045,9 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1045 1045
1046 /* Set the SDVO control regs. */ 1046 /* Set the SDVO control regs. */
1047 if (INTEL_INFO(dev)->gen >= 4) { 1047 if (INTEL_INFO(dev)->gen >= 4) {
1048 sdvox = SDVO_BORDER_ENABLE; 1048 sdvox = 0;
1049 if (INTEL_INFO(dev)->gen < 5)
1050 sdvox |= SDVO_BORDER_ENABLE;
1049 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 1051 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1050 sdvox |= SDVO_VSYNC_ACTIVE_HIGH; 1052 sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
1051 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 1053 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@@ -1075,7 +1077,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1075 sdvox |= (pixel_multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT; 1077 sdvox |= (pixel_multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT;
1076 } 1078 }
1077 1079
1078 if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL) 1080 if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL &&
1081 INTEL_INFO(dev)->gen < 5)
1079 sdvox |= SDVO_STALL_SELECT; 1082 sdvox |= SDVO_STALL_SELECT;
1080 intel_sdvo_write_sdvox(intel_sdvo, sdvox); 1083 intel_sdvo_write_sdvox(intel_sdvo, sdvox);
1081} 1084}
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 2f7681989316..93206e4eaa6f 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1245,10 +1245,11 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
1245 int type; 1245 int type;
1246 1246
1247 /* Disable TV interrupts around load detect or we'll recurse */ 1247 /* Disable TV interrupts around load detect or we'll recurse */
1248 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 1248 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1249 i915_disable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE | 1249 i915_disable_pipestat(dev_priv, 0,
1250 PIPE_HOTPLUG_INTERRUPT_ENABLE |
1250 PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); 1251 PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
1251 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); 1252 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1252 1253
1253 save_tv_dac = tv_dac = I915_READ(TV_DAC); 1254 save_tv_dac = tv_dac = I915_READ(TV_DAC);
1254 save_tv_ctl = tv_ctl = I915_READ(TV_CTL); 1255 save_tv_ctl = tv_ctl = I915_READ(TV_CTL);
@@ -1301,10 +1302,11 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
1301 I915_WRITE(TV_CTL, save_tv_ctl); 1302 I915_WRITE(TV_CTL, save_tv_ctl);
1302 1303
1303 /* Restore interrupt config */ 1304 /* Restore interrupt config */
1304 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 1305 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1305 i915_enable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE | 1306 i915_enable_pipestat(dev_priv, 0,
1307 PIPE_HOTPLUG_INTERRUPT_ENABLE |
1306 PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); 1308 PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
1307 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); 1309 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1308 1310
1309 return type; 1311 return type;
1310} 1312}
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 72730e9ca06c..21d6c29c2d21 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -10,7 +10,7 @@ config DRM_NOUVEAU
10 select FB 10 select FB
11 select FRAMEBUFFER_CONSOLE if !EMBEDDED 11 select FRAMEBUFFER_CONSOLE if !EMBEDDED
12 select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT 12 select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
13 select ACPI_VIDEO if ACPI 13 select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL && INPUT
14 help 14 help
15 Choose this option for open-source nVidia support. 15 Choose this option for open-source nVidia support.
16 16
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 23fa82d667d6..e12c97fd8db8 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -5,27 +5,32 @@
5ccflags-y := -Iinclude/drm 5ccflags-y := -Iinclude/drm
6nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \ 6nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
7 nouveau_object.o nouveau_irq.o nouveau_notifier.o \ 7 nouveau_object.o nouveau_irq.o nouveau_notifier.o \
8 nouveau_sgdma.o nouveau_dma.o \ 8 nouveau_sgdma.o nouveau_dma.o nouveau_util.o \
9 nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \ 9 nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
10 nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \ 10 nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \
11 nouveau_display.o nouveau_connector.o nouveau_fbcon.o \ 11 nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
12 nouveau_dp.o nouveau_ramht.o \ 12 nouveau_dp.o nouveau_ramht.o \
13 nouveau_pm.o nouveau_volt.o nouveau_perf.o nouveau_temp.o \ 13 nouveau_pm.o nouveau_volt.o nouveau_perf.o nouveau_temp.o \
14 nouveau_mm.o nouveau_vm.o \
14 nv04_timer.o \ 15 nv04_timer.o \
15 nv04_mc.o nv40_mc.o nv50_mc.o \ 16 nv04_mc.o nv40_mc.o nv50_mc.o \
16 nv04_fb.o nv10_fb.o nv30_fb.o nv40_fb.o nv50_fb.o nvc0_fb.o \ 17 nv04_fb.o nv10_fb.o nv30_fb.o nv40_fb.o nv50_fb.o nvc0_fb.o \
17 nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o nvc0_fifo.o \ 18 nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o nvc0_fifo.o \
18 nv04_graph.o nv10_graph.o nv20_graph.o \ 19 nv04_graph.o nv10_graph.o nv20_graph.o \
19 nv40_graph.o nv50_graph.o nvc0_graph.o \ 20 nv40_graph.o nv50_graph.o nvc0_graph.o \
20 nv40_grctx.o nv50_grctx.o \ 21 nv40_grctx.o nv50_grctx.o nvc0_grctx.o \
22 nv84_crypt.o \
21 nv04_instmem.o nv50_instmem.o nvc0_instmem.o \ 23 nv04_instmem.o nv50_instmem.o nvc0_instmem.o \
22 nv50_crtc.o nv50_dac.o nv50_sor.o \ 24 nv50_evo.o nv50_crtc.o nv50_dac.o nv50_sor.o \
23 nv50_cursor.o nv50_display.o nv50_fbcon.o \ 25 nv50_cursor.o nv50_display.o \
24 nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \ 26 nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
25 nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \ 27 nv04_crtc.o nv04_display.o nv04_cursor.o \
28 nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o \
26 nv10_gpio.o nv50_gpio.o \ 29 nv10_gpio.o nv50_gpio.o \
27 nv50_calc.o \ 30 nv50_calc.o \
28 nv04_pm.o nv50_pm.o nva3_pm.o 31 nv04_pm.o nv50_pm.o nva3_pm.o \
32 nv50_vram.o nvc0_vram.o \
33 nv50_vm.o nvc0_vm.o
29 34
30nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o 35nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
31nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o 36nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 119152606e4c..a54238058dc5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -130,10 +130,15 @@ static int nouveau_dsm_init(void)
130 130
131static int nouveau_dsm_get_client_id(struct pci_dev *pdev) 131static int nouveau_dsm_get_client_id(struct pci_dev *pdev)
132{ 132{
133 if (nouveau_dsm_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev)) 133 /* easy option one - intel vendor ID means Integrated */
134 if (pdev->vendor == PCI_VENDOR_ID_INTEL)
134 return VGA_SWITCHEROO_IGD; 135 return VGA_SWITCHEROO_IGD;
135 else 136
136 return VGA_SWITCHEROO_DIS; 137 /* is this device on Bus 0? - this may need improving */
138 if (pdev->bus->number == 0)
139 return VGA_SWITCHEROO_IGD;
140
141 return VGA_SWITCHEROO_DIS;
137} 142}
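
The rewritten classifier drops the ACPI-handle comparison for two cheap PCI heuristics: an Intel vendor ID, or residence on bus 0, means integrated. A worked example under those rules (device IDs are illustrative, not from the patch):

	/* 8086:0046 on bus 0 -> PCI_VENDOR_ID_INTEL -> VGA_SWITCHEROO_IGD */
	/* 10de:0a75 on bus 1 -> not Intel, bus != 0 -> VGA_SWITCHEROO_DIS */
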
138 143
139static struct vga_switcheroo_handler nouveau_dsm_handler = { 144static struct vga_switcheroo_handler nouveau_dsm_handler = {
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index b2293576f278..d3046559bf05 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -6053,52 +6053,17 @@ static struct dcb_entry *new_dcb_entry(struct dcb_table *dcb)
6053 return entry; 6053 return entry;
6054} 6054}
6055 6055
6056static void fabricate_vga_output(struct dcb_table *dcb, int i2c, int heads) 6056static void fabricate_dcb_output(struct dcb_table *dcb, int type, int i2c,
6057 int heads, int or)
6057{ 6058{
6058 struct dcb_entry *entry = new_dcb_entry(dcb); 6059 struct dcb_entry *entry = new_dcb_entry(dcb);
6059 6060
6060 entry->type = 0; 6061 entry->type = type;
6061 entry->i2c_index = i2c; 6062 entry->i2c_index = i2c;
6062 entry->heads = heads; 6063 entry->heads = heads;
6063 entry->location = DCB_LOC_ON_CHIP; 6064 if (type != OUTPUT_ANALOG)
6064 entry->or = 1; 6065 entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */
6065} 6066 entry->or = or;
6066
6067static void fabricate_dvi_i_output(struct dcb_table *dcb, bool twoHeads)
6068{
6069 struct dcb_entry *entry = new_dcb_entry(dcb);
6070
6071 entry->type = 2;
6072 entry->i2c_index = LEGACY_I2C_PANEL;
6073 entry->heads = twoHeads ? 3 : 1;
6074 entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */
6075 entry->or = 1; /* means |0x10 gets set on CRE_LCD__INDEX */
6076 entry->duallink_possible = false; /* SiI164 and co. are single link */
6077
6078#if 0
6079 /*
6080 * For dvi-a either crtc probably works, but my card appears to only
6081 * support dvi-d. "nvidia" still attempts to program it for dvi-a,
6082 * doing the full fp output setup (program 0x6808.. fp dimension regs,
6083 * setting 0x680848 to 0x10000111 to enable, maybe setting 0x680880);
6084 * the monitor picks up the mode res ok and lights up, but no pixel
6085 * data appears, so the board manufacturer probably connected up the
6086 * sync lines, but missed the video traces / components
6087 *
6088 * with this introduction, dvi-a left as an exercise for the reader.
6089 */
6090 fabricate_vga_output(dcb, LEGACY_I2C_PANEL, entry->heads);
6091#endif
6092}
6093
6094static void fabricate_tv_output(struct dcb_table *dcb, bool twoHeads)
6095{
6096 struct dcb_entry *entry = new_dcb_entry(dcb);
6097
6098 entry->type = 1;
6099 entry->i2c_index = LEGACY_I2C_TV;
6100 entry->heads = twoHeads ? 3 : 1;
6101 entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */
6102} 6067}
6103 6068
6104static bool 6069static bool
@@ -6365,8 +6330,36 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
6365 return true; 6330 return true;
6366} 6331}
6367 6332
6333static void
6334fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios)
6335{
6336 struct dcb_table *dcb = &bios->dcb;
6337 int all_heads = (nv_two_heads(dev) ? 3 : 1);
6338
6339#ifdef __powerpc__
6340 /* Apple iMac G4 NV17 */
6341 if (of_machine_is_compatible("PowerMac4,5")) {
6342 fabricate_dcb_output(dcb, OUTPUT_TMDS, 0, all_heads, 1);
6343 fabricate_dcb_output(dcb, OUTPUT_ANALOG, 1, all_heads, 2);
6344 return;
6345 }
6346#endif
6347
6348 /* Make up some sane defaults */
6349 fabricate_dcb_output(dcb, OUTPUT_ANALOG, LEGACY_I2C_CRT, 1, 1);
6350
6351 if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
6352 fabricate_dcb_output(dcb, OUTPUT_TV, LEGACY_I2C_TV,
6353 all_heads, 0);
6354
6355 else if (bios->tmds.output0_script_ptr ||
6356 bios->tmds.output1_script_ptr)
6357 fabricate_dcb_output(dcb, OUTPUT_TMDS, LEGACY_I2C_PANEL,
6358 all_heads, 1);
6359}
6360
6368static int 6361static int
6369parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads) 6362parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
6370{ 6363{
6371 struct drm_nouveau_private *dev_priv = dev->dev_private; 6364 struct drm_nouveau_private *dev_priv = dev->dev_private;
6372 struct dcb_table *dcb = &bios->dcb; 6365 struct dcb_table *dcb = &bios->dcb;
@@ -6386,12 +6379,7 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
6386 6379
6387 /* this situation likely means a really old card, pre DCB */ 6380 /* this situation likely means a really old card, pre DCB */
6388 if (dcbptr == 0x0) { 6381 if (dcbptr == 0x0) {
6389 NV_INFO(dev, "Assuming a CRT output exists\n"); 6382 fabricate_dcb_encoder_table(dev, bios);
6390 fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1);
6391
6392 if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
6393 fabricate_tv_output(dcb, twoHeads);
6394
6395 return 0; 6383 return 0;
6396 } 6384 }
6397 6385
@@ -6451,21 +6439,7 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
6451 */ 6439 */
6452 NV_TRACEWARN(dev, "No useful information in BIOS output table; " 6440 NV_TRACEWARN(dev, "No useful information in BIOS output table; "
6453 "adding all possible outputs\n"); 6441 "adding all possible outputs\n");
6454 fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1); 6442 fabricate_dcb_encoder_table(dev, bios);
6455
6456 /*
6457 * Attempt to detect TV before DVI because the test
6458 * for the former is more accurate and it rules the
6459 * latter out.
6460 */
6461 if (nv04_tv_identify(dev,
6462 bios->legacy.i2c_indices.tv) >= 0)
6463 fabricate_tv_output(dcb, twoHeads);
6464
6465 else if (bios->tmds.output0_script_ptr ||
6466 bios->tmds.output1_script_ptr)
6467 fabricate_dvi_i_output(dcb, twoHeads);
6468
6469 return 0; 6443 return 0;
6470 } 6444 }
6471 6445
@@ -6859,7 +6833,7 @@ nouveau_bios_init(struct drm_device *dev)
6859 if (ret) 6833 if (ret)
6860 return ret; 6834 return ret;
6861 6835
6862 ret = parse_dcb_table(dev, bios, nv_two_heads(dev)); 6836 ret = parse_dcb_table(dev, bios);
6863 if (ret) 6837 if (ret)
6864 return ret; 6838 return ret;
6865 6839
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index c41e1c200ef5..a7fae26f4654 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -32,6 +32,8 @@
32#include "nouveau_drm.h" 32#include "nouveau_drm.h"
33#include "nouveau_drv.h" 33#include "nouveau_drv.h"
34#include "nouveau_dma.h" 34#include "nouveau_dma.h"
35#include "nouveau_mm.h"
36#include "nouveau_vm.h"
35 37
36#include <linux/log2.h> 38#include <linux/log2.h>
37#include <linux/slab.h> 39#include <linux/slab.h>
@@ -46,82 +48,51 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
46 if (unlikely(nvbo->gem)) 48 if (unlikely(nvbo->gem))
47 DRM_ERROR("bo %p still attached to GEM object\n", bo); 49 DRM_ERROR("bo %p still attached to GEM object\n", bo);
48 50
49 if (nvbo->tile) 51 nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
50 nv10_mem_expire_tiling(dev, nvbo->tile, NULL); 52 nouveau_vm_put(&nvbo->vma);
51
52 kfree(nvbo); 53 kfree(nvbo);
53} 54}
54 55
55static void 56static void
56nouveau_bo_fixup_align(struct drm_device *dev, 57nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, int *size,
57 uint32_t tile_mode, uint32_t tile_flags, 58 int *page_shift)
58 int *align, int *size)
59{ 59{
60 struct drm_nouveau_private *dev_priv = dev->dev_private; 60 struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
61
62 /*
63 * Some of the tile_flags have a periodic structure of N*4096 bytes,
64 * align to that as well as the page size. Align the size to the
65 * appropriate boundaries. This does imply that sizes are rounded up
66 * 3-7 pages, so be aware of this and do not waste memory by allocating
67 * many small buffers.
68 */
69 if (dev_priv->card_type == NV_50) {
70 uint32_t block_size = dev_priv->vram_size >> 15;
71 int i;
72
73 switch (tile_flags) {
74 case 0x1800:
75 case 0x2800:
76 case 0x4800:
77 case 0x7a00:
78 if (is_power_of_2(block_size)) {
79 for (i = 1; i < 10; i++) {
80 *align = 12 * i * block_size;
81 if (!(*align % 65536))
82 break;
83 }
84 } else {
85 for (i = 1; i < 10; i++) {
86 *align = 8 * i * block_size;
87 if (!(*align % 65536))
88 break;
89 }
90 }
91 *size = roundup(*size, *align);
92 break;
93 default:
94 break;
95 }
96 61
97 } else { 62 if (dev_priv->card_type < NV_50) {
98 if (tile_mode) { 63 if (nvbo->tile_mode) {
99 if (dev_priv->chipset >= 0x40) { 64 if (dev_priv->chipset >= 0x40) {
100 *align = 65536; 65 *align = 65536;
101 *size = roundup(*size, 64 * tile_mode); 66 *size = roundup(*size, 64 * nvbo->tile_mode);
102 67
103 } else if (dev_priv->chipset >= 0x30) { 68 } else if (dev_priv->chipset >= 0x30) {
104 *align = 32768; 69 *align = 32768;
105 *size = roundup(*size, 64 * tile_mode); 70 *size = roundup(*size, 64 * nvbo->tile_mode);
106 71
107 } else if (dev_priv->chipset >= 0x20) { 72 } else if (dev_priv->chipset >= 0x20) {
108 *align = 16384; 73 *align = 16384;
109 *size = roundup(*size, 64 * tile_mode); 74 *size = roundup(*size, 64 * nvbo->tile_mode);
110 75
111 } else if (dev_priv->chipset >= 0x10) { 76 } else if (dev_priv->chipset >= 0x10) {
112 *align = 16384; 77 *align = 16384;
113 *size = roundup(*size, 32 * tile_mode); 78 *size = roundup(*size, 32 * nvbo->tile_mode);
114 } 79 }
115 } 80 }
81 } else {
82 if (likely(dev_priv->chan_vm)) {
83 if (*size > 256 * 1024)
84 *page_shift = dev_priv->chan_vm->lpg_shift;
85 else
86 *page_shift = dev_priv->chan_vm->spg_shift;
87 } else {
88 *page_shift = 12;
89 }
90
91 *size = roundup(*size, (1 << *page_shift));
92 *align = max((1 << *page_shift), *align);
116 } 93 }
117 94
118 /* ALIGN works only on powers of two. */
119 *size = roundup(*size, PAGE_SIZE); 95 *size = roundup(*size, PAGE_SIZE);
120
121 if (dev_priv->card_type == NV_50) {
122 *size = roundup(*size, 65536);
123 *align = max(65536, *align);
124 }
125} 96}
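
A worked example of the new page_shift path (the lpg_shift value is an assumption; it is not visible in this hunk): with chan_vm->lpg_shift == 16 (64 KiB large pages), a request of size = 300 KiB and align = 4 KiB becomes:

	/* 300 KiB > 256 KiB          -> page_shift = lpg_shift = 16 */
	/* roundup(300 KiB, 64 KiB)   -> size  = 320 KiB             */
	/* max(64 KiB, 4 KiB)         -> align = 64 KiB              */
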
126 97
127int 98int
@@ -132,7 +103,7 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
132{ 103{
133 struct drm_nouveau_private *dev_priv = dev->dev_private; 104 struct drm_nouveau_private *dev_priv = dev->dev_private;
134 struct nouveau_bo *nvbo; 105 struct nouveau_bo *nvbo;
135 int ret = 0; 106 int ret = 0, page_shift = 0;
136 107
137 nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL); 108 nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
138 if (!nvbo) 109 if (!nvbo)
@@ -145,10 +116,18 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
145 nvbo->tile_flags = tile_flags; 116 nvbo->tile_flags = tile_flags;
146 nvbo->bo.bdev = &dev_priv->ttm.bdev; 117 nvbo->bo.bdev = &dev_priv->ttm.bdev;
147 118
148 nouveau_bo_fixup_align(dev, tile_mode, nouveau_bo_tile_layout(nvbo), 119 nouveau_bo_fixup_align(nvbo, &align, &size, &page_shift);
149 &align, &size);
150 align >>= PAGE_SHIFT; 120 align >>= PAGE_SHIFT;
151 121
122 if (!nvbo->no_vm && dev_priv->chan_vm) {
123 ret = nouveau_vm_get(dev_priv->chan_vm, size, page_shift,
124 NV_MEM_ACCESS_RW, &nvbo->vma);
125 if (ret) {
126 kfree(nvbo);
127 return ret;
128 }
129 }
130
152 nouveau_bo_placement_set(nvbo, flags, 0); 131 nouveau_bo_placement_set(nvbo, flags, 0);
153 132
154 nvbo->channel = chan; 133 nvbo->channel = chan;
@@ -161,6 +140,11 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
161 } 140 }
162 nvbo->channel = NULL; 141 nvbo->channel = NULL;
163 142
143 if (nvbo->vma.node) {
144 if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
145 nvbo->bo.offset = nvbo->vma.offset;
146 }
147
164 *pnvbo = nvbo; 148 *pnvbo = nvbo;
165 return 0; 149 return 0;
166} 150}
@@ -244,7 +228,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
244 228
245 nouveau_bo_placement_set(nvbo, memtype, 0); 229 nouveau_bo_placement_set(nvbo, memtype, 0);
246 230
247 ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false); 231 ret = nouveau_bo_validate(nvbo, false, false, false);
248 if (ret == 0) { 232 if (ret == 0) {
249 switch (bo->mem.mem_type) { 233 switch (bo->mem.mem_type) {
250 case TTM_PL_VRAM: 234 case TTM_PL_VRAM:
@@ -280,7 +264,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
280 264
281 nouveau_bo_placement_set(nvbo, bo->mem.placement, 0); 265 nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
282 266
283 ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false); 267 ret = nouveau_bo_validate(nvbo, false, false, false);
284 if (ret == 0) { 268 if (ret == 0) {
285 switch (bo->mem.mem_type) { 269 switch (bo->mem.mem_type) {
286 case TTM_PL_VRAM: 270 case TTM_PL_VRAM:
@@ -319,6 +303,25 @@ nouveau_bo_unmap(struct nouveau_bo *nvbo)
319 ttm_bo_kunmap(&nvbo->kmap); 303 ttm_bo_kunmap(&nvbo->kmap);
320} 304}
321 305
306int
307nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
308 bool no_wait_reserve, bool no_wait_gpu)
309{
310 int ret;
311
312 ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
313 no_wait_reserve, no_wait_gpu);
314 if (ret)
315 return ret;
316
317 if (nvbo->vma.node) {
318 if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
319 nvbo->bo.offset = nvbo->vma.offset;
320 }
321
322 return 0;
323}
324
322u16 325u16
323nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index) 326nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
324{ 327{
@@ -410,37 +413,40 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
410 man->default_caching = TTM_PL_FLAG_CACHED; 413 man->default_caching = TTM_PL_FLAG_CACHED;
411 break; 414 break;
412 case TTM_PL_VRAM: 415 case TTM_PL_VRAM:
413 man->func = &ttm_bo_manager_func; 416 if (dev_priv->card_type >= NV_50) {
417 man->func = &nouveau_vram_manager;
418 man->io_reserve_fastpath = false;
419 man->use_io_reserve_lru = true;
420 } else {
421 man->func = &ttm_bo_manager_func;
422 }
414 man->flags = TTM_MEMTYPE_FLAG_FIXED | 423 man->flags = TTM_MEMTYPE_FLAG_FIXED |
415 TTM_MEMTYPE_FLAG_MAPPABLE; 424 TTM_MEMTYPE_FLAG_MAPPABLE;
416 man->available_caching = TTM_PL_FLAG_UNCACHED | 425 man->available_caching = TTM_PL_FLAG_UNCACHED |
417 TTM_PL_FLAG_WC; 426 TTM_PL_FLAG_WC;
418 man->default_caching = TTM_PL_FLAG_WC; 427 man->default_caching = TTM_PL_FLAG_WC;
419 if (dev_priv->card_type == NV_50)
420 man->gpu_offset = 0x40000000;
421 else
422 man->gpu_offset = 0;
423 break; 428 break;
424 case TTM_PL_TT: 429 case TTM_PL_TT:
425 man->func = &ttm_bo_manager_func; 430 man->func = &ttm_bo_manager_func;
426 switch (dev_priv->gart_info.type) { 431 switch (dev_priv->gart_info.type) {
427 case NOUVEAU_GART_AGP: 432 case NOUVEAU_GART_AGP:
428 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; 433 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
429 man->available_caching = TTM_PL_FLAG_UNCACHED; 434 man->available_caching = TTM_PL_FLAG_UNCACHED |
430 man->default_caching = TTM_PL_FLAG_UNCACHED; 435 TTM_PL_FLAG_WC;
436 man->default_caching = TTM_PL_FLAG_WC;
431 break; 437 break;
432 case NOUVEAU_GART_SGDMA: 438 case NOUVEAU_GART_SGDMA:
433 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | 439 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
434 TTM_MEMTYPE_FLAG_CMA; 440 TTM_MEMTYPE_FLAG_CMA;
435 man->available_caching = TTM_PL_MASK_CACHING; 441 man->available_caching = TTM_PL_MASK_CACHING;
436 man->default_caching = TTM_PL_FLAG_CACHED; 442 man->default_caching = TTM_PL_FLAG_CACHED;
443 man->gpu_offset = dev_priv->gart_info.aper_base;
437 break; 444 break;
438 default: 445 default:
439 NV_ERROR(dev, "Unknown GART type: %d\n", 446 NV_ERROR(dev, "Unknown GART type: %d\n",
440 dev_priv->gart_info.type); 447 dev_priv->gart_info.type);
441 return -EINVAL; 448 return -EINVAL;
442 } 449 }
443 man->gpu_offset = dev_priv->vm_gart_base;
444 break; 450 break;
445 default: 451 default:
446 NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type); 452 NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
@@ -485,16 +491,9 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
485 if (ret) 491 if (ret)
486 return ret; 492 return ret;
487 493
488 if (nvbo->channel) {
489 ret = nouveau_fence_sync(fence, nvbo->channel);
490 if (ret)
491 goto out;
492 }
493
494 ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict, 494 ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
495 no_wait_reserve, no_wait_gpu, new_mem); 495 no_wait_reserve, no_wait_gpu, new_mem);
496out: 496 nouveau_fence_unref(&fence);
497 nouveau_fence_unref((void *)&fence);
498 return ret; 497 return ret;
499} 498}
500 499
@@ -516,6 +515,58 @@ nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
516} 515}
517 516
518static int 517static int
518nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
519 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
520{
521 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
522 struct nouveau_bo *nvbo = nouveau_bo(bo);
523 u64 src_offset = old_mem->start << PAGE_SHIFT;
524 u64 dst_offset = new_mem->start << PAGE_SHIFT;
525 u32 page_count = new_mem->num_pages;
526 int ret;
527
528 if (!nvbo->no_vm) {
529 if (old_mem->mem_type == TTM_PL_VRAM)
530 src_offset = nvbo->vma.offset;
531 else
532 src_offset += dev_priv->gart_info.aper_base;
533
534 if (new_mem->mem_type == TTM_PL_VRAM)
535 dst_offset = nvbo->vma.offset;
536 else
537 dst_offset += dev_priv->gart_info.aper_base;
538 }
539
540 page_count = new_mem->num_pages;
541 while (page_count) {
542 int line_count = (page_count > 2047) ? 2047 : page_count;
543
544 ret = RING_SPACE(chan, 12);
545 if (ret)
546 return ret;
547
548 BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0238, 2);
549 OUT_RING (chan, upper_32_bits(dst_offset));
550 OUT_RING (chan, lower_32_bits(dst_offset));
551 BEGIN_NVC0(chan, 2, NvSubM2MF, 0x030c, 6);
552 OUT_RING (chan, upper_32_bits(src_offset));
553 OUT_RING (chan, lower_32_bits(src_offset));
554 OUT_RING (chan, PAGE_SIZE); /* src_pitch */
555 OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
556 OUT_RING (chan, PAGE_SIZE); /* line_length */
557 OUT_RING (chan, line_count);
558 BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0300, 1);
559 OUT_RING (chan, 0x00100110);
560
561 page_count -= line_count;
562 src_offset += (PAGE_SIZE * line_count);
563 dst_offset += (PAGE_SIZE * line_count);
564 }
565
566 return 0;
567}
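
The 2047-line cap makes the chunking above easy to work out, one PAGE_SIZE-wide line per page:

	/* 16 MiB buffer / 4 KiB pages = 4096 lines
	 * pass 1: line_count = 2047	(12 dwords via RING_SPACE(chan, 12))
	 * pass 2: line_count = 2047
	 * pass 3: line_count = 2	(offsets advance by PAGE_SIZE * line_count)
	 */
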
568
569static int
519nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, 570nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
520 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) 571 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
521{ 572{
@@ -529,14 +580,14 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
529 dst_offset = new_mem->start << PAGE_SHIFT; 580 dst_offset = new_mem->start << PAGE_SHIFT;
530 if (!nvbo->no_vm) { 581 if (!nvbo->no_vm) {
531 if (old_mem->mem_type == TTM_PL_VRAM) 582 if (old_mem->mem_type == TTM_PL_VRAM)
532 src_offset += dev_priv->vm_vram_base; 583 src_offset = nvbo->vma.offset;
533 else 584 else
534 src_offset += dev_priv->vm_gart_base; 585 src_offset += dev_priv->gart_info.aper_base;
535 586
536 if (new_mem->mem_type == TTM_PL_VRAM) 587 if (new_mem->mem_type == TTM_PL_VRAM)
537 dst_offset += dev_priv->vm_vram_base; 588 dst_offset = nvbo->vma.offset;
538 else 589 else
539 dst_offset += dev_priv->vm_gart_base; 590 dst_offset += dev_priv->gart_info.aper_base;
540 } 591 }
541 592
542 ret = RING_SPACE(chan, 3); 593 ret = RING_SPACE(chan, 3);
@@ -683,17 +734,27 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
683 int ret; 734 int ret;
684 735
685 chan = nvbo->channel; 736 chan = nvbo->channel;
686 if (!chan || nvbo->no_vm) 737 if (!chan || nvbo->no_vm) {
687 chan = dev_priv->channel; 738 chan = dev_priv->channel;
739 mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
740 }
688 741
689 if (dev_priv->card_type < NV_50) 742 if (dev_priv->card_type < NV_50)
690 ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem); 743 ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
691 else 744 else
745 if (dev_priv->card_type < NV_C0)
692 ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem); 746 ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
693 if (ret) 747 else
694 return ret; 748 ret = nvc0_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
749 if (ret == 0) {
750 ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
751 no_wait_reserve,
752 no_wait_gpu, new_mem);
753 }
695 754
696 return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait_reserve, no_wait_gpu, new_mem); 755 if (chan == dev_priv->channel)
756 mutex_unlock(&chan->mutex);
757 return ret;
697} 758}
698 759
699static int 760static int
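Note the locking change in the hunk above: when a buffer has no channel of its own, the copy is emitted on the shared kernel channel, and its mutex is taken with mutex_lock_nested() under the NOUVEAU_KCHANNEL_MUTEX subclass so that lockdep does not flag the (safe) case of holding a user channel's mutex at the same time. The idiom, as a sketch:

	#include <linux/mutex.h>

	/* Two mutexes of the same lock class taken together need distinct
	 * lockdep subclasses; a sketch of the idiom used above. */
	static void lock_user_then_kernel(struct mutex *user_chan_mutex,
					  struct mutex *kernel_chan_mutex)
	{
		mutex_lock(user_chan_mutex);			/* subclass 0 */
		mutex_lock_nested(kernel_chan_mutex, 1);	/* distinct subclass */
	}
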
@@ -771,7 +832,6 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
771 struct drm_device *dev = dev_priv->dev; 832 struct drm_device *dev = dev_priv->dev;
772 struct nouveau_bo *nvbo = nouveau_bo(bo); 833 struct nouveau_bo *nvbo = nouveau_bo(bo);
773 uint64_t offset; 834 uint64_t offset;
774 int ret;
775 835
776 if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) { 836 if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
777 /* Nothing to do. */ 837 /* Nothing to do. */
@@ -781,18 +841,12 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
781 841
782 offset = new_mem->start << PAGE_SHIFT; 842 offset = new_mem->start << PAGE_SHIFT;
783 843
784 if (dev_priv->card_type == NV_50) { 844 if (dev_priv->chan_vm) {
785 ret = nv50_mem_vm_bind_linear(dev, 845 nouveau_vm_map(&nvbo->vma, new_mem->mm_node);
786 offset + dev_priv->vm_vram_base,
787 new_mem->size,
788 nouveau_bo_tile_layout(nvbo),
789 offset);
790 if (ret)
791 return ret;
792
793 } else if (dev_priv->card_type >= NV_10) { 846 } else if (dev_priv->card_type >= NV_10) {
794 *new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size, 847 *new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
795 nvbo->tile_mode); 848 nvbo->tile_mode,
849 nvbo->tile_flags);
796 } 850 }
797 851
798 return 0; 852 return 0;
@@ -808,9 +862,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
808 862
809 if (dev_priv->card_type >= NV_10 && 863 if (dev_priv->card_type >= NV_10 &&
810 dev_priv->card_type < NV_50) { 864 dev_priv->card_type < NV_50) {
811 if (*old_tile) 865 nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
812 nv10_mem_expire_tiling(dev, *old_tile, bo->sync_obj);
813
814 *old_tile = new_tile; 866 *old_tile = new_tile;
815 } 867 }
816} 868}
@@ -879,6 +931,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
879 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; 931 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
880 struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev); 932 struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
881 struct drm_device *dev = dev_priv->dev; 933 struct drm_device *dev = dev_priv->dev;
934 int ret;
882 935
883 mem->bus.addr = NULL; 936 mem->bus.addr = NULL;
884 mem->bus.offset = 0; 937 mem->bus.offset = 0;
@@ -901,9 +954,40 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
901#endif 954#endif
902 break; 955 break;
903 case TTM_PL_VRAM: 956 case TTM_PL_VRAM:
904 mem->bus.offset = mem->start << PAGE_SHIFT; 957 {
958 struct nouveau_vram *vram = mem->mm_node;
959 u8 page_shift;
960
961 if (!dev_priv->bar1_vm) {
962 mem->bus.offset = mem->start << PAGE_SHIFT;
963 mem->bus.base = pci_resource_start(dev->pdev, 1);
964 mem->bus.is_iomem = true;
965 break;
966 }
967
968 if (dev_priv->card_type == NV_C0)
969 page_shift = vram->page_shift;
970 else
971 page_shift = 12;
972
973 ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size,
974 page_shift, NV_MEM_ACCESS_RW,
975 &vram->bar_vma);
976 if (ret)
977 return ret;
978
 979 nouveau_vm_map(&vram->bar_vma, vram);
984
985 mem->bus.offset = vram->bar_vma.offset;
986 if (dev_priv->card_type == NV_50) /*XXX*/
987 mem->bus.offset -= 0x0020000000ULL;
905 mem->bus.base = pci_resource_start(dev->pdev, 1); 988 mem->bus.base = pci_resource_start(dev->pdev, 1);
906 mem->bus.is_iomem = true; 989 mem->bus.is_iomem = true;
990 }
907 break; 991 break;
908 default: 992 default:
909 return -EINVAL; 993 return -EINVAL;
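With a BAR1 VM present, VRAM is no longer assumed to be linearly visible through BAR1; each buffer gets a window allocated in the BAR1 address space and bus.offset becomes the window's virtual offset. The lifecycle, condensed from the io_mem_reserve/io_mem_free hunks above (error paths trimmed):

	static int bar1_window_sketch(struct nouveau_vm *bar1_vm,
				      struct nouveau_vram *vram,
				      u64 size, u8 page_shift)
	{
		int ret;

		ret = nouveau_vm_get(bar1_vm, size, page_shift, NV_MEM_ACCESS_RW,
				     &vram->bar_vma);
		if (ret)
			return ret;

		nouveau_vm_map(&vram->bar_vma, vram);	/* point BAR1 PTEs at VRAM */

		/* ... CPU accesses go through BAR1 at vram->bar_vma.offset ... */

		nouveau_vm_unmap(&vram->bar_vma);
		nouveau_vm_put(&vram->bar_vma);
		return 0;
	}
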
@@ -914,6 +998,17 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
914static void 998static void
915nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) 999nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
916{ 1000{
1001 struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
1002 struct nouveau_vram *vram = mem->mm_node;
1003
1004 if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
1005 return;
1006
1007 if (!vram->bar_vma.node)
1008 return;
1009
1010 nouveau_vm_unmap(&vram->bar_vma);
1011 nouveau_vm_put(&vram->bar_vma);
917} 1012}
918 1013
919static int 1014static int
@@ -939,7 +1034,23 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
939 nvbo->placement.fpfn = 0; 1034 nvbo->placement.fpfn = 0;
940 nvbo->placement.lpfn = dev_priv->fb_mappable_pages; 1035 nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
941 nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0); 1036 nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0);
942 return ttm_bo_validate(bo, &nvbo->placement, false, true, false); 1037 return nouveau_bo_validate(nvbo, false, true, false);
1038}
1039
1040void
1041nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
1042{
1043 struct nouveau_fence *old_fence;
1044
1045 if (likely(fence))
1046 nouveau_fence_ref(fence);
1047
1048 spin_lock(&nvbo->bo.bdev->fence_lock);
1049 old_fence = nvbo->bo.sync_obj;
1050 nvbo->bo.sync_obj = fence;
1051 spin_unlock(&nvbo->bo.bdev->fence_lock);
1052
1053 nouveau_fence_unref(&old_fence);
943} 1054}
944 1055
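nouveau_bo_fence() above follows the usual publish-then-release ordering: the new fence is referenced before being installed under bdev->fence_lock, and the displaced fence is dropped only after the lock is released, so a concurrent reader never observes a freed object. The same pattern in generic form (types hypothetical):

	#include <linux/kref.h>
	#include <linux/spinlock.h>

	struct obj { struct kref ref; };

	static void obj_release(struct kref *ref) { /* free the object */ }

	static void publish_swap(struct obj **slot, struct obj *new,
				 spinlock_t *lock)
	{
		struct obj *old;

		if (new)
			kref_get(&new->ref);	/* take the ref before publishing */

		spin_lock(lock);
		old = *slot;
		*slot = new;
		spin_unlock(lock);

		if (old)
			kref_put(&old->ref, obj_release);	/* drop outside the lock */
	}
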
945struct ttm_bo_driver nouveau_bo_driver = { 1056struct ttm_bo_driver nouveau_bo_driver = {
@@ -949,11 +1060,11 @@ struct ttm_bo_driver nouveau_bo_driver = {
949 .evict_flags = nouveau_bo_evict_flags, 1060 .evict_flags = nouveau_bo_evict_flags,
950 .move = nouveau_bo_move, 1061 .move = nouveau_bo_move,
951 .verify_access = nouveau_bo_verify_access, 1062 .verify_access = nouveau_bo_verify_access,
952 .sync_obj_signaled = nouveau_fence_signalled, 1063 .sync_obj_signaled = __nouveau_fence_signalled,
953 .sync_obj_wait = nouveau_fence_wait, 1064 .sync_obj_wait = __nouveau_fence_wait,
954 .sync_obj_flush = nouveau_fence_flush, 1065 .sync_obj_flush = __nouveau_fence_flush,
955 .sync_obj_unref = nouveau_fence_unref, 1066 .sync_obj_unref = __nouveau_fence_unref,
956 .sync_obj_ref = nouveau_fence_ref, 1067 .sync_obj_ref = __nouveau_fence_ref,
957 .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify, 1068 .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
958 .io_mem_reserve = &nouveau_ttm_io_mem_reserve, 1069 .io_mem_reserve = &nouveau_ttm_io_mem_reserve,
959 .io_mem_free = &nouveau_ttm_io_mem_free, 1070 .io_mem_free = &nouveau_ttm_io_mem_free,
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 373950e34814..3960d66d7aba 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -38,23 +38,28 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
38 int ret; 38 int ret;
39 39
40 if (dev_priv->card_type >= NV_50) { 40 if (dev_priv->card_type >= NV_50) {
41 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, 41 if (dev_priv->card_type < NV_C0) {
42 dev_priv->vm_end, NV_DMA_ACCESS_RO, 42 ret = nouveau_gpuobj_dma_new(chan,
43 NV_DMA_TARGET_AGP, &pushbuf); 43 NV_CLASS_DMA_IN_MEMORY, 0,
44 (1ULL << 40),
45 NV_MEM_ACCESS_RO,
46 NV_MEM_TARGET_VM,
47 &pushbuf);
48 }
44 chan->pushbuf_base = pb->bo.offset; 49 chan->pushbuf_base = pb->bo.offset;
45 } else 50 } else
46 if (pb->bo.mem.mem_type == TTM_PL_TT) { 51 if (pb->bo.mem.mem_type == TTM_PL_TT) {
47 ret = nouveau_gpuobj_gart_dma_new(chan, 0, 52 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
48 dev_priv->gart_info.aper_size, 53 dev_priv->gart_info.aper_size,
49 NV_DMA_ACCESS_RO, &pushbuf, 54 NV_MEM_ACCESS_RO,
50 NULL); 55 NV_MEM_TARGET_GART, &pushbuf);
51 chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT; 56 chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
52 } else 57 } else
53 if (dev_priv->card_type != NV_04) { 58 if (dev_priv->card_type != NV_04) {
54 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, 59 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
55 dev_priv->fb_available_size, 60 dev_priv->fb_available_size,
56 NV_DMA_ACCESS_RO, 61 NV_MEM_ACCESS_RO,
57 NV_DMA_TARGET_VIDMEM, &pushbuf); 62 NV_MEM_TARGET_VRAM, &pushbuf);
58 chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT; 63 chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
59 } else { 64 } else {
 60 /* NV04 cmdbuf hack, from original ddx.. not sure of its 65
@@ -62,17 +67,16 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
62 * VRAM. 67 * VRAM.
63 */ 68 */
64 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 69 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
65 pci_resource_start(dev->pdev, 70 pci_resource_start(dev->pdev, 1),
66 1),
67 dev_priv->fb_available_size, 71 dev_priv->fb_available_size,
68 NV_DMA_ACCESS_RO, 72 NV_MEM_ACCESS_RO,
69 NV_DMA_TARGET_PCI, &pushbuf); 73 NV_MEM_TARGET_PCI, &pushbuf);
70 chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT; 74 chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
71 } 75 }
72 76
73 nouveau_gpuobj_ref(pushbuf, &chan->pushbuf); 77 nouveau_gpuobj_ref(pushbuf, &chan->pushbuf);
74 nouveau_gpuobj_ref(NULL, &pushbuf); 78 nouveau_gpuobj_ref(NULL, &pushbuf);
75 return 0; 79 return ret;
76} 80}
77 81
78static struct nouveau_bo * 82static struct nouveau_bo *
@@ -100,6 +104,13 @@ nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
100 return NULL; 104 return NULL;
101 } 105 }
102 106
107 ret = nouveau_bo_map(pushbuf);
108 if (ret) {
109 nouveau_bo_unpin(pushbuf);
110 nouveau_bo_ref(NULL, &pushbuf);
111 return NULL;
112 }
113
103 return pushbuf; 114 return pushbuf;
104} 115}
105 116
@@ -107,74 +118,59 @@ nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
107int 118int
108nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, 119nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
109 struct drm_file *file_priv, 120 struct drm_file *file_priv,
110 uint32_t vram_handle, uint32_t tt_handle) 121 uint32_t vram_handle, uint32_t gart_handle)
111{ 122{
112 struct drm_nouveau_private *dev_priv = dev->dev_private; 123 struct drm_nouveau_private *dev_priv = dev->dev_private;
113 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
114 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; 124 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
115 struct nouveau_channel *chan; 125 struct nouveau_channel *chan;
116 int channel, user; 126 unsigned long flags;
117 int ret; 127 int ret;
118 128
119 /* 129 /* allocate and lock channel structure */
120 * Alright, here is the full story 130 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
121 * Nvidia cards have multiple hw fifo contexts (praise them for that, 131 if (!chan)
122 * no complicated crash-prone context switches) 132 return -ENOMEM;
123 * We allocate a new context for each app and let it write to it 133 chan->dev = dev;
124 * directly (woo, full userspace command submission !) 134 chan->file_priv = file_priv;
125 * When there are no more contexts, you lost 135 chan->vram_handle = vram_handle;
126 */ 136 chan->gart_handle = gart_handle;
127 for (channel = 0; channel < pfifo->channels; channel++) { 137
128 if (dev_priv->fifos[channel] == NULL) 138 kref_init(&chan->ref);
139 atomic_set(&chan->users, 1);
140 mutex_init(&chan->mutex);
141 mutex_lock(&chan->mutex);
142
143 /* allocate hw channel id */
144 spin_lock_irqsave(&dev_priv->channels.lock, flags);
145 for (chan->id = 0; chan->id < pfifo->channels; chan->id++) {
146 if (!dev_priv->channels.ptr[chan->id]) {
147 nouveau_channel_ref(chan, &dev_priv->channels.ptr[chan->id]);
129 break; 148 break;
149 }
130 } 150 }
151 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
131 152
132 /* no more fifos. you lost. */ 153 if (chan->id == pfifo->channels) {
133 if (channel == pfifo->channels) 154 mutex_unlock(&chan->mutex);
134 return -EINVAL; 155 kfree(chan);
156 return -ENODEV;
157 }
135 158
136 dev_priv->fifos[channel] = kzalloc(sizeof(struct nouveau_channel), 159 NV_DEBUG(dev, "initialising channel %d\n", chan->id);
137 GFP_KERNEL);
138 if (!dev_priv->fifos[channel])
139 return -ENOMEM;
140 chan = dev_priv->fifos[channel];
141 INIT_LIST_HEAD(&chan->nvsw.vbl_wait); 160 INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
161 INIT_LIST_HEAD(&chan->nvsw.flip);
142 INIT_LIST_HEAD(&chan->fence.pending); 162 INIT_LIST_HEAD(&chan->fence.pending);
143 chan->dev = dev;
144 chan->id = channel;
145 chan->file_priv = file_priv;
146 chan->vram_handle = vram_handle;
147 chan->gart_handle = tt_handle;
148
149 NV_INFO(dev, "Allocating FIFO number %d\n", channel);
150 163
151 /* Allocate DMA push buffer */ 164 /* Allocate DMA push buffer */
152 chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev); 165 chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev);
153 if (!chan->pushbuf_bo) { 166 if (!chan->pushbuf_bo) {
154 ret = -ENOMEM; 167 ret = -ENOMEM;
155 NV_ERROR(dev, "pushbuf %d\n", ret); 168 NV_ERROR(dev, "pushbuf %d\n", ret);
156 nouveau_channel_free(chan); 169 nouveau_channel_put(&chan);
157 return ret; 170 return ret;
158 } 171 }
159 172
160 nouveau_dma_pre_init(chan); 173 nouveau_dma_pre_init(chan);
161
162 /* Locate channel's user control regs */
163 if (dev_priv->card_type < NV_40)
164 user = NV03_USER(channel);
165 else
166 if (dev_priv->card_type < NV_50)
167 user = NV40_USER(channel);
168 else
169 user = NV50_USER(channel);
170
171 chan->user = ioremap(pci_resource_start(dev->pdev, 0) + user,
172 PAGE_SIZE);
173 if (!chan->user) {
174 NV_ERROR(dev, "ioremap of regs failed.\n");
175 nouveau_channel_free(chan);
176 return -ENOMEM;
177 }
178 chan->user_put = 0x40; 174 chan->user_put = 0x40;
179 chan->user_get = 0x44; 175 chan->user_get = 0x44;
180 176
@@ -182,15 +178,15 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
182 ret = nouveau_notifier_init_channel(chan); 178 ret = nouveau_notifier_init_channel(chan);
183 if (ret) { 179 if (ret) {
184 NV_ERROR(dev, "ntfy %d\n", ret); 180 NV_ERROR(dev, "ntfy %d\n", ret);
185 nouveau_channel_free(chan); 181 nouveau_channel_put(&chan);
186 return ret; 182 return ret;
187 } 183 }
188 184
189 /* Setup channel's default objects */ 185 /* Setup channel's default objects */
190 ret = nouveau_gpuobj_channel_init(chan, vram_handle, tt_handle); 186 ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
191 if (ret) { 187 if (ret) {
192 NV_ERROR(dev, "gpuobj %d\n", ret); 188 NV_ERROR(dev, "gpuobj %d\n", ret);
193 nouveau_channel_free(chan); 189 nouveau_channel_put(&chan);
194 return ret; 190 return ret;
195 } 191 }
196 192
@@ -198,24 +194,17 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
198 ret = nouveau_channel_pushbuf_ctxdma_init(chan); 194 ret = nouveau_channel_pushbuf_ctxdma_init(chan);
199 if (ret) { 195 if (ret) {
200 NV_ERROR(dev, "pbctxdma %d\n", ret); 196 NV_ERROR(dev, "pbctxdma %d\n", ret);
201 nouveau_channel_free(chan); 197 nouveau_channel_put(&chan);
202 return ret; 198 return ret;
203 } 199 }
204 200
205 /* disable the fifo caches */ 201 /* disable the fifo caches */
206 pfifo->reassign(dev, false); 202 pfifo->reassign(dev, false);
207 203
208 /* Create a graphics context for new channel */
209 ret = pgraph->create_context(chan);
210 if (ret) {
211 nouveau_channel_free(chan);
212 return ret;
213 }
214
 215 /* Construct initial RAMFC for new channel */ 204
216 ret = pfifo->create_context(chan); 205 ret = pfifo->create_context(chan);
217 if (ret) { 206 if (ret) {
218 nouveau_channel_free(chan); 207 nouveau_channel_put(&chan);
219 return ret; 208 return ret;
220 } 209 }
221 210
@@ -225,83 +214,111 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
225 if (!ret) 214 if (!ret)
226 ret = nouveau_fence_channel_init(chan); 215 ret = nouveau_fence_channel_init(chan);
227 if (ret) { 216 if (ret) {
228 nouveau_channel_free(chan); 217 nouveau_channel_put(&chan);
229 return ret; 218 return ret;
230 } 219 }
231 220
232 nouveau_debugfs_channel_init(chan); 221 nouveau_debugfs_channel_init(chan);
233 222
234 NV_INFO(dev, "%s: initialised FIFO %d\n", __func__, channel); 223 NV_DEBUG(dev, "channel %d initialised\n", chan->id);
235 *chan_ret = chan; 224 *chan_ret = chan;
236 return 0; 225 return 0;
237} 226}
238 227
239/* stops a fifo */ 228struct nouveau_channel *
229nouveau_channel_get_unlocked(struct nouveau_channel *ref)
230{
231 struct nouveau_channel *chan = NULL;
232
233 if (likely(ref && atomic_inc_not_zero(&ref->users)))
234 nouveau_channel_ref(ref, &chan);
235
236 return chan;
237}
238
239struct nouveau_channel *
240nouveau_channel_get(struct drm_device *dev, struct drm_file *file_priv, int id)
241{
242 struct drm_nouveau_private *dev_priv = dev->dev_private;
243 struct nouveau_channel *chan;
244 unsigned long flags;
245
246 if (unlikely(id < 0 || id >= NOUVEAU_MAX_CHANNEL_NR))
247 return ERR_PTR(-EINVAL);
248
249 spin_lock_irqsave(&dev_priv->channels.lock, flags);
250 chan = nouveau_channel_get_unlocked(dev_priv->channels.ptr[id]);
251 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
252
253 if (unlikely(!chan))
254 return ERR_PTR(-EINVAL);
255
256 if (unlikely(file_priv && chan->file_priv != file_priv)) {
257 nouveau_channel_put_unlocked(&chan);
258 return ERR_PTR(-EINVAL);
259 }
260
261 mutex_lock(&chan->mutex);
262 return chan;
263}
264
240void 265void
241nouveau_channel_free(struct nouveau_channel *chan) 266nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
242{ 267{
268 struct nouveau_channel *chan = *pchan;
243 struct drm_device *dev = chan->dev; 269 struct drm_device *dev = chan->dev;
244 struct drm_nouveau_private *dev_priv = dev->dev_private; 270 struct drm_nouveau_private *dev_priv = dev->dev_private;
245 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
246 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; 271 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
272 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
273 struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt;
247 unsigned long flags; 274 unsigned long flags;
248 int ret;
249 275
250 NV_INFO(dev, "%s: freeing fifo %d\n", __func__, chan->id); 276 /* decrement the refcount, and we're done if there's still refs */
277 if (likely(!atomic_dec_and_test(&chan->users))) {
278 nouveau_channel_ref(NULL, pchan);
279 return;
280 }
251 281
 282 /* no one wants the channel anymore */
283 NV_DEBUG(dev, "freeing channel %d\n", chan->id);
252 nouveau_debugfs_channel_fini(chan); 284 nouveau_debugfs_channel_fini(chan);
253 285
254 /* Give outstanding push buffers a chance to complete */ 286 /* give it chance to idle */
255 nouveau_fence_update(chan); 287 nouveau_channel_idle(chan);
256 if (chan->fence.sequence != chan->fence.sequence_ack) {
257 struct nouveau_fence *fence = NULL;
258 288
259 ret = nouveau_fence_new(chan, &fence, true); 289 /* ensure all outstanding fences are signaled. they should be if the
260 if (ret == 0) {
261 ret = nouveau_fence_wait(fence, NULL, false, false);
262 nouveau_fence_unref((void *)&fence);
263 }
264
265 if (ret)
266 NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
267 }
268
269 /* Ensure all outstanding fences are signaled. They should be if the
270 * above attempts at idling were OK, but if we failed this'll tell TTM 290 * above attempts at idling were OK, but if we failed this'll tell TTM
271 * we're done with the buffers. 291 * we're done with the buffers.
272 */ 292 */
273 nouveau_fence_channel_fini(chan); 293 nouveau_fence_channel_fini(chan);
274 294
275 /* This will prevent pfifo from switching channels. */ 295 /* boot it off the hardware */
276 pfifo->reassign(dev, false); 296 pfifo->reassign(dev, false);
277 297
278 /* We want to give pgraph a chance to idle and get rid of all potential 298 /* We want to give pgraph a chance to idle and get rid of all
279 * errors. We need to do this before the lock, otherwise the irq handler 299 * potential errors. We need to do this without the context
280 * is unable to process them. 300 * switch lock held, otherwise the irq handler is unable to
301 * process them.
281 */ 302 */
282 if (pgraph->channel(dev) == chan) 303 if (pgraph->channel(dev) == chan)
283 nouveau_wait_for_idle(dev); 304 nouveau_wait_for_idle(dev);
284 305
285 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 306 /* destroy the engine specific contexts */
286
287 pgraph->fifo_access(dev, false);
288 if (pgraph->channel(dev) == chan)
289 pgraph->unload_context(dev);
290 pgraph->destroy_context(chan);
291 pgraph->fifo_access(dev, true);
292
293 if (pfifo->channel_id(dev) == chan->id) {
294 pfifo->disable(dev);
295 pfifo->unload_context(dev);
296 pfifo->enable(dev);
297 }
298 pfifo->destroy_context(chan); 307 pfifo->destroy_context(chan);
308 pgraph->destroy_context(chan);
309 if (pcrypt->destroy_context)
310 pcrypt->destroy_context(chan);
299 311
300 pfifo->reassign(dev, true); 312 pfifo->reassign(dev, true);
301 313
302 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 314 /* aside from its resources, the channel should now be dead,
315 * remove it from the channel list
316 */
317 spin_lock_irqsave(&dev_priv->channels.lock, flags);
318 nouveau_channel_ref(NULL, &dev_priv->channels.ptr[chan->id]);
319 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
303 320
304 /* Release the channel's resources */ 321 /* destroy any resources the channel owned */
305 nouveau_gpuobj_ref(NULL, &chan->pushbuf); 322 nouveau_gpuobj_ref(NULL, &chan->pushbuf);
306 if (chan->pushbuf_bo) { 323 if (chan->pushbuf_bo) {
307 nouveau_bo_unmap(chan->pushbuf_bo); 324 nouveau_bo_unmap(chan->pushbuf_bo);
@@ -310,44 +327,80 @@ nouveau_channel_free(struct nouveau_channel *chan)
310 } 327 }
311 nouveau_gpuobj_channel_takedown(chan); 328 nouveau_gpuobj_channel_takedown(chan);
312 nouveau_notifier_takedown_channel(chan); 329 nouveau_notifier_takedown_channel(chan);
313 if (chan->user)
314 iounmap(chan->user);
315 330
316 dev_priv->fifos[chan->id] = NULL; 331 nouveau_channel_ref(NULL, pchan);
332}
333
334void
335nouveau_channel_put(struct nouveau_channel **pchan)
336{
337 mutex_unlock(&(*pchan)->mutex);
338 nouveau_channel_put_unlocked(pchan);
339}
340
341static void
342nouveau_channel_del(struct kref *ref)
343{
344 struct nouveau_channel *chan =
345 container_of(ref, struct nouveau_channel, ref);
346
317 kfree(chan); 347 kfree(chan);
318} 348}
319 349
350void
351nouveau_channel_ref(struct nouveau_channel *chan,
352 struct nouveau_channel **pchan)
353{
354 if (chan)
355 kref_get(&chan->ref);
356
357 if (*pchan)
358 kref_put(&(*pchan)->ref, nouveau_channel_del);
359
360 *pchan = chan;
361}
362
363void
364nouveau_channel_idle(struct nouveau_channel *chan)
365{
366 struct drm_device *dev = chan->dev;
367 struct nouveau_fence *fence = NULL;
368 int ret;
369
370 nouveau_fence_update(chan);
371
372 if (chan->fence.sequence != chan->fence.sequence_ack) {
373 ret = nouveau_fence_new(chan, &fence, true);
374 if (!ret) {
375 ret = nouveau_fence_wait(fence, false, false);
376 nouveau_fence_unref(&fence);
377 }
378
379 if (ret)
380 NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
381 }
382}
383
320/* cleans up all the fifos from file_priv */ 384/* cleans up all the fifos from file_priv */
321void 385void
322nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv) 386nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
323{ 387{
324 struct drm_nouveau_private *dev_priv = dev->dev_private; 388 struct drm_nouveau_private *dev_priv = dev->dev_private;
325 struct nouveau_engine *engine = &dev_priv->engine; 389 struct nouveau_engine *engine = &dev_priv->engine;
390 struct nouveau_channel *chan;
326 int i; 391 int i;
327 392
328 NV_DEBUG(dev, "clearing FIFO enables from file_priv\n"); 393 NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
329 for (i = 0; i < engine->fifo.channels; i++) { 394 for (i = 0; i < engine->fifo.channels; i++) {
330 struct nouveau_channel *chan = dev_priv->fifos[i]; 395 chan = nouveau_channel_get(dev, file_priv, i);
396 if (IS_ERR(chan))
397 continue;
331 398
332 if (chan && chan->file_priv == file_priv) 399 atomic_dec(&chan->users);
333 nouveau_channel_free(chan); 400 nouveau_channel_put(&chan);
334 } 401 }
335} 402}
336 403
337int
338nouveau_channel_owner(struct drm_device *dev, struct drm_file *file_priv,
339 int channel)
340{
341 struct drm_nouveau_private *dev_priv = dev->dev_private;
342 struct nouveau_engine *engine = &dev_priv->engine;
343
344 if (channel >= engine->fifo.channels)
345 return 0;
346 if (dev_priv->fifos[channel] == NULL)
347 return 0;
348
349 return (dev_priv->fifos[channel]->file_priv == file_priv);
350}
351 404
352/*********************************** 405/***********************************
353 * ioctls wrapping the functions 406 * ioctls wrapping the functions
@@ -383,36 +436,44 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
383 else 436 else
384 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART; 437 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
385 438
386 init->subchan[0].handle = NvM2MF; 439 if (dev_priv->card_type < NV_C0) {
387 if (dev_priv->card_type < NV_50) 440 init->subchan[0].handle = NvM2MF;
388 init->subchan[0].grclass = 0x0039; 441 if (dev_priv->card_type < NV_50)
389 else 442 init->subchan[0].grclass = 0x0039;
390 init->subchan[0].grclass = 0x5039; 443 else
391 init->subchan[1].handle = NvSw; 444 init->subchan[0].grclass = 0x5039;
392 init->subchan[1].grclass = NV_SW; 445 init->subchan[1].handle = NvSw;
393 init->nr_subchan = 2; 446 init->subchan[1].grclass = NV_SW;
447 init->nr_subchan = 2;
448 } else {
449 init->subchan[0].handle = 0x9039;
450 init->subchan[0].grclass = 0x9039;
451 init->nr_subchan = 1;
452 }
394 453
395 /* Named memory object area */ 454 /* Named memory object area */
396 ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem, 455 ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,
397 &init->notifier_handle); 456 &init->notifier_handle);
398 if (ret) {
399 nouveau_channel_free(chan);
400 return ret;
401 }
402 457
403 return 0; 458 if (ret == 0)
459 atomic_inc(&chan->users); /* userspace reference */
460 nouveau_channel_put(&chan);
461 return ret;
404} 462}
405 463
406static int 464static int
407nouveau_ioctl_fifo_free(struct drm_device *dev, void *data, 465nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
408 struct drm_file *file_priv) 466 struct drm_file *file_priv)
409{ 467{
410 struct drm_nouveau_channel_free *cfree = data; 468 struct drm_nouveau_channel_free *req = data;
411 struct nouveau_channel *chan; 469 struct nouveau_channel *chan;
412 470
413 NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(cfree->channel, file_priv, chan); 471 chan = nouveau_channel_get(dev, file_priv, req->channel);
472 if (IS_ERR(chan))
473 return PTR_ERR(chan);
414 474
415 nouveau_channel_free(chan); 475 atomic_dec(&chan->users);
476 nouveau_channel_put(&chan);
416 return 0; 477 return 0;
417} 478}
418 479
@@ -421,18 +482,18 @@ nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
421 ***********************************/ 482 ***********************************/
422 483
423struct drm_ioctl_desc nouveau_ioctls[] = { 484struct drm_ioctl_desc nouveau_ioctls[] = {
424 DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH), 485 DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH),
425 DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 486 DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
426 DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH), 487 DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_UNLOCKED|DRM_AUTH),
427 DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH), 488 DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_UNLOCKED|DRM_AUTH),
428 DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH), 489 DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH),
429 DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH), 490 DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_UNLOCKED|DRM_AUTH),
430 DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH), 491 DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH),
431 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH), 492 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH),
432 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH), 493 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH),
433 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH), 494 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
434 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH), 495 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
435 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH), 496 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH),
436}; 497};
437 498
438int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls); 499int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);
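The channel rework above replaces nouveau_channel_free() with reference counting: nouveau_channel_get() returns the channel with chan->mutex held and a user reference taken, and nouveau_channel_put() releases both. A caller-side sketch of the new contract:

	static int with_channel_sketch(struct drm_device *dev,
				       struct drm_file *file_priv, int id)
	{
		struct nouveau_channel *chan;

		chan = nouveau_channel_get(dev, file_priv, id);
		if (IS_ERR(chan))
			return PTR_ERR(chan);

		/* chan->mutex is held here; safe to emit commands */

		nouveau_channel_put(&chan);	/* unlocks mutex, drops reference */
		return 0;
	}
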
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 52c356e9a3d1..a21e00076839 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -37,6 +37,8 @@
37#include "nouveau_connector.h" 37#include "nouveau_connector.h"
38#include "nouveau_hw.h" 38#include "nouveau_hw.h"
39 39
40static void nouveau_connector_hotplug(void *, int);
41
40static struct nouveau_encoder * 42static struct nouveau_encoder *
41find_encoder_by_type(struct drm_connector *connector, int type) 43find_encoder_by_type(struct drm_connector *connector, int type)
42{ 44{
@@ -94,22 +96,30 @@ nouveau_connector_bpp(struct drm_connector *connector)
94} 96}
95 97
96static void 98static void
97nouveau_connector_destroy(struct drm_connector *drm_connector) 99nouveau_connector_destroy(struct drm_connector *connector)
98{ 100{
99 struct nouveau_connector *nv_connector = 101 struct nouveau_connector *nv_connector = nouveau_connector(connector);
100 nouveau_connector(drm_connector); 102 struct drm_nouveau_private *dev_priv;
103 struct nouveau_gpio_engine *pgpio;
101 struct drm_device *dev; 104 struct drm_device *dev;
102 105
103 if (!nv_connector) 106 if (!nv_connector)
104 return; 107 return;
105 108
106 dev = nv_connector->base.dev; 109 dev = nv_connector->base.dev;
110 dev_priv = dev->dev_private;
107 NV_DEBUG_KMS(dev, "\n"); 111 NV_DEBUG_KMS(dev, "\n");
108 112
113 pgpio = &dev_priv->engine.gpio;
114 if (pgpio->irq_unregister) {
115 pgpio->irq_unregister(dev, nv_connector->dcb->gpio_tag,
116 nouveau_connector_hotplug, connector);
117 }
118
109 kfree(nv_connector->edid); 119 kfree(nv_connector->edid);
110 drm_sysfs_connector_remove(drm_connector); 120 drm_sysfs_connector_remove(connector);
111 drm_connector_cleanup(drm_connector); 121 drm_connector_cleanup(connector);
112 kfree(drm_connector); 122 kfree(connector);
113} 123}
114 124
115static struct nouveau_i2c_chan * 125static struct nouveau_i2c_chan *
@@ -760,6 +770,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
760{ 770{
761 const struct drm_connector_funcs *funcs = &nouveau_connector_funcs; 771 const struct drm_connector_funcs *funcs = &nouveau_connector_funcs;
762 struct drm_nouveau_private *dev_priv = dev->dev_private; 772 struct drm_nouveau_private *dev_priv = dev->dev_private;
773 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
763 struct nouveau_connector *nv_connector = NULL; 774 struct nouveau_connector *nv_connector = NULL;
764 struct dcb_connector_table_entry *dcb = NULL; 775 struct dcb_connector_table_entry *dcb = NULL;
765 struct drm_connector *connector; 776 struct drm_connector *connector;
@@ -876,6 +887,11 @@ nouveau_connector_create(struct drm_device *dev, int index)
876 break; 887 break;
877 } 888 }
878 889
890 if (pgpio->irq_register) {
891 pgpio->irq_register(dev, nv_connector->dcb->gpio_tag,
892 nouveau_connector_hotplug, connector);
893 }
894
879 drm_sysfs_connector_add(connector); 895 drm_sysfs_connector_add(connector);
880 dcb->drm = connector; 896 dcb->drm = connector;
881 return dcb->drm; 897 return dcb->drm;
@@ -886,3 +902,29 @@ fail:
886 return ERR_PTR(ret); 902 return ERR_PTR(ret);
887 903
888} 904}
905
906static void
907nouveau_connector_hotplug(void *data, int plugged)
908{
909 struct drm_connector *connector = data;
910 struct drm_device *dev = connector->dev;
911
912 NV_INFO(dev, "%splugged %s\n", plugged ? "" : "un",
913 drm_get_connector_name(connector));
914
915 if (connector->encoder && connector->encoder->crtc &&
916 connector->encoder->crtc->enabled) {
917 struct nouveau_encoder *nv_encoder = nouveau_encoder(connector->encoder);
918 struct drm_encoder_helper_funcs *helper =
919 connector->encoder->helper_private;
920
921 if (nv_encoder->dcb->type == OUTPUT_DP) {
922 if (plugged)
923 helper->dpms(connector->encoder, DRM_MODE_DPMS_ON);
924 else
925 helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF);
926 }
927 }
928
929 drm_helper_hpd_irq_event(dev);
930}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 2e11fd65b4dd..505c6bfb4d75 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -29,6 +29,9 @@
29#include "nouveau_drv.h" 29#include "nouveau_drv.h"
30#include "nouveau_fb.h" 30#include "nouveau_fb.h"
31#include "nouveau_fbcon.h" 31#include "nouveau_fbcon.h"
32#include "nouveau_hw.h"
33#include "nouveau_crtc.h"
34#include "nouveau_dma.h"
32 35
33static void 36static void
34nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb) 37nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
@@ -104,3 +107,207 @@ const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
104 .output_poll_changed = nouveau_fbcon_output_poll_changed, 107 .output_poll_changed = nouveau_fbcon_output_poll_changed,
105}; 108};
106 109
110int
111nouveau_vblank_enable(struct drm_device *dev, int crtc)
112{
113 struct drm_nouveau_private *dev_priv = dev->dev_private;
114
115 if (dev_priv->card_type >= NV_50)
116 nv_mask(dev, NV50_PDISPLAY_INTR_EN_1, 0,
117 NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc));
118 else
119 NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0,
120 NV_PCRTC_INTR_0_VBLANK);
121
122 return 0;
123}
124
125void
126nouveau_vblank_disable(struct drm_device *dev, int crtc)
127{
128 struct drm_nouveau_private *dev_priv = dev->dev_private;
129
130 if (dev_priv->card_type >= NV_50)
131 nv_mask(dev, NV50_PDISPLAY_INTR_EN_1,
132 NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc), 0);
133 else
134 NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0, 0);
135}
136
137static int
138nouveau_page_flip_reserve(struct nouveau_bo *old_bo,
139 struct nouveau_bo *new_bo)
140{
141 int ret;
142
143 ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM);
144 if (ret)
145 return ret;
146
147 ret = ttm_bo_reserve(&new_bo->bo, false, false, false, 0);
148 if (ret)
149 goto fail;
150
151 ret = ttm_bo_reserve(&old_bo->bo, false, false, false, 0);
152 if (ret)
153 goto fail_unreserve;
154
155 return 0;
156
157fail_unreserve:
158 ttm_bo_unreserve(&new_bo->bo);
159fail:
160 nouveau_bo_unpin(new_bo);
161 return ret;
162}
163
164static void
165nouveau_page_flip_unreserve(struct nouveau_bo *old_bo,
166 struct nouveau_bo *new_bo,
167 struct nouveau_fence *fence)
168{
169 nouveau_bo_fence(new_bo, fence);
170 ttm_bo_unreserve(&new_bo->bo);
171
172 nouveau_bo_fence(old_bo, fence);
173 ttm_bo_unreserve(&old_bo->bo);
174
175 nouveau_bo_unpin(old_bo);
176}
177
178static int
179nouveau_page_flip_emit(struct nouveau_channel *chan,
180 struct nouveau_bo *old_bo,
181 struct nouveau_bo *new_bo,
182 struct nouveau_page_flip_state *s,
183 struct nouveau_fence **pfence)
184{
185 struct drm_device *dev = chan->dev;
186 unsigned long flags;
187 int ret;
188
189 /* Queue it to the pending list */
190 spin_lock_irqsave(&dev->event_lock, flags);
191 list_add_tail(&s->head, &chan->nvsw.flip);
192 spin_unlock_irqrestore(&dev->event_lock, flags);
193
194 /* Synchronize with the old framebuffer */
195 ret = nouveau_fence_sync(old_bo->bo.sync_obj, chan);
196 if (ret)
197 goto fail;
198
199 /* Emit the pageflip */
200 ret = RING_SPACE(chan, 2);
201 if (ret)
202 goto fail;
203
204 BEGIN_RING(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
205 OUT_RING(chan, 0);
206 FIRE_RING(chan);
207
208 ret = nouveau_fence_new(chan, pfence, true);
209 if (ret)
210 goto fail;
211
212 return 0;
213fail:
214 spin_lock_irqsave(&dev->event_lock, flags);
215 list_del(&s->head);
216 spin_unlock_irqrestore(&dev->event_lock, flags);
217 return ret;
218}
219
220int
221nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
222 struct drm_pending_vblank_event *event)
223{
224 struct drm_device *dev = crtc->dev;
225 struct drm_nouveau_private *dev_priv = dev->dev_private;
226 struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->fb)->nvbo;
227 struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo;
228 struct nouveau_page_flip_state *s;
229 struct nouveau_channel *chan;
230 struct nouveau_fence *fence;
231 int ret;
232
233 if (dev_priv->engine.graph.accel_blocked)
234 return -ENODEV;
235
236 s = kzalloc(sizeof(*s), GFP_KERNEL);
237 if (!s)
238 return -ENOMEM;
239
240 /* Don't let the buffers go away while we flip */
241 ret = nouveau_page_flip_reserve(old_bo, new_bo);
242 if (ret)
243 goto fail_free;
244
245 /* Initialize a page flip struct */
246 *s = (struct nouveau_page_flip_state)
247 { { }, s->event, nouveau_crtc(crtc)->index,
248 fb->bits_per_pixel, fb->pitch, crtc->x, crtc->y,
249 new_bo->bo.offset };
250
251 /* Choose the channel the flip will be handled in */
252 chan = nouveau_fence_channel(new_bo->bo.sync_obj);
253 if (!chan)
254 chan = nouveau_channel_get_unlocked(dev_priv->channel);
255 mutex_lock(&chan->mutex);
256
257 /* Emit a page flip */
258 ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
259 nouveau_channel_put(&chan);
260 if (ret)
261 goto fail_unreserve;
262
263 /* Update the crtc struct and cleanup */
264 crtc->fb = fb;
265
266 nouveau_page_flip_unreserve(old_bo, new_bo, fence);
267 nouveau_fence_unref(&fence);
268 return 0;
269
270fail_unreserve:
271 nouveau_page_flip_unreserve(old_bo, new_bo, NULL);
272fail_free:
273 kfree(s);
274 return ret;
275}
276
277int
278nouveau_finish_page_flip(struct nouveau_channel *chan,
279 struct nouveau_page_flip_state *ps)
280{
281 struct drm_device *dev = chan->dev;
282 struct nouveau_page_flip_state *s;
283 unsigned long flags;
284
285 spin_lock_irqsave(&dev->event_lock, flags);
286
287 if (list_empty(&chan->nvsw.flip)) {
288 NV_ERROR(dev, "Unexpected pageflip in channel %d.\n", chan->id);
289 spin_unlock_irqrestore(&dev->event_lock, flags);
290 return -EINVAL;
291 }
292
293 s = list_first_entry(&chan->nvsw.flip,
294 struct nouveau_page_flip_state, head);
295 if (s->event) {
296 struct drm_pending_vblank_event *e = s->event;
297 struct timeval now;
298
299 do_gettimeofday(&now);
300 e->event.sequence = 0;
301 e->event.tv_sec = now.tv_sec;
302 e->event.tv_usec = now.tv_usec;
303 list_add_tail(&e->base.link, &e->base.file_priv->event_list);
304 wake_up_interruptible(&e->base.file_priv->event_wait);
305 }
306
307 list_del(&s->head);
308 *ps = *s;
309 kfree(s);
310
311 spin_unlock_irqrestore(&dev->event_lock, flags);
312 return 0;
313}
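The flip path above queues a nouveau_page_flip_state on the channel, emits an NV_SW_PAGE_FLIP software method, and relies on the interrupt path to call nouveau_finish_page_flip() once that method retires. A hypothetical software-method handler showing the intended wiring (the real hook lives in the per-generation graph code, outside this diff, and the CRTC-base arithmetic here is an assumption):

	static int sw_mthd_page_flip_sketch(struct nouveau_channel *chan)
	{
		struct nouveau_page_flip_state state;

		if (nouveau_finish_page_flip(chan, &state))
			return -EINVAL;

		/* scan out the new framebuffer (hypothetical helper) */
		nv_set_crtc_base(chan->dev, state.crtc,
				 state.offset + state.y * state.pitch +
				 state.x * state.bpp / 8);
		return 0;
	}
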
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 82581e600dcd..65699bfaaaea 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -36,7 +36,7 @@ nouveau_dma_pre_init(struct nouveau_channel *chan)
36 struct drm_nouveau_private *dev_priv = chan->dev->dev_private; 36 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
37 struct nouveau_bo *pushbuf = chan->pushbuf_bo; 37 struct nouveau_bo *pushbuf = chan->pushbuf_bo;
38 38
39 if (dev_priv->card_type == NV_50) { 39 if (dev_priv->card_type >= NV_50) {
40 const int ib_size = pushbuf->bo.mem.size / 2; 40 const int ib_size = pushbuf->bo.mem.size / 2;
41 41
42 chan->dma.ib_base = (pushbuf->bo.mem.size - ib_size) >> 2; 42 chan->dma.ib_base = (pushbuf->bo.mem.size - ib_size) >> 2;
@@ -59,17 +59,26 @@ nouveau_dma_init(struct nouveau_channel *chan)
59{ 59{
60 struct drm_device *dev = chan->dev; 60 struct drm_device *dev = chan->dev;
61 struct drm_nouveau_private *dev_priv = dev->dev_private; 61 struct drm_nouveau_private *dev_priv = dev->dev_private;
62 struct nouveau_gpuobj *obj = NULL;
63 int ret, i; 62 int ret, i;
64 63
65 /* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */ 64 if (dev_priv->card_type >= NV_C0) {
66 ret = nouveau_gpuobj_gr_new(chan, dev_priv->card_type < NV_50 ? 65 ret = nouveau_gpuobj_gr_new(chan, 0x9039, 0x9039);
67 0x0039 : 0x5039, &obj); 66 if (ret)
68 if (ret) 67 return ret;
69 return ret; 68
69 ret = RING_SPACE(chan, 2);
70 if (ret)
71 return ret;
72
73 BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0000, 1);
74 OUT_RING (chan, 0x00009039);
75 FIRE_RING (chan);
76 return 0;
77 }
70 78
71 ret = nouveau_ramht_insert(chan, NvM2MF, obj); 79 /* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
72 nouveau_gpuobj_ref(NULL, &obj); 80 ret = nouveau_gpuobj_gr_new(chan, NvM2MF, dev_priv->card_type < NV_50 ?
81 0x0039 : 0x5039);
73 if (ret) 82 if (ret)
74 return ret; 83 return ret;
75 84
@@ -78,11 +87,6 @@ nouveau_dma_init(struct nouveau_channel *chan)
78 if (ret) 87 if (ret)
79 return ret; 88 return ret;
80 89
81 /* Map push buffer */
82 ret = nouveau_bo_map(chan->pushbuf_bo);
83 if (ret)
84 return ret;
85
86 /* Insert NOPS for NOUVEAU_DMA_SKIPS */ 90 /* Insert NOPS for NOUVEAU_DMA_SKIPS */
87 ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS); 91 ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
88 if (ret) 92 if (ret)
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index d578c21d3c8d..c36f1763feaa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -77,7 +77,8 @@ enum {
77 /* G80+ display objects */ 77 /* G80+ display objects */
78 NvEvoVRAM = 0x01000000, 78 NvEvoVRAM = 0x01000000,
79 NvEvoFB16 = 0x01000001, 79 NvEvoFB16 = 0x01000001,
80 NvEvoFB32 = 0x01000002 80 NvEvoFB32 = 0x01000002,
81 NvEvoVRAM_LP = 0x01000003
81}; 82};
82 83
83#define NV_MEMORY_TO_MEMORY_FORMAT 0x00000039 84#define NV_MEMORY_TO_MEMORY_FORMAT 0x00000039
@@ -125,6 +126,12 @@ extern void
125OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords); 126OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords);
126 127
127static inline void 128static inline void
129BEGIN_NVC0(struct nouveau_channel *chan, int op, int subc, int mthd, int size)
130{
131 OUT_RING(chan, (op << 28) | (size << 16) | (subc << 13) | (mthd >> 2));
132}
133
134static inline void
128BEGIN_RING(struct nouveau_channel *chan, int subc, int mthd, int size) 135BEGIN_RING(struct nouveau_channel *chan, int subc, int mthd, int size)
129{ 136{
130 OUT_RING(chan, (subc << 13) | (size << 18) | mthd); 137 OUT_RING(chan, (subc << 13) | (size << 18) | mthd);
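BEGIN_NVC0 above reflects Fermi's new pushbuf header layout: the method address is dword-granular and stored shifted right by two, the size field moves to bits 16-27, and an opcode occupies the top nibble; pre-Fermi headers keep the byte-granular method at bits 0-12 with size at bit 18. Decoding helpers derived directly from the two macros (a sketch):

	static inline u32 nvc0_hdr_mthd(u32 hdr) { return (hdr & 0x1fff) << 2; }
	static inline u32 nvc0_hdr_subc(u32 hdr) { return (hdr >> 13) & 0x7; }
	static inline u32 nvc0_hdr_size(u32 hdr) { return (hdr >> 16) & 0xfff; }
	static inline u32 nvc0_hdr_op(u32 hdr)   { return hdr >> 28; }
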
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 4562f309ae3d..38d599554bce 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -279,7 +279,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder)
279 struct bit_displayport_encoder_table *dpe; 279 struct bit_displayport_encoder_table *dpe;
280 int dpe_headerlen; 280 int dpe_headerlen;
281 uint8_t config[4], status[3]; 281 uint8_t config[4], status[3];
282 bool cr_done, cr_max_vs, eq_done; 282 bool cr_done, cr_max_vs, eq_done, hpd_state;
283 int ret = 0, i, tries, voltage; 283 int ret = 0, i, tries, voltage;
284 284
285 NV_DEBUG_KMS(dev, "link training!!\n"); 285 NV_DEBUG_KMS(dev, "link training!!\n");
@@ -297,7 +297,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder)
297 /* disable hotplug detect, this flips around on some panels during 297 /* disable hotplug detect, this flips around on some panels during
298 * link training. 298 * link training.
299 */ 299 */
300 pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, false); 300 hpd_state = pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, false);
301 301
302 if (dpe->script0) { 302 if (dpe->script0) {
303 NV_DEBUG_KMS(dev, "SOR-%d: running DP script 0\n", nv_encoder->or); 303 NV_DEBUG_KMS(dev, "SOR-%d: running DP script 0\n", nv_encoder->or);
@@ -439,7 +439,7 @@ stop:
439 } 439 }
440 440
441 /* re-enable hotplug detect */ 441 /* re-enable hotplug detect */
442 pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, true); 442 pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, hpd_state);
443 443
444 return eq_done; 444 return eq_done;
445} 445}
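The nouveau_dp.c change above turns irq_enable() into a save/restore: it now returns the previous hotplug-detect state, and link training restores that state rather than unconditionally re-enabling HPD. The pattern, spelled out as a sketch:

	static bool train_with_hpd_masked_sketch(struct drm_device *dev,
						 struct nouveau_gpio_engine *pgpio,
						 int gpio_tag)
	{
		/* irq_enable() now returns the previous state */
		bool hpd_state = pgpio->irq_enable(dev, gpio_tag, false);

		/* ... link training; HPD flaps on some panels here ... */

		pgpio->irq_enable(dev, gpio_tag, hpd_state);	/* restore, not force-on */
		return hpd_state;
	}
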
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 90875494a65a..13bb672a16f4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -115,6 +115,10 @@ MODULE_PARM_DESC(perflvl_wr, "Allow perflvl changes (warning: dangerous!)\n");
115int nouveau_perflvl_wr; 115int nouveau_perflvl_wr;
116module_param_named(perflvl_wr, nouveau_perflvl_wr, int, 0400); 116module_param_named(perflvl_wr, nouveau_perflvl_wr, int, 0400);
117 117
118MODULE_PARM_DESC(msi, "Enable MSI (default: off)\n");
119int nouveau_msi;
120module_param_named(msi, nouveau_msi, int, 0400);
121
118int nouveau_fbpercrtc; 122int nouveau_fbpercrtc;
119#if 0 123#if 0
120module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400); 124module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400);
@@ -167,6 +171,9 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
167 if (pm_state.event == PM_EVENT_PRETHAW) 171 if (pm_state.event == PM_EVENT_PRETHAW)
168 return 0; 172 return 0;
169 173
174 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
175 return 0;
176
170 NV_INFO(dev, "Disabling fbcon acceleration...\n"); 177 NV_INFO(dev, "Disabling fbcon acceleration...\n");
171 nouveau_fbcon_save_disable_accel(dev); 178 nouveau_fbcon_save_disable_accel(dev);
172 179
@@ -193,23 +200,10 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
193 200
194 NV_INFO(dev, "Idling channels...\n"); 201 NV_INFO(dev, "Idling channels...\n");
195 for (i = 0; i < pfifo->channels; i++) { 202 for (i = 0; i < pfifo->channels; i++) {
196 struct nouveau_fence *fence = NULL; 203 chan = dev_priv->channels.ptr[i];
197
198 chan = dev_priv->fifos[i];
199 if (!chan || (dev_priv->card_type >= NV_50 &&
200 chan == dev_priv->fifos[0]))
201 continue;
202
203 ret = nouveau_fence_new(chan, &fence, true);
204 if (ret == 0) {
205 ret = nouveau_fence_wait(fence, NULL, false, false);
206 nouveau_fence_unref((void *)&fence);
207 }
208 204
209 if (ret) { 205 if (chan && chan->pushbuf_bo)
210 NV_ERROR(dev, "Failed to idle channel %d for suspend\n", 206 nouveau_channel_idle(chan);
211 chan->id);
212 }
213 } 207 }
214 208
215 pgraph->fifo_access(dev, false); 209 pgraph->fifo_access(dev, false);
@@ -219,17 +213,17 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
219 pfifo->unload_context(dev); 213 pfifo->unload_context(dev);
220 pgraph->unload_context(dev); 214 pgraph->unload_context(dev);
221 215
222 NV_INFO(dev, "Suspending GPU objects...\n"); 216 ret = pinstmem->suspend(dev);
223 ret = nouveau_gpuobj_suspend(dev);
224 if (ret) { 217 if (ret) {
225 NV_ERROR(dev, "... failed: %d\n", ret); 218 NV_ERROR(dev, "... failed: %d\n", ret);
226 goto out_abort; 219 goto out_abort;
227 } 220 }
228 221
229 ret = pinstmem->suspend(dev); 222 NV_INFO(dev, "Suspending GPU objects...\n");
223 ret = nouveau_gpuobj_suspend(dev);
230 if (ret) { 224 if (ret) {
231 NV_ERROR(dev, "... failed: %d\n", ret); 225 NV_ERROR(dev, "... failed: %d\n", ret);
232 nouveau_gpuobj_suspend_cleanup(dev); 226 pinstmem->resume(dev);
233 goto out_abort; 227 goto out_abort;
234 } 228 }
235 229
@@ -263,6 +257,9 @@ nouveau_pci_resume(struct pci_dev *pdev)
263 struct drm_crtc *crtc; 257 struct drm_crtc *crtc;
264 int ret, i; 258 int ret, i;
265 259
260 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
261 return 0;
262
266 nouveau_fbcon_save_disable_accel(dev); 263 nouveau_fbcon_save_disable_accel(dev);
267 264
268 NV_INFO(dev, "We're back, enabling device...\n"); 265 NV_INFO(dev, "We're back, enabling device...\n");
@@ -294,17 +291,18 @@ nouveau_pci_resume(struct pci_dev *pdev)
294 } 291 }
295 } 292 }
296 293
294 NV_INFO(dev, "Restoring GPU objects...\n");
295 nouveau_gpuobj_resume(dev);
296
297 NV_INFO(dev, "Reinitialising engines...\n"); 297 NV_INFO(dev, "Reinitialising engines...\n");
298 engine->instmem.resume(dev); 298 engine->instmem.resume(dev);
299 engine->mc.init(dev); 299 engine->mc.init(dev);
300 engine->timer.init(dev); 300 engine->timer.init(dev);
301 engine->fb.init(dev); 301 engine->fb.init(dev);
302 engine->graph.init(dev); 302 engine->graph.init(dev);
303 engine->crypt.init(dev);
303 engine->fifo.init(dev); 304 engine->fifo.init(dev);
304 305
305 NV_INFO(dev, "Restoring GPU objects...\n");
306 nouveau_gpuobj_resume(dev);
307
308 nouveau_irq_postinstall(dev); 306 nouveau_irq_postinstall(dev);
309 307
310 /* Re-write SKIPS, they'll have been lost over the suspend */ 308 /* Re-write SKIPS, they'll have been lost over the suspend */
@@ -313,7 +311,7 @@ nouveau_pci_resume(struct pci_dev *pdev)
313 int j; 311 int j;
314 312
315 for (i = 0; i < dev_priv->engine.fifo.channels; i++) { 313 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
316 chan = dev_priv->fifos[i]; 314 chan = dev_priv->channels.ptr[i];
317 if (!chan || !chan->pushbuf_bo) 315 if (!chan || !chan->pushbuf_bo)
318 continue; 316 continue;
319 317
@@ -347,13 +345,11 @@ nouveau_pci_resume(struct pci_dev *pdev)
347 345
348 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 346 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
349 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 347 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
348 u32 offset = nv_crtc->cursor.nvbo->bo.mem.start << PAGE_SHIFT;
350 349
351 nv_crtc->cursor.set_offset(nv_crtc, 350 nv_crtc->cursor.set_offset(nv_crtc, offset);
352 nv_crtc->cursor.nvbo->bo.offset -
353 dev_priv->vm_vram_base);
354
355 nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x, 351 nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
356 nv_crtc->cursor_saved_y); 352 nv_crtc->cursor_saved_y);
357 } 353 }
358 354
359 /* Force CLUT to get re-loaded during modeset */ 355 /* Force CLUT to get re-loaded during modeset */
@@ -393,6 +389,9 @@ static struct drm_driver driver = {
393 .irq_postinstall = nouveau_irq_postinstall, 389 .irq_postinstall = nouveau_irq_postinstall,
394 .irq_uninstall = nouveau_irq_uninstall, 390 .irq_uninstall = nouveau_irq_uninstall,
395 .irq_handler = nouveau_irq_handler, 391 .irq_handler = nouveau_irq_handler,
392 .get_vblank_counter = drm_vblank_count,
393 .enable_vblank = nouveau_vblank_enable,
394 .disable_vblank = nouveau_vblank_disable,
396 .reclaim_buffers = drm_core_reclaim_buffers, 395 .reclaim_buffers = drm_core_reclaim_buffers,
397 .ioctls = nouveau_ioctls, 396 .ioctls = nouveau_ioctls,
398 .fops = { 397 .fops = {
@@ -403,6 +402,7 @@ static struct drm_driver driver = {
403 .mmap = nouveau_ttm_mmap, 402 .mmap = nouveau_ttm_mmap,
404 .poll = drm_poll, 403 .poll = drm_poll,
405 .fasync = drm_fasync, 404 .fasync = drm_fasync,
405 .read = drm_read,
406#if defined(CONFIG_COMPAT) 406#if defined(CONFIG_COMPAT)
407 .compat_ioctl = nouveau_compat_ioctl, 407 .compat_ioctl = nouveau_compat_ioctl,
408#endif 408#endif
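nouveau_drv.c gains a `msi` module parameter (default off). The enabling code is outside this diff; presumably the IRQ setup path consults it before pci_enable_msi(), roughly as in this sketch:

	#include <linux/pci.h>

	extern int nouveau_msi;

	static bool nouveau_try_msi_sketch(struct pci_dev *pdev)
	{
		/* only attempt MSI when the user opted in via msi=1 */
		if (!nouveau_msi)
			return false;
		if (pci_enable_msi(pdev))
			return false;	/* fall back to legacy INTx */
		return true;		/* remember pci_disable_msi() on teardown */
	}
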
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 1c7db64c03bf..46e32573b3a3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -54,22 +54,37 @@ struct nouveau_fpriv {
54#include "nouveau_drm.h" 54#include "nouveau_drm.h"
55#include "nouveau_reg.h" 55#include "nouveau_reg.h"
56#include "nouveau_bios.h" 56#include "nouveau_bios.h"
57#include "nouveau_util.h"
58
57struct nouveau_grctx; 59struct nouveau_grctx;
60struct nouveau_vram;
61#include "nouveau_vm.h"
58 62
59#define MAX_NUM_DCB_ENTRIES 16 63#define MAX_NUM_DCB_ENTRIES 16
60 64
61#define NOUVEAU_MAX_CHANNEL_NR 128 65#define NOUVEAU_MAX_CHANNEL_NR 128
62#define NOUVEAU_MAX_TILE_NR 15 66#define NOUVEAU_MAX_TILE_NR 15
63 67
64#define NV50_VM_MAX_VRAM (2*1024*1024*1024ULL) 68struct nouveau_vram {
65#define NV50_VM_BLOCK (512*1024*1024ULL) 69 struct drm_device *dev;
66#define NV50_VM_VRAM_NR (NV50_VM_MAX_VRAM / NV50_VM_BLOCK) 70
71 struct nouveau_vma bar_vma;
72 u8 page_shift;
73
74 struct list_head regions;
75 u32 memtype;
76 u64 offset;
77 u64 size;
78};
67 79
68struct nouveau_tile_reg { 80struct nouveau_tile_reg {
69 struct nouveau_fence *fence;
70 uint32_t addr;
71 uint32_t size;
72 bool used; 81 bool used;
82 uint32_t addr;
83 uint32_t limit;
84 uint32_t pitch;
85 uint32_t zcomp;
86 struct drm_mm_node *tag_mem;
87 struct nouveau_fence *fence;
73}; 88};
74 89
75struct nouveau_bo { 90struct nouveau_bo {
@@ -88,6 +103,7 @@ struct nouveau_bo {
88 103
89 struct nouveau_channel *channel; 104 struct nouveau_channel *channel;
90 105
106 struct nouveau_vma vma;
91 bool mappable; 107 bool mappable;
92 bool no_vm; 108 bool no_vm;
93 109
@@ -96,7 +112,6 @@ struct nouveau_bo {
96 struct nouveau_tile_reg *tile; 112 struct nouveau_tile_reg *tile;
97 113
98 struct drm_gem_object *gem; 114 struct drm_gem_object *gem;
99 struct drm_file *cpu_filp;
100 int pin_refcnt; 115 int pin_refcnt;
101}; 116};
102 117
@@ -133,20 +148,28 @@ enum nouveau_flags {
133 148
134#define NVOBJ_ENGINE_SW 0 149#define NVOBJ_ENGINE_SW 0
135#define NVOBJ_ENGINE_GR 1 150#define NVOBJ_ENGINE_GR 1
136#define NVOBJ_ENGINE_DISPLAY 2 151#define NVOBJ_ENGINE_PPP 2
152#define NVOBJ_ENGINE_COPY 3
153#define NVOBJ_ENGINE_VP 4
154#define NVOBJ_ENGINE_CRYPT 5
155#define NVOBJ_ENGINE_BSP 6
156#define NVOBJ_ENGINE_DISPLAY 0xcafe0001
137#define NVOBJ_ENGINE_INT 0xdeadbeef 157#define NVOBJ_ENGINE_INT 0xdeadbeef
138 158
159#define NVOBJ_FLAG_DONT_MAP (1 << 0)
139#define NVOBJ_FLAG_ZERO_ALLOC (1 << 1) 160#define NVOBJ_FLAG_ZERO_ALLOC (1 << 1)
140#define NVOBJ_FLAG_ZERO_FREE (1 << 2) 161#define NVOBJ_FLAG_ZERO_FREE (1 << 2)
162#define NVOBJ_FLAG_VM (1 << 3)
163
164#define NVOBJ_CINST_GLOBAL 0xdeadbeef
165
141struct nouveau_gpuobj { 166struct nouveau_gpuobj {
142 struct drm_device *dev; 167 struct drm_device *dev;
143 struct kref refcount; 168 struct kref refcount;
144 struct list_head list; 169 struct list_head list;
145 170
146 struct drm_mm_node *im_pramin; 171 void *node;
147 struct nouveau_bo *im_backing; 172 u32 *suspend;
148 uint32_t *im_backing_suspend;
149 int im_bound;
150 173
151 uint32_t flags; 174 uint32_t flags;
152 175
@@ -162,10 +185,29 @@ struct nouveau_gpuobj {
162 void *priv; 185 void *priv;
163}; 186};
164 187
188struct nouveau_page_flip_state {
189 struct list_head head;
190 struct drm_pending_vblank_event *event;
191 int crtc, bpp, pitch, x, y;
192 uint64_t offset;
193};
194
195enum nouveau_channel_mutex_class {
196 NOUVEAU_UCHANNEL_MUTEX,
197 NOUVEAU_KCHANNEL_MUTEX
198};
199
165struct nouveau_channel { 200struct nouveau_channel {
166 struct drm_device *dev; 201 struct drm_device *dev;
167 int id; 202 int id;
168 203
204 /* references to the channel data structure */
205 struct kref ref;
206 /* users of the hardware channel resources, the hardware
207 * context will be kicked off when it reaches zero. */
208 atomic_t users;
209 struct mutex mutex;
210
169 /* owner of this fifo */ 211 /* owner of this fifo */
170 struct drm_file *file_priv; 212 struct drm_file *file_priv;
171 /* mapping of the fifo itself */ 213 /* mapping of the fifo itself */
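The struct grows two counters with deliberately distinct jobs: `users` counts active users of the hardware context and gates its teardown, while `ref` (a kref) keeps the structure itself alive for holders such as pending fences. The gating, as a sketch of the put path shown earlier:

	static void drop_user_sketch(struct nouveau_channel **pchan)
	{
		struct nouveau_channel *chan = *pchan;

		if (atomic_dec_and_test(&chan->users)) {
			/* last user: kick the context off the hardware ... */
		}
		nouveau_channel_ref(NULL, pchan);	/* kref_put; may free chan */
	}
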
@@ -198,16 +240,17 @@ struct nouveau_channel {
198 /* PFIFO context */ 240 /* PFIFO context */
199 struct nouveau_gpuobj *ramfc; 241 struct nouveau_gpuobj *ramfc;
200 struct nouveau_gpuobj *cache; 242 struct nouveau_gpuobj *cache;
243 void *fifo_priv;
201 244
202 /* PGRAPH context */ 245 /* PGRAPH context */
203 /* XXX may be merge 2 pointers as private data ??? */ 246 /* XXX may be merge 2 pointers as private data ??? */
204 struct nouveau_gpuobj *ramin_grctx; 247 struct nouveau_gpuobj *ramin_grctx;
248 struct nouveau_gpuobj *crypt_ctx;
205 void *pgraph_ctx; 249 void *pgraph_ctx;
206 250
207 /* NV50 VM */ 251 /* NV50 VM */
252 struct nouveau_vm *vm;
208 struct nouveau_gpuobj *vm_pd; 253 struct nouveau_gpuobj *vm_pd;
209 struct nouveau_gpuobj *vm_gart_pt;
210 struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
211 254
212 /* Objects */ 255 /* Objects */
213 struct nouveau_gpuobj *ramin; /* Private instmem */ 256 struct nouveau_gpuobj *ramin; /* Private instmem */
@@ -238,9 +281,11 @@ struct nouveau_channel {
238 281
239 struct { 282 struct {
240 struct nouveau_gpuobj *vblsem; 283 struct nouveau_gpuobj *vblsem;
284 uint32_t vblsem_head;
241 uint32_t vblsem_offset; 285 uint32_t vblsem_offset;
242 uint32_t vblsem_rval; 286 uint32_t vblsem_rval;
243 struct list_head vbl_wait; 287 struct list_head vbl_wait;
288 struct list_head flip;
244 } nvsw; 289 } nvsw;
245 290
246 struct { 291 struct {
@@ -258,11 +303,11 @@ struct nouveau_instmem_engine {
258 int (*suspend)(struct drm_device *dev); 303 int (*suspend)(struct drm_device *dev);
259 void (*resume)(struct drm_device *dev); 304 void (*resume)(struct drm_device *dev);
260 305
261 int (*populate)(struct drm_device *, struct nouveau_gpuobj *, 306 int (*get)(struct nouveau_gpuobj *, u32 size, u32 align);
262 uint32_t *size); 307 void (*put)(struct nouveau_gpuobj *);
263 void (*clear)(struct drm_device *, struct nouveau_gpuobj *); 308 int (*map)(struct nouveau_gpuobj *);
264 int (*bind)(struct drm_device *, struct nouveau_gpuobj *); 309 void (*unmap)(struct nouveau_gpuobj *);
265 int (*unbind)(struct drm_device *, struct nouveau_gpuobj *); 310
266 void (*flush)(struct drm_device *); 311 void (*flush)(struct drm_device *);
267}; 312};
268 313
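Note: populate/clear/bind/unbind becomes get/put/map/unmap, and the hooks take the gpuobj directly instead of a (dev, gpuobj) pair. A hedged sketch of the call sequence a caller would drive; this is not the driver's actual allocation path:

static int example_instmem_alloc(struct drm_device *dev,
				 struct nouveau_gpuobj *obj,
				 u32 size, u32 align)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *imem = &dev_priv->engine.instmem;
	int ret;

	ret = imem->get(obj, size, align);	/* reserve instance memory */
	if (ret)
		return ret;

	ret = imem->map(obj);			/* make it CPU-accessible */
	if (ret) {
		imem->put(obj);
		return ret;
	}
	return 0;
}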
@@ -279,15 +324,21 @@ struct nouveau_timer_engine {
279 324
280struct nouveau_fb_engine { 325struct nouveau_fb_engine {
281 int num_tiles; 326 int num_tiles;
327 struct drm_mm tag_heap;
328 void *priv;
282 329
283 int (*init)(struct drm_device *dev); 330 int (*init)(struct drm_device *dev);
284 void (*takedown)(struct drm_device *dev); 331 void (*takedown)(struct drm_device *dev);
285 332
286 void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr, 333 void (*init_tile_region)(struct drm_device *dev, int i,
287 uint32_t size, uint32_t pitch); 334 uint32_t addr, uint32_t size,
335 uint32_t pitch, uint32_t flags);
336 void (*set_tile_region)(struct drm_device *dev, int i);
337 void (*free_tile_region)(struct drm_device *dev, int i);
288}; 338};
289 339
290struct nouveau_fifo_engine { 340struct nouveau_fifo_engine {
341 void *priv;
291 int channels; 342 int channels;
292 343
293 struct nouveau_gpuobj *playlist[2]; 344 struct nouveau_gpuobj *playlist[2];
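Note: set_region_tiling splits into a three-stage tile-region lifecycle. A sketch of the expected ordering, assuming the fb engine is reachable as dev_priv->engine.fb:

static void example_retile(struct drm_device *dev, int i, uint32_t addr,
			   uint32_t size, uint32_t pitch, uint32_t flags)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;

	pfb->free_tile_region(dev, i);		/* drop any previous state */
	pfb->init_tile_region(dev, i, addr, size, pitch, flags);
	pfb->set_tile_region(dev, i);		/* program the hardware */
}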
@@ -310,22 +361,11 @@ struct nouveau_fifo_engine {
310 void (*tlb_flush)(struct drm_device *dev); 361 void (*tlb_flush)(struct drm_device *dev);
311}; 362};
312 363
313struct nouveau_pgraph_object_method {
314 int id;
315 int (*exec)(struct nouveau_channel *chan, int grclass, int mthd,
316 uint32_t data);
317};
318
319struct nouveau_pgraph_object_class {
320 int id;
321 bool software;
322 struct nouveau_pgraph_object_method *methods;
323};
324
325struct nouveau_pgraph_engine { 364struct nouveau_pgraph_engine {
326 struct nouveau_pgraph_object_class *grclass;
327 bool accel_blocked; 365 bool accel_blocked;
366 bool registered;
328 int grctx_size; 367 int grctx_size;
368 void *priv;
329 369
330 /* NV2x/NV3x context table (0x400780) */ 370 /* NV2x/NV3x context table (0x400780) */
331 struct nouveau_gpuobj *ctx_table; 371 struct nouveau_gpuobj *ctx_table;
@@ -342,8 +382,7 @@ struct nouveau_pgraph_engine {
342 int (*unload_context)(struct drm_device *); 382 int (*unload_context)(struct drm_device *);
343 void (*tlb_flush)(struct drm_device *dev); 383 void (*tlb_flush)(struct drm_device *dev);
344 384
345 void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr, 385 void (*set_tile_region)(struct drm_device *dev, int i);
346 uint32_t size, uint32_t pitch);
347}; 386};
348 387
349struct nouveau_display_engine { 388struct nouveau_display_engine {
@@ -355,13 +394,19 @@ struct nouveau_display_engine {
355}; 394};
356 395
357struct nouveau_gpio_engine { 396struct nouveau_gpio_engine {
397 void *priv;
398
358 int (*init)(struct drm_device *); 399 int (*init)(struct drm_device *);
359 void (*takedown)(struct drm_device *); 400 void (*takedown)(struct drm_device *);
360 401
361 int (*get)(struct drm_device *, enum dcb_gpio_tag); 402 int (*get)(struct drm_device *, enum dcb_gpio_tag);
362 int (*set)(struct drm_device *, enum dcb_gpio_tag, int state); 403 int (*set)(struct drm_device *, enum dcb_gpio_tag, int state);
363 404
364 void (*irq_enable)(struct drm_device *, enum dcb_gpio_tag, bool on); 405 int (*irq_register)(struct drm_device *, enum dcb_gpio_tag,
406 void (*)(void *, int), void *);
407 void (*irq_unregister)(struct drm_device *, enum dcb_gpio_tag,
408 void (*)(void *, int), void *);
409 bool (*irq_enable)(struct drm_device *, enum dcb_gpio_tag, bool on);
365}; 410};
366 411
367struct nouveau_pm_voltage_level { 412struct nouveau_pm_voltage_level {
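Note: the bare irq_enable hook grows into a register/unregister pair that takes a callback and context pointer, and irq_enable now returns a bool. A hypothetical hotplug registration; the handler and its use of 'state' are illustrative:

static void example_hpd_handler(void *data, int state)
{
	/* 'data' is whatever was passed at registration; 'state' is
	 * the new GPIO line level.  Schedule hotplug processing here. */
}

static int example_hpd_init(struct drm_device *dev, enum dcb_gpio_tag tag)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	return dev_priv->engine.gpio.irq_register(dev, tag,
						  example_hpd_handler, dev);
}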
@@ -437,6 +482,7 @@ struct nouveau_pm_engine {
437 struct nouveau_pm_level *cur; 482 struct nouveau_pm_level *cur;
438 483
439 struct device *hwmon; 484 struct device *hwmon;
485 struct notifier_block acpi_nb;
440 486
441 int (*clock_get)(struct drm_device *, u32 id); 487 int (*clock_get)(struct drm_device *, u32 id);
442 void *(*clock_pre)(struct drm_device *, struct nouveau_pm_level *, 488 void *(*clock_pre)(struct drm_device *, struct nouveau_pm_level *,
@@ -449,6 +495,25 @@ struct nouveau_pm_engine {
449 int (*temp_get)(struct drm_device *); 495 int (*temp_get)(struct drm_device *);
450}; 496};
451 497
498struct nouveau_crypt_engine {
499 bool registered;
500
501 int (*init)(struct drm_device *);
502 void (*takedown)(struct drm_device *);
503 int (*create_context)(struct nouveau_channel *);
504 void (*destroy_context)(struct nouveau_channel *);
505 void (*tlb_flush)(struct drm_device *dev);
506};
507
508struct nouveau_vram_engine {
509 int (*init)(struct drm_device *);
510 int (*get)(struct drm_device *, u64, u32 align, u32 size_nc,
511 u32 type, struct nouveau_vram **);
512 void (*put)(struct drm_device *, struct nouveau_vram **);
513
514 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
515};
516
452struct nouveau_engine { 517struct nouveau_engine {
453 struct nouveau_instmem_engine instmem; 518 struct nouveau_instmem_engine instmem;
454 struct nouveau_mc_engine mc; 519 struct nouveau_mc_engine mc;
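Note: VRAM allocation moves behind an engine vtable so the nv50 and nvc0 backends can diverge. A hedged caller sketch against the get() signature above; the align/size_nc/type arguments are placeholder values whose exact meaning is backend-defined:

static int example_vram_alloc(struct drm_device *dev, u64 size,
			      struct nouveau_vram **pvram)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
	int ret;

	/* align=4096, size_nc=0, type=0: placeholders only */
	ret = vram->get(dev, size, 4096, 0, 0, pvram);
	if (ret)
		return ret;
	/* ... use the allocation ... */
	vram->put(dev, pvram);
	return 0;
}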
@@ -459,6 +524,8 @@ struct nouveau_engine {
459 struct nouveau_display_engine display; 524 struct nouveau_display_engine display;
460 struct nouveau_gpio_engine gpio; 525 struct nouveau_gpio_engine gpio;
461 struct nouveau_pm_engine pm; 526 struct nouveau_pm_engine pm;
527 struct nouveau_crypt_engine crypt;
528 struct nouveau_vram_engine vram;
462}; 529};
463 530
464struct nouveau_pll_vals { 531struct nouveau_pll_vals {
@@ -577,18 +644,15 @@ struct drm_nouveau_private {
577 bool ramin_available; 644 bool ramin_available;
578 struct drm_mm ramin_heap; 645 struct drm_mm ramin_heap;
579 struct list_head gpuobj_list; 646 struct list_head gpuobj_list;
647 struct list_head classes;
580 648
581 struct nouveau_bo *vga_ram; 649 struct nouveau_bo *vga_ram;
582 650
651 /* interrupt handling */
652 void (*irq_handler[32])(struct drm_device *);
653 bool msi_enabled;
583 struct workqueue_struct *wq; 654 struct workqueue_struct *wq;
584 struct work_struct irq_work; 655 struct work_struct irq_work;
585 struct work_struct hpd_work;
586
587 struct {
588 spinlock_t lock;
589 uint32_t hpd0_bits;
590 uint32_t hpd1_bits;
591 } hpd_state;
592 656
593 struct list_head vbl_waiting; 657 struct list_head vbl_waiting;
594 658
@@ -605,8 +669,10 @@ struct drm_nouveau_private {
605 struct nouveau_bo *bo; 669 struct nouveau_bo *bo;
606 } fence; 670 } fence;
607 671
608 int fifo_alloc_count; 672 struct {
609 struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR]; 673 spinlock_t lock;
674 struct nouveau_channel *ptr[NOUVEAU_MAX_CHANNEL_NR];
675 } channels;
610 676
611 struct nouveau_engine engine; 677 struct nouveau_engine engine;
612 struct nouveau_channel *channel; 678 struct nouveau_channel *channel;
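Note: the flat fifos[] array becomes a spinlock-guarded table, pairing with the channel get/put API declared further down. A sketch of a lookup that pins the channel before dropping the lock (irqsave is an assumption, on the basis that the table may be touched from interrupt context):

static struct nouveau_channel *
example_channel_lookup(struct drm_device *dev, int id)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	if (dev_priv->channels.ptr[id])
		nouveau_channel_ref(dev_priv->channels.ptr[id], &chan);
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
	return chan;	/* pinned, or NULL */
}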
@@ -632,12 +698,14 @@ struct drm_nouveau_private {
632 uint64_t aper_free; 698 uint64_t aper_free;
633 699
634 struct nouveau_gpuobj *sg_ctxdma; 700 struct nouveau_gpuobj *sg_ctxdma;
635 struct page *sg_dummy_page; 701 struct nouveau_vma vma;
636 dma_addr_t sg_dummy_bus;
637 } gart_info; 702 } gart_info;
638 703
639 /* nv10-nv40 tiling regions */ 704 /* nv10-nv40 tiling regions */
640 struct nouveau_tile_reg tile[NOUVEAU_MAX_TILE_NR]; 705 struct {
706 struct nouveau_tile_reg reg[NOUVEAU_MAX_TILE_NR];
707 spinlock_t lock;
708 } tile;
641 709
642 /* VRAM/fb configuration */ 710 /* VRAM/fb configuration */
643 uint64_t vram_size; 711 uint64_t vram_size;
@@ -650,14 +718,12 @@ struct drm_nouveau_private {
650 uint64_t fb_aper_free; 718 uint64_t fb_aper_free;
651 int fb_mtrr; 719 int fb_mtrr;
652 720
721 /* BAR control (NV50-) */
722 struct nouveau_vm *bar1_vm;
723 struct nouveau_vm *bar3_vm;
724
653 /* G8x/G9x virtual address space */ 725 /* G8x/G9x virtual address space */
654 uint64_t vm_gart_base; 726 struct nouveau_vm *chan_vm;
655 uint64_t vm_gart_size;
656 uint64_t vm_vram_base;
657 uint64_t vm_vram_size;
658 uint64_t vm_end;
659 struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
660 int vm_vram_pt_nr;
661 727
662 struct nvbios vbios; 728 struct nvbios vbios;
663 729
@@ -674,6 +740,7 @@ struct drm_nouveau_private {
674 struct backlight_device *backlight; 740 struct backlight_device *backlight;
675 741
676 struct nouveau_channel *evo; 742 struct nouveau_channel *evo;
743 u32 evo_alloc;
677 struct { 744 struct {
678 struct dcb_entry *dcb; 745 struct dcb_entry *dcb;
679 u16 script; 746 u16 script;
@@ -686,6 +753,8 @@ struct drm_nouveau_private {
686 753
687 struct nouveau_fbdev *nfbdev; 754 struct nouveau_fbdev *nfbdev;
688 struct apertures_struct *apertures; 755 struct apertures_struct *apertures;
756
757 bool powered_down;
689}; 758};
690 759
691static inline struct drm_nouveau_private * 760static inline struct drm_nouveau_private *
@@ -719,16 +788,6 @@ nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
719 return 0; 788 return 0;
720} 789}
721 790
722#define NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(id, cl, ch) do { \
723 struct drm_nouveau_private *nv = dev->dev_private; \
724 if (!nouveau_channel_owner(dev, (cl), (id))) { \
725 NV_ERROR(dev, "pid %d doesn't own channel %d\n", \
726 DRM_CURRENTPID, (id)); \
727 return -EPERM; \
728 } \
729 (ch) = nv->fifos[(id)]; \
730} while (0)
731
732/* nouveau_drv.c */ 791/* nouveau_drv.c */
733extern int nouveau_agpmode; 792extern int nouveau_agpmode;
734extern int nouveau_duallink; 793extern int nouveau_duallink;
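Note: the deleted NOUVEAU_GET_USER_CHANNEL_WITH_RETURN macro is superseded by nouveau_channel_get()/nouveau_channel_put(). A hypothetical ioctl body under the new API, assuming get() reports failure via ERR_PTR, consistent with its pointer return type:

static int example_ioctl(struct drm_device *dev, int id,
			 struct drm_file *file_priv)
{
	struct nouveau_channel *chan;

	chan = nouveau_channel_get(dev, file_priv, id);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* ... operate on the channel; ownership already verified ... */

	nouveau_channel_put(&chan);
	return 0;
}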
@@ -748,6 +807,7 @@ extern int nouveau_force_post;
748extern int nouveau_override_conntype; 807extern int nouveau_override_conntype;
749extern char *nouveau_perflvl; 808extern char *nouveau_perflvl;
750extern int nouveau_perflvl_wr; 809extern int nouveau_perflvl_wr;
810extern int nouveau_msi;
751 811
752extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state); 812extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state);
753extern int nouveau_pci_resume(struct pci_dev *pdev); 813extern int nouveau_pci_resume(struct pci_dev *pdev);
@@ -762,8 +822,10 @@ extern int nouveau_ioctl_getparam(struct drm_device *, void *data,
762 struct drm_file *); 822 struct drm_file *);
763extern int nouveau_ioctl_setparam(struct drm_device *, void *data, 823extern int nouveau_ioctl_setparam(struct drm_device *, void *data,
764 struct drm_file *); 824 struct drm_file *);
765extern bool nouveau_wait_until(struct drm_device *, uint64_t timeout, 825extern bool nouveau_wait_eq(struct drm_device *, uint64_t timeout,
766 uint32_t reg, uint32_t mask, uint32_t val); 826 uint32_t reg, uint32_t mask, uint32_t val);
827extern bool nouveau_wait_ne(struct drm_device *, uint64_t timeout,
828 uint32_t reg, uint32_t mask, uint32_t val);
767extern bool nouveau_wait_for_idle(struct drm_device *); 829extern bool nouveau_wait_for_idle(struct drm_device *);
768extern int nouveau_card_init(struct drm_device *); 830extern int nouveau_card_init(struct drm_device *);
769 831
@@ -775,18 +837,18 @@ extern void nouveau_mem_gart_fini(struct drm_device *);
775extern int nouveau_mem_init_agp(struct drm_device *); 837extern int nouveau_mem_init_agp(struct drm_device *);
776extern int nouveau_mem_reset_agp(struct drm_device *); 838extern int nouveau_mem_reset_agp(struct drm_device *);
777extern void nouveau_mem_close(struct drm_device *); 839extern void nouveau_mem_close(struct drm_device *);
778extern struct nouveau_tile_reg *nv10_mem_set_tiling(struct drm_device *dev, 840extern int nouveau_mem_detect(struct drm_device *);
779 uint32_t addr, 841extern bool nouveau_mem_flags_valid(struct drm_device *, u32 tile_flags);
780 uint32_t size, 842extern struct nouveau_tile_reg *nv10_mem_set_tiling(
781 uint32_t pitch); 843 struct drm_device *dev, uint32_t addr, uint32_t size,
782extern void nv10_mem_expire_tiling(struct drm_device *dev, 844 uint32_t pitch, uint32_t flags);
783 struct nouveau_tile_reg *tile, 845extern void nv10_mem_put_tile_region(struct drm_device *dev,
784 struct nouveau_fence *fence); 846 struct nouveau_tile_reg *tile,
785extern int nv50_mem_vm_bind_linear(struct drm_device *, uint64_t virt, 847 struct nouveau_fence *fence);
786 uint32_t size, uint32_t flags, 848extern const struct ttm_mem_type_manager_func nouveau_vram_manager;
787 uint64_t phys); 849
788extern void nv50_mem_vm_unbind(struct drm_device *, uint64_t virt, 850/* nvc0_vram.c */
789 uint32_t size); 851extern const struct ttm_mem_type_manager_func nvc0_vram_manager;
790 852
791/* nouveau_notifier.c */ 853/* nouveau_notifier.c */
792extern int nouveau_notifier_init_channel(struct nouveau_channel *); 854extern int nouveau_notifier_init_channel(struct nouveau_channel *);
@@ -803,21 +865,44 @@ extern int nouveau_ioctl_notifier_free(struct drm_device *, void *data,
803extern struct drm_ioctl_desc nouveau_ioctls[]; 865extern struct drm_ioctl_desc nouveau_ioctls[];
804extern int nouveau_max_ioctl; 866extern int nouveau_max_ioctl;
805extern void nouveau_channel_cleanup(struct drm_device *, struct drm_file *); 867extern void nouveau_channel_cleanup(struct drm_device *, struct drm_file *);
806extern int nouveau_channel_owner(struct drm_device *, struct drm_file *,
807 int channel);
808extern int nouveau_channel_alloc(struct drm_device *dev, 868extern int nouveau_channel_alloc(struct drm_device *dev,
809 struct nouveau_channel **chan, 869 struct nouveau_channel **chan,
810 struct drm_file *file_priv, 870 struct drm_file *file_priv,
811 uint32_t fb_ctxdma, uint32_t tt_ctxdma); 871 uint32_t fb_ctxdma, uint32_t tt_ctxdma);
812extern void nouveau_channel_free(struct nouveau_channel *); 872extern struct nouveau_channel *
873nouveau_channel_get_unlocked(struct nouveau_channel *);
874extern struct nouveau_channel *
875nouveau_channel_get(struct drm_device *, struct drm_file *, int id);
876extern void nouveau_channel_put_unlocked(struct nouveau_channel **);
877extern void nouveau_channel_put(struct nouveau_channel **);
878extern void nouveau_channel_ref(struct nouveau_channel *chan,
879 struct nouveau_channel **pchan);
880extern void nouveau_channel_idle(struct nouveau_channel *chan);
813 881
814/* nouveau_object.c */ 882/* nouveau_object.c */
883#define NVOBJ_CLASS(d,c,e) do { \
884 int ret = nouveau_gpuobj_class_new((d), (c), NVOBJ_ENGINE_##e); \
885 if (ret) \
886 return ret; \
887} while(0)
888
889#define NVOBJ_MTHD(d,c,m,e) do { \
890 int ret = nouveau_gpuobj_mthd_new((d), (c), (m), (e)); \
891 if (ret) \
892 return ret; \
893} while(0)
894
815extern int nouveau_gpuobj_early_init(struct drm_device *); 895extern int nouveau_gpuobj_early_init(struct drm_device *);
816extern int nouveau_gpuobj_init(struct drm_device *); 896extern int nouveau_gpuobj_init(struct drm_device *);
817extern void nouveau_gpuobj_takedown(struct drm_device *); 897extern void nouveau_gpuobj_takedown(struct drm_device *);
818extern int nouveau_gpuobj_suspend(struct drm_device *dev); 898extern int nouveau_gpuobj_suspend(struct drm_device *dev);
819extern void nouveau_gpuobj_suspend_cleanup(struct drm_device *dev);
820extern void nouveau_gpuobj_resume(struct drm_device *dev); 899extern void nouveau_gpuobj_resume(struct drm_device *dev);
900extern int nouveau_gpuobj_class_new(struct drm_device *, u32 class, u32 eng);
901extern int nouveau_gpuobj_mthd_new(struct drm_device *, u32 class, u32 mthd,
902 int (*exec)(struct nouveau_channel *,
903 u32 class, u32 mthd, u32 data));
904extern int nouveau_gpuobj_mthd_call(struct nouveau_channel *, u32, u32, u32);
905extern int nouveau_gpuobj_mthd_call2(struct drm_device *, int, u32, u32, u32);
821extern int nouveau_gpuobj_channel_init(struct nouveau_channel *, 906extern int nouveau_gpuobj_channel_init(struct nouveau_channel *,
822 uint32_t vram_h, uint32_t tt_h); 907 uint32_t vram_h, uint32_t tt_h);
823extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *); 908extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *);
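Note: class and method dispatch is now registered at runtime through these macros rather than through the static grclass tables removed further down. A sketch wiring up the software class; 0x506e and 0x0500 are the NV_SW and NV_SW_PAGE_FLIP values defined at the end of this header:

static int example_page_flip_mthd(struct nouveau_channel *chan,
				  u32 class, u32 mthd, u32 data)
{
	/* complete the flip queued on chan->nvsw.flip */
	return 0;
}

static int example_register_sw_class(struct drm_device *dev)
{
	NVOBJ_CLASS(dev, 0x506e, SW);		/* NV_SW */
	NVOBJ_MTHD (dev, 0x506e, 0x0500,	/* NV_SW_PAGE_FLIP */
		    example_page_flip_mthd);
	return 0;
}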
@@ -832,21 +917,25 @@ extern int nouveau_gpuobj_new_fake(struct drm_device *, u32 pinst, u64 vinst,
832extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class, 917extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class,
833 uint64_t offset, uint64_t size, int access, 918 uint64_t offset, uint64_t size, int access,
834 int target, struct nouveau_gpuobj **); 919 int target, struct nouveau_gpuobj **);
835extern int nouveau_gpuobj_gart_dma_new(struct nouveau_channel *, 920extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, u32 handle, int class);
836 uint64_t offset, uint64_t size, 921extern int nv50_gpuobj_dma_new(struct nouveau_channel *, int class, u64 base,
837 int access, struct nouveau_gpuobj **, 922 u64 size, int target, int access, u32 type,
838 uint32_t *o_ret); 923 u32 comp, struct nouveau_gpuobj **pobj);
839extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, int class, 924extern void nv50_gpuobj_dma_init(struct nouveau_gpuobj *, u32 offset,
840 struct nouveau_gpuobj **); 925 int class, u64 base, u64 size, int target,
841extern int nouveau_gpuobj_sw_new(struct nouveau_channel *, int class, 926 int access, u32 type, u32 comp);
842 struct nouveau_gpuobj **);
843extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data, 927extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data,
844 struct drm_file *); 928 struct drm_file *);
845extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data, 929extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data,
846 struct drm_file *); 930 struct drm_file *);
847 931
848/* nouveau_irq.c */ 932/* nouveau_irq.c */
933extern int nouveau_irq_init(struct drm_device *);
934extern void nouveau_irq_fini(struct drm_device *);
849extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS); 935extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS);
936extern void nouveau_irq_register(struct drm_device *, int status_bit,
937 void (*)(struct drm_device *));
938extern void nouveau_irq_unregister(struct drm_device *, int status_bit);
850extern void nouveau_irq_preinstall(struct drm_device *); 939extern void nouveau_irq_preinstall(struct drm_device *);
851extern int nouveau_irq_postinstall(struct drm_device *); 940extern int nouveau_irq_postinstall(struct drm_device *);
852extern void nouveau_irq_uninstall(struct drm_device *); 941extern void nouveau_irq_uninstall(struct drm_device *);
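Note: interrupt handling moves to per-status-bit registration, matching the irq_handler[32] table added to drm_nouveau_private earlier. A sketch using nv04_fifo_isr declared below; treating bit 8 as PFIFO's PMC status bit is an assumption here:

static int example_irq_setup(struct drm_device *dev)
{
	int ret = nouveau_irq_init(dev);
	if (ret)
		return ret;

	/* route PMC status bit 8 to the PFIFO service routine */
	nouveau_irq_register(dev, 8, nv04_fifo_isr);
	return 0;
}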
@@ -854,8 +943,8 @@ extern void nouveau_irq_uninstall(struct drm_device *);
854/* nouveau_sgdma.c */ 943/* nouveau_sgdma.c */
855extern int nouveau_sgdma_init(struct drm_device *); 944extern int nouveau_sgdma_init(struct drm_device *);
856extern void nouveau_sgdma_takedown(struct drm_device *); 945extern void nouveau_sgdma_takedown(struct drm_device *);
857extern int nouveau_sgdma_get_page(struct drm_device *, uint32_t offset, 946extern uint32_t nouveau_sgdma_get_physical(struct drm_device *,
858 uint32_t *page); 947 uint32_t offset);
859extern struct ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *); 948extern struct ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *);
860 949
861/* nouveau_debugfs.c */ 950/* nouveau_debugfs.c */
@@ -966,18 +1055,25 @@ extern void nv04_fb_takedown(struct drm_device *);
966/* nv10_fb.c */ 1055/* nv10_fb.c */
967extern int nv10_fb_init(struct drm_device *); 1056extern int nv10_fb_init(struct drm_device *);
968extern void nv10_fb_takedown(struct drm_device *); 1057extern void nv10_fb_takedown(struct drm_device *);
969extern void nv10_fb_set_region_tiling(struct drm_device *, int, uint32_t, 1058extern void nv10_fb_init_tile_region(struct drm_device *dev, int i,
970 uint32_t, uint32_t); 1059 uint32_t addr, uint32_t size,
1060 uint32_t pitch, uint32_t flags);
1061extern void nv10_fb_set_tile_region(struct drm_device *dev, int i);
1062extern void nv10_fb_free_tile_region(struct drm_device *dev, int i);
971 1063
972/* nv30_fb.c */ 1064/* nv30_fb.c */
973extern int nv30_fb_init(struct drm_device *); 1065extern int nv30_fb_init(struct drm_device *);
974extern void nv30_fb_takedown(struct drm_device *); 1066extern void nv30_fb_takedown(struct drm_device *);
1067extern void nv30_fb_init_tile_region(struct drm_device *dev, int i,
1068 uint32_t addr, uint32_t size,
1069 uint32_t pitch, uint32_t flags);
1070extern void nv30_fb_free_tile_region(struct drm_device *dev, int i);
975 1071
976/* nv40_fb.c */ 1072/* nv40_fb.c */
977extern int nv40_fb_init(struct drm_device *); 1073extern int nv40_fb_init(struct drm_device *);
978extern void nv40_fb_takedown(struct drm_device *); 1074extern void nv40_fb_takedown(struct drm_device *);
979extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t, 1075extern void nv40_fb_set_tile_region(struct drm_device *dev, int i);
980 uint32_t, uint32_t); 1076
981/* nv50_fb.c */ 1077/* nv50_fb.c */
982extern int nv50_fb_init(struct drm_device *); 1078extern int nv50_fb_init(struct drm_device *);
983extern void nv50_fb_takedown(struct drm_device *); 1079extern void nv50_fb_takedown(struct drm_device *);
@@ -989,6 +1085,7 @@ extern void nvc0_fb_takedown(struct drm_device *);
989 1085
990/* nv04_fifo.c */ 1086/* nv04_fifo.c */
991extern int nv04_fifo_init(struct drm_device *); 1087extern int nv04_fifo_init(struct drm_device *);
1088extern void nv04_fifo_fini(struct drm_device *);
992extern void nv04_fifo_disable(struct drm_device *); 1089extern void nv04_fifo_disable(struct drm_device *);
993extern void nv04_fifo_enable(struct drm_device *); 1090extern void nv04_fifo_enable(struct drm_device *);
994extern bool nv04_fifo_reassign(struct drm_device *, bool); 1091extern bool nv04_fifo_reassign(struct drm_device *, bool);
@@ -998,19 +1095,18 @@ extern int nv04_fifo_create_context(struct nouveau_channel *);
998extern void nv04_fifo_destroy_context(struct nouveau_channel *); 1095extern void nv04_fifo_destroy_context(struct nouveau_channel *);
999extern int nv04_fifo_load_context(struct nouveau_channel *); 1096extern int nv04_fifo_load_context(struct nouveau_channel *);
1000extern int nv04_fifo_unload_context(struct drm_device *); 1097extern int nv04_fifo_unload_context(struct drm_device *);
1098extern void nv04_fifo_isr(struct drm_device *);
1001 1099
1002/* nv10_fifo.c */ 1100/* nv10_fifo.c */
1003extern int nv10_fifo_init(struct drm_device *); 1101extern int nv10_fifo_init(struct drm_device *);
1004extern int nv10_fifo_channel_id(struct drm_device *); 1102extern int nv10_fifo_channel_id(struct drm_device *);
1005extern int nv10_fifo_create_context(struct nouveau_channel *); 1103extern int nv10_fifo_create_context(struct nouveau_channel *);
1006extern void nv10_fifo_destroy_context(struct nouveau_channel *);
1007extern int nv10_fifo_load_context(struct nouveau_channel *); 1104extern int nv10_fifo_load_context(struct nouveau_channel *);
1008extern int nv10_fifo_unload_context(struct drm_device *); 1105extern int nv10_fifo_unload_context(struct drm_device *);
1009 1106
1010/* nv40_fifo.c */ 1107/* nv40_fifo.c */
1011extern int nv40_fifo_init(struct drm_device *); 1108extern int nv40_fifo_init(struct drm_device *);
1012extern int nv40_fifo_create_context(struct nouveau_channel *); 1109extern int nv40_fifo_create_context(struct nouveau_channel *);
1013extern void nv40_fifo_destroy_context(struct nouveau_channel *);
1014extern int nv40_fifo_load_context(struct nouveau_channel *); 1110extern int nv40_fifo_load_context(struct nouveau_channel *);
1015extern int nv40_fifo_unload_context(struct drm_device *); 1111extern int nv40_fifo_unload_context(struct drm_device *);
1016 1112
@@ -1038,7 +1134,6 @@ extern int nvc0_fifo_load_context(struct nouveau_channel *);
1038extern int nvc0_fifo_unload_context(struct drm_device *); 1134extern int nvc0_fifo_unload_context(struct drm_device *);
1039 1135
1040/* nv04_graph.c */ 1136/* nv04_graph.c */
1041extern struct nouveau_pgraph_object_class nv04_graph_grclass[];
1042extern int nv04_graph_init(struct drm_device *); 1137extern int nv04_graph_init(struct drm_device *);
1043extern void nv04_graph_takedown(struct drm_device *); 1138extern void nv04_graph_takedown(struct drm_device *);
1044extern void nv04_graph_fifo_access(struct drm_device *, bool); 1139extern void nv04_graph_fifo_access(struct drm_device *, bool);
@@ -1047,10 +1142,11 @@ extern int nv04_graph_create_context(struct nouveau_channel *);
1047extern void nv04_graph_destroy_context(struct nouveau_channel *); 1142extern void nv04_graph_destroy_context(struct nouveau_channel *);
1048extern int nv04_graph_load_context(struct nouveau_channel *); 1143extern int nv04_graph_load_context(struct nouveau_channel *);
1049extern int nv04_graph_unload_context(struct drm_device *); 1144extern int nv04_graph_unload_context(struct drm_device *);
1050extern void nv04_graph_context_switch(struct drm_device *); 1145extern int nv04_graph_mthd_page_flip(struct nouveau_channel *chan,
1146 u32 class, u32 mthd, u32 data);
1147extern struct nouveau_bitfield nv04_graph_nsource[];
1051 1148
1052/* nv10_graph.c */ 1149/* nv10_graph.c */
1053extern struct nouveau_pgraph_object_class nv10_graph_grclass[];
1054extern int nv10_graph_init(struct drm_device *); 1150extern int nv10_graph_init(struct drm_device *);
1055extern void nv10_graph_takedown(struct drm_device *); 1151extern void nv10_graph_takedown(struct drm_device *);
1056extern struct nouveau_channel *nv10_graph_channel(struct drm_device *); 1152extern struct nouveau_channel *nv10_graph_channel(struct drm_device *);
@@ -1058,13 +1154,11 @@ extern int nv10_graph_create_context(struct nouveau_channel *);
1058extern void nv10_graph_destroy_context(struct nouveau_channel *); 1154extern void nv10_graph_destroy_context(struct nouveau_channel *);
1059extern int nv10_graph_load_context(struct nouveau_channel *); 1155extern int nv10_graph_load_context(struct nouveau_channel *);
1060extern int nv10_graph_unload_context(struct drm_device *); 1156extern int nv10_graph_unload_context(struct drm_device *);
1061extern void nv10_graph_context_switch(struct drm_device *); 1157extern void nv10_graph_set_tile_region(struct drm_device *dev, int i);
1062extern void nv10_graph_set_region_tiling(struct drm_device *, int, uint32_t, 1158extern struct nouveau_bitfield nv10_graph_intr[];
1063 uint32_t, uint32_t); 1159extern struct nouveau_bitfield nv10_graph_nstatus[];
1064 1160
1065/* nv20_graph.c */ 1161/* nv20_graph.c */
1066extern struct nouveau_pgraph_object_class nv20_graph_grclass[];
1067extern struct nouveau_pgraph_object_class nv30_graph_grclass[];
1068extern int nv20_graph_create_context(struct nouveau_channel *); 1162extern int nv20_graph_create_context(struct nouveau_channel *);
1069extern void nv20_graph_destroy_context(struct nouveau_channel *); 1163extern void nv20_graph_destroy_context(struct nouveau_channel *);
1070extern int nv20_graph_load_context(struct nouveau_channel *); 1164extern int nv20_graph_load_context(struct nouveau_channel *);
@@ -1072,11 +1166,9 @@ extern int nv20_graph_unload_context(struct drm_device *);
1072extern int nv20_graph_init(struct drm_device *); 1166extern int nv20_graph_init(struct drm_device *);
1073extern void nv20_graph_takedown(struct drm_device *); 1167extern void nv20_graph_takedown(struct drm_device *);
1074extern int nv30_graph_init(struct drm_device *); 1168extern int nv30_graph_init(struct drm_device *);
1075extern void nv20_graph_set_region_tiling(struct drm_device *, int, uint32_t, 1169extern void nv20_graph_set_tile_region(struct drm_device *dev, int i);
1076 uint32_t, uint32_t);
1077 1170
1078/* nv40_graph.c */ 1171/* nv40_graph.c */
1079extern struct nouveau_pgraph_object_class nv40_graph_grclass[];
1080extern int nv40_graph_init(struct drm_device *); 1172extern int nv40_graph_init(struct drm_device *);
1081extern void nv40_graph_takedown(struct drm_device *); 1173extern void nv40_graph_takedown(struct drm_device *);
1082extern struct nouveau_channel *nv40_graph_channel(struct drm_device *); 1174extern struct nouveau_channel *nv40_graph_channel(struct drm_device *);
@@ -1085,11 +1177,9 @@ extern void nv40_graph_destroy_context(struct nouveau_channel *);
1085extern int nv40_graph_load_context(struct nouveau_channel *); 1177extern int nv40_graph_load_context(struct nouveau_channel *);
1086extern int nv40_graph_unload_context(struct drm_device *); 1178extern int nv40_graph_unload_context(struct drm_device *);
1087extern void nv40_grctx_init(struct nouveau_grctx *); 1179extern void nv40_grctx_init(struct nouveau_grctx *);
1088extern void nv40_graph_set_region_tiling(struct drm_device *, int, uint32_t, 1180extern void nv40_graph_set_tile_region(struct drm_device *dev, int i);
1089 uint32_t, uint32_t);
1090 1181
1091/* nv50_graph.c */ 1182/* nv50_graph.c */
1092extern struct nouveau_pgraph_object_class nv50_graph_grclass[];
1093extern int nv50_graph_init(struct drm_device *); 1183extern int nv50_graph_init(struct drm_device *);
1094extern void nv50_graph_takedown(struct drm_device *); 1184extern void nv50_graph_takedown(struct drm_device *);
1095extern void nv50_graph_fifo_access(struct drm_device *, bool); 1185extern void nv50_graph_fifo_access(struct drm_device *, bool);
@@ -1098,10 +1188,10 @@ extern int nv50_graph_create_context(struct nouveau_channel *);
1098extern void nv50_graph_destroy_context(struct nouveau_channel *); 1188extern void nv50_graph_destroy_context(struct nouveau_channel *);
1099extern int nv50_graph_load_context(struct nouveau_channel *); 1189extern int nv50_graph_load_context(struct nouveau_channel *);
1100extern int nv50_graph_unload_context(struct drm_device *); 1190extern int nv50_graph_unload_context(struct drm_device *);
1101extern void nv50_graph_context_switch(struct drm_device *);
1102extern int nv50_grctx_init(struct nouveau_grctx *); 1191extern int nv50_grctx_init(struct nouveau_grctx *);
1103extern void nv50_graph_tlb_flush(struct drm_device *dev); 1192extern void nv50_graph_tlb_flush(struct drm_device *dev);
1104extern void nv86_graph_tlb_flush(struct drm_device *dev); 1193extern void nv86_graph_tlb_flush(struct drm_device *dev);
1194extern struct nouveau_enum nv50_data_error_names[];
1105 1195
1106/* nvc0_graph.c */ 1196/* nvc0_graph.c */
1107extern int nvc0_graph_init(struct drm_device *); 1197extern int nvc0_graph_init(struct drm_device *);
@@ -1113,16 +1203,22 @@ extern void nvc0_graph_destroy_context(struct nouveau_channel *);
1113extern int nvc0_graph_load_context(struct nouveau_channel *); 1203extern int nvc0_graph_load_context(struct nouveau_channel *);
1114extern int nvc0_graph_unload_context(struct drm_device *); 1204extern int nvc0_graph_unload_context(struct drm_device *);
1115 1205
1206/* nv84_crypt.c */
1207extern int nv84_crypt_init(struct drm_device *dev);
1208extern void nv84_crypt_fini(struct drm_device *dev);
1209extern int nv84_crypt_create_context(struct nouveau_channel *);
1210extern void nv84_crypt_destroy_context(struct nouveau_channel *);
1211extern void nv84_crypt_tlb_flush(struct drm_device *dev);
1212
1116/* nv04_instmem.c */ 1213/* nv04_instmem.c */
1117extern int nv04_instmem_init(struct drm_device *); 1214extern int nv04_instmem_init(struct drm_device *);
1118extern void nv04_instmem_takedown(struct drm_device *); 1215extern void nv04_instmem_takedown(struct drm_device *);
1119extern int nv04_instmem_suspend(struct drm_device *); 1216extern int nv04_instmem_suspend(struct drm_device *);
1120extern void nv04_instmem_resume(struct drm_device *); 1217extern void nv04_instmem_resume(struct drm_device *);
1121extern int nv04_instmem_populate(struct drm_device *, struct nouveau_gpuobj *, 1218extern int nv04_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align);
1122 uint32_t *size); 1219extern void nv04_instmem_put(struct nouveau_gpuobj *);
1123extern void nv04_instmem_clear(struct drm_device *, struct nouveau_gpuobj *); 1220extern int nv04_instmem_map(struct nouveau_gpuobj *);
1124extern int nv04_instmem_bind(struct drm_device *, struct nouveau_gpuobj *); 1221extern void nv04_instmem_unmap(struct nouveau_gpuobj *);
1125extern int nv04_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
1126extern void nv04_instmem_flush(struct drm_device *); 1222extern void nv04_instmem_flush(struct drm_device *);
1127 1223
1128/* nv50_instmem.c */ 1224/* nv50_instmem.c */
@@ -1130,26 +1226,18 @@ extern int nv50_instmem_init(struct drm_device *);
1130extern void nv50_instmem_takedown(struct drm_device *); 1226extern void nv50_instmem_takedown(struct drm_device *);
1131extern int nv50_instmem_suspend(struct drm_device *); 1227extern int nv50_instmem_suspend(struct drm_device *);
1132extern void nv50_instmem_resume(struct drm_device *); 1228extern void nv50_instmem_resume(struct drm_device *);
1133extern int nv50_instmem_populate(struct drm_device *, struct nouveau_gpuobj *, 1229extern int nv50_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align);
1134 uint32_t *size); 1230extern void nv50_instmem_put(struct nouveau_gpuobj *);
1135extern void nv50_instmem_clear(struct drm_device *, struct nouveau_gpuobj *); 1231extern int nv50_instmem_map(struct nouveau_gpuobj *);
1136extern int nv50_instmem_bind(struct drm_device *, struct nouveau_gpuobj *); 1232extern void nv50_instmem_unmap(struct nouveau_gpuobj *);
1137extern int nv50_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
1138extern void nv50_instmem_flush(struct drm_device *); 1233extern void nv50_instmem_flush(struct drm_device *);
1139extern void nv84_instmem_flush(struct drm_device *); 1234extern void nv84_instmem_flush(struct drm_device *);
1140extern void nv50_vm_flush(struct drm_device *, int engine);
1141 1235
1142/* nvc0_instmem.c */ 1236/* nvc0_instmem.c */
1143extern int nvc0_instmem_init(struct drm_device *); 1237extern int nvc0_instmem_init(struct drm_device *);
1144extern void nvc0_instmem_takedown(struct drm_device *); 1238extern void nvc0_instmem_takedown(struct drm_device *);
1145extern int nvc0_instmem_suspend(struct drm_device *); 1239extern int nvc0_instmem_suspend(struct drm_device *);
1146extern void nvc0_instmem_resume(struct drm_device *); 1240extern void nvc0_instmem_resume(struct drm_device *);
1147extern int nvc0_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
1148 uint32_t *size);
1149extern void nvc0_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
1150extern int nvc0_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
1151extern int nvc0_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
1152extern void nvc0_instmem_flush(struct drm_device *);
1153 1241
1154/* nv04_mc.c */ 1242/* nv04_mc.c */
1155extern int nv04_mc_init(struct drm_device *); 1243extern int nv04_mc_init(struct drm_device *);
@@ -1219,6 +1307,9 @@ extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index);
1219extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val); 1307extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val);
1220extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index); 1308extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index);
1221extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val); 1309extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val);
1310extern void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *);
1311extern int nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
1312 bool no_wait_reserve, bool no_wait_gpu);
1222 1313
1223/* nouveau_fence.c */ 1314/* nouveau_fence.c */
1224struct nouveau_fence; 1315struct nouveau_fence;
@@ -1234,12 +1325,35 @@ extern void nouveau_fence_work(struct nouveau_fence *fence,
1234 void (*work)(void *priv, bool signalled), 1325 void (*work)(void *priv, bool signalled),
1235 void *priv); 1326 void *priv);
1236struct nouveau_channel *nouveau_fence_channel(struct nouveau_fence *); 1327struct nouveau_channel *nouveau_fence_channel(struct nouveau_fence *);
1237extern bool nouveau_fence_signalled(void *obj, void *arg); 1328
1238extern int nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr); 1329extern bool __nouveau_fence_signalled(void *obj, void *arg);
1330extern int __nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr);
1331extern int __nouveau_fence_flush(void *obj, void *arg);
1332extern void __nouveau_fence_unref(void **obj);
1333extern void *__nouveau_fence_ref(void *obj);
1334
1335static inline bool nouveau_fence_signalled(struct nouveau_fence *obj)
1336{
1337 return __nouveau_fence_signalled(obj, NULL);
1338}
1339static inline int
1340nouveau_fence_wait(struct nouveau_fence *obj, bool lazy, bool intr)
1341{
1342 return __nouveau_fence_wait(obj, NULL, lazy, intr);
1343}
1239extern int nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *); 1344extern int nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *);
1240extern int nouveau_fence_flush(void *obj, void *arg); 1345static inline int nouveau_fence_flush(struct nouveau_fence *obj)
1241extern void nouveau_fence_unref(void **obj); 1346{
1242extern void *nouveau_fence_ref(void *obj); 1347 return __nouveau_fence_flush(obj, NULL);
1348}
1349static inline void nouveau_fence_unref(struct nouveau_fence **obj)
1350{
1351 __nouveau_fence_unref((void **)obj);
1352}
1353static inline struct nouveau_fence *nouveau_fence_ref(struct nouveau_fence *obj)
1354{
1355 return __nouveau_fence_ref(obj);
1356}
1243 1357
1244/* nouveau_gem.c */ 1358/* nouveau_gem.c */
1245extern int nouveau_gem_new(struct drm_device *, struct nouveau_channel *, 1359extern int nouveau_gem_new(struct drm_device *, struct nouveau_channel *,
@@ -1259,15 +1373,28 @@ extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *,
1259extern int nouveau_gem_ioctl_info(struct drm_device *, void *, 1373extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
1260 struct drm_file *); 1374 struct drm_file *);
1261 1375
1376/* nouveau_display.c */
1377int nouveau_vblank_enable(struct drm_device *dev, int crtc);
1378void nouveau_vblank_disable(struct drm_device *dev, int crtc);
1379int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1380 struct drm_pending_vblank_event *event);
1381int nouveau_finish_page_flip(struct nouveau_channel *,
1382 struct nouveau_page_flip_state *);
1383
1262/* nv10_gpio.c */ 1384/* nv10_gpio.c */
1263int nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag); 1385int nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
1264int nv10_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state); 1386int nv10_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
1265 1387
1266/* nv50_gpio.c */ 1388/* nv50_gpio.c */
1267int nv50_gpio_init(struct drm_device *dev); 1389int nv50_gpio_init(struct drm_device *dev);
1390void nv50_gpio_fini(struct drm_device *dev);
1268int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag); 1391int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
1269int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state); 1392int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
1270void nv50_gpio_irq_enable(struct drm_device *, enum dcb_gpio_tag, bool on); 1393int nv50_gpio_irq_register(struct drm_device *, enum dcb_gpio_tag,
1394 void (*)(void *, int), void *);
1395void nv50_gpio_irq_unregister(struct drm_device *, enum dcb_gpio_tag,
1396 void (*)(void *, int), void *);
1397bool nv50_gpio_irq_enable(struct drm_device *, enum dcb_gpio_tag, bool on);
1271 1398
1272/* nv50_calc. */ 1399/* nv50_calc. */
1273int nv50_calc_pll(struct drm_device *, struct pll_lims *, int clk, 1400int nv50_calc_pll(struct drm_device *, struct pll_lims *, int clk,
@@ -1334,7 +1461,9 @@ static inline void nv_wr08(struct drm_device *dev, unsigned reg, u8 val)
1334} 1461}
1335 1462
1336#define nv_wait(dev, reg, mask, val) \ 1463#define nv_wait(dev, reg, mask, val) \
1337 nouveau_wait_until(dev, 2000000000ULL, (reg), (mask), (val)) 1464 nouveau_wait_eq(dev, 2000000000ULL, (reg), (mask), (val))
1465#define nv_wait_ne(dev, reg, mask, val) \
1466 nouveau_wait_ne(dev, 2000000000ULL, (reg), (mask), (val))
1338 1467
1339/* PRAMIN access */ 1468/* PRAMIN access */
1340static inline u32 nv_ri32(struct drm_device *dev, unsigned offset) 1469static inline u32 nv_ri32(struct drm_device *dev, unsigned offset)
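Note: nouveau_wait_until is renamed nouveau_wait_eq and gains a not-equal sibling, both wrapped with the 2-second (2000000000ULL ns) timeout. A self-contained userspace model of the two polling flavors; read_reg() stands in for the register read:

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

static uint32_t read_reg(void) { return 0; /* stand-in for nv_rd32() */ }

static uint64_t now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

/* Poll until (reg & mask) == val (want_eq) or != val (!want_eq),
 * giving up after 'timeout' nanoseconds. */
static bool wait_cond(uint64_t timeout, uint32_t mask, uint32_t val,
		      bool want_eq)
{
	uint64_t start = now_ns();

	do {
		if (((read_reg() & mask) == val) == want_eq)
			return true;
	} while (now_ns() - start < timeout);
	return false;
}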
@@ -1447,6 +1576,23 @@ nv_match_device(struct drm_device *dev, unsigned device,
1447 dev->pdev->subsystem_device == sub_device; 1576 dev->pdev->subsystem_device == sub_device;
1448} 1577}
1449 1578
1579/* memory type/access flags, do not match hardware values */
1580#define NV_MEM_ACCESS_RO 1
1581#define NV_MEM_ACCESS_WO 2
1582#define NV_MEM_ACCESS_RW (NV_MEM_ACCESS_RO | NV_MEM_ACCESS_WO)
1583#define NV_MEM_ACCESS_SYS 4
1584#define NV_MEM_ACCESS_VM 8
1585
1586#define NV_MEM_TARGET_VRAM 0
1587#define NV_MEM_TARGET_PCI 1
1588#define NV_MEM_TARGET_PCI_NOSNOOP 2
1589#define NV_MEM_TARGET_VM 3
1590#define NV_MEM_TARGET_GART 4
1591
1592#define NV_MEM_TYPE_VM 0x7f
1593#define NV_MEM_COMP_VM 0x03
1594
1595/* NV_SW object class */
1450#define NV_SW 0x0000506e 1596#define NV_SW 0x0000506e
1451#define NV_SW_DMA_SEMAPHORE 0x00000060 1597#define NV_SW_DMA_SEMAPHORE 0x00000060
1452#define NV_SW_SEMAPHORE_OFFSET 0x00000064 1598#define NV_SW_SEMAPHORE_OFFSET 0x00000064
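Note: these software-defined access/target values feed the reworked DMA-object constructors declared earlier in this header. A hypothetical full-VRAM ctxdma; the 0x3d class number and the zero type/comp arguments are placeholders:

static int example_vram_ctxdma(struct nouveau_channel *chan, u64 size,
			       struct nouveau_gpuobj **pobj)
{
	/* class 0x3d is illustrative; type/comp of 0 mean no special
	 * page type and no compression in this sketch */
	return nv50_gpuobj_dma_new(chan, 0x3d, 0, size,
				   NV_MEM_TARGET_VRAM, NV_MEM_ACCESS_RW,
				   0, 0, pobj);
}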
@@ -1457,5 +1603,6 @@ nv_match_device(struct drm_device *dev, unsigned device,
1457#define NV_SW_VBLSEM_OFFSET 0x00000400 1603#define NV_SW_VBLSEM_OFFSET 0x00000400
1458#define NV_SW_VBLSEM_RELEASE_VALUE 0x00000404 1604#define NV_SW_VBLSEM_RELEASE_VALUE 0x00000404
1459#define NV_SW_VBLSEM_RELEASE 0x00000408 1605#define NV_SW_VBLSEM_RELEASE 0x00000408
1606#define NV_SW_PAGE_FLIP 0x00000500
1460 1607
1461#endif /* __NOUVEAU_DRV_H__ */ 1608#endif /* __NOUVEAU_DRV_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 02a4d1fd4845..a26d04740c88 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -49,6 +49,102 @@
49#include "nouveau_fbcon.h" 49#include "nouveau_fbcon.h"
50#include "nouveau_dma.h" 50#include "nouveau_dma.h"
51 51
52static void
53nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
54{
55 struct nouveau_fbdev *nfbdev = info->par;
56 struct drm_device *dev = nfbdev->dev;
57 struct drm_nouveau_private *dev_priv = dev->dev_private;
58 int ret;
59
60 if (info->state != FBINFO_STATE_RUNNING)
61 return;
62
63 ret = -ENODEV;
64 if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
65 mutex_trylock(&dev_priv->channel->mutex)) {
66 if (dev_priv->card_type < NV_50)
67 ret = nv04_fbcon_fillrect(info, rect);
68 else
69 if (dev_priv->card_type < NV_C0)
70 ret = nv50_fbcon_fillrect(info, rect);
71 else
72 ret = nvc0_fbcon_fillrect(info, rect);
73 mutex_unlock(&dev_priv->channel->mutex);
74 }
75
76 if (ret == 0)
77 return;
78
79 if (ret != -ENODEV)
80 nouveau_fbcon_gpu_lockup(info);
81 cfb_fillrect(info, rect);
82}
83
84static void
85nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
86{
87 struct nouveau_fbdev *nfbdev = info->par;
88 struct drm_device *dev = nfbdev->dev;
89 struct drm_nouveau_private *dev_priv = dev->dev_private;
90 int ret;
91
92 if (info->state != FBINFO_STATE_RUNNING)
93 return;
94
95 ret = -ENODEV;
96 if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
97 mutex_trylock(&dev_priv->channel->mutex)) {
98 if (dev_priv->card_type < NV_50)
99 ret = nv04_fbcon_copyarea(info, image);
100 else
101 if (dev_priv->card_type < NV_C0)
102 ret = nv50_fbcon_copyarea(info, image);
103 else
104 ret = nvc0_fbcon_copyarea(info, image);
105 mutex_unlock(&dev_priv->channel->mutex);
106 }
107
108 if (ret == 0)
109 return;
110
111 if (ret != -ENODEV)
112 nouveau_fbcon_gpu_lockup(info);
113 cfb_copyarea(info, image);
114}
115
116static void
117nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
118{
119 struct nouveau_fbdev *nfbdev = info->par;
120 struct drm_device *dev = nfbdev->dev;
121 struct drm_nouveau_private *dev_priv = dev->dev_private;
122 int ret;
123
124 if (info->state != FBINFO_STATE_RUNNING)
125 return;
126
127 ret = -ENODEV;
128 if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
129 mutex_trylock(&dev_priv->channel->mutex)) {
130 if (dev_priv->card_type < NV_50)
131 ret = nv04_fbcon_imageblit(info, image);
132 else
133 if (dev_priv->card_type < NV_C0)
134 ret = nv50_fbcon_imageblit(info, image);
135 else
136 ret = nvc0_fbcon_imageblit(info, image);
137 mutex_unlock(&dev_priv->channel->mutex);
138 }
139
140 if (ret == 0)
141 return;
142
143 if (ret != -ENODEV)
144 nouveau_fbcon_gpu_lockup(info);
145 cfb_imageblit(info, image);
146}
147
52static int 148static int
 53nouveau_fbcon_sync(struct fb_info *info) 149nouveau_fbcon_sync(struct fb_info *info)
 54{ 150{
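Note: all three new drawing entry points share one shape: opportunistically take the channel mutex, try the per-generation accelerated path, and fall back to the cfb_* software renderers on any failure. A self-contained userspace model of that pattern:

#include <errno.h>
#include <pthread.h>

static pthread_mutex_t chan_mutex = PTHREAD_MUTEX_INITIALIZER;

static int  gpu_fillrect(void) { return 0; } /* accelerated path */
static void sw_fillrect(void)  { }           /* cfb_* stand-in */

static void fillrect(void)
{
	int ret = -ENODEV;

	if (pthread_mutex_trylock(&chan_mutex) == 0) {
		ret = gpu_fillrect();
		pthread_mutex_unlock(&chan_mutex);
	}
	if (ret)		/* lock contended or GPU path failed */
		sw_fillrect();	/* always have a software fallback */
}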
@@ -58,22 +154,36 @@ nouveau_fbcon_sync(struct fb_info *info)
58 struct nouveau_channel *chan = dev_priv->channel; 154 struct nouveau_channel *chan = dev_priv->channel;
59 int ret, i; 155 int ret, i;
60 156
61 if (!chan || !chan->accel_done || 157 if (!chan || !chan->accel_done || in_interrupt() ||
62 info->state != FBINFO_STATE_RUNNING || 158 info->state != FBINFO_STATE_RUNNING ||
63 info->flags & FBINFO_HWACCEL_DISABLED) 159 info->flags & FBINFO_HWACCEL_DISABLED)
64 return 0; 160 return 0;
65 161
66 if (RING_SPACE(chan, 4)) { 162 if (!mutex_trylock(&chan->mutex))
163 return 0;
164
165 ret = RING_SPACE(chan, 4);
166 if (ret) {
167 mutex_unlock(&chan->mutex);
67 nouveau_fbcon_gpu_lockup(info); 168 nouveau_fbcon_gpu_lockup(info);
68 return 0; 169 return 0;
69 } 170 }
70 171
71 BEGIN_RING(chan, 0, 0x0104, 1); 172 if (dev_priv->card_type >= NV_C0) {
72 OUT_RING(chan, 0); 173 BEGIN_NVC0(chan, 2, NvSub2D, 0x010c, 1);
73 BEGIN_RING(chan, 0, 0x0100, 1); 174 OUT_RING (chan, 0);
74 OUT_RING(chan, 0); 175 BEGIN_NVC0(chan, 2, NvSub2D, 0x0100, 1);
176 OUT_RING (chan, 0);
177 } else {
178 BEGIN_RING(chan, 0, 0x0104, 1);
179 OUT_RING (chan, 0);
180 BEGIN_RING(chan, 0, 0x0100, 1);
181 OUT_RING (chan, 0);
182 }
183
75 nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy + 3, 0xffffffff); 184 nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy + 3, 0xffffffff);
76 FIRE_RING(chan); 185 FIRE_RING(chan);
186 mutex_unlock(&chan->mutex);
77 187
78 ret = -EBUSY; 188 ret = -EBUSY;
79 for (i = 0; i < 100000; i++) { 189 for (i = 0; i < 100000; i++) {
@@ -97,9 +207,9 @@ static struct fb_ops nouveau_fbcon_ops = {
97 .owner = THIS_MODULE, 207 .owner = THIS_MODULE,
98 .fb_check_var = drm_fb_helper_check_var, 208 .fb_check_var = drm_fb_helper_check_var,
99 .fb_set_par = drm_fb_helper_set_par, 209 .fb_set_par = drm_fb_helper_set_par,
100 .fb_fillrect = cfb_fillrect, 210 .fb_fillrect = nouveau_fbcon_fillrect,
101 .fb_copyarea = cfb_copyarea, 211 .fb_copyarea = nouveau_fbcon_copyarea,
102 .fb_imageblit = cfb_imageblit, 212 .fb_imageblit = nouveau_fbcon_imageblit,
103 .fb_sync = nouveau_fbcon_sync, 213 .fb_sync = nouveau_fbcon_sync,
104 .fb_pan_display = drm_fb_helper_pan_display, 214 .fb_pan_display = drm_fb_helper_pan_display,
105 .fb_blank = drm_fb_helper_blank, 215 .fb_blank = drm_fb_helper_blank,
@@ -108,29 +218,13 @@ static struct fb_ops nouveau_fbcon_ops = {
108 .fb_debug_leave = drm_fb_helper_debug_leave, 218 .fb_debug_leave = drm_fb_helper_debug_leave,
109}; 219};
110 220
111static struct fb_ops nv04_fbcon_ops = { 221static struct fb_ops nouveau_fbcon_sw_ops = {
112 .owner = THIS_MODULE, 222 .owner = THIS_MODULE,
113 .fb_check_var = drm_fb_helper_check_var, 223 .fb_check_var = drm_fb_helper_check_var,
114 .fb_set_par = drm_fb_helper_set_par, 224 .fb_set_par = drm_fb_helper_set_par,
115 .fb_fillrect = nv04_fbcon_fillrect, 225 .fb_fillrect = cfb_fillrect,
116 .fb_copyarea = nv04_fbcon_copyarea, 226 .fb_copyarea = cfb_copyarea,
117 .fb_imageblit = nv04_fbcon_imageblit, 227 .fb_imageblit = cfb_imageblit,
118 .fb_sync = nouveau_fbcon_sync,
119 .fb_pan_display = drm_fb_helper_pan_display,
120 .fb_blank = drm_fb_helper_blank,
121 .fb_setcmap = drm_fb_helper_setcmap,
122 .fb_debug_enter = drm_fb_helper_debug_enter,
123 .fb_debug_leave = drm_fb_helper_debug_leave,
124};
125
126static struct fb_ops nv50_fbcon_ops = {
127 .owner = THIS_MODULE,
128 .fb_check_var = drm_fb_helper_check_var,
129 .fb_set_par = drm_fb_helper_set_par,
130 .fb_fillrect = nv50_fbcon_fillrect,
131 .fb_copyarea = nv50_fbcon_copyarea,
132 .fb_imageblit = nv50_fbcon_imageblit,
133 .fb_sync = nouveau_fbcon_sync,
134 .fb_pan_display = drm_fb_helper_pan_display, 228 .fb_pan_display = drm_fb_helper_pan_display,
135 .fb_blank = drm_fb_helper_blank, 229 .fb_blank = drm_fb_helper_blank,
136 .fb_setcmap = drm_fb_helper_setcmap, 230 .fb_setcmap = drm_fb_helper_setcmap,
@@ -257,21 +351,16 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
257 FBINFO_HWACCEL_FILLRECT | 351 FBINFO_HWACCEL_FILLRECT |
258 FBINFO_HWACCEL_IMAGEBLIT; 352 FBINFO_HWACCEL_IMAGEBLIT;
259 info->flags |= FBINFO_CAN_FORCE_OUTPUT; 353 info->flags |= FBINFO_CAN_FORCE_OUTPUT;
260 info->fbops = &nouveau_fbcon_ops; 354 info->fbops = &nouveau_fbcon_sw_ops;
261 info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset - 355 info->fix.smem_start = dev->mode_config.fb_base +
262 dev_priv->vm_vram_base; 356 (nvbo->bo.mem.start << PAGE_SHIFT);
263 info->fix.smem_len = size; 357 info->fix.smem_len = size;
264 358
265 info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo); 359 info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo);
266 info->screen_size = size; 360 info->screen_size = size;
267 361
268 drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
269 drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height); 362 drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height);
270 363
271 /* FIXME: we really shouldn't expose mmio space at all */
272 info->fix.mmio_start = pci_resource_start(pdev, 1);
273 info->fix.mmio_len = pci_resource_len(pdev, 1);
274
275 /* Set aperture base/size for vesafb takeover */ 364 /* Set aperture base/size for vesafb takeover */
276 info->apertures = dev_priv->apertures; 365 info->apertures = dev_priv->apertures;
277 if (!info->apertures) { 366 if (!info->apertures) {
@@ -285,19 +374,20 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
285 info->pixmap.flags = FB_PIXMAP_SYSTEM; 374 info->pixmap.flags = FB_PIXMAP_SYSTEM;
286 info->pixmap.scan_align = 1; 375 info->pixmap.scan_align = 1;
287 376
377 mutex_unlock(&dev->struct_mutex);
378
288 if (dev_priv->channel && !nouveau_nofbaccel) { 379 if (dev_priv->channel && !nouveau_nofbaccel) {
289 switch (dev_priv->card_type) { 380 ret = -ENODEV;
290 case NV_C0: 381 if (dev_priv->card_type < NV_50)
291 break; 382 ret = nv04_fbcon_accel_init(info);
292 case NV_50: 383 else
293 nv50_fbcon_accel_init(info); 384 if (dev_priv->card_type < NV_C0)
294 info->fbops = &nv50_fbcon_ops; 385 ret = nv50_fbcon_accel_init(info);
295 break; 386 else
296 default: 387 ret = nvc0_fbcon_accel_init(info);
297 nv04_fbcon_accel_init(info); 388
298 info->fbops = &nv04_fbcon_ops; 389 if (ret == 0)
299 break; 390 info->fbops = &nouveau_fbcon_ops;
300 };
301 } 391 }
302 392
303 nouveau_fbcon_zfill(dev, nfbdev); 393 nouveau_fbcon_zfill(dev, nfbdev);
@@ -308,7 +398,6 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
308 nouveau_fb->base.height, 398 nouveau_fb->base.height,
309 nvbo->bo.offset, nvbo); 399 nvbo->bo.offset, nvbo);
310 400
311 mutex_unlock(&dev->struct_mutex);
312 vga_switcheroo_client_fb_set(dev->pdev, info); 401 vga_switcheroo_client_fb_set(dev->pdev, info);
313 return 0; 402 return 0;
314 403
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index e7e12684c37e..b73c29f87fc3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -40,15 +40,21 @@ struct nouveau_fbdev {
40 40
41void nouveau_fbcon_restore(void); 41void nouveau_fbcon_restore(void);
42 42
43void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region); 43int nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
44void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect); 44int nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
45void nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image); 45int nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
46int nv04_fbcon_accel_init(struct fb_info *info); 46int nv04_fbcon_accel_init(struct fb_info *info);
47void nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect); 47
48void nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region); 48int nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
49void nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image); 49int nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
50int nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
50int nv50_fbcon_accel_init(struct fb_info *info); 51int nv50_fbcon_accel_init(struct fb_info *info);
51 52
53int nvc0_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
54int nvc0_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
55int nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
56int nvc0_fbcon_accel_init(struct fb_info *info);
57
52void nouveau_fbcon_gpu_lockup(struct fb_info *info); 58void nouveau_fbcon_gpu_lockup(struct fb_info *info);
53 59
54int nouveau_fbcon_init(struct drm_device *dev); 60int nouveau_fbcon_init(struct drm_device *dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index ab1bbfbf266e..221b8462ea37 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -32,7 +32,8 @@
32#include "nouveau_dma.h" 32#include "nouveau_dma.h"
33 33
34#define USE_REFCNT(dev) (nouveau_private(dev)->chipset >= 0x10) 34#define USE_REFCNT(dev) (nouveau_private(dev)->chipset >= 0x10)
35#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17) 35#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17 && \
36 nouveau_private(dev)->card_type < NV_C0)
36 37
37struct nouveau_fence { 38struct nouveau_fence {
38 struct nouveau_channel *channel; 39 struct nouveau_channel *channel;
@@ -64,6 +65,7 @@ nouveau_fence_del(struct kref *ref)
64 struct nouveau_fence *fence = 65 struct nouveau_fence *fence =
65 container_of(ref, struct nouveau_fence, refcount); 66 container_of(ref, struct nouveau_fence, refcount);
66 67
68 nouveau_channel_ref(NULL, &fence->channel);
67 kfree(fence); 69 kfree(fence);
68} 70}
69 71
@@ -76,14 +78,17 @@ nouveau_fence_update(struct nouveau_channel *chan)
 
 	spin_lock(&chan->fence.lock);
 
-	if (USE_REFCNT(dev))
-		sequence = nvchan_rd32(chan, 0x48);
-	else
-		sequence = atomic_read(&chan->fence.last_sequence_irq);
+	/* Fetch the last sequence if the channel is still up and running */
+	if (likely(!list_empty(&chan->fence.pending))) {
+		if (USE_REFCNT(dev))
+			sequence = nvchan_rd32(chan, 0x48);
+		else
+			sequence = atomic_read(&chan->fence.last_sequence_irq);
 
-	if (chan->fence.sequence_ack == sequence)
-		goto out;
-	chan->fence.sequence_ack = sequence;
+		if (chan->fence.sequence_ack == sequence)
+			goto out;
+		chan->fence.sequence_ack = sequence;
+	}
 
 	list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
 		sequence = fence->sequence;
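The hunk above only reads the hardware sequence counter while fences are actually pending; retirement itself stays a simple monotonic comparison. A standalone sketch of that idea (plain C with illustrative names, not driver code):

#include <stdbool.h>
#include <stdio.h>

struct toy_fence {
	unsigned sequence;
	bool signalled;
};

/* Retire every pending fence whose sequence the GPU has already acked. */
static void toy_fence_update(struct toy_fence *pending, int n, unsigned ack)
{
	for (int i = 0; i < n; i++) {
		if (!pending[i].signalled && pending[i].sequence <= ack) {
			pending[i].signalled = true;
			printf("retired fence %u\n", pending[i].sequence);
		}
	}
}

int main(void)
{
	struct toy_fence pending[] = { {1, false}, {2, false}, {3, false} };
	toy_fence_update(pending, 3, 2);	/* GPU acked up to sequence 2 */
	return 0;
}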
@@ -113,13 +118,13 @@ nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence,
 	if (!fence)
 		return -ENOMEM;
 	kref_init(&fence->refcount);
-	fence->channel = chan;
+	nouveau_channel_ref(chan, &fence->channel);
 
 	if (emit)
 		ret = nouveau_fence_emit(fence);
 
 	if (ret)
-		nouveau_fence_unref((void *)&fence);
+		nouveau_fence_unref(&fence);
 	*pfence = fence;
 	return ret;
 }
@@ -127,7 +132,7 @@ nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence,
 struct nouveau_channel *
 nouveau_fence_channel(struct nouveau_fence *fence)
 {
-	return fence ? fence->channel : NULL;
+	return fence ? nouveau_channel_get_unlocked(fence->channel) : NULL;
 }
 
 int
@@ -135,6 +140,7 @@ nouveau_fence_emit(struct nouveau_fence *fence)
 {
 	struct nouveau_channel *chan = fence->channel;
 	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	int ret;
 
 	ret = RING_SPACE(chan, 2);
@@ -155,8 +161,15 @@ nouveau_fence_emit(struct nouveau_fence *fence)
 	list_add_tail(&fence->entry, &chan->fence.pending);
 	spin_unlock(&chan->fence.lock);
 
-	BEGIN_RING(chan, NvSubSw, USE_REFCNT(dev) ? 0x0050 : 0x0150, 1);
-	OUT_RING(chan, fence->sequence);
+	if (USE_REFCNT(dev)) {
+		if (dev_priv->card_type < NV_C0)
+			BEGIN_RING(chan, NvSubSw, 0x0050, 1);
+		else
+			BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0050, 1);
+	} else {
+		BEGIN_RING(chan, NvSubSw, 0x0150, 1);
+	}
+	OUT_RING (chan, fence->sequence);
 	FIRE_RING(chan);
 
 	return 0;
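The rewritten emit path above picks one of three encodings before writing the sequence number into the ring. A minimal standalone sketch of just that dispatch decision (stand-in constants; the real BEGIN_RING/BEGIN_NVC0 macros build the actual method headers):

enum toy_card { TOY_NV04 = 0x04, TOY_NV50 = 0x50, TOY_NVC0 = 0xc0 };

/* 0 = legacy software method, 1 = pre-Fermi refcount, 2 = Fermi M2MF */
static int toy_pick_emit_path(enum toy_card card, int use_refcnt)
{
	if (!use_refcnt)
		return 0;	/* BEGIN_RING(NvSubSw, 0x0150) */
	if (card < TOY_NVC0)
		return 1;	/* BEGIN_RING(NvSubSw, 0x0050) */
	return 2;		/* BEGIN_NVC0(2, NvSubM2MF, 0x0050) */
}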
@@ -182,7 +195,7 @@ nouveau_fence_work(struct nouveau_fence *fence,
 }
 
 void
-nouveau_fence_unref(void **sync_obj)
+__nouveau_fence_unref(void **sync_obj)
 {
 	struct nouveau_fence *fence = nouveau_fence(*sync_obj);
 
@@ -192,7 +205,7 @@ nouveau_fence_unref(void **sync_obj)
 }
 
 void *
-nouveau_fence_ref(void *sync_obj)
+__nouveau_fence_ref(void *sync_obj)
 {
 	struct nouveau_fence *fence = nouveau_fence(sync_obj);
 
@@ -201,7 +214,7 @@ nouveau_fence_ref(void *sync_obj)
 }
 
 bool
-nouveau_fence_signalled(void *sync_obj, void *sync_arg)
+__nouveau_fence_signalled(void *sync_obj, void *sync_arg)
 {
 	struct nouveau_fence *fence = nouveau_fence(sync_obj);
 	struct nouveau_channel *chan = fence->channel;
@@ -214,13 +227,14 @@ nouveau_fence_signalled(void *sync_obj, void *sync_arg)
 }
 
 int
-nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
+__nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
 {
 	unsigned long timeout = jiffies + (3 * DRM_HZ);
+	unsigned long sleep_time = jiffies + 1;
 	int ret = 0;
 
 	while (1) {
-		if (nouveau_fence_signalled(sync_obj, sync_arg))
+		if (__nouveau_fence_signalled(sync_obj, sync_arg))
 			break;
 
 		if (time_after_eq(jiffies, timeout)) {
@@ -230,7 +244,7 @@ nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
 
 		__set_current_state(intr ? TASK_INTERRUPTIBLE
 					 : TASK_UNINTERRUPTIBLE);
-		if (lazy)
+		if (lazy && time_after_eq(jiffies, sleep_time))
 			schedule_timeout(1);
 
 		if (intr && signal_pending(current)) {
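Both hunks above shape the same wait loop: poll the fence, fail after three seconds, and (with the new sleep_time check) only start yielding the CPU once the first tick has passed. A standalone userspace sketch of that pattern, with clock() standing in for jiffies:

#include <stdbool.h>
#include <time.h>

static bool toy_wait(bool (*signalled)(void *), void *obj)
{
	clock_t timeout = clock() + 3 * CLOCKS_PER_SEC;	/* 3 * DRM_HZ analogue */
	clock_t sleep_after = clock() + CLOCKS_PER_SEC / 100;

	while (!signalled(obj)) {
		if (clock() >= timeout)
			return false;		/* caller maps this to -EBUSY */
		if (clock() >= sleep_after) {
			struct timespec ts = { 0, 1000000 };
			nanosleep(&ts, NULL);	/* schedule_timeout(1) analogue */
		}
	}
	return true;
}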
@@ -368,7 +382,7 @@ emit_semaphore(struct nouveau_channel *chan, int method,
 
 	kref_get(&sema->ref);
 	nouveau_fence_work(fence, semaphore_work, sema);
-	nouveau_fence_unref((void *)&fence);
+	nouveau_fence_unref(&fence);
 
 	return 0;
 }
@@ -380,33 +394,49 @@ nouveau_fence_sync(struct nouveau_fence *fence,
 	struct nouveau_channel *chan = nouveau_fence_channel(fence);
 	struct drm_device *dev = wchan->dev;
 	struct nouveau_semaphore *sema;
-	int ret;
+	int ret = 0;
 
-	if (likely(!fence || chan == wchan ||
-		   nouveau_fence_signalled(fence, NULL)))
-		return 0;
+	if (likely(!chan || chan == wchan ||
+		   nouveau_fence_signalled(fence)))
+		goto out;
 
 	sema = alloc_semaphore(dev);
 	if (!sema) {
 		/* Early card or broken userspace, fall back to
 		 * software sync. */
-		return nouveau_fence_wait(fence, NULL, false, false);
+		ret = nouveau_fence_wait(fence, true, false);
+		goto out;
+	}
+
+	/* try to take chan's mutex, if we can't take it right away
+	 * we have to fallback to software sync to prevent locking
+	 * order issues
+	 */
+	if (!mutex_trylock(&chan->mutex)) {
+		ret = nouveau_fence_wait(fence, true, false);
+		goto out_unref;
 	}
 
 	/* Make wchan wait until it gets signalled */
 	ret = emit_semaphore(wchan, NV_SW_SEMAPHORE_ACQUIRE, sema);
 	if (ret)
-		goto out;
+		goto out_unlock;
 
 	/* Signal the semaphore from chan */
 	ret = emit_semaphore(chan, NV_SW_SEMAPHORE_RELEASE, sema);
-out:
+
+out_unlock:
+	mutex_unlock(&chan->mutex);
+out_unref:
 	kref_put(&sema->ref, free_semaphore);
+out:
+	if (chan)
+		nouveau_channel_put_unlocked(&chan);
 	return ret;
 }
 
 int
-nouveau_fence_flush(void *sync_obj, void *sync_arg)
+__nouveau_fence_flush(void *sync_obj, void *sync_arg)
 {
 	return 0;
 }
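The reworked nouveau_fence_sync() above emits an ACQUIRE on the waiting channel and a RELEASE on the signalling one, with two software fallbacks (no semaphore heap, or chan->mutex contended). The pairing itself is just a shared cell both channels can see; a minimal C11 sketch, with plain functions standing in for the two channels' pushbuffers:

#include <stdatomic.h>

struct toy_sema { atomic_int value; };

static void toy_acquire(struct toy_sema *s)	/* emitted on wchan */
{
	while (atomic_load(&s->value) == 0)
		;				/* GPU-side spin until released */
}

static void toy_release(struct toy_sema *s)	/* emitted on chan */
{
	atomic_store(&s->value, 1);
}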
@@ -420,30 +450,27 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
 	int ret;
 
 	/* Create an NV_SW object for various sync purposes */
-	ret = nouveau_gpuobj_sw_new(chan, NV_SW, &obj);
+	ret = nouveau_gpuobj_gr_new(chan, NvSw, NV_SW);
 	if (ret)
 		return ret;
 
-	ret = nouveau_ramht_insert(chan, NvSw, obj);
-	nouveau_gpuobj_ref(NULL, &obj);
-	if (ret)
-		return ret;
-
-	ret = RING_SPACE(chan, 2);
-	if (ret)
-		return ret;
-	BEGIN_RING(chan, NvSubSw, 0, 1);
-	OUT_RING(chan, NvSw);
+	/* we leave subchannel empty for nvc0 */
+	if (dev_priv->card_type < NV_C0) {
+		ret = RING_SPACE(chan, 2);
+		if (ret)
+			return ret;
+		BEGIN_RING(chan, NvSubSw, 0, 1);
+		OUT_RING(chan, NvSw);
+	}
 
 	/* Create a DMA object for the shared cross-channel sync area. */
 	if (USE_SEMA(dev)) {
-		struct drm_mm_node *mem = dev_priv->fence.bo->bo.mem.mm_node;
+		struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;
 
 		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
 					     mem->start << PAGE_SHIFT,
-					     mem->size << PAGE_SHIFT,
-					     NV_DMA_ACCESS_RW,
-					     NV_DMA_TARGET_VIDMEM, &obj);
+					     mem->size, NV_MEM_ACCESS_RW,
+					     NV_MEM_TARGET_VRAM, &obj);
 		if (ret)
 			return ret;
 
@@ -473,6 +500,8 @@ nouveau_fence_channel_fini(struct nouveau_channel *chan)
 {
 	struct nouveau_fence *tmp, *fence;
 
+	spin_lock(&chan->fence.lock);
+
 	list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
 		fence->signalled = true;
 		list_del(&fence->entry);
@@ -482,6 +511,8 @@ nouveau_fence_channel_fini(struct nouveau_channel *chan)
 
 		kref_put(&fence->refcount, nouveau_fence_del);
 	}
+
+	spin_unlock(&chan->fence.lock);
 }
 
 int
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 9a1fdcf400c2..506c508b7eda 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -48,9 +48,6 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
 		return;
 	nvbo->gem = NULL;
 
-	if (unlikely(nvbo->cpu_filp))
-		ttm_bo_synccpu_write_release(bo);
-
 	if (unlikely(nvbo->pin_refcnt)) {
 		nvbo->pin_refcnt = 1;
 		nouveau_bo_unpin(nvbo);
@@ -106,32 +103,6 @@ nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep)
 	return 0;
 }
 
-static bool
-nouveau_gem_tile_flags_valid(struct drm_device *dev, uint32_t tile_flags)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	if (dev_priv->card_type >= NV_50) {
-		switch (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) {
-		case 0x0000:
-		case 0x1800:
-		case 0x2800:
-		case 0x4800:
-		case 0x7000:
-		case 0x7400:
-		case 0x7a00:
-		case 0xe000:
-			return true;
-		}
-	} else {
-		if (!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK))
-			return true;
-	}
-
-	NV_ERROR(dev, "bad page flags: 0x%08x\n", tile_flags);
-	return false;
-}
-
 int
 nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
 		      struct drm_file *file_priv)
@@ -146,11 +117,6 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
 	if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
 		dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;
 
-	if (req->channel_hint) {
-		NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel_hint,
-						     file_priv, chan);
-	}
-
 	if (req->info.domain & NOUVEAU_GEM_DOMAIN_VRAM)
 		flags |= TTM_PL_FLAG_VRAM;
 	if (req->info.domain & NOUVEAU_GEM_DOMAIN_GART)
@@ -158,13 +124,23 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
 	if (!flags || req->info.domain & NOUVEAU_GEM_DOMAIN_CPU)
 		flags |= TTM_PL_FLAG_SYSTEM;
 
-	if (!nouveau_gem_tile_flags_valid(dev, req->info.tile_flags))
+	if (!dev_priv->engine.vram.flags_valid(dev, req->info.tile_flags)) {
+		NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags);
 		return -EINVAL;
+	}
+
+	if (req->channel_hint) {
+		chan = nouveau_channel_get(dev, file_priv, req->channel_hint);
+		if (IS_ERR(chan))
+			return PTR_ERR(chan);
+	}
 
 	ret = nouveau_gem_new(dev, chan, req->info.size, req->align, flags,
 			      req->info.tile_mode, req->info.tile_flags, false,
 			      (req->info.domain & NOUVEAU_GEM_DOMAIN_MAPPABLE),
 			      &nvbo);
+	if (chan)
+		nouveau_channel_put(&chan);
 	if (ret)
 		return ret;
 
@@ -231,15 +207,8 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
 
 	list_for_each_safe(entry, tmp, list) {
 		nvbo = list_entry(entry, struct nouveau_bo, entry);
-		if (likely(fence)) {
-			struct nouveau_fence *prev_fence;
-
-			spin_lock(&nvbo->bo.lock);
-			prev_fence = nvbo->bo.sync_obj;
-			nvbo->bo.sync_obj = nouveau_fence_ref(fence);
-			spin_unlock(&nvbo->bo.lock);
-			nouveau_fence_unref((void *)&prev_fence);
-		}
+
+		nouveau_bo_fence(nvbo, fence);
 
 		if (unlikely(nvbo->validate_mapped)) {
 			ttm_bo_kunmap(&nvbo->kmap);
@@ -299,14 +268,15 @@ retry:
 		return -EINVAL;
 	}
 
-	ret = ttm_bo_reserve(&nvbo->bo, false, false, true, sequence);
+	ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence);
 	if (ret) {
 		validate_fini(op, NULL);
-		if (ret == -EAGAIN)
-			ret = ttm_bo_wait_unreserved(&nvbo->bo, false);
+		if (unlikely(ret == -EAGAIN))
+			ret = ttm_bo_wait_unreserved(&nvbo->bo, true);
 		drm_gem_object_unreference_unlocked(gem);
-		if (ret) {
-			NV_ERROR(dev, "fail reserve\n");
+		if (unlikely(ret)) {
+			if (ret != -ERESTARTSYS)
+				NV_ERROR(dev, "fail reserve\n");
 			return ret;
 		}
 		goto retry;
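The reserve loop above follows the classic deadlock-avoidance shape: trylock-style reserve, and on contention drop everything already reserved, wait for the busy object, then restart from the top. A standalone sketch with pthread mutexes standing in for ttm_bo_reserve()/ttm_bo_wait_unreserved():

#include <pthread.h>

static void toy_reserve_all(pthread_mutex_t *locks, int n)
{
retry:
	for (int i = 0; i < n; i++) {
		if (pthread_mutex_trylock(&locks[i]) != 0) {
			/* validate_fini() analogue: back out fully */
			for (int j = 0; j < i; j++)
				pthread_mutex_unlock(&locks[j]);
			/* ttm_bo_wait_unreserved() analogue */
			pthread_mutex_lock(&locks[i]);
			pthread_mutex_unlock(&locks[i]);
			goto retry;
		}
	}
}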
@@ -331,25 +301,6 @@ retry:
 		validate_fini(op, NULL);
 		return -EINVAL;
 	}
-
-	if (unlikely(atomic_read(&nvbo->bo.cpu_writers) > 0)) {
-		validate_fini(op, NULL);
-
-		if (nvbo->cpu_filp == file_priv) {
-			NV_ERROR(dev, "bo %p mapped by process trying "
-				 "to validate it!\n", nvbo);
-			return -EINVAL;
-		}
-
-		mutex_unlock(&drm_global_mutex);
-		ret = ttm_bo_wait_cpu(&nvbo->bo, false);
-		mutex_lock(&drm_global_mutex);
-		if (ret) {
-			NV_ERROR(dev, "fail wait_cpu\n");
-			return ret;
-		}
-		goto retry;
-	}
 	}
 
 	return 0;
@@ -383,11 +334,11 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
 	}
 
 	nvbo->channel = (b->read_domains & (1 << 31)) ? NULL : chan;
-	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
-			      false, false, false);
+	ret = nouveau_bo_validate(nvbo, true, false, false);
 	nvbo->channel = NULL;
 	if (unlikely(ret)) {
-		NV_ERROR(dev, "fail ttm_validate\n");
+		if (ret != -ERESTARTSYS)
+			NV_ERROR(dev, "fail ttm_validate\n");
 		return ret;
 	}
 
@@ -439,13 +390,15 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 
 	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
 	if (unlikely(ret)) {
-		NV_ERROR(dev, "validate_init\n");
+		if (ret != -ERESTARTSYS)
+			NV_ERROR(dev, "validate_init\n");
 		return ret;
 	}
 
 	ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
 	if (unlikely(ret < 0)) {
-		NV_ERROR(dev, "validate vram_list\n");
+		if (ret != -ERESTARTSYS)
+			NV_ERROR(dev, "validate vram_list\n");
 		validate_fini(op, NULL);
 		return ret;
 	}
@@ -453,7 +406,8 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 
 	ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
 	if (unlikely(ret < 0)) {
-		NV_ERROR(dev, "validate gart_list\n");
+		if (ret != -ERESTARTSYS)
+			NV_ERROR(dev, "validate gart_list\n");
 		validate_fini(op, NULL);
 		return ret;
 	}
@@ -461,7 +415,8 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 
 	ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
 	if (unlikely(ret < 0)) {
-		NV_ERROR(dev, "validate both_list\n");
+		if (ret != -ERESTARTSYS)
+			NV_ERROR(dev, "validate both_list\n");
 		validate_fini(op, NULL);
 		return ret;
 	}
@@ -557,9 +512,9 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
 			data |= r->vor;
 		}
 
-		spin_lock(&nvbo->bo.lock);
+		spin_lock(&nvbo->bo.bdev->fence_lock);
 		ret = ttm_bo_wait(&nvbo->bo, false, false, false);
-		spin_unlock(&nvbo->bo.lock);
+		spin_unlock(&nvbo->bo.bdev->fence_lock);
 		if (ret) {
 			NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret);
 			break;
@@ -585,7 +540,9 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 	struct nouveau_fence *fence = NULL;
 	int i, j, ret = 0, do_reloc = 0;
 
-	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);
+	chan = nouveau_channel_get(dev, file_priv, req->channel);
+	if (IS_ERR(chan))
+		return PTR_ERR(chan);
 
 	req->vram_available = dev_priv->fb_aper_free;
 	req->gart_available = dev_priv->gart_info.aper_free;
@@ -595,28 +552,34 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
 		NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n",
 			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
+		nouveau_channel_put(&chan);
 		return -EINVAL;
 	}
 
 	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
 		NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n",
 			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
+		nouveau_channel_put(&chan);
 		return -EINVAL;
 	}
 
 	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
 		NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n",
 			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
+		nouveau_channel_put(&chan);
 		return -EINVAL;
 	}
 
 	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
-	if (IS_ERR(push))
+	if (IS_ERR(push)) {
+		nouveau_channel_put(&chan);
 		return PTR_ERR(push);
+	}
 
 	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
 	if (IS_ERR(bo)) {
 		kfree(push);
+		nouveau_channel_put(&chan);
 		return PTR_ERR(bo);
 	}
 
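Each early return above now has to drop the channel reference taken by nouveau_channel_get(). The usual way to keep such paths from drifting apart is a single exit label; a sketch of the shape (hypothetical helpers, not the driver's API):

struct toy_chan;
extern struct toy_chan *toy_channel_get(int id);
extern void toy_channel_put(struct toy_chan **chan);

static int toy_pushbuf(int id, unsigned nr_push, unsigned max_push)
{
	struct toy_chan *chan = toy_channel_get(id);
	int ret = 0;

	if (!chan)
		return -1;
	if (nr_push > max_push) {
		ret = -1;		/* -EINVAL analogue */
		goto out_put;
	}
	/* ... copy, validate, submit ... */
out_put:
	toy_channel_put(&chan);		/* every path funnels through here */
	return ret;
}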
@@ -639,7 +602,8 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
 					   req->nr_buffers, &op, &do_reloc);
 	if (ret) {
-		NV_ERROR(dev, "validate: %d\n", ret);
+		if (ret != -ERESTARTSYS)
+			NV_ERROR(dev, "validate: %d\n", ret);
 		goto out;
 	}
 
@@ -732,7 +696,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 
 out:
 	validate_fini(&op, fence);
-	nouveau_fence_unref((void**)&fence);
+	nouveau_fence_unref(&fence);
 	kfree(bo);
 	kfree(push);
 
@@ -750,6 +714,7 @@ out_next:
 		req->suffix1 = 0x00000000;
 	}
 
+	nouveau_channel_put(&chan);
 	return ret;
 }
 
@@ -781,26 +746,9 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
 		return -ENOENT;
 	nvbo = nouveau_gem_object(gem);
 
-	if (nvbo->cpu_filp) {
-		if (nvbo->cpu_filp == file_priv)
-			goto out;
-
-		ret = ttm_bo_wait_cpu(&nvbo->bo, no_wait);
-		if (ret)
-			goto out;
-	}
-
-	if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) {
-		spin_lock(&nvbo->bo.lock);
-		ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait);
-		spin_unlock(&nvbo->bo.lock);
-	} else {
-		ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait);
-		if (ret == 0)
-			nvbo->cpu_filp = file_priv;
-	}
-
-out:
+	spin_lock(&nvbo->bo.bdev->fence_lock);
+	ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait);
+	spin_unlock(&nvbo->bo.bdev->fence_lock);
 	drm_gem_object_unreference_unlocked(gem);
 	return ret;
 }
@@ -809,26 +757,7 @@ int
 nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
 			   struct drm_file *file_priv)
 {
-	struct drm_nouveau_gem_cpu_prep *req = data;
-	struct drm_gem_object *gem;
-	struct nouveau_bo *nvbo;
-	int ret = -EINVAL;
-
-	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
-	if (!gem)
-		return -ENOENT;
-	nvbo = nouveau_gem_object(gem);
-
-	if (nvbo->cpu_filp != file_priv)
-		goto out;
-	nvbo->cpu_filp = NULL;
-
-	ttm_bo_synccpu_write_release(&nvbo->bo);
-	ret = 0;
-
-out:
-	drm_gem_object_unreference_unlocked(gem);
-	return ret;
+	return 0;
 }
 
 int
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c
index b9672a05c411..053edf9d2f67 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hw.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.c
@@ -953,7 +953,7 @@ nv_load_state_ext(struct drm_device *dev, int head,
 	NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850);
 
 	reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900);
-	if (regp->crtc_cfg == NV_PCRTC_CONFIG_START_ADDRESS_HSYNC)
+	if (regp->crtc_cfg == NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC)
 		NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 | 0x10000);
 	else
 		NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 & ~0x10000);
@@ -999,8 +999,8 @@ nv_load_state_ext(struct drm_device *dev, int head,
 	if (dev_priv->card_type == NV_10) {
 		/* Not waiting for vertical retrace before modifying
 		   CRE_53/CRE_54 causes lockups. */
-		nouveau_wait_until(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8);
-		nouveau_wait_until(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0);
+		nouveau_wait_eq(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8);
+		nouveau_wait_eq(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0);
 	}
 
 	wr_cio_state(dev, head, regp, NV_CIO_CRE_53);
@@ -1017,8 +1017,9 @@ nv_load_state_ext(struct drm_device *dev, int head,
 
 	NVWriteCRTC(dev, head, NV_PCRTC_START, regp->fb_start);
 
-	/* Setting 1 on this value gives you interrupts for every vblank period. */
-	NVWriteCRTC(dev, head, NV_PCRTC_INTR_EN_0, 0);
+	/* Enable vblank interrupts. */
+	NVWriteCRTC(dev, head, NV_PCRTC_INTR_EN_0,
+		    (dev->vblank_enabled[head] ? 1 : 0));
 	NVWriteCRTC(dev, head, NV_PCRTC_INTR_0, NV_PCRTC_INTR_0_VBLANK);
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 7bfd9e6c9d67..2ba7265bc967 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -36,18 +36,7 @@
 #include "nouveau_drv.h"
 #include "nouveau_reg.h"
 #include "nouveau_ramht.h"
-#include <linux/ratelimit.h>
-
-/* needed for hotplug irq */
-#include "nouveau_connector.h"
-#include "nv50_display.h"
-
-static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20);
-
-static int nouveau_ratelimit(void)
-{
-	return __ratelimit(&nouveau_ratelimit_state);
-}
+#include "nouveau_util.h"
 
 void
 nouveau_irq_preinstall(struct drm_device *dev)
@@ -57,19 +46,19 @@ nouveau_irq_preinstall(struct drm_device *dev)
 	/* Master disable */
 	nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
 
-	if (dev_priv->card_type >= NV_50) {
-		INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
-		INIT_WORK(&dev_priv->hpd_work, nv50_display_irq_hotplug_bh);
-		spin_lock_init(&dev_priv->hpd_state.lock);
-		INIT_LIST_HEAD(&dev_priv->vbl_waiting);
-	}
+	INIT_LIST_HEAD(&dev_priv->vbl_waiting);
 }
 
 int
 nouveau_irq_postinstall(struct drm_device *dev)
 {
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
 	/* Master enable */
 	nv_wr32(dev, NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE);
+	if (dev_priv->msi_enabled)
+		nv_wr08(dev, 0x00088068, 0xff);
+
 	return 0;
 }
 
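postinstall now re-arms MSI by writing 0xff to 0x88068, and the new nouveau_irq_handler() in the next hunk walks NV03_PMC_INTR_0 bit by bit through dev_priv->irq_handler[]. The dispatch core, as a standalone sketch (handler table and register access are stand-ins):

#include <stdint.h>

typedef void (*toy_irq_fn)(void *dev);

/* Call the registered handler for each pending status bit, clearing
 * bits as they are serviced; whatever is left is "unhandled INTR". */
static uint32_t toy_dispatch(uint32_t stat, toy_irq_fn *handlers, void *dev)
{
	for (int i = 0; i < 32 && stat; i++) {
		if (!(stat & (1u << i)) || !handlers[i])
			continue;
		handlers[i](dev);
		stat &= ~(1u << i);
	}
	return stat;
}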
@@ -80,1178 +69,83 @@ nouveau_irq_uninstall(struct drm_device *dev)
 	nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
 }
 
-static int
-nouveau_call_method(struct nouveau_channel *chan, int class, int mthd, int data)
+irqreturn_t
+nouveau_irq_handler(DRM_IRQ_ARGS)
85{
86 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
87 struct nouveau_pgraph_object_method *grm;
88 struct nouveau_pgraph_object_class *grc;
89
90 grc = dev_priv->engine.graph.grclass;
91 while (grc->id) {
92 if (grc->id == class)
93 break;
94 grc++;
95 }
96
97 if (grc->id != class || !grc->methods)
98 return -ENOENT;
99
100 grm = grc->methods;
101 while (grm->id) {
102 if (grm->id == mthd)
103 return grm->exec(chan, class, mthd, data);
104 grm++;
105 }
106
107 return -ENOENT;
108}
109
110static bool
111nouveau_fifo_swmthd(struct nouveau_channel *chan, uint32_t addr, uint32_t data)
112{
113 struct drm_device *dev = chan->dev;
114 const int subc = (addr >> 13) & 0x7;
115 const int mthd = addr & 0x1ffc;
116
117 if (mthd == 0x0000) {
118 struct nouveau_gpuobj *gpuobj;
119
120 gpuobj = nouveau_ramht_find(chan, data);
121 if (!gpuobj)
122 return false;
123
124 if (gpuobj->engine != NVOBJ_ENGINE_SW)
125 return false;
126
127 chan->sw_subchannel[subc] = gpuobj->class;
128 nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_rd32(dev,
129 NV04_PFIFO_CACHE1_ENGINE) & ~(0xf << subc * 4));
130 return true;
131 }
132
133 /* hw object */
134 if (nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE) & (1 << (subc*4)))
135 return false;
136
137 if (nouveau_call_method(chan, chan->sw_subchannel[subc], mthd, data))
138 return false;
139
140 return true;
141}
142
143static void
144nouveau_fifo_irq_handler(struct drm_device *dev)
145{
146 struct drm_nouveau_private *dev_priv = dev->dev_private;
147 struct nouveau_engine *engine = &dev_priv->engine;
148 uint32_t status, reassign;
149 int cnt = 0;
150
151 reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
152 while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
153 struct nouveau_channel *chan = NULL;
154 uint32_t chid, get;
155
156 nv_wr32(dev, NV03_PFIFO_CACHES, 0);
157
158 chid = engine->fifo.channel_id(dev);
159 if (chid >= 0 && chid < engine->fifo.channels)
160 chan = dev_priv->fifos[chid];
161 get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);
162
163 if (status & NV_PFIFO_INTR_CACHE_ERROR) {
164 uint32_t mthd, data;
165 int ptr;
166
167 /* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
168 * wrapping on my G80 chips, but CACHE1 isn't big
169 * enough for this much data.. Tests show that it
170 * wraps around to the start at GET=0x800.. No clue
171 * as to why..
172 */
173 ptr = (get & 0x7ff) >> 2;
174
175 if (dev_priv->card_type < NV_40) {
176 mthd = nv_rd32(dev,
177 NV04_PFIFO_CACHE1_METHOD(ptr));
178 data = nv_rd32(dev,
179 NV04_PFIFO_CACHE1_DATA(ptr));
180 } else {
181 mthd = nv_rd32(dev,
182 NV40_PFIFO_CACHE1_METHOD(ptr));
183 data = nv_rd32(dev,
184 NV40_PFIFO_CACHE1_DATA(ptr));
185 }
186
187 if (!chan || !nouveau_fifo_swmthd(chan, mthd, data)) {
188 NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
189 "Mthd 0x%04x Data 0x%08x\n",
190 chid, (mthd >> 13) & 7, mthd & 0x1ffc,
191 data);
192 }
193
194 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
195 nv_wr32(dev, NV03_PFIFO_INTR_0,
196 NV_PFIFO_INTR_CACHE_ERROR);
197
198 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
199 nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
200 nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
201 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
202 nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
203 nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
204
205 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
206 nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
207 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
208
209 status &= ~NV_PFIFO_INTR_CACHE_ERROR;
210 }
211
212 if (status & NV_PFIFO_INTR_DMA_PUSHER) {
213 u32 dma_get = nv_rd32(dev, 0x003244);
214 u32 dma_put = nv_rd32(dev, 0x003240);
215 u32 push = nv_rd32(dev, 0x003220);
216 u32 state = nv_rd32(dev, 0x003228);
217
218 if (dev_priv->card_type == NV_50) {
219 u32 ho_get = nv_rd32(dev, 0x003328);
220 u32 ho_put = nv_rd32(dev, 0x003320);
221 u32 ib_get = nv_rd32(dev, 0x003334);
222 u32 ib_put = nv_rd32(dev, 0x003330);
223
224 if (nouveau_ratelimit())
225 NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
226 "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
227 "State 0x%08x Push 0x%08x\n",
228 chid, ho_get, dma_get, ho_put,
229 dma_put, ib_get, ib_put, state,
230 push);
231
232 /* METHOD_COUNT, in DMA_STATE on earlier chipsets */
233 nv_wr32(dev, 0x003364, 0x00000000);
234 if (dma_get != dma_put || ho_get != ho_put) {
235 nv_wr32(dev, 0x003244, dma_put);
236 nv_wr32(dev, 0x003328, ho_put);
237 } else
238 if (ib_get != ib_put) {
239 nv_wr32(dev, 0x003334, ib_put);
240 }
241 } else {
242 NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
243 "Put 0x%08x State 0x%08x Push 0x%08x\n",
244 chid, dma_get, dma_put, state, push);
245
246 if (dma_get != dma_put)
247 nv_wr32(dev, 0x003244, dma_put);
248 }
249
250 nv_wr32(dev, 0x003228, 0x00000000);
251 nv_wr32(dev, 0x003220, 0x00000001);
252 nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
253 status &= ~NV_PFIFO_INTR_DMA_PUSHER;
254 }
255
256 if (status & NV_PFIFO_INTR_SEMAPHORE) {
257 uint32_t sem;
258
259 status &= ~NV_PFIFO_INTR_SEMAPHORE;
260 nv_wr32(dev, NV03_PFIFO_INTR_0,
261 NV_PFIFO_INTR_SEMAPHORE);
262
263 sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
264 nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
265
266 nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
267 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
268 }
269
270 if (dev_priv->card_type == NV_50) {
271 if (status & 0x00000010) {
272 nv50_fb_vm_trap(dev, 1, "PFIFO_BAR_FAULT");
273 status &= ~0x00000010;
274 nv_wr32(dev, 0x002100, 0x00000010);
275 }
276 }
277
278 if (status) {
279 if (nouveau_ratelimit())
280 NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
281 status, chid);
282 nv_wr32(dev, NV03_PFIFO_INTR_0, status);
283 status = 0;
284 }
285
286 nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
287 }
288
289 if (status) {
290 NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
291 nv_wr32(dev, 0x2140, 0);
292 nv_wr32(dev, 0x140, 0);
293 }
294
295 nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
296}
297
298struct nouveau_bitfield_names {
299 uint32_t mask;
300 const char *name;
301};
302
303static struct nouveau_bitfield_names nstatus_names[] =
304{
305 { NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
306 { NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
307 { NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
308 { NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }
309};
310
311static struct nouveau_bitfield_names nstatus_names_nv10[] =
312{
313 { NV10_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
314 { NV10_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
315 { NV10_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
316 { NV10_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }
317};
318
319static struct nouveau_bitfield_names nsource_names[] =
320{
321 { NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" },
322 { NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" },
323 { NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" },
324 { NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" },
325 { NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" },
326 { NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" },
327 { NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" },
328 { NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" },
329 { NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, "DMA_W_PROTECTION" },
330 { NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" },
331 { NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, "PATCH_EXCEPTION" },
332 { NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" },
333 { NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" },
334 { NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" },
335 { NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" },
336 { NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" },
337 { NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
338 { NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" },
339 { NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" },
340};
341
342static void
343nouveau_print_bitfield_names_(uint32_t value,
344 const struct nouveau_bitfield_names *namelist,
345 const int namelist_len)
346{
347 /*
348 * Caller must have already printed the KERN_* log level for us.
349 * Also the caller is responsible for adding the newline.
350 */
351 int i;
352 for (i = 0; i < namelist_len; ++i) {
353 uint32_t mask = namelist[i].mask;
354 if (value & mask) {
355 printk(" %s", namelist[i].name);
356 value &= ~mask;
357 }
358 }
359 if (value)
360 printk(" (unknown bits 0x%08x)", value);
361}
362#define nouveau_print_bitfield_names(val, namelist) \
363 nouveau_print_bitfield_names_((val), (namelist), ARRAY_SIZE(namelist))
364
365struct nouveau_enum_names {
366 uint32_t value;
367 const char *name;
368};
369
370static void
371nouveau_print_enum_names_(uint32_t value,
372 const struct nouveau_enum_names *namelist,
373 const int namelist_len)
374{
375 /*
376 * Caller must have already printed the KERN_* log level for us.
377 * Also the caller is responsible for adding the newline.
378 */
379 int i;
380 for (i = 0; i < namelist_len; ++i) {
381 if (value == namelist[i].value) {
382 printk("%s", namelist[i].name);
383 return;
384 }
385 }
386 printk("unknown value 0x%08x", value);
387}
388#define nouveau_print_enum_names(val, namelist) \
389 nouveau_print_enum_names_((val), (namelist), ARRAY_SIZE(namelist))
390
391static int
392nouveau_graph_chid_from_grctx(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t inst;
-	int i;
-
-	if (dev_priv->card_type < NV_40)
-		return dev_priv->engine.fifo.channels;
-	else
+{
+	struct drm_device *dev = (struct drm_device *)arg;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	unsigned long flags;
+	u32 stat;
+	int i;
+
+	stat = nv_rd32(dev, NV03_PMC_INTR_0);
+	if (!stat)
+		return IRQ_NONE;
401 if (dev_priv->card_type < NV_50) {
402 inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 4;
403
404 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
405 struct nouveau_channel *chan = dev_priv->fifos[i];
406
407 if (!chan || !chan->ramin_grctx)
408 continue;
409
410 if (inst == chan->ramin_grctx->pinst)
411 break;
412 }
413 } else {
414 inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 12;
415
416 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
417 struct nouveau_channel *chan = dev_priv->fifos[i];
418
419 if (!chan || !chan->ramin)
420 continue;
421
422 if (inst == chan->ramin->vinst)
423 break;
424 }
425 }
426
427
428 return i;
429}
430
431static int
432nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret)
433{
434 struct drm_nouveau_private *dev_priv = dev->dev_private;
435 struct nouveau_engine *engine = &dev_priv->engine;
436 int channel;
437
438 if (dev_priv->card_type < NV_10)
439 channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0xf;
440 else
441 if (dev_priv->card_type < NV_40)
442 channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
443 else
444 channel = nouveau_graph_chid_from_grctx(dev);
445
446 if (channel >= engine->fifo.channels || !dev_priv->fifos[channel]) {
447 NV_ERROR(dev, "AIII, invalid/inactive channel id %d\n", channel);
448 return -EINVAL;
449 }
450
451 *channel_ret = channel;
452 return 0;
453}
454
455struct nouveau_pgraph_trap {
456 int channel;
457 int class;
458 int subc, mthd, size;
459 uint32_t data, data2;
460 uint32_t nsource, nstatus;
461};
462
463static void
464nouveau_graph_trap_info(struct drm_device *dev,
465 struct nouveau_pgraph_trap *trap)
466{
467 struct drm_nouveau_private *dev_priv = dev->dev_private;
468 uint32_t address;
469
470 trap->nsource = trap->nstatus = 0;
471 if (dev_priv->card_type < NV_50) {
472 trap->nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
473 trap->nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
474 }
475
476 if (nouveau_graph_trapped_channel(dev, &trap->channel))
477 trap->channel = -1;
478 address = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
479
480 trap->mthd = address & 0x1FFC;
481 trap->data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
482 if (dev_priv->card_type < NV_10) {
483 trap->subc = (address >> 13) & 0x7;
484 } else {
485 trap->subc = (address >> 16) & 0x7;
486 trap->data2 = nv_rd32(dev, NV10_PGRAPH_TRAPPED_DATA_HIGH);
487 }
488
489 if (dev_priv->card_type < NV_10)
490 trap->class = nv_rd32(dev, 0x400180 + trap->subc*4) & 0xFF;
491 else if (dev_priv->card_type < NV_40)
492 trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFF;
493 else if (dev_priv->card_type < NV_50)
494 trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFFF;
495 else
496 trap->class = nv_rd32(dev, 0x400814);
497}
498
499static void
500nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id,
501 struct nouveau_pgraph_trap *trap)
502{
503 struct drm_nouveau_private *dev_priv = dev->dev_private;
504 uint32_t nsource = trap->nsource, nstatus = trap->nstatus;
505
506 if (dev_priv->card_type < NV_50) {
507 NV_INFO(dev, "%s - nSource:", id);
508 nouveau_print_bitfield_names(nsource, nsource_names);
509 printk(", nStatus:");
510 if (dev_priv->card_type < NV_10)
511 nouveau_print_bitfield_names(nstatus, nstatus_names);
512 else
513 nouveau_print_bitfield_names(nstatus, nstatus_names_nv10);
514 printk("\n");
515 }
516
517 NV_INFO(dev, "%s - Ch %d/%d Class 0x%04x Mthd 0x%04x "
518 "Data 0x%08x:0x%08x\n",
519 id, trap->channel, trap->subc,
520 trap->class, trap->mthd,
521 trap->data2, trap->data);
522}
523
524static int
525nouveau_pgraph_intr_swmthd(struct drm_device *dev,
526 struct nouveau_pgraph_trap *trap)
527{
528 struct drm_nouveau_private *dev_priv = dev->dev_private;
529
530 if (trap->channel < 0 ||
531 trap->channel >= dev_priv->engine.fifo.channels ||
532 !dev_priv->fifos[trap->channel])
533 return -ENODEV;
534
535 return nouveau_call_method(dev_priv->fifos[trap->channel],
536 trap->class, trap->mthd, trap->data);
537}
538
539static inline void
540nouveau_pgraph_intr_notify(struct drm_device *dev, uint32_t nsource)
541{
542 struct nouveau_pgraph_trap trap;
543 int unhandled = 0;
-
-	nouveau_graph_trap_info(dev, &trap);
-
-	if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
-		if (nouveau_pgraph_intr_swmthd(dev, &trap))
-			unhandled = 1;
-	} else {
-		unhandled = 1;
-	}
-
-	if (unhandled)
-		nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap);
-}
+
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	for (i = 0; i < 32 && stat; i++) {
+		if (!(stat & (1 << i)) || !dev_priv->irq_handler[i])
+			continue;
+
+		dev_priv->irq_handler[i](dev);
+		stat &= ~(1 << i);
+	}
+
+	if (dev_priv->msi_enabled)
+		nv_wr08(dev, 0x00088068, 0xff);
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
557
558
559static inline void
560nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource)
561{
562 struct nouveau_pgraph_trap trap;
563 int unhandled = 0;
564
565 nouveau_graph_trap_info(dev, &trap);
566 trap.nsource = nsource;
567
568 if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
569 if (nouveau_pgraph_intr_swmthd(dev, &trap))
570 unhandled = 1;
571 } else if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
572 uint32_t v = nv_rd32(dev, 0x402000);
573 nv_wr32(dev, 0x402000, v);
574
575 /* dump the error anyway for now: it's useful for
576 Gallium development */
577 unhandled = 1;
578 } else {
579 unhandled = 1;
580 }
-
-	if (unhandled && nouveau_ratelimit())
-		nouveau_graph_dump_trap_info(dev, "PGRAPH_ERROR", &trap);
-}
-
-static inline void
-nouveau_pgraph_intr_context_switch(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_engine *engine = &dev_priv->engine;
+
+	if (stat && nouveau_ratelimit())
+		NV_ERROR(dev, "PMC - unhandled INTR 0x%08x\n", stat);
+	return IRQ_HANDLED;
+}
+
+int
+nouveau_irq_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int ret;
591 uint32_t chid;
592
593 chid = engine->fifo.channel_id(dev);
594 NV_DEBUG(dev, "PGRAPH context switch interrupt channel %x\n", chid);
595
596 switch (dev_priv->card_type) {
597 case NV_04:
598 nv04_graph_context_switch(dev);
599 break;
600 case NV_10:
601 nv10_graph_context_switch(dev);
602 break;
603 default:
604 NV_ERROR(dev, "Context switch not implemented\n");
605 break;
606 }
607}
608
609static void
610nouveau_pgraph_irq_handler(struct drm_device *dev)
611{
612 uint32_t status;
613
614 while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
615 uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
616
617 if (status & NV_PGRAPH_INTR_NOTIFY) {
618 nouveau_pgraph_intr_notify(dev, nsource);
619
620 status &= ~NV_PGRAPH_INTR_NOTIFY;
621 nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_NOTIFY);
622 }
623
624 if (status & NV_PGRAPH_INTR_ERROR) {
625 nouveau_pgraph_intr_error(dev, nsource);
-
-			status &= ~NV_PGRAPH_INTR_ERROR;
-			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_ERROR);
-		}
+
+	if (nouveau_msi != 0 && dev_priv->card_type >= NV_50) {
+		ret = pci_enable_msi(dev->pdev);
+		if (ret == 0) {
+			NV_INFO(dev, "enabled MSI\n");
+			dev_priv->msi_enabled = true;
+		}
630
631 if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
632 status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
633 nv_wr32(dev, NV03_PGRAPH_INTR,
634 NV_PGRAPH_INTR_CONTEXT_SWITCH);
635
636 nouveau_pgraph_intr_context_switch(dev);
637 }
638
639 if (status) {
640 NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status);
641 nv_wr32(dev, NV03_PGRAPH_INTR, status);
642 }
643
644 if ((nv_rd32(dev, NV04_PGRAPH_FIFO) & (1 << 0)) == 0)
645 nv_wr32(dev, NV04_PGRAPH_FIFO, 1);
-	}
-
-	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
+	}
+
+	return drm_irq_install(dev);
649}
650
651static struct nouveau_enum_names nv50_mp_exec_error_names[] =
652{
653 { 3, "STACK_UNDERFLOW" },
654 { 4, "QUADON_ACTIVE" },
655 { 8, "TIMEOUT" },
656 { 0x10, "INVALID_OPCODE" },
657 { 0x40, "BREAKPOINT" },
658};
659
660static void
661nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display)
662{
663 struct drm_nouveau_private *dev_priv = dev->dev_private;
664 uint32_t units = nv_rd32(dev, 0x1540);
665 uint32_t addr, mp10, status, pc, oplow, ophigh;
666 int i;
667 int mps = 0;
668 for (i = 0; i < 4; i++) {
669 if (!(units & 1 << (i+24)))
670 continue;
671 if (dev_priv->chipset < 0xa0)
672 addr = 0x408200 + (tpid << 12) + (i << 7);
673 else
674 addr = 0x408100 + (tpid << 11) + (i << 7);
675 mp10 = nv_rd32(dev, addr + 0x10);
676 status = nv_rd32(dev, addr + 0x14);
677 if (!status)
678 continue;
679 if (display) {
680 nv_rd32(dev, addr + 0x20);
681 pc = nv_rd32(dev, addr + 0x24);
682 oplow = nv_rd32(dev, addr + 0x70);
683 ophigh= nv_rd32(dev, addr + 0x74);
684 NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - "
685 "TP %d MP %d: ", tpid, i);
686 nouveau_print_enum_names(status,
687 nv50_mp_exec_error_names);
688 printk(" at %06x warp %d, opcode %08x %08x\n",
689 pc&0xffffff, pc >> 24,
690 oplow, ophigh);
691 }
692 nv_wr32(dev, addr + 0x10, mp10);
693 nv_wr32(dev, addr + 0x14, 0);
694 mps++;
695 }
696 if (!mps && display)
697 NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: "
698 "No MPs claiming errors?\n", tpid);
-}
-
-static void
-nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
-		    uint32_t ustatus_new, int display, const char *name)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+}
+
+void
+nouveau_irq_fini(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
706 int tps = 0;
707 uint32_t units = nv_rd32(dev, 0x1540);
708 int i, r;
709 uint32_t ustatus_addr, ustatus;
710 for (i = 0; i < 16; i++) {
711 if (!(units & (1 << i)))
712 continue;
713 if (dev_priv->chipset < 0xa0)
714 ustatus_addr = ustatus_old + (i << 12);
715 else
716 ustatus_addr = ustatus_new + (i << 11);
717 ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff;
718 if (!ustatus)
719 continue;
720 tps++;
721 switch (type) {
722 case 6: /* texture error... unknown for now */
723 nv50_fb_vm_trap(dev, display, name);
724 if (display) {
725 NV_ERROR(dev, "magic set %d:\n", i);
726 for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
727 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
728 nv_rd32(dev, r));
729 }
730 break;
731 case 7: /* MP error */
732 if (ustatus & 0x00010000) {
733 nv50_pgraph_mp_trap(dev, i, display);
734 ustatus &= ~0x00010000;
735 }
736 break;
737 case 8: /* TPDMA error */
738 {
739 uint32_t e0c = nv_rd32(dev, ustatus_addr + 4);
740 uint32_t e10 = nv_rd32(dev, ustatus_addr + 8);
741 uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc);
742 uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10);
743 uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
744 uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
745 uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
746 nv50_fb_vm_trap(dev, display, name);
747 /* 2d engine destination */
748 if (ustatus & 0x00000010) {
749 if (display) {
750 NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
751 i, e14, e10);
752 NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
753 i, e0c, e18, e1c, e20, e24);
754 }
755 ustatus &= ~0x00000010;
756 }
757 /* Render target */
758 if (ustatus & 0x00000040) {
759 if (display) {
760 NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
761 i, e14, e10);
762 NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
763 i, e0c, e18, e1c, e20, e24);
764 }
765 ustatus &= ~0x00000040;
766 }
767 /* CUDA memory: l[], g[] or stack. */
768 if (ustatus & 0x00000080) {
769 if (display) {
770 if (e18 & 0x80000000) {
771 /* g[] read fault? */
772 NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
773 i, e14, e10 | ((e18 >> 24) & 0x1f));
774 e18 &= ~0x1f000000;
775 } else if (e18 & 0xc) {
776 /* g[] write fault? */
777 NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
778 i, e14, e10 | ((e18 >> 7) & 0x1f));
779 e18 &= ~0x00000f80;
780 } else {
781 NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
782 i, e14, e10);
783 }
784 NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
785 i, e0c, e18, e1c, e20, e24);
786 }
787 ustatus &= ~0x00000080;
788 }
789 }
790 break;
791 }
792 if (ustatus) {
793 if (display)
794 NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
795 }
796 nv_wr32(dev, ustatus_addr, 0xc0000000);
797 }
798
799 if (!tps && display)
800 NV_INFO(dev, "%s - No TPs claiming errors?\n", name);
801}
802
803static void
804nv50_pgraph_trap_handler(struct drm_device *dev)
805{
806 struct nouveau_pgraph_trap trap;
807 uint32_t status = nv_rd32(dev, 0x400108);
808 uint32_t ustatus;
809 int display = nouveau_ratelimit();
810
811
812 if (!status && display) {
813 nouveau_graph_trap_info(dev, &trap);
814 nouveau_graph_dump_trap_info(dev, "PGRAPH_TRAP", &trap);
815 NV_INFO(dev, "PGRAPH_TRAP - no units reporting traps?\n");
816 }
817
818 /* DISPATCH: Relays commands to other units and handles NOTIFY,
819 * COND, QUERY. If you get a trap from it, the command is still stuck
820 * in DISPATCH and you need to do something about it. */
821 if (status & 0x001) {
822 ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff;
823 if (!ustatus && display) {
824 NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n");
825 }
826
827 /* Known to be triggered by screwed up NOTIFY and COND... */
828 if (ustatus & 0x00000001) {
829 nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_FAULT");
830 nv_wr32(dev, 0x400500, 0);
831 if (nv_rd32(dev, 0x400808) & 0x80000000) {
832 if (display) {
833 if (nouveau_graph_trapped_channel(dev, &trap.channel))
834 trap.channel = -1;
835 trap.class = nv_rd32(dev, 0x400814);
836 trap.mthd = nv_rd32(dev, 0x400808) & 0x1ffc;
837 trap.subc = (nv_rd32(dev, 0x400808) >> 16) & 0x7;
838 trap.data = nv_rd32(dev, 0x40080c);
839 trap.data2 = nv_rd32(dev, 0x400810);
840 nouveau_graph_dump_trap_info(dev,
841 "PGRAPH_TRAP_DISPATCH_FAULT", &trap);
842 NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400808: %08x\n", nv_rd32(dev, 0x400808));
843 NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400848: %08x\n", nv_rd32(dev, 0x400848));
844 }
845 nv_wr32(dev, 0x400808, 0);
846 } else if (display) {
847 NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - No stuck command?\n");
848 }
849 nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3);
850 nv_wr32(dev, 0x400848, 0);
851 ustatus &= ~0x00000001;
852 }
853 if (ustatus & 0x00000002) {
854 nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_QUERY");
855 nv_wr32(dev, 0x400500, 0);
856 if (nv_rd32(dev, 0x40084c) & 0x80000000) {
857 if (display) {
858 if (nouveau_graph_trapped_channel(dev, &trap.channel))
859 trap.channel = -1;
860 trap.class = nv_rd32(dev, 0x400814);
861 trap.mthd = nv_rd32(dev, 0x40084c) & 0x1ffc;
862 trap.subc = (nv_rd32(dev, 0x40084c) >> 16) & 0x7;
863 trap.data = nv_rd32(dev, 0x40085c);
864 trap.data2 = 0;
865 nouveau_graph_dump_trap_info(dev,
866 "PGRAPH_TRAP_DISPATCH_QUERY", &trap);
867 NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - 40084c: %08x\n", nv_rd32(dev, 0x40084c));
868 }
869 nv_wr32(dev, 0x40084c, 0);
870 } else if (display) {
871 NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - No stuck command?\n");
872 }
873 ustatus &= ~0x00000002;
874 }
875 if (ustatus && display)
876 NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - Unhandled ustatus 0x%08x\n", ustatus);
877 nv_wr32(dev, 0x400804, 0xc0000000);
878 nv_wr32(dev, 0x400108, 0x001);
879 status &= ~0x001;
880 }
881
882 /* TRAPs other than dispatch use the "normal" trap regs. */
883 if (status && display) {
884 nouveau_graph_trap_info(dev, &trap);
885 nouveau_graph_dump_trap_info(dev,
886 "PGRAPH_TRAP", &trap);
887 }
888
889 /* M2MF: Memory to memory copy engine. */
890 if (status & 0x002) {
891 ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff;
892 if (!ustatus && display) {
893 NV_INFO(dev, "PGRAPH_TRAP_M2MF - no ustatus?\n");
894 }
895 if (ustatus & 0x00000001) {
896 nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_NOTIFY");
897 ustatus &= ~0x00000001;
898 }
899 if (ustatus & 0x00000002) {
900 nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_IN");
901 ustatus &= ~0x00000002;
902 }
903 if (ustatus & 0x00000004) {
904 nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_OUT");
905 ustatus &= ~0x00000004;
906 }
907 NV_INFO (dev, "PGRAPH_TRAP_M2MF - %08x %08x %08x %08x\n",
908 nv_rd32(dev, 0x406804),
909 nv_rd32(dev, 0x406808),
910 nv_rd32(dev, 0x40680c),
911 nv_rd32(dev, 0x406810));
912 if (ustatus && display)
913 NV_INFO(dev, "PGRAPH_TRAP_M2MF - Unhandled ustatus 0x%08x\n", ustatus);
914 /* No sane way found yet -- just reset the bugger. */
915 nv_wr32(dev, 0x400040, 2);
916 nv_wr32(dev, 0x400040, 0);
917 nv_wr32(dev, 0x406800, 0xc0000000);
918 nv_wr32(dev, 0x400108, 0x002);
919 status &= ~0x002;
920 }
921
922 /* VFETCH: Fetches data from vertex buffers. */
923 if (status & 0x004) {
924 ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff;
925 if (!ustatus && display) {
926 NV_INFO(dev, "PGRAPH_TRAP_VFETCH - no ustatus?\n");
927 }
928 if (ustatus & 0x00000001) {
929 nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_VFETCH_FAULT");
930 NV_INFO (dev, "PGRAPH_TRAP_VFETCH_FAULT - %08x %08x %08x %08x\n",
931 nv_rd32(dev, 0x400c00),
932 nv_rd32(dev, 0x400c08),
933 nv_rd32(dev, 0x400c0c),
934 nv_rd32(dev, 0x400c10));
935 ustatus &= ~0x00000001;
936 }
937 if (ustatus && display)
938 NV_INFO(dev, "PGRAPH_TRAP_VFETCH - Unhandled ustatus 0x%08x\n", ustatus);
939 nv_wr32(dev, 0x400c04, 0xc0000000);
940 nv_wr32(dev, 0x400108, 0x004);
941 status &= ~0x004;
942 }
943
944 /* STRMOUT: DirectX streamout / OpenGL transform feedback. */
945 if (status & 0x008) {
946 ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff;
947 if (!ustatus && display) {
948 NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - no ustatus?\n");
949 }
950 if (ustatus & 0x00000001) {
951 nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_STRMOUT_FAULT");
952 NV_INFO (dev, "PGRAPH_TRAP_STRMOUT_FAULT - %08x %08x %08x %08x\n",
953 nv_rd32(dev, 0x401804),
954 nv_rd32(dev, 0x401808),
955 nv_rd32(dev, 0x40180c),
956 nv_rd32(dev, 0x401810));
957 ustatus &= ~0x00000001;
958 }
959 if (ustatus && display)
960 NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - Unhandled ustatus 0x%08x\n", ustatus);
961 /* No sane way found yet -- just reset the bugger. */
962 nv_wr32(dev, 0x400040, 0x80);
963 nv_wr32(dev, 0x400040, 0);
964 nv_wr32(dev, 0x401800, 0xc0000000);
965 nv_wr32(dev, 0x400108, 0x008);
966 status &= ~0x008;
967 }
968
969 /* CCACHE: Handles code and c[] caches and fills them. */
970 if (status & 0x010) {
971 ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff;
972 if (!ustatus && display) {
973 NV_INFO(dev, "PGRAPH_TRAP_CCACHE - no ustatus?\n");
974 }
975 if (ustatus & 0x00000001) {
976 nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_CCACHE_FAULT");
977 NV_INFO (dev, "PGRAPH_TRAP_CCACHE_FAULT - %08x %08x %08x %08x %08x %08x %08x\n",
978 nv_rd32(dev, 0x405800),
979 nv_rd32(dev, 0x405804),
980 nv_rd32(dev, 0x405808),
981 nv_rd32(dev, 0x40580c),
982 nv_rd32(dev, 0x405810),
983 nv_rd32(dev, 0x405814),
984 nv_rd32(dev, 0x40581c));
985 ustatus &= ~0x00000001;
986 }
987 if (ustatus && display)
988 NV_INFO(dev, "PGRAPH_TRAP_CCACHE - Unhandled ustatus 0x%08x\n", ustatus);
989 nv_wr32(dev, 0x405018, 0xc0000000);
990 nv_wr32(dev, 0x400108, 0x010);
991 status &= ~0x010;
992 }
993
994 /* Unknown, not seen yet... 0x402000 is the only trap status reg
995 * remaining, so try to handle it anyway. Perhaps related to that
996 * unknown DMA slot on tesla? */
997 if (status & 0x20) {
998 nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_UNKC04");
999 ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
1000 if (display)
1001 NV_INFO(dev, "PGRAPH_TRAP_UNKC04 - Unhandled ustatus 0x%08x\n", ustatus);
1002 nv_wr32(dev, 0x402000, 0xc0000000);
1003 /* no status modification on purpose */
1004 }
1005
1006 /* TEXTURE: CUDA texturing units */
1007 if (status & 0x040) {
1008 nv50_pgraph_tp_trap (dev, 6, 0x408900, 0x408600, display,
1009 "PGRAPH_TRAP_TEXTURE");
1010 nv_wr32(dev, 0x400108, 0x040);
1011 status &= ~0x040;
1012 }
1013
1014 /* MP: CUDA execution engines. */
1015 if (status & 0x080) {
1016 nv50_pgraph_tp_trap (dev, 7, 0x408314, 0x40831c, display,
1017 "PGRAPH_TRAP_MP");
1018 nv_wr32(dev, 0x400108, 0x080);
1019 status &= ~0x080;
1020 }
1021
1022 /* TPDMA: Handles TP-initiated uncached memory accesses:
1023 * l[], g[], stack, 2d surfaces, render targets. */
1024 if (status & 0x100) {
1025 nv50_pgraph_tp_trap (dev, 8, 0x408e08, 0x408708, display,
1026 "PGRAPH_TRAP_TPDMA");
1027 nv_wr32(dev, 0x400108, 0x100);
1028 status &= ~0x100;
1029 }
1030
1031 if (status) {
1032 if (display)
1033 NV_INFO(dev, "PGRAPH_TRAP - Unknown trap 0x%08x\n",
1034 status);
1035 nv_wr32(dev, 0x400108, status);
1036 }
1037}
1038
1039/* There must be a *lot* of these. Will take some time to gather them up. */
1040static struct nouveau_enum_names nv50_data_error_names[] =
1041{
1042 { 4, "INVALID_VALUE" },
1043 { 5, "INVALID_ENUM" },
1044 { 8, "INVALID_OBJECT" },
1045 { 0xc, "INVALID_BITFIELD" },
1046 { 0x28, "MP_NO_REG_SPACE" },
1047 { 0x2b, "MP_BLOCK_SIZE_MISMATCH" },
1048};
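
The table above feeds nouveau_print_enum_names(), called from the DATA_ERROR path further down. The helper's body is not part of this hunk; the following is a minimal sketch of the lookup it implies, assuming a value/name struct layout matching the initializers above and an explicit element count (the in-tree helper may size its table differently):

	static void
	print_enum_name(uint32_t value, const struct nouveau_enum_names *en,
			int count)
	{
		int i;

		/* linear scan; these tables are tiny */
		for (i = 0; i < count; i++) {
			if (en[i].value == value) {
				printk("%s", en[i].name);
				return;
			}
		}
		printk("(unknown 0x%08x)", value);
	}
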
1049
1050static void
1051nv50_pgraph_irq_handler(struct drm_device *dev)
1052{
1053 struct nouveau_pgraph_trap trap;
1054 int unhandled = 0;
1055 uint32_t status;
1056
1057 while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
1058 /* NOTIFY: You've set a NOTIFY on a command and it's done. */
1059 if (status & 0x00000001) {
1060 nouveau_graph_trap_info(dev, &trap);
1061 if (nouveau_ratelimit())
1062 nouveau_graph_dump_trap_info(dev,
1063 "PGRAPH_NOTIFY", &trap);
1064 status &= ~0x00000001;
1065 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
1066 }
1067
1068 /* COMPUTE_QUERY: Purpose and exact cause unknown, happens
1069 * when you write 0x200 to 0x50c0 method 0x31c. */
1070 if (status & 0x00000002) {
1071 nouveau_graph_trap_info(dev, &trap);
1072 if (nouveau_ratelimit())
1073 nouveau_graph_dump_trap_info(dev,
1074 "PGRAPH_COMPUTE_QUERY", &trap);
1075 status &= ~0x00000002;
1076 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000002);
1077 }
1078
1079 /* Unknown, never seen: 0x4 */
1080
1081 /* ILLEGAL_MTHD: You used a wrong method for this class. */
1082 if (status & 0x00000010) {
1083 nouveau_graph_trap_info(dev, &trap);
1084 if (nouveau_pgraph_intr_swmthd(dev, &trap))
1085 unhandled = 1;
1086 if (unhandled && nouveau_ratelimit())
1087 nouveau_graph_dump_trap_info(dev,
1088 "PGRAPH_ILLEGAL_MTHD", &trap);
1089 status &= ~0x00000010;
1090 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
1091 }
1092
1093 /* ILLEGAL_CLASS: You used a wrong class. */
1094 if (status & 0x00000020) {
1095 nouveau_graph_trap_info(dev, &trap);
1096 if (nouveau_ratelimit())
1097 nouveau_graph_dump_trap_info(dev,
1098 "PGRAPH_ILLEGAL_CLASS", &trap);
1099 status &= ~0x00000020;
1100 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000020);
1101 }
1102
1103 /* DOUBLE_NOTIFY: You tried to set a NOTIFY on another NOTIFY. */
1104 if (status & 0x00000040) {
1105 nouveau_graph_trap_info(dev, &trap);
1106 if (nouveau_ratelimit())
1107 nouveau_graph_dump_trap_info(dev,
1108 "PGRAPH_DOUBLE_NOTIFY", &trap);
1109 status &= ~0x00000040;
1110 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000040);
1111 }
1112
1113 /* CONTEXT_SWITCH: PGRAPH needs us to load a new context */
1114 if (status & 0x00001000) {
1115 nv_wr32(dev, 0x400500, 0x00000000);
1116 nv_wr32(dev, NV03_PGRAPH_INTR,
1117 NV_PGRAPH_INTR_CONTEXT_SWITCH);
1118 nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
1119 NV40_PGRAPH_INTR_EN) &
1120 ~NV_PGRAPH_INTR_CONTEXT_SWITCH);
1121 nv_wr32(dev, 0x400500, 0x00010001);
1122
1123 nv50_graph_context_switch(dev);
1124
1125 status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
1126 }
1127
1128 /* BUFFER_NOTIFY: Your m2mf transfer finished */
1129 if (status & 0x00010000) {
1130 nouveau_graph_trap_info(dev, &trap);
1131 if (nouveau_ratelimit())
1132 nouveau_graph_dump_trap_info(dev,
1133 "PGRAPH_BUFFER_NOTIFY", &trap);
1134 status &= ~0x00010000;
1135 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00010000);
1136 }
1137
1138 /* DATA_ERROR: Invalid value for this method, or invalid
1139 * state in current PGRAPH context for this operation */
1140 if (status & 0x00100000) {
1141 nouveau_graph_trap_info(dev, &trap);
1142 if (nouveau_ratelimit()) {
1143 nouveau_graph_dump_trap_info(dev,
1144 "PGRAPH_DATA_ERROR", &trap);
1145 NV_INFO (dev, "PGRAPH_DATA_ERROR - ");
1146 nouveau_print_enum_names(nv_rd32(dev, 0x400110),
1147 nv50_data_error_names);
1148 printk("\n");
1149 }
1150 status &= ~0x00100000;
1151 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
1152 }
 124
 125 drm_irq_uninstall(dev);
 126 if (dev_priv->msi_enabled)
 127 pci_disable_msi(dev->pdev);
1153
1154 /* TRAP: Something bad happened in the middle of command
1155 * execution. Has a billion types, subtypes, and even
1156 * subsubtypes. */
1157 if (status & 0x00200000) {
1158 nv50_pgraph_trap_handler(dev);
1159 status &= ~0x00200000;
1160 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
1161 }
1162
1163 /* Unknown, never seen: 0x00400000 */
1164
1165 /* SINGLE_STEP: Happens on every method if you turned on
1166 * single stepping in 40008c */
1167 if (status & 0x01000000) {
1168 nouveau_graph_trap_info(dev, &trap);
1169 if (nouveau_ratelimit())
1170 nouveau_graph_dump_trap_info(dev,
1171 "PGRAPH_SINGLE_STEP", &trap);
1172 status &= ~0x01000000;
1173 nv_wr32(dev, NV03_PGRAPH_INTR, 0x01000000);
1174 }
1175
1176 /* 0x02000000 happens when you pause a ctxprog...
1177 * but the only way I know of for that to happen is
1178 * by poking the relevant MMIO register, and we
1179 * don't do that. */
1180
1181 if (status) {
1182 NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n",
1183 status);
1184 nv_wr32(dev, NV03_PGRAPH_INTR, status);
1185 }
1186
1187 {
1188 const int isb = (1 << 16) | (1 << 0);
1189
1190 if ((nv_rd32(dev, 0x400500) & isb) != isb)
1191 nv_wr32(dev, 0x400500,
1192 nv_rd32(dev, 0x400500) | isb);
1193 }
1194 }
1195
1196 nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
1197 if (nv_rd32(dev, 0x400824) & (1 << 31))
1198 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
1199} 128}
1200 129
1201static void 130void
1202nouveau_crtc_irq_handler(struct drm_device *dev, int crtc) 131nouveau_irq_register(struct drm_device *dev, int status_bit,
132 void (*handler)(struct drm_device *))
1203{ 133{
1204 if (crtc & 1) 134 struct drm_nouveau_private *dev_priv = dev->dev_private;
1205 nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK); 135 unsigned long flags;
1206 136
1207 if (crtc & 2) 137 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
1208 nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK); 138 dev_priv->irq_handler[status_bit] = handler;
139 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
1209} 140}
1210 141
1211irqreturn_t 142void
1212nouveau_irq_handler(DRM_IRQ_ARGS) 143nouveau_irq_unregister(struct drm_device *dev, int status_bit)
1213{ 144{
1214 struct drm_device *dev = (struct drm_device *)arg;
1215 struct drm_nouveau_private *dev_priv = dev->dev_private; 145 struct drm_nouveau_private *dev_priv = dev->dev_private;
1216 uint32_t status;
1217 unsigned long flags; 146 unsigned long flags;
1218 147
1219 status = nv_rd32(dev, NV03_PMC_INTR_0);
1220 if (!status)
1221 return IRQ_NONE;
1222
1223 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 148 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
1224 149 dev_priv->irq_handler[status_bit] = NULL;
1225 if (status & NV_PMC_INTR_0_PFIFO_PENDING) {
1226 nouveau_fifo_irq_handler(dev);
1227 status &= ~NV_PMC_INTR_0_PFIFO_PENDING;
1228 }
1229
1230 if (status & NV_PMC_INTR_0_PGRAPH_PENDING) {
1231 if (dev_priv->card_type >= NV_50)
1232 nv50_pgraph_irq_handler(dev);
1233 else
1234 nouveau_pgraph_irq_handler(dev);
1235
1236 status &= ~NV_PMC_INTR_0_PGRAPH_PENDING;
1237 }
1238
1239 if (status & NV_PMC_INTR_0_CRTCn_PENDING) {
1240 nouveau_crtc_irq_handler(dev, (status>>24)&3);
1241 status &= ~NV_PMC_INTR_0_CRTCn_PENDING;
1242 }
1243
1244 if (status & (NV_PMC_INTR_0_NV50_DISPLAY_PENDING |
1245 NV_PMC_INTR_0_NV50_I2C_PENDING)) {
1246 nv50_display_irq_handler(dev);
1247 status &= ~(NV_PMC_INTR_0_NV50_DISPLAY_PENDING |
1248 NV_PMC_INTR_0_NV50_I2C_PENDING);
1249 }
1250
1251 if (status)
1252 NV_ERROR(dev, "Unhandled PMC INTR status bits 0x%08x\n", status);
1253
1254 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 150 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
1255
1256 return IRQ_HANDLED;
1257} 151}
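
The nouveau_irq_register()/nouveau_irq_unregister() pair on the right-hand side replaces the hardcoded if-chain on the left with a table of per-status-bit handlers, updated under the same context_switch_lock the handler path takes. The dispatch side of that table is outside this hunk; a hedged sketch of what the matching loop would look like (the table size and ack convention here are assumptions, not taken from this patch):

	static void
	nouveau_irq_dispatch(struct drm_device *dev, u32 status)
	{
		struct drm_nouveau_private *dev_priv = dev->dev_private;
		int i;

		/* call the registered handler for every pending PMC bit;
		 * each handler is expected to ack its own status bit */
		for (i = 0; i < 32; i++) {
			if ((status & (1 << i)) && dev_priv->irq_handler[i])
				dev_priv->irq_handler[i](dev);
		}
	}
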
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index fe4a30dc4b42..69044eb104bb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -36,183 +36,112 @@
36 36
37#include "nouveau_drv.h" 37#include "nouveau_drv.h"
38#include "nouveau_pm.h" 38#include "nouveau_pm.h"
39#include "nouveau_mm.h"
40#include "nouveau_vm.h"
39 41
40/* 42/*
41 * NV10-NV40 tiling helpers 43 * NV10-NV40 tiling helpers
42 */ 44 */
43 45
44static void 46static void
45nv10_mem_set_region_tiling(struct drm_device *dev, int i, uint32_t addr, 47nv10_mem_update_tile_region(struct drm_device *dev,
46 uint32_t size, uint32_t pitch) 48 struct nouveau_tile_reg *tile, uint32_t addr,
49 uint32_t size, uint32_t pitch, uint32_t flags)
47{ 50{
48 struct drm_nouveau_private *dev_priv = dev->dev_private; 51 struct drm_nouveau_private *dev_priv = dev->dev_private;
49 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; 52 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
50 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; 53 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
51 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; 54 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
52 struct nouveau_tile_reg *tile = &dev_priv->tile[i]; 55 int i = tile - dev_priv->tile.reg;
56 unsigned long save;
53 57
54 tile->addr = addr; 58 nouveau_fence_unref(&tile->fence);
55 tile->size = size;
56 tile->used = !!pitch;
57 nouveau_fence_unref((void **)&tile->fence);
58 59
60 if (tile->pitch)
61 pfb->free_tile_region(dev, i);
62
63 if (pitch)
64 pfb->init_tile_region(dev, i, addr, size, pitch, flags);
65
66 spin_lock_irqsave(&dev_priv->context_switch_lock, save);
59 pfifo->reassign(dev, false); 67 pfifo->reassign(dev, false);
60 pfifo->cache_pull(dev, false); 68 pfifo->cache_pull(dev, false);
61 69
62 nouveau_wait_for_idle(dev); 70 nouveau_wait_for_idle(dev);
63 71
64 pgraph->set_region_tiling(dev, i, addr, size, pitch); 72 pfb->set_tile_region(dev, i);
65 pfb->set_region_tiling(dev, i, addr, size, pitch); 73 pgraph->set_tile_region(dev, i);
66 74
67 pfifo->cache_pull(dev, true); 75 pfifo->cache_pull(dev, true);
68 pfifo->reassign(dev, true); 76 pfifo->reassign(dev, true);
77 spin_unlock_irqrestore(&dev_priv->context_switch_lock, save);
69} 78}
70 79
71struct nouveau_tile_reg * 80static struct nouveau_tile_reg *
72nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size, 81nv10_mem_get_tile_region(struct drm_device *dev, int i)
73 uint32_t pitch)
74{ 82{
75 struct drm_nouveau_private *dev_priv = dev->dev_private; 83 struct drm_nouveau_private *dev_priv = dev->dev_private;
76 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; 84 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
77 struct nouveau_tile_reg *found = NULL;
78 unsigned long i, flags;
79
80 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
81
82 for (i = 0; i < pfb->num_tiles; i++) {
83 struct nouveau_tile_reg *tile = &dev_priv->tile[i];
84
85 if (tile->used)
86 /* Tile region in use. */
87 continue;
88 85
89 if (tile->fence && 86 spin_lock(&dev_priv->tile.lock);
90 !nouveau_fence_signalled(tile->fence, NULL))
91 /* Pending tile region. */
92 continue;
93
94 if (max(tile->addr, addr) <
95 min(tile->addr + tile->size, addr + size))
96 /* Kill an intersecting tile region. */
97 nv10_mem_set_region_tiling(dev, i, 0, 0, 0);
98
99 if (pitch && !found) {
100 /* Free tile region. */
101 nv10_mem_set_region_tiling(dev, i, addr, size, pitch);
102 found = tile;
103 }
104 }
105 87
106 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 88 if (!tile->used &&
89 (!tile->fence || nouveau_fence_signalled(tile->fence)))
90 tile->used = true;
91 else
92 tile = NULL;
107 93
108 return found; 94 spin_unlock(&dev_priv->tile.lock);
95 return tile;
109} 96}
110 97
111void 98void
112nv10_mem_expire_tiling(struct drm_device *dev, struct nouveau_tile_reg *tile, 99nv10_mem_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile,
113 struct nouveau_fence *fence) 100 struct nouveau_fence *fence)
114{
115 if (fence) {
116 /* Mark it as pending. */
117 tile->fence = fence;
118 nouveau_fence_ref(fence);
119 }
120
121 tile->used = false;
122}
123
124/*
125 * NV50 VM helpers
126 */
127int
128nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
129 uint32_t flags, uint64_t phys)
130{ 101{
131 struct drm_nouveau_private *dev_priv = dev->dev_private; 102 struct drm_nouveau_private *dev_priv = dev->dev_private;
132 struct nouveau_gpuobj *pgt;
133 unsigned block;
134 int i;
135 103
136 virt = ((virt - dev_priv->vm_vram_base) >> 16) << 1; 104 if (tile) {
137 size = (size >> 16) << 1; 105 spin_lock(&dev_priv->tile.lock);
138 106 if (fence) {
139 phys |= ((uint64_t)flags << 32); 107 /* Mark it as pending. */
140 phys |= 1; 108 tile->fence = fence;
141 if (dev_priv->vram_sys_base) { 109 nouveau_fence_ref(fence);
142 phys += dev_priv->vram_sys_base;
143 phys |= 0x30;
144 }
145
146 while (size) {
147 unsigned offset_h = upper_32_bits(phys);
148 unsigned offset_l = lower_32_bits(phys);
149 unsigned pte, end;
150
151 for (i = 7; i >= 0; i--) {
152 block = 1 << (i + 1);
153 if (size >= block && !(virt & (block - 1)))
154 break;
155 } 110 }
156 offset_l |= (i << 7);
157
158 phys += block << 15;
159 size -= block;
160
161 while (block) {
162 pgt = dev_priv->vm_vram_pt[virt >> 14];
163 pte = virt & 0x3ffe;
164
165 end = pte + block;
166 if (end > 16384)
167 end = 16384;
168 block -= (end - pte);
169 virt += (end - pte);
170
171 while (pte < end) {
172 nv_wo32(pgt, (pte * 4) + 0, offset_l);
173 nv_wo32(pgt, (pte * 4) + 4, offset_h);
174 pte += 2;
175 }
176 }
177 }
178 111
179 dev_priv->engine.instmem.flush(dev); 112 tile->used = false;
180 dev_priv->engine.fifo.tlb_flush(dev); 113 spin_unlock(&dev_priv->tile.lock);
181 dev_priv->engine.graph.tlb_flush(dev); 114 }
182 nv50_vm_flush(dev, 6);
183 return 0;
184} 115}
185 116
186void 117struct nouveau_tile_reg *
187nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size) 118nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
119 uint32_t pitch, uint32_t flags)
188{ 120{
189 struct drm_nouveau_private *dev_priv = dev->dev_private; 121 struct drm_nouveau_private *dev_priv = dev->dev_private;
190 struct nouveau_gpuobj *pgt; 122 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
191 unsigned pages, pte, end; 123 struct nouveau_tile_reg *tile, *found = NULL;
192 124 int i;
193 virt -= dev_priv->vm_vram_base;
194 pages = (size >> 16) << 1;
195 125
196 while (pages) { 126 for (i = 0; i < pfb->num_tiles; i++) {
197 pgt = dev_priv->vm_vram_pt[virt >> 29]; 127 tile = nv10_mem_get_tile_region(dev, i);
198 pte = (virt & 0x1ffe0000ULL) >> 15;
199 128
200 end = pte + pages; 129 if (pitch && !found) {
201 if (end > 16384) 130 found = tile;
202 end = 16384; 131 continue;
203 pages -= (end - pte);
204 virt += (end - pte) << 15;
205 132
206 while (pte < end) { 133 } else if (tile && tile->pitch) {
207 nv_wo32(pgt, (pte * 4), 0); 134 /* Kill an unused tile region. */
208 pte++; 135 nv10_mem_update_tile_region(dev, tile, 0, 0, 0, 0);
209 } 136 }
137
138 nv10_mem_put_tile_region(dev, tile, NULL);
210 } 139 }
211 140
212 dev_priv->engine.instmem.flush(dev); 141 if (found)
213 dev_priv->engine.fifo.tlb_flush(dev); 142 nv10_mem_update_tile_region(dev, found, addr, size,
214 dev_priv->engine.graph.tlb_flush(dev); 143 pitch, flags);
215 nv50_vm_flush(dev, 6); 144 return found;
216} 145}
217 146
218/* 147/*
@@ -312,62 +241,7 @@ nouveau_mem_detect_nforce(struct drm_device *dev)
312 return 0; 241 return 0;
313} 242}
314 243
315static void 244int
316nv50_vram_preinit(struct drm_device *dev)
317{
318 struct drm_nouveau_private *dev_priv = dev->dev_private;
319 int i, parts, colbits, rowbitsa, rowbitsb, banks;
320 u64 rowsize, predicted;
321 u32 r0, r4, rt, ru;
322
323 r0 = nv_rd32(dev, 0x100200);
324 r4 = nv_rd32(dev, 0x100204);
325 rt = nv_rd32(dev, 0x100250);
326 ru = nv_rd32(dev, 0x001540);
327 NV_DEBUG(dev, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
328
329 for (i = 0, parts = 0; i < 8; i++) {
330 if (ru & (0x00010000 << i))
331 parts++;
332 }
333
334 colbits = (r4 & 0x0000f000) >> 12;
335 rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
336 rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
337 banks = ((r4 & 0x01000000) ? 8 : 4);
338
339 rowsize = parts * banks * (1 << colbits) * 8;
340 predicted = rowsize << rowbitsa;
341 if (r0 & 0x00000004)
342 predicted += rowsize << rowbitsb;
343
344 if (predicted != dev_priv->vram_size) {
345 NV_WARN(dev, "memory controller reports %dMiB VRAM\n",
346 (u32)(dev_priv->vram_size >> 20));
347 NV_WARN(dev, "we calculated %dMiB VRAM\n",
348 (u32)(predicted >> 20));
349 }
350
351 dev_priv->vram_rblock_size = rowsize >> 12;
352 if (rt & 1)
353 dev_priv->vram_rblock_size *= 3;
354
355 NV_DEBUG(dev, "rblock %lld bytes\n",
356 (u64)dev_priv->vram_rblock_size << 12);
357}
358
359static void
360nvaa_vram_preinit(struct drm_device *dev)
361{
362 struct drm_nouveau_private *dev_priv = dev->dev_private;
363
364 /* To our knowledge, there's no large scale reordering of pages
365 * that occurs on IGP chipsets.
366 */
367 dev_priv->vram_rblock_size = 1;
368}
369
370static int
371nouveau_mem_detect(struct drm_device *dev) 245nouveau_mem_detect(struct drm_device *dev)
372{ 246{
373 struct drm_nouveau_private *dev_priv = dev->dev_private; 247 struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -381,33 +255,6 @@ nouveau_mem_detect(struct drm_device *dev)
381 if (dev_priv->card_type < NV_50) { 255 if (dev_priv->card_type < NV_50) {
382 dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA); 256 dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA);
383 dev_priv->vram_size &= NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK; 257 dev_priv->vram_size &= NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK;
384 } else
385 if (dev_priv->card_type < NV_C0) {
386 dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA);
387 dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32;
388 dev_priv->vram_size &= 0xffffffff00ll;
389
390 switch (dev_priv->chipset) {
391 case 0xaa:
392 case 0xac:
393 case 0xaf:
394 dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10);
395 dev_priv->vram_sys_base <<= 12;
396 nvaa_vram_preinit(dev);
397 break;
398 default:
399 nv50_vram_preinit(dev);
400 break;
401 }
402 } else {
403 dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20;
404 dev_priv->vram_size *= nv_rd32(dev, 0x121c74);
405 }
406
407 NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20));
408 if (dev_priv->vram_sys_base) {
409 NV_INFO(dev, "Stolen system memory at: 0x%010llx\n",
410 dev_priv->vram_sys_base);
411 } 258 }
412 259
413 if (dev_priv->vram_size) 260 if (dev_priv->vram_size)
@@ -415,6 +262,15 @@ nouveau_mem_detect(struct drm_device *dev)
415 return -ENOMEM; 262 return -ENOMEM;
416} 263}
417 264
265bool
266nouveau_mem_flags_valid(struct drm_device *dev, u32 tile_flags)
267{
268 if (!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK))
269 return true;
270
271 return false;
272}
273
418#if __OS_HAS_AGP 274#if __OS_HAS_AGP
419static unsigned long 275static unsigned long
420get_agp_mode(struct drm_device *dev, unsigned long mode) 276get_agp_mode(struct drm_device *dev, unsigned long mode)
@@ -547,10 +403,6 @@ nouveau_mem_vram_init(struct drm_device *dev)
547 if (ret) 403 if (ret)
548 return ret; 404 return ret;
549 405
550 ret = nouveau_mem_detect(dev);
551 if (ret)
552 return ret;
553
554 dev_priv->fb_phys = pci_resource_start(dev->pdev, 1); 406 dev_priv->fb_phys = pci_resource_start(dev->pdev, 1);
555 407
556 ret = nouveau_ttm_global_init(dev_priv); 408 ret = nouveau_ttm_global_init(dev_priv);
@@ -566,13 +418,6 @@ nouveau_mem_vram_init(struct drm_device *dev)
566 return ret; 418 return ret;
567 } 419 }
568 420
569 dev_priv->fb_available_size = dev_priv->vram_size;
570 dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
571 if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
572 dev_priv->fb_mappable_pages =
573 pci_resource_len(dev->pdev, 1);
574 dev_priv->fb_mappable_pages >>= PAGE_SHIFT;
575
576 /* reserve space at end of VRAM for PRAMIN */ 421 /* reserve space at end of VRAM for PRAMIN */
577 if (dev_priv->chipset == 0x40 || dev_priv->chipset == 0x47 || 422 if (dev_priv->chipset == 0x40 || dev_priv->chipset == 0x47 ||
578 dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b) 423 dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b)
@@ -583,6 +428,22 @@ nouveau_mem_vram_init(struct drm_device *dev)
583 else 428 else
584 dev_priv->ramin_rsvd_vram = (512 * 1024); 429 dev_priv->ramin_rsvd_vram = (512 * 1024);
585 430
431 ret = dev_priv->engine.vram.init(dev);
432 if (ret)
433 return ret;
434
435 NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20));
436 if (dev_priv->vram_sys_base) {
437 NV_INFO(dev, "Stolen system memory at: 0x%010llx\n",
438 dev_priv->vram_sys_base);
439 }
440
441 dev_priv->fb_available_size = dev_priv->vram_size;
442 dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
443 if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
444 dev_priv->fb_mappable_pages = pci_resource_len(dev->pdev, 1);
445 dev_priv->fb_mappable_pages >>= PAGE_SHIFT;
446
586 dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram; 447 dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
587 dev_priv->fb_aper_free = dev_priv->fb_available_size; 448 dev_priv->fb_aper_free = dev_priv->fb_available_size;
588 449
@@ -799,3 +660,118 @@ nouveau_mem_timing_fini(struct drm_device *dev)
799 660
800 kfree(mem->timing); 661 kfree(mem->timing);
801} 662}
663
664static int
665nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long p_size)
666{
667 struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
668 struct nouveau_mm *mm;
669 u32 b_size;
670 int ret;
671
672 p_size = (p_size << PAGE_SHIFT) >> 12;
673 b_size = dev_priv->vram_rblock_size >> 12;
674
675 ret = nouveau_mm_init(&mm, 0, p_size, b_size);
676 if (ret)
677 return ret;
678
679 man->priv = mm;
680 return 0;
681}
682
683static int
684nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
685{
686 struct nouveau_mm *mm = man->priv;
687 int ret;
688
689 ret = nouveau_mm_fini(&mm);
690 if (ret)
691 return ret;
692
693 man->priv = NULL;
694 return 0;
695}
696
697static void
698nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
699 struct ttm_mem_reg *mem)
700{
701 struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
702 struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
703 struct drm_device *dev = dev_priv->dev;
704
705 vram->put(dev, (struct nouveau_vram **)&mem->mm_node);
706}
707
708static int
709nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
710 struct ttm_buffer_object *bo,
711 struct ttm_placement *placement,
712 struct ttm_mem_reg *mem)
713{
714 struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
715 struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
716 struct drm_device *dev = dev_priv->dev;
717 struct nouveau_bo *nvbo = nouveau_bo(bo);
718 struct nouveau_vram *node;
719 u32 size_nc = 0;
720 int ret;
721
722 if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
723 size_nc = 1 << nvbo->vma.node->type;
724
725 ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
726 mem->page_alignment << PAGE_SHIFT, size_nc,
727 (nvbo->tile_flags >> 8) & 0xff, &node);
728 if (ret)
729 return ret;
730
731 node->page_shift = 12;
732 if (nvbo->vma.node)
733 node->page_shift = nvbo->vma.node->type;
734
735 mem->mm_node = node;
736 mem->start = node->offset >> PAGE_SHIFT;
737 return 0;
738}
739
740void
741nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
742{
743 struct nouveau_mm *mm = man->priv;
744 struct nouveau_mm_node *r;
745 u64 total = 0, ttotal[3] = {}, tused[3] = {}, tfree[3] = {};
746 int i;
747
748 mutex_lock(&mm->mutex);
749 list_for_each_entry(r, &mm->nodes, nl_entry) {
750 printk(KERN_DEBUG "%s %s-%d: 0x%010llx 0x%010llx\n",
751 prefix, r->free ? "free" : "used", r->type,
752 ((u64)r->offset << 12),
753 (((u64)r->offset + r->length) << 12));
754 total += r->length;
755 ttotal[r->type] += r->length;
756 if (r->free)
757 tfree[r->type] += r->length;
758 else
759 tused[r->type] += r->length;
760 }
761 mutex_unlock(&mm->mutex);
762
763 printk(KERN_DEBUG "%s total: 0x%010llx\n", prefix, total << 12);
764 for (i = 0; i < 3; i++) {
765 printk(KERN_DEBUG "%s type %d: 0x%010llx, "
766 "used 0x%010llx, free 0x%010llx\n", prefix,
767 i, ttotal[i] << 12, tused[i] << 12, tfree[i] << 12);
768 }
769}
770
771const struct ttm_mem_type_manager_func nouveau_vram_manager = {
772 nouveau_vram_manager_init,
773 nouveau_vram_manager_fini,
774 nouveau_vram_manager_new,
775 nouveau_vram_manager_del,
776 nouveau_vram_manager_debug
777};
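
The five callbacks above are packaged as a ttm_mem_type_manager_func table, so TTM drives VRAM allocation through nouveau_mm instead of drm_mm. The hookup happens in the buffer-object code, outside this hunk; a hedged sketch of the registration under the TTM API of this era (call site and flags are assumptions):

	/* in the TTM_PL_VRAM branch of init_mem_type(), or similar */
	man->func = &nouveau_vram_manager;
	man->flags = TTM_MEMTYPE_FLAG_FIXED |
		     TTM_MEMTYPE_FLAG_MAPPABLE;
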
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.c b/drivers/gpu/drm/nouveau/nouveau_mm.c
new file mode 100644
index 000000000000..cdbb11eb701b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.c
@@ -0,0 +1,271 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_mm.h"
28
29static inline void
30region_put(struct nouveau_mm *rmm, struct nouveau_mm_node *a)
31{
32 list_del(&a->nl_entry);
33 list_del(&a->fl_entry);
34 kfree(a);
35}
36
37static struct nouveau_mm_node *
38region_split(struct nouveau_mm *rmm, struct nouveau_mm_node *a, u32 size)
39{
40 struct nouveau_mm_node *b;
41
42 if (a->length == size)
43 return a;
44
45 b = kmalloc(sizeof(*b), GFP_KERNEL);
46 if (unlikely(b == NULL))
47 return NULL;
48
49 b->offset = a->offset;
50 b->length = size;
51 b->free = a->free;
52 b->type = a->type;
53 a->offset += size;
54 a->length -= size;
55 list_add_tail(&b->nl_entry, &a->nl_entry);
56 if (b->free)
57 list_add_tail(&b->fl_entry, &a->fl_entry);
58 return b;
59}
60
61static struct nouveau_mm_node *
62nouveau_mm_merge(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
63{
64 struct nouveau_mm_node *prev, *next;
65
66 /* try to merge with free adjacent entries of same type */
67 prev = list_entry(this->nl_entry.prev, struct nouveau_mm_node, nl_entry);
68 if (this->nl_entry.prev != &rmm->nodes) {
69 if (prev->free && prev->type == this->type) {
70 prev->length += this->length;
71 region_put(rmm, this);
72 this = prev;
73 }
74 }
75
76 next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry);
77 if (this->nl_entry.next != &rmm->nodes) {
78 if (next->free && next->type == this->type) {
79 next->offset = this->offset;
80 next->length += this->length;
81 region_put(rmm, this);
82 this = next;
83 }
84 }
85
86 return this;
87}
88
89void
90nouveau_mm_put(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
91{
92 u32 block_s, block_l;
93
94 this->free = true;
95 list_add(&this->fl_entry, &rmm->free);
96 this = nouveau_mm_merge(rmm, this);
97
98 /* any entirely free blocks now? we'll want to remove typing
99 * on them now so they can be used for any memory allocation
100 */
101 block_s = roundup(this->offset, rmm->block_size);
102 if (block_s + rmm->block_size > this->offset + this->length)
103 return;
104
105 /* split off any still-typed region at the start */
106 if (block_s != this->offset) {
107 if (!region_split(rmm, this, block_s - this->offset))
108 return;
109 }
110
111 /* split off the soon-to-be-untyped block(s) */
112 block_l = rounddown(this->length, rmm->block_size);
113 if (block_l != this->length) {
114 this = region_split(rmm, this, block_l);
115 if (!this)
116 return;
117 }
118
119 /* mark as having no type, and retry merge with any adjacent
120 * untyped blocks
121 */
122 this->type = 0;
123 nouveau_mm_merge(rmm, this);
124}
125
126int
127nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
128 u32 align, struct nouveau_mm_node **pnode)
129{
130 struct nouveau_mm_node *this, *tmp, *next;
131 u32 splitoff, avail, alloc;
132
133 list_for_each_entry_safe(this, tmp, &rmm->free, fl_entry) {
134 next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry);
135 if (this->nl_entry.next == &rmm->nodes)
136 next = NULL;
137
138 /* skip wrongly typed blocks */
139 if (this->type && this->type != type)
140 continue;
141
142 /* account for alignment */
143 splitoff = this->offset & (align - 1);
144 if (splitoff)
145 splitoff = align - splitoff;
146
147 if (this->length <= splitoff)
148 continue;
149
150 /* determine total memory available from this, and
151 * the next block (if appropriate)
152 */
153 avail = this->length;
154 if (next && next->free && (!next->type || next->type == type))
155 avail += next->length;
156
157 avail -= splitoff;
158
159 /* determine allocation size */
160 if (size_nc) {
161 alloc = min(avail, size);
162 alloc = rounddown(alloc, size_nc);
163 if (alloc == 0)
164 continue;
165 } else {
166 alloc = size;
167 if (avail < alloc)
168 continue;
169 }
170
171 /* untyped block, split off a chunk that's a multiple
172 * of block_size and type it
173 */
174 if (!this->type) {
175 u32 block = roundup(alloc + splitoff, rmm->block_size);
176 if (this->length < block)
177 continue;
178
179 this = region_split(rmm, this, block);
180 if (!this)
181 return -ENOMEM;
182
183 this->type = type;
184 }
185
186 /* stealing memory from adjacent block */
187 if (alloc > this->length) {
188 u32 amount = alloc - (this->length - splitoff);
189
190 if (!next->type) {
191 amount = roundup(amount, rmm->block_size);
192
193 next = region_split(rmm, next, amount);
194 if (!next)
195 return -ENOMEM;
196
197 next->type = type;
198 }
199
200 this->length += amount;
201 next->offset += amount;
202 next->length -= amount;
203 if (!next->length) {
204 list_del(&next->nl_entry);
205 list_del(&next->fl_entry);
206 kfree(next);
207 }
208 }
209
210 if (splitoff) {
211 if (!region_split(rmm, this, splitoff))
212 return -ENOMEM;
213 }
214
215 this = region_split(rmm, this, alloc);
216 if (this == NULL)
217 return -ENOMEM;
218
219 this->free = false;
220 list_del(&this->fl_entry);
221 *pnode = this;
222 return 0;
223 }
224
225 return -ENOMEM;
226}
227
228int
229nouveau_mm_init(struct nouveau_mm **prmm, u32 offset, u32 length, u32 block)
230{
231 struct nouveau_mm *rmm;
232 struct nouveau_mm_node *heap;
233
234 heap = kzalloc(sizeof(*heap), GFP_KERNEL);
235 if (!heap)
236 return -ENOMEM;
237 heap->free = true;
238 heap->offset = roundup(offset, block);
239 heap->length = rounddown(offset + length, block) - heap->offset;
240
241 rmm = kzalloc(sizeof(*rmm), GFP_KERNEL);
242 if (!rmm) {
243 kfree(heap);
244 return -ENOMEM;
245 }
246 rmm->block_size = block;
247 mutex_init(&rmm->mutex);
248 INIT_LIST_HEAD(&rmm->nodes);
249 INIT_LIST_HEAD(&rmm->free);
250 list_add(&heap->nl_entry, &rmm->nodes);
251 list_add(&heap->fl_entry, &rmm->free);
252
253 *prmm = rmm;
254 return 0;
255}
256
257int
258nouveau_mm_fini(struct nouveau_mm **prmm)
259{
260 struct nouveau_mm *rmm = *prmm;
261 struct nouveau_mm_node *heap =
262 list_first_entry(&rmm->nodes, struct nouveau_mm_node, nl_entry);
263
264 if (!list_is_singular(&rmm->nodes))
265 return -EBUSY;
266
267 kfree(heap);
268 kfree(rmm);
269 *prmm = NULL;
270 return 0;
271}
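
Taken together, nouveau_mm exposes a small init/get/put/fini lifecycle. A hedged usage sketch follows; the unit sizes, alignment, and type value are arbitrary examples, and callers elsewhere in this series serialize get/put through mm->mutex:

	struct nouveau_mm *mm;
	struct nouveau_mm_node *node;
	int ret;

	/* heap of 0x10000 units at offset 0, typed in 0x100-unit blocks */
	ret = nouveau_mm_init(&mm, 0, 0x10000, 0x100);
	if (ret)
		return ret;

	/* 0x200 units of type 1, contiguous (size_nc = 0), 0x10-aligned */
	mutex_lock(&mm->mutex);
	ret = nouveau_mm_get(mm, 1, 0x200, 0, 0x10, &node);
	if (ret == 0)
		nouveau_mm_put(mm, node);
	mutex_unlock(&mm->mutex);

	nouveau_mm_fini(&mm);
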
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.h b/drivers/gpu/drm/nouveau/nouveau_mm.h
new file mode 100644
index 000000000000..af3844933036
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.h
@@ -0,0 +1,67 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#ifndef __NOUVEAU_REGION_H__
26#define __NOUVEAU_REGION_H__
27
28struct nouveau_mm_node {
29 struct list_head nl_entry;
30 struct list_head fl_entry;
31 struct list_head rl_entry;
32
33 bool free;
34 int type;
35
36 u32 offset;
37 u32 length;
38};
39
40struct nouveau_mm {
41 struct list_head nodes;
42 struct list_head free;
43
44 struct mutex mutex;
45
46 u32 block_size;
47};
48
49int nouveau_mm_init(struct nouveau_mm **, u32 offset, u32 length, u32 block);
50int nouveau_mm_fini(struct nouveau_mm **);
51int nouveau_mm_pre(struct nouveau_mm *);
52int nouveau_mm_get(struct nouveau_mm *, int type, u32 size, u32 size_nc,
53 u32 align, struct nouveau_mm_node **);
54void nouveau_mm_put(struct nouveau_mm *, struct nouveau_mm_node *);
55
56int nv50_vram_init(struct drm_device *);
57int nv50_vram_new(struct drm_device *, u64 size, u32 align, u32 size_nc,
58 u32 memtype, struct nouveau_vram **);
59void nv50_vram_del(struct drm_device *, struct nouveau_vram **);
60bool nv50_vram_flags_valid(struct drm_device *, u32 tile_flags);
61
62int nvc0_vram_init(struct drm_device *);
63int nvc0_vram_new(struct drm_device *, u64 size, u32 align, u32 ncmin,
64 u32 memtype, struct nouveau_vram **);
65bool nvc0_vram_flags_valid(struct drm_device *, u32 tile_flags);
66
67#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 2cc59f8c658b..fe29d604b820 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -99,7 +99,6 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
99 int size, uint32_t *b_offset) 99 int size, uint32_t *b_offset)
100{ 100{
101 struct drm_device *dev = chan->dev; 101 struct drm_device *dev = chan->dev;
102 struct drm_nouveau_private *dev_priv = dev->dev_private;
103 struct nouveau_gpuobj *nobj = NULL; 102 struct nouveau_gpuobj *nobj = NULL;
104 struct drm_mm_node *mem; 103 struct drm_mm_node *mem;
105 uint32_t offset; 104 uint32_t offset;
@@ -113,31 +112,15 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
113 return -ENOMEM; 112 return -ENOMEM;
114 } 113 }
115 114
116 offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT; 115 if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM)
117 if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM) { 116 target = NV_MEM_TARGET_VRAM;
118 target = NV_DMA_TARGET_VIDMEM; 117 else
119 } else 118 target = NV_MEM_TARGET_GART;
120 if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_TT) { 119 offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT;
121 if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA &&
122 dev_priv->card_type < NV_50) {
123 ret = nouveau_sgdma_get_page(dev, offset, &offset);
124 if (ret)
125 return ret;
126 target = NV_DMA_TARGET_PCI;
127 } else {
128 target = NV_DMA_TARGET_AGP;
129 if (dev_priv->card_type >= NV_50)
130 offset += dev_priv->vm_gart_base;
131 }
132 } else {
133 NV_ERROR(dev, "Bad DMA target, mem_type %d!\n",
134 chan->notifier_bo->bo.mem.mem_type);
135 return -EINVAL;
136 }
137 offset += mem->start; 120 offset += mem->start;
138 121
139 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, offset, 122 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, offset,
140 mem->size, NV_DMA_ACCESS_RW, target, 123 mem->size, NV_MEM_ACCESS_RW, target,
141 &nobj); 124 &nobj);
142 if (ret) { 125 if (ret) {
143 drm_mm_put_block(mem); 126 drm_mm_put_block(mem);
@@ -181,15 +164,20 @@ int
181nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data, 164nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data,
182 struct drm_file *file_priv) 165 struct drm_file *file_priv)
183{ 166{
167 struct drm_nouveau_private *dev_priv = dev->dev_private;
184 struct drm_nouveau_notifierobj_alloc *na = data; 168 struct drm_nouveau_notifierobj_alloc *na = data;
185 struct nouveau_channel *chan; 169 struct nouveau_channel *chan;
186 int ret; 170 int ret;
187 171
188 NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(na->channel, file_priv, chan); 172 /* completely unnecessary for these chipsets... */
173 if (unlikely(dev_priv->card_type >= NV_C0))
174 return -EINVAL;
175
176 chan = nouveau_channel_get(dev, file_priv, na->channel);
177 if (IS_ERR(chan))
178 return PTR_ERR(chan);
189 179
190 ret = nouveau_notifier_alloc(chan, na->handle, na->size, &na->offset); 180 ret = nouveau_notifier_alloc(chan, na->handle, na->size, &na->offset);
191 if (ret) 181 nouveau_channel_put(&chan);
192 return ret; 182 return ret;
193
194 return 0;
195} 183}
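
The ioctl now goes through the refcounted channel lookup introduced elsewhere in this series instead of the old NOUVEAU_GET_USER_CHANNEL_WITH_RETURN macro. The general pattern, sketched (whether the returned channel is also mutex-locked is a detail of the series, not visible in this hunk):

	chan = nouveau_channel_get(dev, file_priv, id);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* ... channel holds a reference here ... */

	nouveau_channel_put(&chan);	/* drops the ref, NULLs the pointer */
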
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index dd572adca02a..30b6544467ca 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -35,6 +35,102 @@
35#include "nouveau_drv.h" 35#include "nouveau_drv.h"
36#include "nouveau_drm.h" 36#include "nouveau_drm.h"
37#include "nouveau_ramht.h" 37#include "nouveau_ramht.h"
38#include "nouveau_vm.h"
39
40struct nouveau_gpuobj_method {
41 struct list_head head;
42 u32 mthd;
43 int (*exec)(struct nouveau_channel *, u32 class, u32 mthd, u32 data);
44};
45
46struct nouveau_gpuobj_class {
47 struct list_head head;
48 struct list_head methods;
49 u32 id;
50 u32 engine;
51};
52
53int
54nouveau_gpuobj_class_new(struct drm_device *dev, u32 class, u32 engine)
55{
56 struct drm_nouveau_private *dev_priv = dev->dev_private;
57 struct nouveau_gpuobj_class *oc;
58
59 oc = kzalloc(sizeof(*oc), GFP_KERNEL);
60 if (!oc)
61 return -ENOMEM;
62
63 INIT_LIST_HEAD(&oc->methods);
64 oc->id = class;
65 oc->engine = engine;
66 list_add(&oc->head, &dev_priv->classes);
67 return 0;
68}
69
70int
71nouveau_gpuobj_mthd_new(struct drm_device *dev, u32 class, u32 mthd,
72 int (*exec)(struct nouveau_channel *, u32, u32, u32))
73{
74 struct drm_nouveau_private *dev_priv = dev->dev_private;
75 struct nouveau_gpuobj_method *om;
76 struct nouveau_gpuobj_class *oc;
77
78 list_for_each_entry(oc, &dev_priv->classes, head) {
79 if (oc->id == class)
80 goto found;
81 }
82
83 return -EINVAL;
84
85found:
86 om = kzalloc(sizeof(*om), GFP_KERNEL);
87 if (!om)
88 return -ENOMEM;
89
90 om->mthd = mthd;
91 om->exec = exec;
92 list_add(&om->head, &oc->methods);
93 return 0;
94}
95
96int
97nouveau_gpuobj_mthd_call(struct nouveau_channel *chan,
98 u32 class, u32 mthd, u32 data)
99{
100 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
101 struct nouveau_gpuobj_method *om;
102 struct nouveau_gpuobj_class *oc;
103
104 list_for_each_entry(oc, &dev_priv->classes, head) {
105 if (oc->id != class)
106 continue;
107
108 list_for_each_entry(om, &oc->methods, head) {
109 if (om->mthd == mthd)
110 return om->exec(chan, class, mthd, data);
111 }
112 }
113
114 return -ENOENT;
115}
116
117int
118nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,
119 u32 class, u32 mthd, u32 data)
120{
121 struct drm_nouveau_private *dev_priv = dev->dev_private;
122 struct nouveau_channel *chan = NULL;
123 unsigned long flags;
124 int ret = -EINVAL;
125
126 spin_lock_irqsave(&dev_priv->channels.lock, flags);
127 if (chid > 0 && chid < dev_priv->engine.fifo.channels)
128 chan = dev_priv->channels.ptr[chid];
129 if (chan)
130 ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data);
131 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
132 return ret;
133}
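
nouveau_gpuobj_class_new() and nouveau_gpuobj_mthd_new() above let each engine declare its classes and software methods at init time, which mthd_call()/mthd_call2() then dispatch. A hedged registration sketch (the class and method numbers are illustrative, not taken from this patch):

	static int
	sw_mthd_example(struct nouveau_channel *chan, u32 class,
			u32 mthd, u32 data)
	{
		/* consume the method */
		return 0;
	}

	/* at engine init */
	ret = nouveau_gpuobj_class_new(dev, 0x506e, NVOBJ_ENGINE_SW);
	if (ret == 0)
		ret = nouveau_gpuobj_mthd_new(dev, 0x506e, 0x0500,
					      sw_mthd_example);
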
38 134
39/* NVidia uses context objects to drive drawing operations. 135/* NVidia uses context objects to drive drawing operations.
40 136
@@ -73,17 +169,14 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
73 struct nouveau_gpuobj **gpuobj_ret) 169 struct nouveau_gpuobj **gpuobj_ret)
74{ 170{
75 struct drm_nouveau_private *dev_priv = dev->dev_private; 171 struct drm_nouveau_private *dev_priv = dev->dev_private;
76 struct nouveau_engine *engine = &dev_priv->engine; 172 struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
77 struct nouveau_gpuobj *gpuobj; 173 struct nouveau_gpuobj *gpuobj;
78 struct drm_mm_node *ramin = NULL; 174 struct drm_mm_node *ramin = NULL;
79 int ret; 175 int ret, i;
80 176
81 NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n", 177 NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
82 chan ? chan->id : -1, size, align, flags); 178 chan ? chan->id : -1, size, align, flags);
83 179
84 if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL)
85 return -EINVAL;
86
87 gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); 180 gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
88 if (!gpuobj) 181 if (!gpuobj)
89 return -ENOMEM; 182 return -ENOMEM;
@@ -98,88 +191,41 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
98 spin_unlock(&dev_priv->ramin_lock); 191 spin_unlock(&dev_priv->ramin_lock);
99 192
100 if (chan) { 193 if (chan) {
101 NV_DEBUG(dev, "channel heap\n");
102
103 ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0); 194 ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);
104 if (ramin) 195 if (ramin)
105 ramin = drm_mm_get_block(ramin, size, align); 196 ramin = drm_mm_get_block(ramin, size, align);
106
107 if (!ramin) { 197 if (!ramin) {
108 nouveau_gpuobj_ref(NULL, &gpuobj); 198 nouveau_gpuobj_ref(NULL, &gpuobj);
109 return -ENOMEM; 199 return -ENOMEM;
110 } 200 }
111 } else {
112 NV_DEBUG(dev, "global heap\n");
113
114 /* allocate backing pages, sets vinst */
115 ret = engine->instmem.populate(dev, gpuobj, &size);
116 if (ret) {
117 nouveau_gpuobj_ref(NULL, &gpuobj);
118 return ret;
119 }
120
121 /* try and get aperture space */
122 do {
123 if (drm_mm_pre_get(&dev_priv->ramin_heap))
124 return -ENOMEM;
125 201
126 spin_lock(&dev_priv->ramin_lock); 202 gpuobj->pinst = chan->ramin->pinst;
127 ramin = drm_mm_search_free(&dev_priv->ramin_heap, size, 203 if (gpuobj->pinst != ~0)
128 align, 0); 204 gpuobj->pinst += ramin->start;
129 if (ramin == NULL) {
130 spin_unlock(&dev_priv->ramin_lock);
131 nouveau_gpuobj_ref(NULL, &gpuobj);
132 return -ENOMEM;
133 }
134
135 ramin = drm_mm_get_block_atomic(ramin, size, align);
136 spin_unlock(&dev_priv->ramin_lock);
137 } while (ramin == NULL);
138
139 /* on nv50 it's ok to fail, we have a fallback path */
140 if (!ramin && dev_priv->card_type < NV_50) {
141 nouveau_gpuobj_ref(NULL, &gpuobj);
142 return -ENOMEM;
143 }
144 }
145 205
146 /* if we got a chunk of the aperture, map pages into it */ 206 gpuobj->cinst = ramin->start;
147 gpuobj->im_pramin = ramin; 207 gpuobj->vinst = ramin->start + chan->ramin->vinst;
148 if (!chan && gpuobj->im_pramin && dev_priv->ramin_available) { 208 gpuobj->node = ramin;
149 ret = engine->instmem.bind(dev, gpuobj); 209 } else {
210 ret = instmem->get(gpuobj, size, align);
150 if (ret) { 211 if (ret) {
151 nouveau_gpuobj_ref(NULL, &gpuobj); 212 nouveau_gpuobj_ref(NULL, &gpuobj);
152 return ret; 213 return ret;
153 } 214 }
154 }
155
156 /* calculate the various different addresses for the object */
157 if (chan) {
158 gpuobj->pinst = chan->ramin->pinst;
159 if (gpuobj->pinst != ~0)
160 gpuobj->pinst += gpuobj->im_pramin->start;
161 215
162 if (dev_priv->card_type < NV_50) { 216 ret = -ENOSYS;
163 gpuobj->cinst = gpuobj->pinst; 217 if (!(flags & NVOBJ_FLAG_DONT_MAP))
164 } else { 218 ret = instmem->map(gpuobj);
165 gpuobj->cinst = gpuobj->im_pramin->start; 219 if (ret)
166 gpuobj->vinst = gpuobj->im_pramin->start +
167 chan->ramin->vinst;
168 }
169 } else {
170 if (gpuobj->im_pramin)
171 gpuobj->pinst = gpuobj->im_pramin->start;
172 else
173 gpuobj->pinst = ~0; 220 gpuobj->pinst = ~0;
174 gpuobj->cinst = 0xdeadbeef; 221
222 gpuobj->cinst = NVOBJ_CINST_GLOBAL;
175 } 223 }
176 224
177 if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { 225 if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
178 int i;
179
180 for (i = 0; i < gpuobj->size; i += 4) 226 for (i = 0; i < gpuobj->size; i += 4)
181 nv_wo32(gpuobj, i, 0); 227 nv_wo32(gpuobj, i, 0);
182 engine->instmem.flush(dev); 228 instmem->flush(dev);
183 } 229 }
184 230
185 231
@@ -195,6 +241,7 @@ nouveau_gpuobj_init(struct drm_device *dev)
195 NV_DEBUG(dev, "\n"); 241 NV_DEBUG(dev, "\n");
196 242
197 INIT_LIST_HEAD(&dev_priv->gpuobj_list); 243 INIT_LIST_HEAD(&dev_priv->gpuobj_list);
244 INIT_LIST_HEAD(&dev_priv->classes);
198 spin_lock_init(&dev_priv->ramin_lock); 245 spin_lock_init(&dev_priv->ramin_lock);
199 dev_priv->ramin_base = ~0; 246 dev_priv->ramin_base = ~0;
200 247
@@ -205,9 +252,20 @@ void
205nouveau_gpuobj_takedown(struct drm_device *dev) 252nouveau_gpuobj_takedown(struct drm_device *dev)
206{ 253{
207 struct drm_nouveau_private *dev_priv = dev->dev_private; 254 struct drm_nouveau_private *dev_priv = dev->dev_private;
255 struct nouveau_gpuobj_method *om, *tm;
256 struct nouveau_gpuobj_class *oc, *tc;
208 257
209 NV_DEBUG(dev, "\n"); 258 NV_DEBUG(dev, "\n");
210 259
260 list_for_each_entry_safe(oc, tc, &dev_priv->classes, head) {
261 list_for_each_entry_safe(om, tm, &oc->methods, head) {
262 list_del(&om->head);
263 kfree(om);
264 }
265 list_del(&oc->head);
266 kfree(oc);
267 }
268
211 BUG_ON(!list_empty(&dev_priv->gpuobj_list)); 269 BUG_ON(!list_empty(&dev_priv->gpuobj_list));
212} 270}
213 271
@@ -219,26 +277,34 @@ nouveau_gpuobj_del(struct kref *ref)
219 container_of(ref, struct nouveau_gpuobj, refcount); 277 container_of(ref, struct nouveau_gpuobj, refcount);
220 struct drm_device *dev = gpuobj->dev; 278 struct drm_device *dev = gpuobj->dev;
221 struct drm_nouveau_private *dev_priv = dev->dev_private; 279 struct drm_nouveau_private *dev_priv = dev->dev_private;
222 struct nouveau_engine *engine = &dev_priv->engine; 280 struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
223 int i; 281 int i;
224 282
225 NV_DEBUG(dev, "gpuobj %p\n", gpuobj); 283 NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
226 284
227 if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) { 285 if (gpuobj->node && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
228 for (i = 0; i < gpuobj->size; i += 4) 286 for (i = 0; i < gpuobj->size; i += 4)
229 nv_wo32(gpuobj, i, 0); 287 nv_wo32(gpuobj, i, 0);
230 engine->instmem.flush(dev); 288 instmem->flush(dev);
231 } 289 }
232 290
233 if (gpuobj->dtor) 291 if (gpuobj->dtor)
234 gpuobj->dtor(dev, gpuobj); 292 gpuobj->dtor(dev, gpuobj);
235 293
236 if (gpuobj->im_backing) 294 if (gpuobj->cinst == NVOBJ_CINST_GLOBAL) {
237 engine->instmem.clear(dev, gpuobj); 295 if (gpuobj->node) {
296 instmem->unmap(gpuobj);
297 instmem->put(gpuobj);
298 }
299 } else {
300 if (gpuobj->node) {
301 spin_lock(&dev_priv->ramin_lock);
302 drm_mm_put_block(gpuobj->node);
303 spin_unlock(&dev_priv->ramin_lock);
304 }
305 }
238 306
239 spin_lock(&dev_priv->ramin_lock); 307 spin_lock(&dev_priv->ramin_lock);
240 if (gpuobj->im_pramin)
241 drm_mm_put_block(gpuobj->im_pramin);
242 list_del(&gpuobj->list); 308 list_del(&gpuobj->list);
243 spin_unlock(&dev_priv->ramin_lock); 309 spin_unlock(&dev_priv->ramin_lock);
244 310
@@ -278,7 +344,7 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst,
278 kref_init(&gpuobj->refcount); 344 kref_init(&gpuobj->refcount);
279 gpuobj->size = size; 345 gpuobj->size = size;
280 gpuobj->pinst = pinst; 346 gpuobj->pinst = pinst;
281 gpuobj->cinst = 0xdeadbeef; 347 gpuobj->cinst = NVOBJ_CINST_GLOBAL;
282 gpuobj->vinst = vinst; 348 gpuobj->vinst = vinst;
283 349
284 if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { 350 if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
@@ -335,113 +401,150 @@ nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class)
335 The method below creates a DMA object in instance RAM and returns a handle 401 The method below creates a DMA object in instance RAM and returns a handle
336 to it that can be used to set up context objects. 402 to it that can be used to set up context objects.
337*/ 403*/
338int 404
339nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, 405void
340 uint64_t offset, uint64_t size, int access, 406nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class,
341 int target, struct nouveau_gpuobj **gpuobj) 407 u64 base, u64 size, int target, int access,
408 u32 type, u32 comp)
342{ 409{
343 struct drm_device *dev = chan->dev; 410 struct drm_nouveau_private *dev_priv = obj->dev->dev_private;
344 struct drm_nouveau_private *dev_priv = dev->dev_private; 411 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
345 struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; 412 u32 flags0;
346 int ret;
347 413
348 NV_DEBUG(dev, "ch%d class=0x%04x offset=0x%llx size=0x%llx\n", 414 flags0 = (comp << 29) | (type << 22) | class;
349 chan->id, class, offset, size); 415 flags0 |= 0x00100000;
350 NV_DEBUG(dev, "access=%d target=%d\n", access, target); 416
417 switch (access) {
418 case NV_MEM_ACCESS_RO: flags0 |= 0x00040000; break;
419 case NV_MEM_ACCESS_RW:
420 case NV_MEM_ACCESS_WO: flags0 |= 0x00080000; break;
421 default:
422 break;
423 }
351 424
352 switch (target) { 425 switch (target) {
353 case NV_DMA_TARGET_AGP: 426 case NV_MEM_TARGET_VRAM:
354 offset += dev_priv->gart_info.aper_base; 427 flags0 |= 0x00010000;
428 break;
429 case NV_MEM_TARGET_PCI:
430 flags0 |= 0x00020000;
431 break;
432 case NV_MEM_TARGET_PCI_NOSNOOP:
433 flags0 |= 0x00030000;
355 break; 434 break;
435 case NV_MEM_TARGET_GART:
436 base += dev_priv->gart_info.aper_base;
356 default: 437 default:
438 flags0 &= ~0x00100000;
357 break; 439 break;
358 } 440 }
359 441
360 ret = nouveau_gpuobj_new(dev, chan, 442 /* convert to base + limit */
361 nouveau_gpuobj_class_instmem_size(dev, class), 443 size = (base + size) - 1;
362 16, NVOBJ_FLAG_ZERO_ALLOC |
363 NVOBJ_FLAG_ZERO_FREE, gpuobj);
364 if (ret) {
365 NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
366 return ret;
367 }
368 444
369 if (dev_priv->card_type < NV_50) { 445 nv_wo32(obj, offset + 0x00, flags0);
370 uint32_t frame, adjust, pte_flags = 0; 446 nv_wo32(obj, offset + 0x04, lower_32_bits(size));
371 447 nv_wo32(obj, offset + 0x08, lower_32_bits(base));
372 if (access != NV_DMA_ACCESS_RO) 448 nv_wo32(obj, offset + 0x0c, upper_32_bits(size) << 24 |
373 pte_flags |= (1<<1); 449 upper_32_bits(base));
374 adjust = offset & 0x00000fff; 450 nv_wo32(obj, offset + 0x10, 0x00000000);
375 frame = offset & ~0x00000fff; 451 nv_wo32(obj, offset + 0x14, 0x00000000);
376
377 nv_wo32(*gpuobj, 0, ((1<<12) | (1<<13) | (adjust << 20) |
378 (access << 14) | (target << 16) |
379 class));
380 nv_wo32(*gpuobj, 4, size - 1);
381 nv_wo32(*gpuobj, 8, frame | pte_flags);
382 nv_wo32(*gpuobj, 12, frame | pte_flags);
383 } else {
384 uint64_t limit = offset + size - 1;
385 uint32_t flags0, flags5;
386 452
387 if (target == NV_DMA_TARGET_VIDMEM) { 453 pinstmem->flush(obj->dev);
388 flags0 = 0x00190000; 454}
389 flags5 = 0x00010000;
390 } else {
391 flags0 = 0x7fc00000;
392 flags5 = 0x00080000;
393 }
394 455
395 nv_wo32(*gpuobj, 0, flags0 | class); 456int
396 nv_wo32(*gpuobj, 4, lower_32_bits(limit)); 457nv50_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base, u64 size,
397 nv_wo32(*gpuobj, 8, lower_32_bits(offset)); 458 int target, int access, u32 type, u32 comp,
398 nv_wo32(*gpuobj, 12, ((upper_32_bits(limit) & 0xff) << 24) | 459 struct nouveau_gpuobj **pobj)
399 (upper_32_bits(offset) & 0xff)); 460{
400 nv_wo32(*gpuobj, 20, flags5); 461 struct drm_device *dev = chan->dev;
401 } 462 int ret;
402 463
403 instmem->flush(dev); 464 ret = nouveau_gpuobj_new(dev, chan, 24, 16, NVOBJ_FLAG_ZERO_FREE, pobj);
465 if (ret)
466 return ret;
404 467
405 (*gpuobj)->engine = NVOBJ_ENGINE_SW; 468 nv50_gpuobj_dma_init(*pobj, 0, class, base, size, target,
406 (*gpuobj)->class = class; 469 access, type, comp);
407 return 0; 470 return 0;
408} 471}
409 472
410int 473int
411nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan, 474nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base,
412 uint64_t offset, uint64_t size, int access, 475 u64 size, int access, int target,
413 struct nouveau_gpuobj **gpuobj, 476 struct nouveau_gpuobj **pobj)
414 uint32_t *o_ret)
415{ 477{
478 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
416 struct drm_device *dev = chan->dev; 479 struct drm_device *dev = chan->dev;
417 struct drm_nouveau_private *dev_priv = dev->dev_private; 480 struct nouveau_gpuobj *obj;
481 u32 flags0, flags2;
418 int ret; 482 int ret;
419 483
420 if (dev_priv->gart_info.type == NOUVEAU_GART_AGP || 484 if (dev_priv->card_type >= NV_50) {
421 (dev_priv->card_type >= NV_50 && 485 u32 comp = (target == NV_MEM_TARGET_VM) ? NV_MEM_COMP_VM : 0;
422 dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) { 486 u32 type = (target == NV_MEM_TARGET_VM) ? NV_MEM_TYPE_VM : 0;
423 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 487
424 offset + dev_priv->vm_gart_base, 488 return nv50_gpuobj_dma_new(chan, class, base, size,
425 size, access, NV_DMA_TARGET_AGP, 489 target, access, type, comp, pobj);
426 gpuobj); 490 }
427 if (o_ret) 491
428 *o_ret = 0; 492 if (target == NV_MEM_TARGET_GART) {
429 } else 493 if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
430 if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) { 494 target = NV_MEM_TARGET_PCI_NOSNOOP;
431 nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, gpuobj); 495 base += dev_priv->gart_info.aper_base;
432 if (offset & ~0xffffffffULL) { 496 } else
433 NV_ERROR(dev, "obj offset exceeds 32-bits\n"); 497 if (base != 0) {
434 return -EINVAL; 498 base = nouveau_sgdma_get_physical(dev, base);
499 target = NV_MEM_TARGET_PCI;
500 } else {
501 nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, pobj);
502 return 0;
435 } 503 }
436 if (o_ret)
437 *o_ret = (uint32_t)offset;
438 ret = (*gpuobj != NULL) ? 0 : -EINVAL;
439 } else {
440 NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
441 return -EINVAL;
442 } 504 }
443 505
444 return ret; 506 flags0 = class;
507 flags0 |= 0x00003000; /* PT present, PT linear */
508 flags2 = 0;
509
510 switch (target) {
511 case NV_MEM_TARGET_PCI:
512 flags0 |= 0x00020000;
513 break;
514 case NV_MEM_TARGET_PCI_NOSNOOP:
515 flags0 |= 0x00030000;
516 break;
517 default:
518 break;
519 }
520
521 switch (access) {
522 case NV_MEM_ACCESS_RO:
523 flags0 |= 0x00004000;
524 break;
525 case NV_MEM_ACCESS_WO:
 526	flags0 |= 0x00008000;	/* deliberate fall through: WO also sets the flags2 bit */
527 default:
528 flags2 |= 0x00000002;
529 break;
530 }
531
532 flags0 |= (base & 0x00000fff) << 20;
533 flags2 |= (base & 0xfffff000);
534
535 ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
536 if (ret)
537 return ret;
538
539 nv_wo32(obj, 0x00, flags0);
540 nv_wo32(obj, 0x04, size - 1);
541 nv_wo32(obj, 0x08, flags2);
542 nv_wo32(obj, 0x0c, flags2);
543
544 obj->engine = NVOBJ_ENGINE_SW;
545 obj->class = class;
546 *pobj = obj;
547 return 0;
445} 548}
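For pre-NV50 chipsets, nouveau_gpuobj_dma_new() above packs the DMA object into four words: flags0 carries the class, the page-table bits, the target, the access mode and the low 12 bits of the base address; word 1 holds the limit (size - 1); flags2 carries the page-aligned base plus the writeable bit. A minimal sketch of that packing for a read/write PCI target, using the constants shown above; pack_nv04_ctxdma() is a hypothetical helper, not part of the driver:

	static void
	pack_nv04_ctxdma(int class, u64 base, u32 *flags0, u32 *flags2)
	{
		*flags0  = class;
		*flags0 |= 0x00003000;			/* PT present, PT linear */
		*flags0 |= 0x00020000;			/* NV_MEM_TARGET_PCI */
		*flags0 |= (base & 0x00000fff) << 20;	/* base[11:0] */
		*flags2  = (base & 0xfffff000);		/* base[31:12] */
		*flags2 |= 0x00000002;			/* read/write */
	}

The object is then written as flags0, size - 1, and flags2 twice, matching the four nv_wo32() calls above.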
446 549
447/* Context objects in the instance RAM have the following structure. 550/* Context objects in the instance RAM have the following structure.
@@ -495,82 +598,130 @@ nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan,
495 entry[5]: 598 entry[5]:
496 set to 0? 599 set to 0?
497*/ 600*/
601static int
602nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
603 struct nouveau_gpuobj **gpuobj_ret)
604{
605 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
606 struct nouveau_gpuobj *gpuobj;
607
608 gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
609 if (!gpuobj)
610 return -ENOMEM;
611 gpuobj->dev = chan->dev;
612 gpuobj->engine = NVOBJ_ENGINE_SW;
613 gpuobj->class = class;
614 kref_init(&gpuobj->refcount);
 615	gpuobj->cinst = 0x40;	/* dummy, non-global: sw objects have no instmem backing */
616
617 spin_lock(&dev_priv->ramin_lock);
618 list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
619 spin_unlock(&dev_priv->ramin_lock);
620 *gpuobj_ret = gpuobj;
621 return 0;
622}
623
498int 624int
499nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class, 625nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
500 struct nouveau_gpuobj **gpuobj)
501{ 626{
627 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
502 struct drm_device *dev = chan->dev; 628 struct drm_device *dev = chan->dev;
503 struct drm_nouveau_private *dev_priv = dev->dev_private; 629 struct nouveau_gpuobj_class *oc;
630 struct nouveau_gpuobj *gpuobj;
504 int ret; 631 int ret;
505 632
506 NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class); 633 NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);
507 634
635 list_for_each_entry(oc, &dev_priv->classes, head) {
636 if (oc->id == class)
637 goto found;
638 }
639
640 NV_ERROR(dev, "illegal object class: 0x%x\n", class);
641 return -EINVAL;
642
643found:
644 switch (oc->engine) {
645 case NVOBJ_ENGINE_SW:
646 if (dev_priv->card_type < NV_C0) {
647 ret = nouveau_gpuobj_sw_new(chan, class, &gpuobj);
648 if (ret)
649 return ret;
650 goto insert;
651 }
652 break;
653 case NVOBJ_ENGINE_GR:
654 if ((dev_priv->card_type >= NV_20 && !chan->ramin_grctx) ||
655 (dev_priv->card_type < NV_20 && !chan->pgraph_ctx)) {
656 struct nouveau_pgraph_engine *pgraph =
657 &dev_priv->engine.graph;
658
659 ret = pgraph->create_context(chan);
660 if (ret)
661 return ret;
662 }
663 break;
664 case NVOBJ_ENGINE_CRYPT:
665 if (!chan->crypt_ctx) {
666 struct nouveau_crypt_engine *pcrypt =
667 &dev_priv->engine.crypt;
668
669 ret = pcrypt->create_context(chan);
670 if (ret)
671 return ret;
672 }
673 break;
674 }
675
 676	/* we're done if this is Fermi */
677 if (dev_priv->card_type >= NV_C0)
678 return 0;
679
508 ret = nouveau_gpuobj_new(dev, chan, 680 ret = nouveau_gpuobj_new(dev, chan,
509 nouveau_gpuobj_class_instmem_size(dev, class), 681 nouveau_gpuobj_class_instmem_size(dev, class),
510 16, 682 16,
511 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, 683 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
512 gpuobj); 684 &gpuobj);
513 if (ret) { 685 if (ret) {
514 NV_ERROR(dev, "Error creating gpuobj: %d\n", ret); 686 NV_ERROR(dev, "error creating gpuobj: %d\n", ret);
515 return ret; 687 return ret;
516 } 688 }
517 689
518 if (dev_priv->card_type >= NV_50) { 690 if (dev_priv->card_type >= NV_50) {
519 nv_wo32(*gpuobj, 0, class); 691 nv_wo32(gpuobj, 0, class);
520 nv_wo32(*gpuobj, 20, 0x00010000); 692 nv_wo32(gpuobj, 20, 0x00010000);
521 } else { 693 } else {
522 switch (class) { 694 switch (class) {
523 case NV_CLASS_NULL: 695 case NV_CLASS_NULL:
524 nv_wo32(*gpuobj, 0, 0x00001030); 696 nv_wo32(gpuobj, 0, 0x00001030);
525 nv_wo32(*gpuobj, 4, 0xFFFFFFFF); 697 nv_wo32(gpuobj, 4, 0xFFFFFFFF);
526 break; 698 break;
527 default: 699 default:
528 if (dev_priv->card_type >= NV_40) { 700 if (dev_priv->card_type >= NV_40) {
529 nv_wo32(*gpuobj, 0, class); 701 nv_wo32(gpuobj, 0, class);
530#ifdef __BIG_ENDIAN 702#ifdef __BIG_ENDIAN
531 nv_wo32(*gpuobj, 8, 0x01000000); 703 nv_wo32(gpuobj, 8, 0x01000000);
532#endif 704#endif
533 } else { 705 } else {
534#ifdef __BIG_ENDIAN 706#ifdef __BIG_ENDIAN
535 nv_wo32(*gpuobj, 0, class | 0x00080000); 707 nv_wo32(gpuobj, 0, class | 0x00080000);
536#else 708#else
537 nv_wo32(*gpuobj, 0, class); 709 nv_wo32(gpuobj, 0, class);
538#endif 710#endif
539 } 711 }
540 } 712 }
541 } 713 }
542 dev_priv->engine.instmem.flush(dev); 714 dev_priv->engine.instmem.flush(dev);
543 715
544 (*gpuobj)->engine = NVOBJ_ENGINE_GR; 716 gpuobj->engine = oc->engine;
545 (*gpuobj)->class = class; 717 gpuobj->class = oc->id;
546 return 0;
547}
548 718
549int 719insert:
550nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class, 720 ret = nouveau_ramht_insert(chan, handle, gpuobj);
551 struct nouveau_gpuobj **gpuobj_ret) 721 if (ret)
552{ 722 NV_ERROR(dev, "error adding gpuobj to RAMHT: %d\n", ret);
553 struct drm_nouveau_private *dev_priv; 723 nouveau_gpuobj_ref(NULL, &gpuobj);
554 struct nouveau_gpuobj *gpuobj; 724 return ret;
555
556 if (!chan || !gpuobj_ret || *gpuobj_ret != NULL)
557 return -EINVAL;
558 dev_priv = chan->dev->dev_private;
559
560 gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
561 if (!gpuobj)
562 return -ENOMEM;
563 gpuobj->dev = chan->dev;
564 gpuobj->engine = NVOBJ_ENGINE_SW;
565 gpuobj->class = class;
566 kref_init(&gpuobj->refcount);
567 gpuobj->cinst = 0x40;
568
569 spin_lock(&dev_priv->ramin_lock);
570 list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
571 spin_unlock(&dev_priv->ramin_lock);
572 *gpuobj_ret = gpuobj;
573 return 0;
574} 725}
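The class lookup at the top of nouveau_gpuobj_gr_new() assumes every object class was registered on dev_priv->classes during engine init. A sketch of what such registration presumably looks like; nouveau_gpuobj_class_new() and the NVOBJ_CLASS() wrapper are assumed to exist elsewhere in this series, and the class ids shown are NV50-era examples:

	/* Assumed registration, one entry per (class id, engine) pair,
	 * searched later by nouveau_gpuobj_gr_new().
	 */
	NVOBJ_CLASS(dev, 0x506e, SW);	/* software methods -> NVOBJ_ENGINE_SW */
	NVOBJ_CLASS(dev, 0x5039, GR);	/* M2MF             -> NVOBJ_ENGINE_GR */
	NVOBJ_CLASS(dev, 0x74c1, CRYPT);/* crypt            -> NVOBJ_ENGINE_CRYPT */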
575 726
576static int 727static int
@@ -585,7 +736,7 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
585 NV_DEBUG(dev, "ch%d\n", chan->id); 736 NV_DEBUG(dev, "ch%d\n", chan->id);
586 737
587 /* Base amount for object storage (4KiB enough?) */ 738 /* Base amount for object storage (4KiB enough?) */
588 size = 0x1000; 739 size = 0x2000;
589 base = 0; 740 base = 0;
590 741
591 /* PGRAPH context */ 742 /* PGRAPH context */
@@ -624,12 +775,30 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
624{ 775{
625 struct drm_device *dev = chan->dev; 776 struct drm_device *dev = chan->dev;
626 struct drm_nouveau_private *dev_priv = dev->dev_private; 777 struct drm_nouveau_private *dev_priv = dev->dev_private;
627 struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
628 struct nouveau_gpuobj *vram = NULL, *tt = NULL; 778 struct nouveau_gpuobj *vram = NULL, *tt = NULL;
629 int ret, i; 779 int ret;
630 780
631 NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h); 781 NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
632 782
783 if (dev_priv->card_type == NV_C0) {
784 struct nouveau_vm *vm = dev_priv->chan_vm;
785 struct nouveau_vm_pgd *vpgd;
786
787 ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0,
788 &chan->ramin);
789 if (ret)
790 return ret;
791
792 nouveau_vm_ref(vm, &chan->vm, NULL);
793
794 vpgd = list_first_entry(&vm->pgd_list, struct nouveau_vm_pgd, head);
795 nv_wo32(chan->ramin, 0x0200, lower_32_bits(vpgd->obj->vinst));
796 nv_wo32(chan->ramin, 0x0204, upper_32_bits(vpgd->obj->vinst));
797 nv_wo32(chan->ramin, 0x0208, 0xffffffff);
798 nv_wo32(chan->ramin, 0x020c, 0x000000ff);
799 return 0;
800 }
801
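The NV_C0 branch above seeds the channel's instance block directly rather than going through the pre-Fermi PRAMIN heap; an annotated restatement of the four writes (the field interpretations are an assumption, not taken from hardware documentation):

	nv_wo32(chan->ramin, 0x0200, lower_32_bits(vpgd->obj->vinst)); /* page directory, low  */
	nv_wo32(chan->ramin, 0x0204, upper_32_bits(vpgd->obj->vinst)); /* page directory, high */
	nv_wo32(chan->ramin, 0x0208, 0xffffffff);                      /* address limit, low   */
	nv_wo32(chan->ramin, 0x020c, 0x000000ff);                      /* address limit, high  */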
633 /* Allocate a chunk of memory for per-channel object storage */ 802 /* Allocate a chunk of memory for per-channel object storage */
634 ret = nouveau_gpuobj_channel_init_pramin(chan); 803 ret = nouveau_gpuobj_channel_init_pramin(chan);
635 if (ret) { 804 if (ret) {
@@ -639,14 +808,12 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
639 808
640 /* NV50 VM 809 /* NV50 VM
641 * - Allocate per-channel page-directory 810 * - Allocate per-channel page-directory
642 * - Map GART and VRAM into the channel's address space at the 811 * - Link with shared channel VM
643 * locations determined during init.
644 */ 812 */
645 if (dev_priv->card_type >= NV_50) { 813 if (dev_priv->chan_vm) {
646 u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200; 814 u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
647 u64 vm_vinst = chan->ramin->vinst + pgd_offs; 815 u64 vm_vinst = chan->ramin->vinst + pgd_offs;
648 u32 vm_pinst = chan->ramin->pinst; 816 u32 vm_pinst = chan->ramin->pinst;
649 u32 pde;
650 817
651 if (vm_pinst != ~0) 818 if (vm_pinst != ~0)
652 vm_pinst += pgd_offs; 819 vm_pinst += pgd_offs;
@@ -655,29 +822,8 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
655 0, &chan->vm_pd); 822 0, &chan->vm_pd);
656 if (ret) 823 if (ret)
657 return ret; 824 return ret;
658 for (i = 0; i < 0x4000; i += 8) {
659 nv_wo32(chan->vm_pd, i + 0, 0x00000000);
660 nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe);
661 }
662
663 nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma,
664 &chan->vm_gart_pt);
665 pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 8;
666 nv_wo32(chan->vm_pd, pde + 0, chan->vm_gart_pt->vinst | 3);
667 nv_wo32(chan->vm_pd, pde + 4, 0x00000000);
668
669 pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 8;
670 for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
671 nouveau_gpuobj_ref(dev_priv->vm_vram_pt[i],
672 &chan->vm_vram_pt[i]);
673
674 nv_wo32(chan->vm_pd, pde + 0,
675 chan->vm_vram_pt[i]->vinst | 0x61);
676 nv_wo32(chan->vm_pd, pde + 4, 0x00000000);
677 pde += 8;
678 }
679 825
680 instmem->flush(dev); 826 nouveau_vm_ref(dev_priv->chan_vm, &chan->vm, chan->vm_pd);
681 } 827 }
682 828
683 /* RAMHT */ 829 /* RAMHT */
@@ -700,9 +846,8 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
700 /* VRAM ctxdma */ 846 /* VRAM ctxdma */
701 if (dev_priv->card_type >= NV_50) { 847 if (dev_priv->card_type >= NV_50) {
702 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 848 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
703 0, dev_priv->vm_end, 849 0, (1ULL << 40), NV_MEM_ACCESS_RW,
704 NV_DMA_ACCESS_RW, 850 NV_MEM_TARGET_VM, &vram);
705 NV_DMA_TARGET_AGP, &vram);
706 if (ret) { 851 if (ret) {
707 NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret); 852 NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
708 return ret; 853 return ret;
@@ -710,8 +855,8 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
710 } else { 855 } else {
711 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 856 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
712 0, dev_priv->fb_available_size, 857 0, dev_priv->fb_available_size,
713 NV_DMA_ACCESS_RW, 858 NV_MEM_ACCESS_RW,
714 NV_DMA_TARGET_VIDMEM, &vram); 859 NV_MEM_TARGET_VRAM, &vram);
715 if (ret) { 860 if (ret) {
716 NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret); 861 NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
717 return ret; 862 return ret;
@@ -728,21 +873,13 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
728 /* TT memory ctxdma */ 873 /* TT memory ctxdma */
729 if (dev_priv->card_type >= NV_50) { 874 if (dev_priv->card_type >= NV_50) {
730 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 875 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
731 0, dev_priv->vm_end, 876 0, (1ULL << 40), NV_MEM_ACCESS_RW,
732 NV_DMA_ACCESS_RW, 877 NV_MEM_TARGET_VM, &tt);
733 NV_DMA_TARGET_AGP, &tt);
734 if (ret) {
735 NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
736 return ret;
737 }
738 } else
739 if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
740 ret = nouveau_gpuobj_gart_dma_new(chan, 0,
741 dev_priv->gart_info.aper_size,
742 NV_DMA_ACCESS_RW, &tt, NULL);
743 } else { 878 } else {
744 NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type); 879 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
745 ret = -EINVAL; 880 0, dev_priv->gart_info.aper_size,
881 NV_MEM_ACCESS_RW,
882 NV_MEM_TARGET_GART, &tt);
746 } 883 }
747 884
748 if (ret) { 885 if (ret) {
@@ -763,21 +900,14 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
763void 900void
764nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan) 901nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
765{ 902{
766 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
767 struct drm_device *dev = chan->dev; 903 struct drm_device *dev = chan->dev;
768 int i;
769 904
770 NV_DEBUG(dev, "ch%d\n", chan->id); 905 NV_DEBUG(dev, "ch%d\n", chan->id);
771 906
772 if (!chan->ramht)
773 return;
774
775 nouveau_ramht_ref(NULL, &chan->ramht, chan); 907 nouveau_ramht_ref(NULL, &chan->ramht, chan);
776 908
909 nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
777 nouveau_gpuobj_ref(NULL, &chan->vm_pd); 910 nouveau_gpuobj_ref(NULL, &chan->vm_pd);
778 nouveau_gpuobj_ref(NULL, &chan->vm_gart_pt);
779 for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
780 nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]);
781 911
782 if (chan->ramin_heap.free_stack.next) 912 if (chan->ramin_heap.free_stack.next)
783 drm_mm_takedown(&chan->ramin_heap); 913 drm_mm_takedown(&chan->ramin_heap);
@@ -791,147 +921,91 @@ nouveau_gpuobj_suspend(struct drm_device *dev)
791 struct nouveau_gpuobj *gpuobj; 921 struct nouveau_gpuobj *gpuobj;
792 int i; 922 int i;
793 923
794 if (dev_priv->card_type < NV_50) {
795 dev_priv->susres.ramin_copy = vmalloc(dev_priv->ramin_rsvd_vram);
796 if (!dev_priv->susres.ramin_copy)
797 return -ENOMEM;
798
799 for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
800 dev_priv->susres.ramin_copy[i/4] = nv_ri32(dev, i);
801 return 0;
802 }
803
804 list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) { 924 list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
805 if (!gpuobj->im_backing) 925 if (gpuobj->cinst != NVOBJ_CINST_GLOBAL)
806 continue; 926 continue;
807 927
808 gpuobj->im_backing_suspend = vmalloc(gpuobj->size); 928 gpuobj->suspend = vmalloc(gpuobj->size);
809 if (!gpuobj->im_backing_suspend) { 929 if (!gpuobj->suspend) {
810 nouveau_gpuobj_resume(dev); 930 nouveau_gpuobj_resume(dev);
811 return -ENOMEM; 931 return -ENOMEM;
812 } 932 }
813 933
814 for (i = 0; i < gpuobj->size; i += 4) 934 for (i = 0; i < gpuobj->size; i += 4)
815 gpuobj->im_backing_suspend[i/4] = nv_ro32(gpuobj, i); 935 gpuobj->suspend[i/4] = nv_ro32(gpuobj, i);
816 } 936 }
817 937
818 return 0; 938 return 0;
819} 939}
820 940
821void 941void
822nouveau_gpuobj_suspend_cleanup(struct drm_device *dev)
823{
824 struct drm_nouveau_private *dev_priv = dev->dev_private;
825 struct nouveau_gpuobj *gpuobj;
826
827 if (dev_priv->card_type < NV_50) {
828 vfree(dev_priv->susres.ramin_copy);
829 dev_priv->susres.ramin_copy = NULL;
830 return;
831 }
832
833 list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
834 if (!gpuobj->im_backing_suspend)
835 continue;
836
837 vfree(gpuobj->im_backing_suspend);
838 gpuobj->im_backing_suspend = NULL;
839 }
840}
841
842void
843nouveau_gpuobj_resume(struct drm_device *dev) 942nouveau_gpuobj_resume(struct drm_device *dev)
844{ 943{
845 struct drm_nouveau_private *dev_priv = dev->dev_private; 944 struct drm_nouveau_private *dev_priv = dev->dev_private;
846 struct nouveau_gpuobj *gpuobj; 945 struct nouveau_gpuobj *gpuobj;
847 int i; 946 int i;
848 947
849 if (dev_priv->card_type < NV_50) {
850 for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
851 nv_wi32(dev, i, dev_priv->susres.ramin_copy[i/4]);
852 nouveau_gpuobj_suspend_cleanup(dev);
853 return;
854 }
855
856 list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) { 948 list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
857 if (!gpuobj->im_backing_suspend) 949 if (!gpuobj->suspend)
858 continue; 950 continue;
859 951
860 for (i = 0; i < gpuobj->size; i += 4) 952 for (i = 0; i < gpuobj->size; i += 4)
861 nv_wo32(gpuobj, i, gpuobj->im_backing_suspend[i/4]); 953 nv_wo32(gpuobj, i, gpuobj->suspend[i/4]);
862 dev_priv->engine.instmem.flush(dev); 954
955 vfree(gpuobj->suspend);
956 gpuobj->suspend = NULL;
863 } 957 }
864 958
865 nouveau_gpuobj_suspend_cleanup(dev); 959 dev_priv->engine.instmem.flush(dev);
866} 960}
867 961
868int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data, 962int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
869 struct drm_file *file_priv) 963 struct drm_file *file_priv)
870{ 964{
871 struct drm_nouveau_private *dev_priv = dev->dev_private;
872 struct drm_nouveau_grobj_alloc *init = data; 965 struct drm_nouveau_grobj_alloc *init = data;
873 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
874 struct nouveau_pgraph_object_class *grc;
875 struct nouveau_gpuobj *gr = NULL;
876 struct nouveau_channel *chan; 966 struct nouveau_channel *chan;
877 int ret; 967 int ret;
878 968
879 NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan);
880
881 if (init->handle == ~0) 969 if (init->handle == ~0)
882 return -EINVAL; 970 return -EINVAL;
883 971
884 grc = pgraph->grclass; 972 chan = nouveau_channel_get(dev, file_priv, init->channel);
885 while (grc->id) { 973 if (IS_ERR(chan))
886 if (grc->id == init->class) 974 return PTR_ERR(chan);
887 break;
888 grc++;
889 }
890 975
891 if (!grc->id) { 976 if (nouveau_ramht_find(chan, init->handle)) {
892 NV_ERROR(dev, "Illegal object class: 0x%x\n", init->class); 977 ret = -EEXIST;
893 return -EPERM; 978 goto out;
894 } 979 }
895 980
896 if (nouveau_ramht_find(chan, init->handle)) 981 ret = nouveau_gpuobj_gr_new(chan, init->handle, init->class);
897 return -EEXIST;
898
899 if (!grc->software)
900 ret = nouveau_gpuobj_gr_new(chan, grc->id, &gr);
901 else
902 ret = nouveau_gpuobj_sw_new(chan, grc->id, &gr);
903 if (ret) { 982 if (ret) {
904 NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n", 983 NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
905 ret, init->channel, init->handle); 984 ret, init->channel, init->handle);
906 return ret;
907 } 985 }
908 986
909 ret = nouveau_ramht_insert(chan, init->handle, gr); 987out:
910 nouveau_gpuobj_ref(NULL, &gr); 988 nouveau_channel_put(&chan);
911 if (ret) { 989 return ret;
912 NV_ERROR(dev, "Error referencing object: %d (%d/0x%08x)\n",
913 ret, init->channel, init->handle);
914 return ret;
915 }
916
917 return 0;
918} 990}
919 991
920int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data, 992int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
921 struct drm_file *file_priv) 993 struct drm_file *file_priv)
922{ 994{
923 struct drm_nouveau_gpuobj_free *objfree = data; 995 struct drm_nouveau_gpuobj_free *objfree = data;
924 struct nouveau_gpuobj *gpuobj;
925 struct nouveau_channel *chan; 996 struct nouveau_channel *chan;
997 int ret;
926 998
927 NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan); 999 chan = nouveau_channel_get(dev, file_priv, objfree->channel);
1000 if (IS_ERR(chan))
1001 return PTR_ERR(chan);
928 1002
929 gpuobj = nouveau_ramht_find(chan, objfree->handle); 1003 /* Synchronize with the user channel */
930 if (!gpuobj) 1004 nouveau_channel_idle(chan);
931 return -ENOENT;
932 1005
933 nouveau_ramht_remove(chan, objfree->handle); 1006 ret = nouveau_ramht_remove(chan, objfree->handle);
934 return 0; 1007 nouveau_channel_put(&chan);
1008 return ret;
935} 1009}
936 1010
937u32 1011u32
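For context, a hedged userspace sketch of exercising the two reworked ioctls above; the DRM_IOCTL_NOUVEAU_* request macros and the struct layouts are assumed from the nouveau_drm.h of this era:

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <drm/nouveau_drm.h>	/* assumed: ioctl numbers + structs */

	/* Allocate a graphics object on a channel, then free it again. */
	static int alloc_and_free(int fd, int channel, uint32_t handle, int class)
	{
		struct drm_nouveau_grobj_alloc alloc = {
			.channel = channel,
			.handle  = handle,	/* must not be ~0 */
			.class   = class,	/* must be a registered class id */
		};
		struct drm_nouveau_gpuobj_free free_req = {
			.channel = channel,
			.handle  = handle,
		};

		if (ioctl(fd, DRM_IOCTL_NOUVEAU_GROBJ_ALLOC, &alloc))
			return -1;	/* errno EEXIST: handle already in RAMHT */

		return ioctl(fd, DRM_IOCTL_NOUVEAU_GPUOBJ_FREE, &free_req);
	}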
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index 9f7b158f5825..fb846a3fef15 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -27,6 +27,10 @@
27#include "nouveau_drv.h" 27#include "nouveau_drv.h"
28#include "nouveau_pm.h" 28#include "nouveau_pm.h"
29 29
30#ifdef CONFIG_ACPI
31#include <linux/acpi.h>
32#endif
33#include <linux/power_supply.h>
30#include <linux/hwmon.h> 34#include <linux/hwmon.h>
31#include <linux/hwmon-sysfs.h> 35#include <linux/hwmon-sysfs.h>
32 36
@@ -418,8 +422,7 @@ nouveau_hwmon_init(struct drm_device *dev)
418 return ret; 422 return ret;
419 } 423 }
420 dev_set_drvdata(hwmon_dev, dev); 424 dev_set_drvdata(hwmon_dev, dev);
421 ret = sysfs_create_group(&hwmon_dev->kobj, 425 ret = sysfs_create_group(&dev->pdev->dev.kobj, &hwmon_attrgroup);
422 &hwmon_attrgroup);
423 if (ret) { 426 if (ret) {
424 NV_ERROR(dev, 427 NV_ERROR(dev,
425 "Unable to create hwmon sysfs file: %d\n", ret); 428 "Unable to create hwmon sysfs file: %d\n", ret);
@@ -446,6 +449,25 @@ nouveau_hwmon_fini(struct drm_device *dev)
446#endif 449#endif
447} 450}
448 451
452#ifdef CONFIG_ACPI
453static int
454nouveau_pm_acpi_event(struct notifier_block *nb, unsigned long val, void *data)
455{
456 struct drm_nouveau_private *dev_priv =
457 container_of(nb, struct drm_nouveau_private, engine.pm.acpi_nb);
458 struct drm_device *dev = dev_priv->dev;
459 struct acpi_bus_event *entry = (struct acpi_bus_event *)data;
460
461 if (strcmp(entry->device_class, "ac_adapter") == 0) {
462 bool ac = power_supply_is_system_supplied();
463
464 NV_DEBUG(dev, "power supply changed: %s\n", ac ? "AC" : "DC");
465 }
466
467 return NOTIFY_OK;
468}
469#endif
470
449int 471int
450nouveau_pm_init(struct drm_device *dev) 472nouveau_pm_init(struct drm_device *dev)
451{ 473{
@@ -485,6 +507,10 @@ nouveau_pm_init(struct drm_device *dev)
485 507
486 nouveau_sysfs_init(dev); 508 nouveau_sysfs_init(dev);
487 nouveau_hwmon_init(dev); 509 nouveau_hwmon_init(dev);
510#ifdef CONFIG_ACPI
511 pm->acpi_nb.notifier_call = nouveau_pm_acpi_event;
512 register_acpi_notifier(&pm->acpi_nb);
513#endif
488 514
489 return 0; 515 return 0;
490} 516}
@@ -503,6 +529,9 @@ nouveau_pm_fini(struct drm_device *dev)
503 nouveau_perf_fini(dev); 529 nouveau_perf_fini(dev);
504 nouveau_volt_fini(dev); 530 nouveau_volt_fini(dev);
505 531
532#ifdef CONFIG_ACPI
533 unregister_acpi_notifier(&pm->acpi_nb);
534#endif
506 nouveau_hwmon_fini(dev); 535 nouveau_hwmon_fini(dev);
507 nouveau_sysfs_fini(dev); 536 nouveau_sysfs_fini(dev);
508} 537}
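The ACPI notifier registered above must outlive the registration call, which is why it is embedded in the per-device pm engine state rather than allocated ad hoc; the handler's container_of() then recovers dev_priv from it. The assumed companion field (its exact placement in nouveau_drv.h is an assumption):

	/* assumed in struct nouveau_pm_engine: */
	struct notifier_block acpi_nb;	/* registered in nouveau_pm_init(),
					 * unregistered in nouveau_pm_fini() */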
diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.c b/drivers/gpu/drm/nouveau/nouveau_ramht.c
index 2d8580927ca4..bef3e6910418 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ramht.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ramht.c
@@ -104,17 +104,17 @@ nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle,
104 nouveau_gpuobj_ref(gpuobj, &entry->gpuobj); 104 nouveau_gpuobj_ref(gpuobj, &entry->gpuobj);
105 105
106 if (dev_priv->card_type < NV_40) { 106 if (dev_priv->card_type < NV_40) {
107 ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->cinst >> 4) | 107 ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->pinst >> 4) |
108 (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) | 108 (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
109 (gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT); 109 (gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
110 } else 110 } else
111 if (dev_priv->card_type < NV_50) { 111 if (dev_priv->card_type < NV_50) {
112 ctx = (gpuobj->cinst >> 4) | 112 ctx = (gpuobj->pinst >> 4) |
113 (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) | 113 (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
114 (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT); 114 (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
115 } else { 115 } else {
116 if (gpuobj->engine == NVOBJ_ENGINE_DISPLAY) { 116 if (gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
117 ctx = (gpuobj->cinst << 10) | 2; 117 ctx = (gpuobj->cinst << 10) | chan->id;
118 } else { 118 } else {
119 ctx = (gpuobj->cinst >> 4) | 119 ctx = (gpuobj->cinst >> 4) |
120 ((gpuobj->engine << 120 ((gpuobj->engine <<
@@ -214,18 +214,19 @@ out:
214 spin_unlock_irqrestore(&chan->ramht->lock, flags); 214 spin_unlock_irqrestore(&chan->ramht->lock, flags);
215} 215}
216 216
217void 217int
218nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle) 218nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle)
219{ 219{
220 struct nouveau_ramht_entry *entry; 220 struct nouveau_ramht_entry *entry;
221 221
222 entry = nouveau_ramht_remove_entry(chan, handle); 222 entry = nouveau_ramht_remove_entry(chan, handle);
223 if (!entry) 223 if (!entry)
224 return; 224 return -ENOENT;
225 225
226 nouveau_ramht_remove_hash(chan, entry->handle); 226 nouveau_ramht_remove_hash(chan, entry->handle);
227 nouveau_gpuobj_ref(NULL, &entry->gpuobj); 227 nouveau_gpuobj_ref(NULL, &entry->gpuobj);
228 kfree(entry); 228 kfree(entry);
229 return 0;
229} 230}
230 231
231struct nouveau_gpuobj * 232struct nouveau_gpuobj *
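With nouveau_ramht_remove() now returning int, a missing handle can be propagated instead of vanishing silently; a sketch of the intended calling pattern, mirroring nouveau_ioctl_gpuobj_free() earlier in this series:

	ret = nouveau_ramht_remove(chan, handle);
	if (ret)	/* -ENOENT: no such handle on this channel */
		NV_ERROR(dev, "handle 0x%08x not found: %d\n", handle, ret);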
diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.h b/drivers/gpu/drm/nouveau/nouveau_ramht.h
index b79cb5e1a8f1..c82de98fee0e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ramht.h
+++ b/drivers/gpu/drm/nouveau/nouveau_ramht.h
@@ -48,7 +48,7 @@ extern void nouveau_ramht_ref(struct nouveau_ramht *, struct nouveau_ramht **,
48 48
49extern int nouveau_ramht_insert(struct nouveau_channel *, u32 handle, 49extern int nouveau_ramht_insert(struct nouveau_channel *, u32 handle,
50 struct nouveau_gpuobj *); 50 struct nouveau_gpuobj *);
51extern void nouveau_ramht_remove(struct nouveau_channel *, u32 handle); 51extern int nouveau_ramht_remove(struct nouveau_channel *, u32 handle);
52extern struct nouveau_gpuobj * 52extern struct nouveau_gpuobj *
53nouveau_ramht_find(struct nouveau_channel *chan, u32 handle); 53nouveau_ramht_find(struct nouveau_channel *chan, u32 handle);
54 54
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
index 1b42541ca9e5..04e8fb795269 100644
--- a/drivers/gpu/drm/nouveau/nouveau_reg.h
+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h
@@ -45,6 +45,11 @@
45# define NV04_PFB_REF_CMD_REFRESH (1 << 0) 45# define NV04_PFB_REF_CMD_REFRESH (1 << 0)
46#define NV04_PFB_PRE 0x001002d4 46#define NV04_PFB_PRE 0x001002d4
47# define NV04_PFB_PRE_CMD_PRECHARGE (1 << 0) 47# define NV04_PFB_PRE_CMD_PRECHARGE (1 << 0)
48#define NV20_PFB_ZCOMP(i) (0x00100300 + 4*(i))
49# define NV20_PFB_ZCOMP_MODE_32 (4 << 24)
50# define NV20_PFB_ZCOMP_EN (1 << 31)
51# define NV25_PFB_ZCOMP_MODE_16 (1 << 20)
52# define NV25_PFB_ZCOMP_MODE_32 (2 << 20)
48#define NV10_PFB_CLOSE_PAGE2 0x0010033c 53#define NV10_PFB_CLOSE_PAGE2 0x0010033c
49#define NV04_PFB_SCRAMBLE(i) (0x00100400 + 4 * (i)) 54#define NV04_PFB_SCRAMBLE(i) (0x00100400 + 4 * (i))
50#define NV40_PFB_TILE(i) (0x00100600 + (i*16)) 55#define NV40_PFB_TILE(i) (0x00100600 + (i*16))
@@ -74,17 +79,6 @@
74# define NV40_RAMHT_CONTEXT_ENGINE_SHIFT 20 79# define NV40_RAMHT_CONTEXT_ENGINE_SHIFT 20
75# define NV40_RAMHT_CONTEXT_INSTANCE_SHIFT 0 80# define NV40_RAMHT_CONTEXT_INSTANCE_SHIFT 0
76 81
77/* DMA object defines */
78#define NV_DMA_ACCESS_RW 0
79#define NV_DMA_ACCESS_RO 1
80#define NV_DMA_ACCESS_WO 2
81#define NV_DMA_TARGET_VIDMEM 0
82#define NV_DMA_TARGET_PCI 2
83#define NV_DMA_TARGET_AGP 3
84/* The following is not a real value used by the card, it's changed by
85 * nouveau_object_dma_create */
86#define NV_DMA_TARGET_PCI_NONLINEAR 8
87
88/* Some object classes we care about in the drm */ 82/* Some object classes we care about in the drm */
89#define NV_CLASS_DMA_FROM_MEMORY 0x00000002 83#define NV_CLASS_DMA_FROM_MEMORY 0x00000002
90#define NV_CLASS_DMA_TO_MEMORY 0x00000003 84#define NV_CLASS_DMA_TO_MEMORY 0x00000003
@@ -332,6 +326,7 @@
332#define NV04_PGRAPH_BSWIZZLE5 0x004006A0 326#define NV04_PGRAPH_BSWIZZLE5 0x004006A0
333#define NV03_PGRAPH_STATUS 0x004006B0 327#define NV03_PGRAPH_STATUS 0x004006B0
334#define NV04_PGRAPH_STATUS 0x00400700 328#define NV04_PGRAPH_STATUS 0x00400700
329# define NV40_PGRAPH_STATUS_SYNC_STALL 0x00004000
335#define NV04_PGRAPH_TRAPPED_ADDR 0x00400704 330#define NV04_PGRAPH_TRAPPED_ADDR 0x00400704
336#define NV04_PGRAPH_TRAPPED_DATA 0x00400708 331#define NV04_PGRAPH_TRAPPED_DATA 0x00400708
337#define NV04_PGRAPH_SURFACE 0x0040070C 332#define NV04_PGRAPH_SURFACE 0x0040070C
@@ -378,6 +373,7 @@
378#define NV20_PGRAPH_TLIMIT(i) (0x00400904 + (i*16)) 373#define NV20_PGRAPH_TLIMIT(i) (0x00400904 + (i*16))
379#define NV20_PGRAPH_TSIZE(i) (0x00400908 + (i*16)) 374#define NV20_PGRAPH_TSIZE(i) (0x00400908 + (i*16))
380#define NV20_PGRAPH_TSTATUS(i) (0x0040090C + (i*16)) 375#define NV20_PGRAPH_TSTATUS(i) (0x0040090C + (i*16))
376#define NV20_PGRAPH_ZCOMP(i) (0x00400980 + 4*(i))
381#define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16)) 377#define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16))
382#define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16)) 378#define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16))
383#define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16)) 379#define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16))
@@ -714,31 +710,32 @@
714#define NV50_PDISPLAY_INTR_1_CLK_UNK10 0x00000010 710#define NV50_PDISPLAY_INTR_1_CLK_UNK10 0x00000010
715#define NV50_PDISPLAY_INTR_1_CLK_UNK20 0x00000020 711#define NV50_PDISPLAY_INTR_1_CLK_UNK20 0x00000020
716#define NV50_PDISPLAY_INTR_1_CLK_UNK40 0x00000040 712#define NV50_PDISPLAY_INTR_1_CLK_UNK40 0x00000040
717#define NV50_PDISPLAY_INTR_EN 0x0061002c 713#define NV50_PDISPLAY_INTR_EN_0 0x00610028
718#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC 0x0000000c 714#define NV50_PDISPLAY_INTR_EN_1 0x0061002c
719#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(n) (1 << ((n) + 2)) 715#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC 0x0000000c
720#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_0 0x00000004 716#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(n) (1 << ((n) + 2))
721#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_1 0x00000008 717#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_0 0x00000004
722#define NV50_PDISPLAY_INTR_EN_CLK_UNK10 0x00000010 718#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_1 0x00000008
723#define NV50_PDISPLAY_INTR_EN_CLK_UNK20 0x00000020 719#define NV50_PDISPLAY_INTR_EN_1_CLK_UNK10 0x00000010
724#define NV50_PDISPLAY_INTR_EN_CLK_UNK40 0x00000040 720#define NV50_PDISPLAY_INTR_EN_1_CLK_UNK20 0x00000020
721#define NV50_PDISPLAY_INTR_EN_1_CLK_UNK40 0x00000040
725#define NV50_PDISPLAY_UNK30_CTRL 0x00610030 722#define NV50_PDISPLAY_UNK30_CTRL 0x00610030
726#define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK0 0x00000200 723#define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK0 0x00000200
727#define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK1 0x00000400 724#define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK1 0x00000400
728#define NV50_PDISPLAY_UNK30_CTRL_PENDING 0x80000000 725#define NV50_PDISPLAY_UNK30_CTRL_PENDING 0x80000000
729#define NV50_PDISPLAY_TRAPPED_ADDR 0x00610080 726#define NV50_PDISPLAY_TRAPPED_ADDR(i) ((i) * 0x08 + 0x00610080)
730#define NV50_PDISPLAY_TRAPPED_DATA 0x00610084 727#define NV50_PDISPLAY_TRAPPED_DATA(i) ((i) * 0x08 + 0x00610084)
731#define NV50_PDISPLAY_CHANNEL_STAT(i) ((i) * 0x10 + 0x00610200) 728#define NV50_PDISPLAY_EVO_CTRL(i) ((i) * 0x10 + 0x00610200)
732#define NV50_PDISPLAY_CHANNEL_STAT_DMA 0x00000010 729#define NV50_PDISPLAY_EVO_CTRL_DMA 0x00000010
733#define NV50_PDISPLAY_CHANNEL_STAT_DMA_DISABLED 0x00000000 730#define NV50_PDISPLAY_EVO_CTRL_DMA_DISABLED 0x00000000
734#define NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED 0x00000010 731#define NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED 0x00000010
735#define NV50_PDISPLAY_CHANNEL_DMA_CB(i) ((i) * 0x10 + 0x00610204) 732#define NV50_PDISPLAY_EVO_DMA_CB(i) ((i) * 0x10 + 0x00610204)
736#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION 0x00000002 733#define NV50_PDISPLAY_EVO_DMA_CB_LOCATION 0x00000002
737#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM 0x00000000 734#define NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM 0x00000000
738#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_SYSTEM 0x00000002 735#define NV50_PDISPLAY_EVO_DMA_CB_LOCATION_SYSTEM 0x00000002
739#define NV50_PDISPLAY_CHANNEL_DMA_CB_VALID 0x00000001 736#define NV50_PDISPLAY_EVO_DMA_CB_VALID 0x00000001
740#define NV50_PDISPLAY_CHANNEL_UNK2(i) ((i) * 0x10 + 0x00610208) 737#define NV50_PDISPLAY_EVO_UNK2(i) ((i) * 0x10 + 0x00610208)
741#define NV50_PDISPLAY_CHANNEL_UNK3(i) ((i) * 0x10 + 0x0061020c) 738#define NV50_PDISPLAY_EVO_HASH_TAG(i) ((i) * 0x10 + 0x0061020c)
742 739
743#define NV50_PDISPLAY_CURSOR 0x00610270 740#define NV50_PDISPLAY_CURSOR 0x00610270
744#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i) ((i) * 0x10 + 0x00610270) 741#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i) ((i) * 0x10 + 0x00610270)
@@ -746,15 +743,11 @@
746#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS 0x00030000 743#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS 0x00030000
747#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE 0x00010000 744#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE 0x00010000
748 745
749#define NV50_PDISPLAY_CTRL_STATE 0x00610300 746#define NV50_PDISPLAY_PIO_CTRL 0x00610300
750#define NV50_PDISPLAY_CTRL_STATE_PENDING 0x80000000 747#define NV50_PDISPLAY_PIO_CTRL_PENDING 0x80000000
751#define NV50_PDISPLAY_CTRL_STATE_METHOD 0x00001ffc 748#define NV50_PDISPLAY_PIO_CTRL_MTHD 0x00001ffc
752#define NV50_PDISPLAY_CTRL_STATE_ENABLE 0x00000001 749#define NV50_PDISPLAY_PIO_CTRL_ENABLED 0x00000001
753#define NV50_PDISPLAY_CTRL_VAL 0x00610304 750#define NV50_PDISPLAY_PIO_DATA 0x00610304
754#define NV50_PDISPLAY_UNK_380 0x00610380
755#define NV50_PDISPLAY_RAM_AMOUNT 0x00610384
756#define NV50_PDISPLAY_UNK_388 0x00610388
757#define NV50_PDISPLAY_UNK_38C 0x0061038c
758 751
759#define NV50_PDISPLAY_CRTC_P(i, r) ((i) * 0x540 + NV50_PDISPLAY_CRTC_##r) 752#define NV50_PDISPLAY_CRTC_P(i, r) ((i) * 0x540 + NV50_PDISPLAY_CRTC_##r)
760#define NV50_PDISPLAY_CRTC_C(i, r) (4 + (i) * 0x540 + NV50_PDISPLAY_CRTC_##r) 753#define NV50_PDISPLAY_CRTC_C(i, r) (4 + (i) * 0x540 + NV50_PDISPLAY_CRTC_##r)
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index d4ac97007038..9a250eb53098 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -14,7 +14,7 @@ struct nouveau_sgdma_be {
14 dma_addr_t *pages; 14 dma_addr_t *pages;
15 unsigned nr_pages; 15 unsigned nr_pages;
16 16
17 unsigned pte_start; 17 u64 offset;
18 bool bound; 18 bool bound;
19}; 19};
20 20
@@ -74,18 +74,6 @@ nouveau_sgdma_clear(struct ttm_backend *be)
74 } 74 }
75} 75}
76 76
77static inline unsigned
78nouveau_sgdma_pte(struct drm_device *dev, uint64_t offset)
79{
80 struct drm_nouveau_private *dev_priv = dev->dev_private;
81 unsigned pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
82
83 if (dev_priv->card_type < NV_50)
84 return pte + 2;
85
86 return pte << 1;
87}
88
89static int 77static int
90nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem) 78nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
91{ 79{
@@ -97,32 +85,17 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
97 85
98 NV_DEBUG(dev, "pg=0x%lx\n", mem->start); 86 NV_DEBUG(dev, "pg=0x%lx\n", mem->start);
99 87
100 pte = nouveau_sgdma_pte(nvbe->dev, mem->start << PAGE_SHIFT); 88 nvbe->offset = mem->start << PAGE_SHIFT;
101 nvbe->pte_start = pte; 89 pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
102 for (i = 0; i < nvbe->nr_pages; i++) { 90 for (i = 0; i < nvbe->nr_pages; i++) {
103 dma_addr_t dma_offset = nvbe->pages[i]; 91 dma_addr_t dma_offset = nvbe->pages[i];
104 uint32_t offset_l = lower_32_bits(dma_offset); 92 uint32_t offset_l = lower_32_bits(dma_offset);
105 uint32_t offset_h = upper_32_bits(dma_offset);
106
107 for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
108 if (dev_priv->card_type < NV_50) {
109 nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
110 pte += 1;
111 } else {
112 nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 0x21);
113 nv_wo32(gpuobj, (pte * 4) + 4, offset_h & 0xff);
114 pte += 2;
115 }
116 93
94 for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
95 nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
117 dma_offset += NV_CTXDMA_PAGE_SIZE; 96 dma_offset += NV_CTXDMA_PAGE_SIZE;
118 } 97 }
119 } 98 }
120 dev_priv->engine.instmem.flush(nvbe->dev);
121
122 if (dev_priv->card_type == NV_50) {
123 dev_priv->engine.fifo.tlb_flush(dev);
124 dev_priv->engine.graph.tlb_flush(dev);
125 }
126 99
127 nvbe->bound = true; 100 nvbe->bound = true;
128 return 0; 101 return 0;
@@ -142,28 +115,10 @@ nouveau_sgdma_unbind(struct ttm_backend *be)
142 if (!nvbe->bound) 115 if (!nvbe->bound)
143 return 0; 116 return 0;
144 117
145 pte = nvbe->pte_start; 118 pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
146 for (i = 0; i < nvbe->nr_pages; i++) { 119 for (i = 0; i < nvbe->nr_pages; i++) {
147 dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus; 120 for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
148 121 nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
149 for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
150 if (dev_priv->card_type < NV_50) {
151 nv_wo32(gpuobj, (pte * 4) + 0, dma_offset | 3);
152 pte += 1;
153 } else {
154 nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
155 nv_wo32(gpuobj, (pte * 4) + 4, 0x00000000);
156 pte += 2;
157 }
158
159 dma_offset += NV_CTXDMA_PAGE_SIZE;
160 }
161 }
162 dev_priv->engine.instmem.flush(nvbe->dev);
163
164 if (dev_priv->card_type == NV_50) {
165 dev_priv->engine.fifo.tlb_flush(dev);
166 dev_priv->engine.graph.tlb_flush(dev);
167 } 122 }
168 123
169 nvbe->bound = false; 124 nvbe->bound = false;
@@ -186,6 +141,35 @@ nouveau_sgdma_destroy(struct ttm_backend *be)
186 } 141 }
187} 142}
188 143
144static int
145nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
146{
147 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
148 struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
149
150 nvbe->offset = mem->start << PAGE_SHIFT;
151
152 nouveau_vm_map_sg(&dev_priv->gart_info.vma, nvbe->offset,
153 nvbe->nr_pages << PAGE_SHIFT, nvbe->pages);
154 nvbe->bound = true;
155 return 0;
156}
157
158static int
159nv50_sgdma_unbind(struct ttm_backend *be)
160{
161 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
162 struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
163
164 if (!nvbe->bound)
165 return 0;
166
167 nouveau_vm_unmap_at(&dev_priv->gart_info.vma, nvbe->offset,
168 nvbe->nr_pages << PAGE_SHIFT);
169 nvbe->bound = false;
170 return 0;
171}
172
189static struct ttm_backend_func nouveau_sgdma_backend = { 173static struct ttm_backend_func nouveau_sgdma_backend = {
190 .populate = nouveau_sgdma_populate, 174 .populate = nouveau_sgdma_populate,
191 .clear = nouveau_sgdma_clear, 175 .clear = nouveau_sgdma_clear,
@@ -194,23 +178,30 @@ static struct ttm_backend_func nouveau_sgdma_backend = {
194 .destroy = nouveau_sgdma_destroy 178 .destroy = nouveau_sgdma_destroy
195}; 179};
196 180
181static struct ttm_backend_func nv50_sgdma_backend = {
182 .populate = nouveau_sgdma_populate,
183 .clear = nouveau_sgdma_clear,
184 .bind = nv50_sgdma_bind,
185 .unbind = nv50_sgdma_unbind,
186 .destroy = nouveau_sgdma_destroy
187};
188
197struct ttm_backend * 189struct ttm_backend *
198nouveau_sgdma_init_ttm(struct drm_device *dev) 190nouveau_sgdma_init_ttm(struct drm_device *dev)
199{ 191{
200 struct drm_nouveau_private *dev_priv = dev->dev_private; 192 struct drm_nouveau_private *dev_priv = dev->dev_private;
201 struct nouveau_sgdma_be *nvbe; 193 struct nouveau_sgdma_be *nvbe;
202 194
203 if (!dev_priv->gart_info.sg_ctxdma)
204 return NULL;
205
206 nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL); 195 nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
207 if (!nvbe) 196 if (!nvbe)
208 return NULL; 197 return NULL;
209 198
210 nvbe->dev = dev; 199 nvbe->dev = dev;
211 200
212 nvbe->backend.func = &nouveau_sgdma_backend; 201 if (dev_priv->card_type < NV_50)
213 202 nvbe->backend.func = &nouveau_sgdma_backend;
203 else
204 nvbe->backend.func = &nv50_sgdma_backend;
214 return &nvbe->backend; 205 return &nvbe->backend;
215} 206}
216 207
@@ -218,7 +209,6 @@ int
218nouveau_sgdma_init(struct drm_device *dev) 209nouveau_sgdma_init(struct drm_device *dev)
219{ 210{
220 struct drm_nouveau_private *dev_priv = dev->dev_private; 211 struct drm_nouveau_private *dev_priv = dev->dev_private;
221 struct pci_dev *pdev = dev->pdev;
222 struct nouveau_gpuobj *gpuobj = NULL; 212 struct nouveau_gpuobj *gpuobj = NULL;
223 uint32_t aper_size, obj_size; 213 uint32_t aper_size, obj_size;
224 int i, ret; 214 int i, ret;
@@ -231,68 +221,40 @@ nouveau_sgdma_init(struct drm_device *dev)
231 221
232 obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4; 222 obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
233 obj_size += 8; /* ctxdma header */ 223 obj_size += 8; /* ctxdma header */
234 } else {
235 /* 1 entire VM page table */
236 aper_size = (512 * 1024 * 1024);
237 obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8;
238 }
239
240 ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
241 NVOBJ_FLAG_ZERO_ALLOC |
242 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
243 if (ret) {
244 NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
245 return ret;
246 }
247
248 dev_priv->gart_info.sg_dummy_page =
249 alloc_page(GFP_KERNEL|__GFP_DMA32|__GFP_ZERO);
250 if (!dev_priv->gart_info.sg_dummy_page) {
251 nouveau_gpuobj_ref(NULL, &gpuobj);
252 return -ENOMEM;
253 }
254 224
255 set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags); 225 ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
256 dev_priv->gart_info.sg_dummy_bus = 226 NVOBJ_FLAG_ZERO_ALLOC |
257 pci_map_page(pdev, dev_priv->gart_info.sg_dummy_page, 0, 227 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
258 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 228 if (ret) {
259 if (pci_dma_mapping_error(pdev, dev_priv->gart_info.sg_dummy_bus)) { 229 NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
260 nouveau_gpuobj_ref(NULL, &gpuobj); 230 return ret;
261 return -EFAULT; 231 }
262 }
263 232
264 if (dev_priv->card_type < NV_50) {
265 /* special case, allocated from global instmem heap so
266 * cinst is invalid, we use it on all channels though so
267 * cinst needs to be valid, set it the same as pinst
268 */
269 gpuobj->cinst = gpuobj->pinst;
270
271 /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
272 * confirmed to work on c51. Perhaps means NV_DMA_TARGET_PCIE
273 * on those cards? */
274 nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY | 233 nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
275 (1 << 12) /* PT present */ | 234 (1 << 12) /* PT present */ |
276 (0 << 13) /* PT *not* linear */ | 235 (0 << 13) /* PT *not* linear */ |
277 (NV_DMA_ACCESS_RW << 14) | 236 (0 << 14) /* RW */ |
278 (NV_DMA_TARGET_PCI << 16)); 237 (2 << 16) /* PCI */);
279 nv_wo32(gpuobj, 4, aper_size - 1); 238 nv_wo32(gpuobj, 4, aper_size - 1);
280 for (i = 2; i < 2 + (aper_size >> 12); i++) { 239 for (i = 2; i < 2 + (aper_size >> 12); i++)
281 nv_wo32(gpuobj, i * 4, 240 nv_wo32(gpuobj, i * 4, 0x00000000);
282 dev_priv->gart_info.sg_dummy_bus | 3); 241
283 } 242 dev_priv->gart_info.sg_ctxdma = gpuobj;
284 } else { 243 dev_priv->gart_info.aper_base = 0;
285 for (i = 0; i < obj_size; i += 8) { 244 dev_priv->gart_info.aper_size = aper_size;
286 nv_wo32(gpuobj, i + 0, 0x00000000); 245 } else
287 nv_wo32(gpuobj, i + 4, 0x00000000); 246 if (dev_priv->chan_vm) {
288 } 247 ret = nouveau_vm_get(dev_priv->chan_vm, 512 * 1024 * 1024,
248 12, NV_MEM_ACCESS_RW,
249 &dev_priv->gart_info.vma);
250 if (ret)
251 return ret;
252
253 dev_priv->gart_info.aper_base = dev_priv->gart_info.vma.offset;
254 dev_priv->gart_info.aper_size = 512 * 1024 * 1024;
289 } 255 }
290 dev_priv->engine.instmem.flush(dev);
291 256
292 dev_priv->gart_info.type = NOUVEAU_GART_SGDMA; 257 dev_priv->gart_info.type = NOUVEAU_GART_SGDMA;
293 dev_priv->gart_info.aper_base = 0;
294 dev_priv->gart_info.aper_size = aper_size;
295 dev_priv->gart_info.sg_ctxdma = gpuobj;
296 return 0; 258 return 0;
297} 259}
298 260
@@ -301,31 +263,19 @@ nouveau_sgdma_takedown(struct drm_device *dev)
301{ 263{
302 struct drm_nouveau_private *dev_priv = dev->dev_private; 264 struct drm_nouveau_private *dev_priv = dev->dev_private;
303 265
304 if (dev_priv->gart_info.sg_dummy_page) {
305 pci_unmap_page(dev->pdev, dev_priv->gart_info.sg_dummy_bus,
306 NV_CTXDMA_PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
307 unlock_page(dev_priv->gart_info.sg_dummy_page);
308 __free_page(dev_priv->gart_info.sg_dummy_page);
309 dev_priv->gart_info.sg_dummy_page = NULL;
310 dev_priv->gart_info.sg_dummy_bus = 0;
311 }
312
313 nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma); 266 nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
267 nouveau_vm_put(&dev_priv->gart_info.vma);
314} 268}
315 269
316int 270uint32_t
317nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page) 271nouveau_sgdma_get_physical(struct drm_device *dev, uint32_t offset)
318{ 272{
319 struct drm_nouveau_private *dev_priv = dev->dev_private; 273 struct drm_nouveau_private *dev_priv = dev->dev_private;
320 struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma; 274 struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
321 int pte; 275 int pte = (offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
322 276
323 pte = (offset >> NV_CTXDMA_PAGE_SHIFT) << 2; 277 BUG_ON(dev_priv->card_type >= NV_50);
324 if (dev_priv->card_type < NV_50) {
325 *page = nv_ro32(gpuobj, (pte + 8)) & ~NV_CTXDMA_PAGE_MASK;
326 return 0;
327 }
328 278
329 NV_ERROR(dev, "Unimplemented on NV50\n"); 279 return (nv_ro32(gpuobj, 4 * pte) & ~NV_CTXDMA_PAGE_MASK) |
330 return -EINVAL; 280 (offset & NV_CTXDMA_PAGE_MASK);
331} 281}
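nouveau_sgdma_get_physical() depends on the ctxdma layout built in nouveau_sgdma_init(): two header words occupy PTE slots 0 and 1, then one 32-bit PTE per 4KiB page, hence the "+ 2". A worked example, assuming NV_CTXDMA_PAGE_SHIFT is 12 and NV_CTXDMA_PAGE_MASK is 0xfff:

	/* offset = 0x12345 within the GART aperture */
	pte  = (0x12345 >> 12) + 2;			/* = 0x14, skipping the 8-byte header */
	phys = (nv_ro32(gpuobj, 4 * pte) & ~0xfff)	/* page address from the PTE */
	     | (0x12345 & 0xfff);			/* byte offset within the page */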
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 049f755567e5..a54fc431fe98 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -53,10 +53,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
53 engine->instmem.takedown = nv04_instmem_takedown; 53 engine->instmem.takedown = nv04_instmem_takedown;
54 engine->instmem.suspend = nv04_instmem_suspend; 54 engine->instmem.suspend = nv04_instmem_suspend;
55 engine->instmem.resume = nv04_instmem_resume; 55 engine->instmem.resume = nv04_instmem_resume;
56 engine->instmem.populate = nv04_instmem_populate; 56 engine->instmem.get = nv04_instmem_get;
57 engine->instmem.clear = nv04_instmem_clear; 57 engine->instmem.put = nv04_instmem_put;
58 engine->instmem.bind = nv04_instmem_bind; 58 engine->instmem.map = nv04_instmem_map;
59 engine->instmem.unbind = nv04_instmem_unbind; 59 engine->instmem.unmap = nv04_instmem_unmap;
60 engine->instmem.flush = nv04_instmem_flush; 60 engine->instmem.flush = nv04_instmem_flush;
61 engine->mc.init = nv04_mc_init; 61 engine->mc.init = nv04_mc_init;
62 engine->mc.takedown = nv04_mc_takedown; 62 engine->mc.takedown = nv04_mc_takedown;
@@ -65,7 +65,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
65 engine->timer.takedown = nv04_timer_takedown; 65 engine->timer.takedown = nv04_timer_takedown;
66 engine->fb.init = nv04_fb_init; 66 engine->fb.init = nv04_fb_init;
67 engine->fb.takedown = nv04_fb_takedown; 67 engine->fb.takedown = nv04_fb_takedown;
68 engine->graph.grclass = nv04_graph_grclass;
69 engine->graph.init = nv04_graph_init; 68 engine->graph.init = nv04_graph_init;
70 engine->graph.takedown = nv04_graph_takedown; 69 engine->graph.takedown = nv04_graph_takedown;
71 engine->graph.fifo_access = nv04_graph_fifo_access; 70 engine->graph.fifo_access = nv04_graph_fifo_access;
@@ -76,7 +75,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
76 engine->graph.unload_context = nv04_graph_unload_context; 75 engine->graph.unload_context = nv04_graph_unload_context;
77 engine->fifo.channels = 16; 76 engine->fifo.channels = 16;
78 engine->fifo.init = nv04_fifo_init; 77 engine->fifo.init = nv04_fifo_init;
79 engine->fifo.takedown = nouveau_stub_takedown; 78 engine->fifo.takedown = nv04_fifo_fini;
80 engine->fifo.disable = nv04_fifo_disable; 79 engine->fifo.disable = nv04_fifo_disable;
81 engine->fifo.enable = nv04_fifo_enable; 80 engine->fifo.enable = nv04_fifo_enable;
82 engine->fifo.reassign = nv04_fifo_reassign; 81 engine->fifo.reassign = nv04_fifo_reassign;
@@ -99,16 +98,20 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
99 engine->pm.clock_get = nv04_pm_clock_get; 98 engine->pm.clock_get = nv04_pm_clock_get;
100 engine->pm.clock_pre = nv04_pm_clock_pre; 99 engine->pm.clock_pre = nv04_pm_clock_pre;
101 engine->pm.clock_set = nv04_pm_clock_set; 100 engine->pm.clock_set = nv04_pm_clock_set;
101 engine->crypt.init = nouveau_stub_init;
102 engine->crypt.takedown = nouveau_stub_takedown;
103 engine->vram.init = nouveau_mem_detect;
104 engine->vram.flags_valid = nouveau_mem_flags_valid;
102 break; 105 break;
103 case 0x10: 106 case 0x10:
104 engine->instmem.init = nv04_instmem_init; 107 engine->instmem.init = nv04_instmem_init;
105 engine->instmem.takedown = nv04_instmem_takedown; 108 engine->instmem.takedown = nv04_instmem_takedown;
106 engine->instmem.suspend = nv04_instmem_suspend; 109 engine->instmem.suspend = nv04_instmem_suspend;
107 engine->instmem.resume = nv04_instmem_resume; 110 engine->instmem.resume = nv04_instmem_resume;
108 engine->instmem.populate = nv04_instmem_populate; 111 engine->instmem.get = nv04_instmem_get;
109 engine->instmem.clear = nv04_instmem_clear; 112 engine->instmem.put = nv04_instmem_put;
110 engine->instmem.bind = nv04_instmem_bind; 113 engine->instmem.map = nv04_instmem_map;
111 engine->instmem.unbind = nv04_instmem_unbind; 114 engine->instmem.unmap = nv04_instmem_unmap;
112 engine->instmem.flush = nv04_instmem_flush; 115 engine->instmem.flush = nv04_instmem_flush;
113 engine->mc.init = nv04_mc_init; 116 engine->mc.init = nv04_mc_init;
114 engine->mc.takedown = nv04_mc_takedown; 117 engine->mc.takedown = nv04_mc_takedown;
@@ -117,8 +120,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
117 engine->timer.takedown = nv04_timer_takedown; 120 engine->timer.takedown = nv04_timer_takedown;
118 engine->fb.init = nv10_fb_init; 121 engine->fb.init = nv10_fb_init;
119 engine->fb.takedown = nv10_fb_takedown; 122 engine->fb.takedown = nv10_fb_takedown;
120 engine->fb.set_region_tiling = nv10_fb_set_region_tiling; 123 engine->fb.init_tile_region = nv10_fb_init_tile_region;
121 engine->graph.grclass = nv10_graph_grclass; 124 engine->fb.set_tile_region = nv10_fb_set_tile_region;
125 engine->fb.free_tile_region = nv10_fb_free_tile_region;
122 engine->graph.init = nv10_graph_init; 126 engine->graph.init = nv10_graph_init;
123 engine->graph.takedown = nv10_graph_takedown; 127 engine->graph.takedown = nv10_graph_takedown;
124 engine->graph.channel = nv10_graph_channel; 128 engine->graph.channel = nv10_graph_channel;
@@ -127,17 +131,17 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
127 engine->graph.fifo_access = nv04_graph_fifo_access; 131 engine->graph.fifo_access = nv04_graph_fifo_access;
128 engine->graph.load_context = nv10_graph_load_context; 132 engine->graph.load_context = nv10_graph_load_context;
129 engine->graph.unload_context = nv10_graph_unload_context; 133 engine->graph.unload_context = nv10_graph_unload_context;
130 engine->graph.set_region_tiling = nv10_graph_set_region_tiling; 134 engine->graph.set_tile_region = nv10_graph_set_tile_region;
131 engine->fifo.channels = 32; 135 engine->fifo.channels = 32;
132 engine->fifo.init = nv10_fifo_init; 136 engine->fifo.init = nv10_fifo_init;
133 engine->fifo.takedown = nouveau_stub_takedown; 137 engine->fifo.takedown = nv04_fifo_fini;
134 engine->fifo.disable = nv04_fifo_disable; 138 engine->fifo.disable = nv04_fifo_disable;
135 engine->fifo.enable = nv04_fifo_enable; 139 engine->fifo.enable = nv04_fifo_enable;
136 engine->fifo.reassign = nv04_fifo_reassign; 140 engine->fifo.reassign = nv04_fifo_reassign;
137 engine->fifo.cache_pull = nv04_fifo_cache_pull; 141 engine->fifo.cache_pull = nv04_fifo_cache_pull;
138 engine->fifo.channel_id = nv10_fifo_channel_id; 142 engine->fifo.channel_id = nv10_fifo_channel_id;
139 engine->fifo.create_context = nv10_fifo_create_context; 143 engine->fifo.create_context = nv10_fifo_create_context;
140 engine->fifo.destroy_context = nv10_fifo_destroy_context; 144 engine->fifo.destroy_context = nv04_fifo_destroy_context;
141 engine->fifo.load_context = nv10_fifo_load_context; 145 engine->fifo.load_context = nv10_fifo_load_context;
142 engine->fifo.unload_context = nv10_fifo_unload_context; 146 engine->fifo.unload_context = nv10_fifo_unload_context;
143 engine->display.early_init = nv04_display_early_init; 147 engine->display.early_init = nv04_display_early_init;
@@ -153,16 +157,20 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
153 engine->pm.clock_get = nv04_pm_clock_get; 157 engine->pm.clock_get = nv04_pm_clock_get;
154 engine->pm.clock_pre = nv04_pm_clock_pre; 158 engine->pm.clock_pre = nv04_pm_clock_pre;
155 engine->pm.clock_set = nv04_pm_clock_set; 159 engine->pm.clock_set = nv04_pm_clock_set;
160 engine->crypt.init = nouveau_stub_init;
161 engine->crypt.takedown = nouveau_stub_takedown;
162 engine->vram.init = nouveau_mem_detect;
163 engine->vram.flags_valid = nouveau_mem_flags_valid;
156 break; 164 break;
157 case 0x20: 165 case 0x20:
158 engine->instmem.init = nv04_instmem_init; 166 engine->instmem.init = nv04_instmem_init;
159 engine->instmem.takedown = nv04_instmem_takedown; 167 engine->instmem.takedown = nv04_instmem_takedown;
160 engine->instmem.suspend = nv04_instmem_suspend; 168 engine->instmem.suspend = nv04_instmem_suspend;
161 engine->instmem.resume = nv04_instmem_resume; 169 engine->instmem.resume = nv04_instmem_resume;
162 engine->instmem.populate = nv04_instmem_populate; 170 engine->instmem.get = nv04_instmem_get;
163 engine->instmem.clear = nv04_instmem_clear; 171 engine->instmem.put = nv04_instmem_put;
164 engine->instmem.bind = nv04_instmem_bind; 172 engine->instmem.map = nv04_instmem_map;
165 engine->instmem.unbind = nv04_instmem_unbind; 173 engine->instmem.unmap = nv04_instmem_unmap;
166 engine->instmem.flush = nv04_instmem_flush; 174 engine->instmem.flush = nv04_instmem_flush;
167 engine->mc.init = nv04_mc_init; 175 engine->mc.init = nv04_mc_init;
168 engine->mc.takedown = nv04_mc_takedown; 176 engine->mc.takedown = nv04_mc_takedown;
@@ -171,8 +179,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
171 engine->timer.takedown = nv04_timer_takedown; 179 engine->timer.takedown = nv04_timer_takedown;
172 engine->fb.init = nv10_fb_init; 180 engine->fb.init = nv10_fb_init;
173 engine->fb.takedown = nv10_fb_takedown; 181 engine->fb.takedown = nv10_fb_takedown;
174 engine->fb.set_region_tiling = nv10_fb_set_region_tiling; 182 engine->fb.init_tile_region = nv10_fb_init_tile_region;
175 engine->graph.grclass = nv20_graph_grclass; 183 engine->fb.set_tile_region = nv10_fb_set_tile_region;
184 engine->fb.free_tile_region = nv10_fb_free_tile_region;
176 engine->graph.init = nv20_graph_init; 185 engine->graph.init = nv20_graph_init;
177 engine->graph.takedown = nv20_graph_takedown; 186 engine->graph.takedown = nv20_graph_takedown;
178 engine->graph.channel = nv10_graph_channel; 187 engine->graph.channel = nv10_graph_channel;
@@ -181,17 +190,17 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
181 engine->graph.fifo_access = nv04_graph_fifo_access; 190 engine->graph.fifo_access = nv04_graph_fifo_access;
182 engine->graph.load_context = nv20_graph_load_context; 191 engine->graph.load_context = nv20_graph_load_context;
183 engine->graph.unload_context = nv20_graph_unload_context; 192 engine->graph.unload_context = nv20_graph_unload_context;
184 engine->graph.set_region_tiling = nv20_graph_set_region_tiling; 193 engine->graph.set_tile_region = nv20_graph_set_tile_region;
185 engine->fifo.channels = 32; 194 engine->fifo.channels = 32;
186 engine->fifo.init = nv10_fifo_init; 195 engine->fifo.init = nv10_fifo_init;
187 engine->fifo.takedown = nouveau_stub_takedown; 196 engine->fifo.takedown = nv04_fifo_fini;
188 engine->fifo.disable = nv04_fifo_disable; 197 engine->fifo.disable = nv04_fifo_disable;
189 engine->fifo.enable = nv04_fifo_enable; 198 engine->fifo.enable = nv04_fifo_enable;
190 engine->fifo.reassign = nv04_fifo_reassign; 199 engine->fifo.reassign = nv04_fifo_reassign;
191 engine->fifo.cache_pull = nv04_fifo_cache_pull; 200 engine->fifo.cache_pull = nv04_fifo_cache_pull;
192 engine->fifo.channel_id = nv10_fifo_channel_id; 201 engine->fifo.channel_id = nv10_fifo_channel_id;
193 engine->fifo.create_context = nv10_fifo_create_context; 202 engine->fifo.create_context = nv10_fifo_create_context;
194 engine->fifo.destroy_context = nv10_fifo_destroy_context; 203 engine->fifo.destroy_context = nv04_fifo_destroy_context;
195 engine->fifo.load_context = nv10_fifo_load_context; 204 engine->fifo.load_context = nv10_fifo_load_context;
196 engine->fifo.unload_context = nv10_fifo_unload_context; 205 engine->fifo.unload_context = nv10_fifo_unload_context;
197 engine->display.early_init = nv04_display_early_init; 206 engine->display.early_init = nv04_display_early_init;
@@ -207,16 +216,20 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
207 engine->pm.clock_get = nv04_pm_clock_get; 216 engine->pm.clock_get = nv04_pm_clock_get;
208 engine->pm.clock_pre = nv04_pm_clock_pre; 217 engine->pm.clock_pre = nv04_pm_clock_pre;
209 engine->pm.clock_set = nv04_pm_clock_set; 218 engine->pm.clock_set = nv04_pm_clock_set;
219 engine->crypt.init = nouveau_stub_init;
220 engine->crypt.takedown = nouveau_stub_takedown;
221 engine->vram.init = nouveau_mem_detect;
222 engine->vram.flags_valid = nouveau_mem_flags_valid;
210 break; 223 break;
211 case 0x30: 224 case 0x30:
212 engine->instmem.init = nv04_instmem_init; 225 engine->instmem.init = nv04_instmem_init;
213 engine->instmem.takedown = nv04_instmem_takedown; 226 engine->instmem.takedown = nv04_instmem_takedown;
214 engine->instmem.suspend = nv04_instmem_suspend; 227 engine->instmem.suspend = nv04_instmem_suspend;
215 engine->instmem.resume = nv04_instmem_resume; 228 engine->instmem.resume = nv04_instmem_resume;
216 engine->instmem.populate = nv04_instmem_populate; 229 engine->instmem.get = nv04_instmem_get;
217 engine->instmem.clear = nv04_instmem_clear; 230 engine->instmem.put = nv04_instmem_put;
218 engine->instmem.bind = nv04_instmem_bind; 231 engine->instmem.map = nv04_instmem_map;
219 engine->instmem.unbind = nv04_instmem_unbind; 232 engine->instmem.unmap = nv04_instmem_unmap;
220 engine->instmem.flush = nv04_instmem_flush; 233 engine->instmem.flush = nv04_instmem_flush;
221 engine->mc.init = nv04_mc_init; 234 engine->mc.init = nv04_mc_init;
222 engine->mc.takedown = nv04_mc_takedown; 235 engine->mc.takedown = nv04_mc_takedown;
@@ -225,8 +238,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
225 engine->timer.takedown = nv04_timer_takedown; 238 engine->timer.takedown = nv04_timer_takedown;
226 engine->fb.init = nv30_fb_init; 239 engine->fb.init = nv30_fb_init;
227 engine->fb.takedown = nv30_fb_takedown; 240 engine->fb.takedown = nv30_fb_takedown;
228 engine->fb.set_region_tiling = nv10_fb_set_region_tiling; 241 engine->fb.init_tile_region = nv30_fb_init_tile_region;
229 engine->graph.grclass = nv30_graph_grclass; 242 engine->fb.set_tile_region = nv10_fb_set_tile_region;
243 engine->fb.free_tile_region = nv30_fb_free_tile_region;
230 engine->graph.init = nv30_graph_init; 244 engine->graph.init = nv30_graph_init;
231 engine->graph.takedown = nv20_graph_takedown; 245 engine->graph.takedown = nv20_graph_takedown;
232 engine->graph.fifo_access = nv04_graph_fifo_access; 246 engine->graph.fifo_access = nv04_graph_fifo_access;
@@ -235,17 +249,17 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
235 engine->graph.destroy_context = nv20_graph_destroy_context; 249 engine->graph.destroy_context = nv20_graph_destroy_context;
236 engine->graph.load_context = nv20_graph_load_context; 250 engine->graph.load_context = nv20_graph_load_context;
237 engine->graph.unload_context = nv20_graph_unload_context; 251 engine->graph.unload_context = nv20_graph_unload_context;
238 engine->graph.set_region_tiling = nv20_graph_set_region_tiling; 252 engine->graph.set_tile_region = nv20_graph_set_tile_region;
239 engine->fifo.channels = 32; 253 engine->fifo.channels = 32;
240 engine->fifo.init = nv10_fifo_init; 254 engine->fifo.init = nv10_fifo_init;
241 engine->fifo.takedown = nouveau_stub_takedown; 255 engine->fifo.takedown = nv04_fifo_fini;
242 engine->fifo.disable = nv04_fifo_disable; 256 engine->fifo.disable = nv04_fifo_disable;
243 engine->fifo.enable = nv04_fifo_enable; 257 engine->fifo.enable = nv04_fifo_enable;
244 engine->fifo.reassign = nv04_fifo_reassign; 258 engine->fifo.reassign = nv04_fifo_reassign;
245 engine->fifo.cache_pull = nv04_fifo_cache_pull; 259 engine->fifo.cache_pull = nv04_fifo_cache_pull;
246 engine->fifo.channel_id = nv10_fifo_channel_id; 260 engine->fifo.channel_id = nv10_fifo_channel_id;
247 engine->fifo.create_context = nv10_fifo_create_context; 261 engine->fifo.create_context = nv10_fifo_create_context;
248 engine->fifo.destroy_context = nv10_fifo_destroy_context; 262 engine->fifo.destroy_context = nv04_fifo_destroy_context;
249 engine->fifo.load_context = nv10_fifo_load_context; 263 engine->fifo.load_context = nv10_fifo_load_context;
250 engine->fifo.unload_context = nv10_fifo_unload_context; 264 engine->fifo.unload_context = nv10_fifo_unload_context;
251 engine->display.early_init = nv04_display_early_init; 265 engine->display.early_init = nv04_display_early_init;
@@ -263,6 +277,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
263 engine->pm.clock_set = nv04_pm_clock_set; 277 engine->pm.clock_set = nv04_pm_clock_set;
264 engine->pm.voltage_get = nouveau_voltage_gpio_get; 278 engine->pm.voltage_get = nouveau_voltage_gpio_get;
265 engine->pm.voltage_set = nouveau_voltage_gpio_set; 279 engine->pm.voltage_set = nouveau_voltage_gpio_set;
280 engine->crypt.init = nouveau_stub_init;
281 engine->crypt.takedown = nouveau_stub_takedown;
282 engine->vram.init = nouveau_mem_detect;
283 engine->vram.flags_valid = nouveau_mem_flags_valid;
266 break; 284 break;
267 case 0x40: 285 case 0x40:
268 case 0x60: 286 case 0x60:
@@ -270,10 +288,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
270 engine->instmem.takedown = nv04_instmem_takedown; 288 engine->instmem.takedown = nv04_instmem_takedown;
271 engine->instmem.suspend = nv04_instmem_suspend; 289 engine->instmem.suspend = nv04_instmem_suspend;
272 engine->instmem.resume = nv04_instmem_resume; 290 engine->instmem.resume = nv04_instmem_resume;
273 engine->instmem.populate = nv04_instmem_populate; 291 engine->instmem.get = nv04_instmem_get;
274 engine->instmem.clear = nv04_instmem_clear; 292 engine->instmem.put = nv04_instmem_put;
275 engine->instmem.bind = nv04_instmem_bind; 293 engine->instmem.map = nv04_instmem_map;
276 engine->instmem.unbind = nv04_instmem_unbind; 294 engine->instmem.unmap = nv04_instmem_unmap;
277 engine->instmem.flush = nv04_instmem_flush; 295 engine->instmem.flush = nv04_instmem_flush;
278 engine->mc.init = nv40_mc_init; 296 engine->mc.init = nv40_mc_init;
279 engine->mc.takedown = nv40_mc_takedown; 297 engine->mc.takedown = nv40_mc_takedown;
@@ -282,8 +300,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
282 engine->timer.takedown = nv04_timer_takedown; 300 engine->timer.takedown = nv04_timer_takedown;
283 engine->fb.init = nv40_fb_init; 301 engine->fb.init = nv40_fb_init;
284 engine->fb.takedown = nv40_fb_takedown; 302 engine->fb.takedown = nv40_fb_takedown;
285 engine->fb.set_region_tiling = nv40_fb_set_region_tiling; 303 engine->fb.init_tile_region = nv30_fb_init_tile_region;
286 engine->graph.grclass = nv40_graph_grclass; 304 engine->fb.set_tile_region = nv40_fb_set_tile_region;
305 engine->fb.free_tile_region = nv30_fb_free_tile_region;
287 engine->graph.init = nv40_graph_init; 306 engine->graph.init = nv40_graph_init;
288 engine->graph.takedown = nv40_graph_takedown; 307 engine->graph.takedown = nv40_graph_takedown;
289 engine->graph.fifo_access = nv04_graph_fifo_access; 308 engine->graph.fifo_access = nv04_graph_fifo_access;
@@ -292,17 +311,17 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
292 engine->graph.destroy_context = nv40_graph_destroy_context; 311 engine->graph.destroy_context = nv40_graph_destroy_context;
293 engine->graph.load_context = nv40_graph_load_context; 312 engine->graph.load_context = nv40_graph_load_context;
294 engine->graph.unload_context = nv40_graph_unload_context; 313 engine->graph.unload_context = nv40_graph_unload_context;
295 engine->graph.set_region_tiling = nv40_graph_set_region_tiling; 314 engine->graph.set_tile_region = nv40_graph_set_tile_region;
296 engine->fifo.channels = 32; 315 engine->fifo.channels = 32;
297 engine->fifo.init = nv40_fifo_init; 316 engine->fifo.init = nv40_fifo_init;
298 engine->fifo.takedown = nouveau_stub_takedown; 317 engine->fifo.takedown = nv04_fifo_fini;
299 engine->fifo.disable = nv04_fifo_disable; 318 engine->fifo.disable = nv04_fifo_disable;
300 engine->fifo.enable = nv04_fifo_enable; 319 engine->fifo.enable = nv04_fifo_enable;
301 engine->fifo.reassign = nv04_fifo_reassign; 320 engine->fifo.reassign = nv04_fifo_reassign;
302 engine->fifo.cache_pull = nv04_fifo_cache_pull; 321 engine->fifo.cache_pull = nv04_fifo_cache_pull;
303 engine->fifo.channel_id = nv10_fifo_channel_id; 322 engine->fifo.channel_id = nv10_fifo_channel_id;
304 engine->fifo.create_context = nv40_fifo_create_context; 323 engine->fifo.create_context = nv40_fifo_create_context;
305 engine->fifo.destroy_context = nv40_fifo_destroy_context; 324 engine->fifo.destroy_context = nv04_fifo_destroy_context;
306 engine->fifo.load_context = nv40_fifo_load_context; 325 engine->fifo.load_context = nv40_fifo_load_context;
307 engine->fifo.unload_context = nv40_fifo_unload_context; 326 engine->fifo.unload_context = nv40_fifo_unload_context;
308 engine->display.early_init = nv04_display_early_init; 327 engine->display.early_init = nv04_display_early_init;
@@ -321,6 +340,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
321 engine->pm.voltage_get = nouveau_voltage_gpio_get; 340 engine->pm.voltage_get = nouveau_voltage_gpio_get;
322 engine->pm.voltage_set = nouveau_voltage_gpio_set; 341 engine->pm.voltage_set = nouveau_voltage_gpio_set;
323 engine->pm.temp_get = nv40_temp_get; 342 engine->pm.temp_get = nv40_temp_get;
343 engine->crypt.init = nouveau_stub_init;
344 engine->crypt.takedown = nouveau_stub_takedown;
345 engine->vram.init = nouveau_mem_detect;
346 engine->vram.flags_valid = nouveau_mem_flags_valid;
324 break; 347 break;
325 case 0x50: 348 case 0x50:
326 case 0x80: /* gotta love NVIDIA's consistency.. */ 349 case 0x80: /* gotta love NVIDIA's consistency.. */
@@ -330,10 +353,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
330 engine->instmem.takedown = nv50_instmem_takedown; 353 engine->instmem.takedown = nv50_instmem_takedown;
331 engine->instmem.suspend = nv50_instmem_suspend; 354 engine->instmem.suspend = nv50_instmem_suspend;
332 engine->instmem.resume = nv50_instmem_resume; 355 engine->instmem.resume = nv50_instmem_resume;
333 engine->instmem.populate = nv50_instmem_populate; 356 engine->instmem.get = nv50_instmem_get;
334 engine->instmem.clear = nv50_instmem_clear; 357 engine->instmem.put = nv50_instmem_put;
335 engine->instmem.bind = nv50_instmem_bind; 358 engine->instmem.map = nv50_instmem_map;
336 engine->instmem.unbind = nv50_instmem_unbind; 359 engine->instmem.unmap = nv50_instmem_unmap;
337 if (dev_priv->chipset == 0x50) 360 if (dev_priv->chipset == 0x50)
338 engine->instmem.flush = nv50_instmem_flush; 361 engine->instmem.flush = nv50_instmem_flush;
339 else 362 else
@@ -345,7 +368,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
345 engine->timer.takedown = nv04_timer_takedown; 368 engine->timer.takedown = nv04_timer_takedown;
346 engine->fb.init = nv50_fb_init; 369 engine->fb.init = nv50_fb_init;
347 engine->fb.takedown = nv50_fb_takedown; 370 engine->fb.takedown = nv50_fb_takedown;
348 engine->graph.grclass = nv50_graph_grclass;
349 engine->graph.init = nv50_graph_init; 371 engine->graph.init = nv50_graph_init;
350 engine->graph.takedown = nv50_graph_takedown; 372 engine->graph.takedown = nv50_graph_takedown;
351 engine->graph.fifo_access = nv50_graph_fifo_access; 373 engine->graph.fifo_access = nv50_graph_fifo_access;
@@ -381,24 +403,32 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
381 engine->display.init = nv50_display_init; 403 engine->display.init = nv50_display_init;
382 engine->display.destroy = nv50_display_destroy; 404 engine->display.destroy = nv50_display_destroy;
383 engine->gpio.init = nv50_gpio_init; 405 engine->gpio.init = nv50_gpio_init;
384 engine->gpio.takedown = nouveau_stub_takedown; 406 engine->gpio.takedown = nv50_gpio_fini;
385 engine->gpio.get = nv50_gpio_get; 407 engine->gpio.get = nv50_gpio_get;
386 engine->gpio.set = nv50_gpio_set; 408 engine->gpio.set = nv50_gpio_set;
409 engine->gpio.irq_register = nv50_gpio_irq_register;
410 engine->gpio.irq_unregister = nv50_gpio_irq_unregister;
387 engine->gpio.irq_enable = nv50_gpio_irq_enable; 411 engine->gpio.irq_enable = nv50_gpio_irq_enable;
388 switch (dev_priv->chipset) { 412 switch (dev_priv->chipset) {
389 case 0xa3: 413 case 0x84:
390 case 0xa5: 414 case 0x86:
391 case 0xa8: 415 case 0x92:
392 case 0xaf: 416 case 0x94:
393 engine->pm.clock_get = nva3_pm_clock_get; 417 case 0x96:
394 engine->pm.clock_pre = nva3_pm_clock_pre; 418 case 0x98:
395 engine->pm.clock_set = nva3_pm_clock_set; 419 case 0xa0:
396 break; 420 case 0xaa:
397 default: 421 case 0xac:
422 case 0x50:
398 engine->pm.clock_get = nv50_pm_clock_get; 423 engine->pm.clock_get = nv50_pm_clock_get;
399 engine->pm.clock_pre = nv50_pm_clock_pre; 424 engine->pm.clock_pre = nv50_pm_clock_pre;
400 engine->pm.clock_set = nv50_pm_clock_set; 425 engine->pm.clock_set = nv50_pm_clock_set;
401 break; 426 break;
427 default:
428 engine->pm.clock_get = nva3_pm_clock_get;
429 engine->pm.clock_pre = nva3_pm_clock_pre;
430 engine->pm.clock_set = nva3_pm_clock_set;
431 break;
402 } 432 }
403 engine->pm.voltage_get = nouveau_voltage_gpio_get; 433 engine->pm.voltage_get = nouveau_voltage_gpio_get;
404 engine->pm.voltage_set = nouveau_voltage_gpio_set; 434 engine->pm.voltage_set = nouveau_voltage_gpio_set;
@@ -406,17 +436,39 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
406 engine->pm.temp_get = nv84_temp_get; 436 engine->pm.temp_get = nv84_temp_get;
407 else 437 else
408 engine->pm.temp_get = nv40_temp_get; 438 engine->pm.temp_get = nv40_temp_get;
439 switch (dev_priv->chipset) {
440 case 0x84:
441 case 0x86:
442 case 0x92:
443 case 0x94:
444 case 0x96:
445 case 0xa0:
446 engine->crypt.init = nv84_crypt_init;
447 engine->crypt.takedown = nv84_crypt_fini;
448 engine->crypt.create_context = nv84_crypt_create_context;
449 engine->crypt.destroy_context = nv84_crypt_destroy_context;
450 engine->crypt.tlb_flush = nv84_crypt_tlb_flush;
451 break;
452 default:
453 engine->crypt.init = nouveau_stub_init;
454 engine->crypt.takedown = nouveau_stub_takedown;
455 break;
456 }
457 engine->vram.init = nv50_vram_init;
458 engine->vram.get = nv50_vram_new;
459 engine->vram.put = nv50_vram_del;
460 engine->vram.flags_valid = nv50_vram_flags_valid;
409 break; 461 break;
410 case 0xC0: 462 case 0xC0:
411 engine->instmem.init = nvc0_instmem_init; 463 engine->instmem.init = nvc0_instmem_init;
412 engine->instmem.takedown = nvc0_instmem_takedown; 464 engine->instmem.takedown = nvc0_instmem_takedown;
413 engine->instmem.suspend = nvc0_instmem_suspend; 465 engine->instmem.suspend = nvc0_instmem_suspend;
414 engine->instmem.resume = nvc0_instmem_resume; 466 engine->instmem.resume = nvc0_instmem_resume;
415 engine->instmem.populate = nvc0_instmem_populate; 467 engine->instmem.get = nv50_instmem_get;
416 engine->instmem.clear = nvc0_instmem_clear; 468 engine->instmem.put = nv50_instmem_put;
417 engine->instmem.bind = nvc0_instmem_bind; 469 engine->instmem.map = nv50_instmem_map;
418 engine->instmem.unbind = nvc0_instmem_unbind; 470 engine->instmem.unmap = nv50_instmem_unmap;
419 engine->instmem.flush = nvc0_instmem_flush; 471 engine->instmem.flush = nv84_instmem_flush;
420 engine->mc.init = nv50_mc_init; 472 engine->mc.init = nv50_mc_init;
421 engine->mc.takedown = nv50_mc_takedown; 473 engine->mc.takedown = nv50_mc_takedown;
422 engine->timer.init = nv04_timer_init; 474 engine->timer.init = nv04_timer_init;
@@ -424,7 +476,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
424 engine->timer.takedown = nv04_timer_takedown; 476 engine->timer.takedown = nv04_timer_takedown;
425 engine->fb.init = nvc0_fb_init; 477 engine->fb.init = nvc0_fb_init;
426 engine->fb.takedown = nvc0_fb_takedown; 478 engine->fb.takedown = nvc0_fb_takedown;
427 engine->graph.grclass = NULL; //nvc0_graph_grclass;
428 engine->graph.init = nvc0_graph_init; 479 engine->graph.init = nvc0_graph_init;
429 engine->graph.takedown = nvc0_graph_takedown; 480 engine->graph.takedown = nvc0_graph_takedown;
430 engine->graph.fifo_access = nvc0_graph_fifo_access; 481 engine->graph.fifo_access = nvc0_graph_fifo_access;
@@ -453,7 +504,15 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
453 engine->gpio.takedown = nouveau_stub_takedown; 504 engine->gpio.takedown = nouveau_stub_takedown;
454 engine->gpio.get = nv50_gpio_get; 505 engine->gpio.get = nv50_gpio_get;
455 engine->gpio.set = nv50_gpio_set; 506 engine->gpio.set = nv50_gpio_set;
507 engine->gpio.irq_register = nv50_gpio_irq_register;
508 engine->gpio.irq_unregister = nv50_gpio_irq_unregister;
456 engine->gpio.irq_enable = nv50_gpio_irq_enable; 509 engine->gpio.irq_enable = nv50_gpio_irq_enable;
510 engine->crypt.init = nouveau_stub_init;
511 engine->crypt.takedown = nouveau_stub_takedown;
512 engine->vram.init = nvc0_vram_init;
513 engine->vram.get = nvc0_vram_new;
514 engine->vram.put = nv50_vram_del;
515 engine->vram.flags_valid = nvc0_vram_flags_valid;
457 break; 516 break;
458 default: 517 default:
459 NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset); 518 NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset);
@@ -493,9 +552,13 @@ nouveau_card_init_channel(struct drm_device *dev)
493 if (ret) 552 if (ret)
494 return ret; 553 return ret;
495 554
555 /* no dma objects on fermi... */
556 if (dev_priv->card_type >= NV_C0)
557 goto out_done;
558
496 ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY, 559 ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
497 0, dev_priv->vram_size, 560 0, dev_priv->vram_size,
498 NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM, 561 NV_MEM_ACCESS_RW, NV_MEM_TARGET_VRAM,
499 &gpuobj); 562 &gpuobj);
500 if (ret) 563 if (ret)
501 goto out_err; 564 goto out_err;
@@ -505,9 +568,10 @@ nouveau_card_init_channel(struct drm_device *dev)
505 if (ret) 568 if (ret)
506 goto out_err; 569 goto out_err;
507 570
508 ret = nouveau_gpuobj_gart_dma_new(dev_priv->channel, 0, 571 ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
509 dev_priv->gart_info.aper_size, 572 0, dev_priv->gart_info.aper_size,
510 NV_DMA_ACCESS_RW, &gpuobj, NULL); 573 NV_MEM_ACCESS_RW, NV_MEM_TARGET_GART,
574 &gpuobj);
511 if (ret) 575 if (ret)
512 goto out_err; 576 goto out_err;
513 577
@@ -516,11 +580,12 @@ nouveau_card_init_channel(struct drm_device *dev)
516 if (ret) 580 if (ret)
517 goto out_err; 581 goto out_err;
518 582
583out_done:
584 mutex_unlock(&dev_priv->channel->mutex);
519 return 0; 585 return 0;
520 586
521out_err: 587out_err:
522 nouveau_channel_free(dev_priv->channel); 588 nouveau_channel_put(&dev_priv->channel);
523 dev_priv->channel = NULL;
524 return ret; 589 return ret;
525} 590}
526 591
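The error path above switches from nouveau_channel_free() plus a manual "dev_priv->channel = NULL" to nouveau_channel_put(&dev_priv->channel), which takes the pointer's address so it can both drop a reference and clear the caller's pointer. A minimal userspace model of that put-and-clear convention (the struct and free logic are illustrative, not the driver's):

    #include <stdlib.h>

    /* Illustrative model only; the driver's nouveau_channel carries far
     * more state and locking than shown here. */
    struct channel {
            int refcount;
    };

    static void channel_put(struct channel **pchan)
    {
            struct channel *chan = *pchan;

            *pchan = NULL;                  /* caller's pointer is cleared */
            if (chan && --chan->refcount == 0)
                    free(chan);
    }
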
@@ -531,15 +596,25 @@ static void nouveau_switcheroo_set_state(struct pci_dev *pdev,
531 pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; 596 pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
532 if (state == VGA_SWITCHEROO_ON) { 597 if (state == VGA_SWITCHEROO_ON) {
533 printk(KERN_ERR "VGA switcheroo: switched nouveau on\n"); 598 printk(KERN_ERR "VGA switcheroo: switched nouveau on\n");
599 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
534 nouveau_pci_resume(pdev); 600 nouveau_pci_resume(pdev);
535 drm_kms_helper_poll_enable(dev); 601 drm_kms_helper_poll_enable(dev);
602 dev->switch_power_state = DRM_SWITCH_POWER_ON;
536 } else { 603 } else {
537 printk(KERN_ERR "VGA switcheroo: switched nouveau off\n"); 604 printk(KERN_ERR "VGA switcheroo: switched nouveau off\n");
605 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
538 drm_kms_helper_poll_disable(dev); 606 drm_kms_helper_poll_disable(dev);
539 nouveau_pci_suspend(pdev, pmm); 607 nouveau_pci_suspend(pdev, pmm);
608 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
540 } 609 }
541} 610}
542 611
612static void nouveau_switcheroo_reprobe(struct pci_dev *pdev)
613{
614 struct drm_device *dev = pci_get_drvdata(pdev);
615 nouveau_fbcon_output_poll_changed(dev);
616}
617
543static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev) 618static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
544{ 619{
545 struct drm_device *dev = pci_get_drvdata(pdev); 620 struct drm_device *dev = pci_get_drvdata(pdev);
@@ -560,6 +635,7 @@ nouveau_card_init(struct drm_device *dev)
560 635
561 vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode); 636 vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
562 vga_switcheroo_register_client(dev->pdev, nouveau_switcheroo_set_state, 637 vga_switcheroo_register_client(dev->pdev, nouveau_switcheroo_set_state,
638 nouveau_switcheroo_reprobe,
563 nouveau_switcheroo_can_switch); 639 nouveau_switcheroo_can_switch);
564 640
565 /* Initialise internal driver API hooks */ 641 /* Initialise internal driver API hooks */
@@ -567,6 +643,8 @@ nouveau_card_init(struct drm_device *dev)
567 if (ret) 643 if (ret)
568 goto out; 644 goto out;
569 engine = &dev_priv->engine; 645 engine = &dev_priv->engine;
646 spin_lock_init(&dev_priv->channels.lock);
647 spin_lock_init(&dev_priv->tile.lock);
570 spin_lock_init(&dev_priv->context_switch_lock); 648 spin_lock_init(&dev_priv->context_switch_lock);
571 649
572 /* Make the CRTCs and I2C buses accessible */ 650 /* Make the CRTCs and I2C buses accessible */
@@ -625,26 +703,28 @@ nouveau_card_init(struct drm_device *dev)
625 if (ret) 703 if (ret)
626 goto out_fb; 704 goto out_fb;
627 705
706 /* PCRYPT */
707 ret = engine->crypt.init(dev);
708 if (ret)
709 goto out_graph;
710
628 /* PFIFO */ 711 /* PFIFO */
629 ret = engine->fifo.init(dev); 712 ret = engine->fifo.init(dev);
630 if (ret) 713 if (ret)
631 goto out_graph; 714 goto out_crypt;
632 } 715 }
633 716
634 ret = engine->display.create(dev); 717 ret = engine->display.create(dev);
635 if (ret) 718 if (ret)
636 goto out_fifo; 719 goto out_fifo;
637 720
638 /* this call irq_preinstall, register irq handler and 721 ret = drm_vblank_init(dev, nv_two_heads(dev) ? 2 : 1);
639 * call irq_postinstall
640 */
641 ret = drm_irq_install(dev);
642 if (ret) 722 if (ret)
643 goto out_display; 723 goto out_vblank;
644 724
645 ret = drm_vblank_init(dev, 0); 725 ret = nouveau_irq_init(dev);
646 if (ret) 726 if (ret)
647 goto out_irq; 727 goto out_vblank;
648 728
649 /* what about PVIDEO/PCRTC/PRAMDAC etc? */ 729 /* what about PVIDEO/PCRTC/PRAMDAC etc? */
650 730
@@ -669,12 +749,16 @@ nouveau_card_init(struct drm_device *dev)
669out_fence: 749out_fence:
670 nouveau_fence_fini(dev); 750 nouveau_fence_fini(dev);
671out_irq: 751out_irq:
672 drm_irq_uninstall(dev); 752 nouveau_irq_fini(dev);
673out_display: 753out_vblank:
754 drm_vblank_cleanup(dev);
674 engine->display.destroy(dev); 755 engine->display.destroy(dev);
675out_fifo: 756out_fifo:
676 if (!nouveau_noaccel) 757 if (!nouveau_noaccel)
677 engine->fifo.takedown(dev); 758 engine->fifo.takedown(dev);
759out_crypt:
760 if (!nouveau_noaccel)
761 engine->crypt.takedown(dev);
678out_graph: 762out_graph:
679 if (!nouveau_noaccel) 763 if (!nouveau_noaccel)
680 engine->graph.takedown(dev); 764 engine->graph.takedown(dev);
@@ -713,12 +797,12 @@ static void nouveau_card_takedown(struct drm_device *dev)
713 797
714 if (!engine->graph.accel_blocked) { 798 if (!engine->graph.accel_blocked) {
715 nouveau_fence_fini(dev); 799 nouveau_fence_fini(dev);
716 nouveau_channel_free(dev_priv->channel); 800 nouveau_channel_put_unlocked(&dev_priv->channel);
717 dev_priv->channel = NULL;
718 } 801 }
719 802
720 if (!nouveau_noaccel) { 803 if (!nouveau_noaccel) {
721 engine->fifo.takedown(dev); 804 engine->fifo.takedown(dev);
805 engine->crypt.takedown(dev);
722 engine->graph.takedown(dev); 806 engine->graph.takedown(dev);
723 } 807 }
724 engine->fb.takedown(dev); 808 engine->fb.takedown(dev);
@@ -737,7 +821,8 @@ static void nouveau_card_takedown(struct drm_device *dev)
737 nouveau_gpuobj_takedown(dev); 821 nouveau_gpuobj_takedown(dev);
738 nouveau_mem_vram_fini(dev); 822 nouveau_mem_vram_fini(dev);
739 823
740 drm_irq_uninstall(dev); 824 nouveau_irq_fini(dev);
825 drm_vblank_cleanup(dev);
741 826
742 nouveau_pm_fini(dev); 827 nouveau_pm_fini(dev);
743 nouveau_bios_takedown(dev); 828 nouveau_bios_takedown(dev);
@@ -980,6 +1065,7 @@ err_out:
980 1065
981void nouveau_lastclose(struct drm_device *dev) 1066void nouveau_lastclose(struct drm_device *dev)
982{ 1067{
1068 vga_switcheroo_process_delayed_switch();
983} 1069}
984 1070
985int nouveau_unload(struct drm_device *dev) 1071int nouveau_unload(struct drm_device *dev)
@@ -1024,21 +1110,6 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
1024 else 1110 else
1025 getparam->value = NV_PCI; 1111 getparam->value = NV_PCI;
1026 break; 1112 break;
1027 case NOUVEAU_GETPARAM_FB_PHYSICAL:
1028 getparam->value = dev_priv->fb_phys;
1029 break;
1030 case NOUVEAU_GETPARAM_AGP_PHYSICAL:
1031 getparam->value = dev_priv->gart_info.aper_base;
1032 break;
1033 case NOUVEAU_GETPARAM_PCI_PHYSICAL:
1034 if (dev->sg) {
1035 getparam->value = (unsigned long)dev->sg->virtual;
1036 } else {
1037 NV_ERROR(dev, "Requested PCIGART address, "
1038 "while no PCIGART was created\n");
1039 return -EINVAL;
1040 }
1041 break;
1042 case NOUVEAU_GETPARAM_FB_SIZE: 1113 case NOUVEAU_GETPARAM_FB_SIZE:
1043 getparam->value = dev_priv->fb_available_size; 1114 getparam->value = dev_priv->fb_available_size;
1044 break; 1115 break;
@@ -1046,7 +1117,7 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
1046 getparam->value = dev_priv->gart_info.aper_size; 1117 getparam->value = dev_priv->gart_info.aper_size;
1047 break; 1118 break;
1048 case NOUVEAU_GETPARAM_VM_VRAM_BASE: 1119 case NOUVEAU_GETPARAM_VM_VRAM_BASE:
1049 getparam->value = dev_priv->vm_vram_base; 1120 getparam->value = 0; /* deprecated */
1050 break; 1121 break;
1051 case NOUVEAU_GETPARAM_PTIMER_TIME: 1122 case NOUVEAU_GETPARAM_PTIMER_TIME:
1052 getparam->value = dev_priv->engine.timer.read(dev); 1123 getparam->value = dev_priv->engine.timer.read(dev);
@@ -1054,6 +1125,9 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
1054 case NOUVEAU_GETPARAM_HAS_BO_USAGE: 1125 case NOUVEAU_GETPARAM_HAS_BO_USAGE:
1055 getparam->value = 1; 1126 getparam->value = 1;
1056 break; 1127 break;
1128 case NOUVEAU_GETPARAM_HAS_PAGEFLIP:
1129 getparam->value = (dev_priv->card_type < NV_50);
1130 break;
1057 case NOUVEAU_GETPARAM_GRAPH_UNITS: 1131 case NOUVEAU_GETPARAM_GRAPH_UNITS:
1058 /* NV40 and NV50 versions are quite different, but register 1132 /* NV40 and NV50 versions are quite different, but register
1059 * address is the same. User is supposed to know the card 1133 * address is the same. User is supposed to know the card
@@ -1087,8 +1161,9 @@ nouveau_ioctl_setparam(struct drm_device *dev, void *data,
1087} 1161}
1088 1162
1089/* Wait until (value(reg) & mask) == val, up until timeout has hit */ 1163/* Wait until (value(reg) & mask) == val, up until timeout has hit */
1090bool nouveau_wait_until(struct drm_device *dev, uint64_t timeout, 1164bool
1091 uint32_t reg, uint32_t mask, uint32_t val) 1165nouveau_wait_eq(struct drm_device *dev, uint64_t timeout,
1166 uint32_t reg, uint32_t mask, uint32_t val)
1092{ 1167{
1093 struct drm_nouveau_private *dev_priv = dev->dev_private; 1168 struct drm_nouveau_private *dev_priv = dev->dev_private;
1094 struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; 1169 struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
@@ -1102,10 +1177,33 @@ bool nouveau_wait_until(struct drm_device *dev, uint64_t timeout,
1102 return false; 1177 return false;
1103} 1178}
1104 1179
1180/* Wait until (value(reg) & mask) != val, up until timeout has hit */
1181bool
1182nouveau_wait_ne(struct drm_device *dev, uint64_t timeout,
1183 uint32_t reg, uint32_t mask, uint32_t val)
1184{
1185 struct drm_nouveau_private *dev_priv = dev->dev_private;
1186 struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
1187 uint64_t start = ptimer->read(dev);
1188
1189 do {
1190 if ((nv_rd32(dev, reg) & mask) != val)
1191 return true;
1192 } while (ptimer->read(dev) - start < timeout);
1193
1194 return false;
1195}
1196
1105/* Waits for PGRAPH to go completely idle */ 1197/* Waits for PGRAPH to go completely idle */
1106bool nouveau_wait_for_idle(struct drm_device *dev) 1198bool nouveau_wait_for_idle(struct drm_device *dev)
1107{ 1199{
1108 if (!nv_wait(dev, NV04_PGRAPH_STATUS, 0xffffffff, 0x00000000)) { 1200 struct drm_nouveau_private *dev_priv = dev->dev_private;
1201 uint32_t mask = ~0;
1202
1203 if (dev_priv->card_type == NV_40)
1204 mask &= ~NV40_PGRAPH_STATUS_SYNC_STALL;
1205
1206 if (!nv_wait(dev, NV04_PGRAPH_STATUS, mask, 0)) {
1109 NV_ERROR(dev, "PGRAPH idle timed out with status 0x%08x\n", 1207 NV_ERROR(dev, "PGRAPH idle timed out with status 0x%08x\n",
1110 nv_rd32(dev, NV04_PGRAPH_STATUS)); 1208 nv_rd32(dev, NV04_PGRAPH_STATUS));
1111 return false; 1209 return false;
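nouveau_wait_until() is renamed to nouveau_wait_eq() and gains a nouveau_wait_ne() counterpart; both poll a masked register against PTIMER until a timeout expires. A standalone sketch of the shared shape, with read_reg() and now_ns() as hypothetical stand-ins for nv_rd32() and ptimer->read():

    #include <stdbool.h>
    #include <stdint.h>

    extern uint32_t read_reg(uint32_t reg);  /* stand-in for nv_rd32() */
    extern uint64_t now_ns(void);            /* stand-in for ptimer->read() */

    /* Poll until (read_reg(reg) & mask) compares to val as requested, or
     * until timeout nanoseconds elapse; false means the wait timed out. */
    static bool wait_cmp(uint64_t timeout, uint32_t reg, uint32_t mask,
                         uint32_t val, bool want_equal)
    {
            uint64_t start = now_ns();

            do {
                    bool eq = (read_reg(reg) & mask) == val;
                    if (eq == want_equal)
                            return true;
            } while (now_ns() - start < timeout);

            return false;
    }

nouveau_wait_eq() corresponds to want_equal = true, nouveau_wait_ne() to false.
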
diff --git a/drivers/gpu/drm/nouveau/nouveau_util.c b/drivers/gpu/drm/nouveau/nouveau_util.c
new file mode 100644
index 000000000000..fbe0fb13bc1e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_util.c
@@ -0,0 +1,69 @@
1/*
2 * Copyright (C) 2010 Nouveau Project
3 *
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sublicense, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial
16 * portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 */
27
28#include <linux/ratelimit.h>
29
30#include "nouveau_util.h"
31
32static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20);
33
34void
35nouveau_bitfield_print(const struct nouveau_bitfield *bf, u32 value)
36{
37 while (bf->name) {
38 if (value & bf->mask) {
39 printk(" %s", bf->name);
40 value &= ~bf->mask;
41 }
42
43 bf++;
44 }
45
46 if (value)
47 printk(" (unknown bits 0x%08x)", value);
48}
49
50void
51nouveau_enum_print(const struct nouveau_enum *en, u32 value)
52{
53 while (en->name) {
54 if (value == en->value) {
55 printk("%s", en->name);
56 return;
57 }
58
59 en++;
60 }
61
62 printk("(unknown enum 0x%08x)", value);
63}
64
65int
66nouveau_ratelimit(void)
67{
68 return __ratelimit(&nouveau_ratelimit_state);
69}
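nouveau_bitfield_print() and nouveau_enum_print() both walk a table terminated by an empty sentinel entry. A usage sketch with a hypothetical table (real tables live beside the interrupt handlers that use them):

    #include <linux/kernel.h>
    #include "nouveau_util.h"

    /* Hypothetical status bits, for illustration only. */
    static const struct nouveau_bitfield demo_intr[] = {
            { 0x00000001, "VBLANK" },
            { 0x00000010, "DMA_ERROR" },
            {}                              /* NULL name ends the walk */
    };

    static void demo_print_status(u32 stat)
    {
            printk(KERN_ERR "intr:");
            /* stat = 0x111 prints " VBLANK DMA_ERROR (unknown bits 0x00000100)" */
            nouveau_bitfield_print(demo_intr, stat);
            printk("\n");
    }
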
diff --git a/drivers/gpu/drm/nouveau/nouveau_util.h b/drivers/gpu/drm/nouveau/nouveau_util.h
new file mode 100644
index 000000000000..d9ceaea26f4b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_util.h
@@ -0,0 +1,45 @@
1/*
2 * Copyright (C) 2010 Nouveau Project
3 *
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sublicense, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial
16 * portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 */
27
28#ifndef __NOUVEAU_UTIL_H__
29#define __NOUVEAU_UTIL_H__
30
31struct nouveau_bitfield {
32 u32 mask;
33 const char *name;
34};
35
36struct nouveau_enum {
37 u32 value;
38 const char *name;
39};
40
41void nouveau_bitfield_print(const struct nouveau_bitfield *, u32 value);
42void nouveau_enum_print(const struct nouveau_enum *, u32 value);
43int nouveau_ratelimit(void);
44
45#endif
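nouveau_ratelimit() gates callers on the single shared state defined in nouveau_util.c, allowing bursts of up to 20 messages every 3 seconds. A hedged sketch of the intended use in a noisy error path (the message and context are illustrative):

    static void report_bad_status(struct drm_device *dev, u32 status)
    {
            /* __ratelimit() returns nonzero while under the limit */
            if (nouveau_ratelimit())
                    NV_ERROR(dev, "unexpected status 0x%08x\n", status);
    }
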
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/nouveau_vm.c
new file mode 100644
index 000000000000..97d82aedf86b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.c
@@ -0,0 +1,439 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_mm.h"
28#include "nouveau_vm.h"
29
30void
31nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
32{
33 struct nouveau_vm *vm = vma->vm;
34 struct nouveau_mm_node *r;
35 int big = vma->node->type != vm->spg_shift;
36 u32 offset = vma->node->offset + (delta >> 12);
37 u32 bits = vma->node->type - 12;
38 u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
39 u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
40 u32 max = 1 << (vm->pgt_bits - bits);
41 u32 end, len;
42
43 list_for_each_entry(r, &vram->regions, rl_entry) {
44 u64 phys = (u64)r->offset << 12;
45 u32 num = r->length >> bits;
46
47 while (num) {
48 struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
49
50 end = (pte + num);
51 if (unlikely(end >= max))
52 end = max;
53 len = end - pte;
54
55 vm->map(vma, pgt, vram, pte, len, phys);
56
57 num -= len;
58 pte += len;
59 if (unlikely(end >= max)) {
60 pde++;
61 pte = 0;
62 }
63 }
64 }
65
66 vm->flush(vm);
67}
68
69void
70nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_vram *vram)
71{
72 nouveau_vm_map_at(vma, 0, vram);
73}
74
75void
76nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
77 dma_addr_t *list)
78{
79 struct nouveau_vm *vm = vma->vm;
80 int big = vma->node->type != vm->spg_shift;
81 u32 offset = vma->node->offset + (delta >> 12);
82 u32 bits = vma->node->type - 12;
83 u32 num = length >> vma->node->type;
84 u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
85 u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
86 u32 max = 1 << (vm->pgt_bits - bits);
87 u32 end, len;
88
89 while (num) {
90 struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
91
92 end = (pte + num);
93 if (unlikely(end >= max))
94 end = max;
95 len = end - pte;
96
97 vm->map_sg(vma, pgt, pte, list, len);
98
99 num -= len;
100 pte += len;
101 list += len;
102 if (unlikely(end >= max)) {
103 pde++;
104 pte = 0;
105 }
106 }
107
108 vm->flush(vm);
109}
110
111void
112nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
113{
114 struct nouveau_vm *vm = vma->vm;
115 int big = vma->node->type != vm->spg_shift;
116 u32 offset = vma->node->offset + (delta >> 12);
117 u32 bits = vma->node->type - 12;
118 u32 num = length >> vma->node->type;
119 u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
120 u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
121 u32 max = 1 << (vm->pgt_bits - bits);
122 u32 end, len;
123
124 while (num) {
125 struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
126
127 end = (pte + num);
128 if (unlikely(end >= max))
129 end = max;
130 len = end - pte;
131
132 vm->unmap(pgt, pte, len);
133
134 num -= len;
135 pte += len;
136 if (unlikely(end >= max)) {
137 pde++;
138 pte = 0;
139 }
140 }
141
142 vm->flush(vm);
143}
144
145void
146nouveau_vm_unmap(struct nouveau_vma *vma)
147{
148 nouveau_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
149}
150
151static void
152nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
153{
154 struct nouveau_vm_pgd *vpgd;
155 struct nouveau_vm_pgt *vpgt;
156 struct nouveau_gpuobj *pgt;
157 u32 pde;
158
159 for (pde = fpde; pde <= lpde; pde++) {
160 vpgt = &vm->pgt[pde - vm->fpde];
161 if (--vpgt->refcount[big])
162 continue;
163
164 pgt = vpgt->obj[big];
165 vpgt->obj[big] = NULL;
166
167 list_for_each_entry(vpgd, &vm->pgd_list, head) {
168 vm->map_pgt(vpgd->obj, pde, vpgt->obj);
169 }
170
171 mutex_unlock(&vm->mm->mutex);
172 nouveau_gpuobj_ref(NULL, &pgt);
173 mutex_lock(&vm->mm->mutex);
174 }
175}
176
177static int
178nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
179{
180 struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
181 struct nouveau_vm_pgd *vpgd;
182 struct nouveau_gpuobj *pgt;
183 int big = (type != vm->spg_shift);
184 u32 pgt_size;
185 int ret;
186
187 pgt_size = (1 << (vm->pgt_bits + 12)) >> type;
188 pgt_size *= 8;
189
190 mutex_unlock(&vm->mm->mutex);
191 ret = nouveau_gpuobj_new(vm->dev, NULL, pgt_size, 0x1000,
192 NVOBJ_FLAG_ZERO_ALLOC, &pgt);
193 mutex_lock(&vm->mm->mutex);
194 if (unlikely(ret))
195 return ret;
196
197 /* someone beat us to filling the PDE while we didn't have the lock */
198 if (unlikely(vpgt->refcount[big]++)) {
199 mutex_unlock(&vm->mm->mutex);
200 nouveau_gpuobj_ref(NULL, &pgt);
201 mutex_lock(&vm->mm->mutex);
202 return 0;
203 }
204
205 vpgt->obj[big] = pgt;
206 list_for_each_entry(vpgd, &vm->pgd_list, head) {
207 vm->map_pgt(vpgd->obj, pde, vpgt->obj);
208 }
209
210 return 0;
211}
212
213int
214nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
215 u32 access, struct nouveau_vma *vma)
216{
217 u32 align = (1 << page_shift) >> 12;
218 u32 msize = size >> 12;
219 u32 fpde, lpde, pde;
220 int ret;
221
222 mutex_lock(&vm->mm->mutex);
223 ret = nouveau_mm_get(vm->mm, page_shift, msize, 0, align, &vma->node);
224 if (unlikely(ret != 0)) {
225 mutex_unlock(&vm->mm->mutex);
226 return ret;
227 }
228
229 fpde = (vma->node->offset >> vm->pgt_bits);
230 lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
231 for (pde = fpde; pde <= lpde; pde++) {
232 struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
233 int big = (vma->node->type != vm->spg_shift);
234
235 if (likely(vpgt->refcount[big])) {
236 vpgt->refcount[big]++;
237 continue;
238 }
239
240 ret = nouveau_vm_map_pgt(vm, pde, vma->node->type);
241 if (ret) {
242 if (pde != fpde)
243 nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1);
244 nouveau_mm_put(vm->mm, vma->node);
245 mutex_unlock(&vm->mm->mutex);
246 vma->node = NULL;
247 return ret;
248 }
249 }
250 mutex_unlock(&vm->mm->mutex);
251
252 vma->vm = vm;
253 vma->offset = (u64)vma->node->offset << 12;
254 vma->access = access;
255 return 0;
256}
257
258void
259nouveau_vm_put(struct nouveau_vma *vma)
260{
261 struct nouveau_vm *vm = vma->vm;
262 u32 fpde, lpde;
263
264 if (unlikely(vma->node == NULL))
265 return;
266 fpde = (vma->node->offset >> vm->pgt_bits);
267 lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
268
269 mutex_lock(&vm->mm->mutex);
270 nouveau_vm_unmap_pgt(vm, vma->node->type != vm->spg_shift, fpde, lpde);
271 nouveau_mm_put(vm->mm, vma->node);
272 vma->node = NULL;
273 mutex_unlock(&vm->mm->mutex);
274}
275
276int
277nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset,
278 struct nouveau_vm **pvm)
279{
280 struct drm_nouveau_private *dev_priv = dev->dev_private;
281 struct nouveau_vm *vm;
282 u64 mm_length = (offset + length) - mm_offset;
283 u32 block, pgt_bits;
284 int ret;
285
286 vm = kzalloc(sizeof(*vm), GFP_KERNEL);
287 if (!vm)
288 return -ENOMEM;
289
290 if (dev_priv->card_type == NV_50) {
291 vm->map_pgt = nv50_vm_map_pgt;
292 vm->map = nv50_vm_map;
293 vm->map_sg = nv50_vm_map_sg;
294 vm->unmap = nv50_vm_unmap;
295 vm->flush = nv50_vm_flush;
296 vm->spg_shift = 12;
297 vm->lpg_shift = 16;
298
299 pgt_bits = 29;
300 block = (1 << pgt_bits);
301 if (length < block)
302 block = length;
303
304 } else
305 if (dev_priv->card_type == NV_C0) {
306 vm->map_pgt = nvc0_vm_map_pgt;
307 vm->map = nvc0_vm_map;
308 vm->map_sg = nvc0_vm_map_sg;
309 vm->unmap = nvc0_vm_unmap;
310 vm->flush = nvc0_vm_flush;
311 vm->spg_shift = 12;
312 vm->lpg_shift = 17;
313 pgt_bits = 27;
314
315 /* Should be 4096 everywhere, this is a hack that's
316 * currently necessary to avoid an elusive bug that
317 * causes corruption when mixing small/large pages
318 */
319 if (length < (1ULL << 40))
320 block = 4096;
321 else {
322 block = (1 << pgt_bits);
323 if (length < block)
324 block = length;
325 }
326 } else {
327 kfree(vm);
328 return -ENOSYS;
329 }
330
331 vm->fpde = offset >> pgt_bits;
332 vm->lpde = (offset + length - 1) >> pgt_bits;
333 vm->pgt = kcalloc(vm->lpde - vm->fpde + 1, sizeof(*vm->pgt), GFP_KERNEL);
334 if (!vm->pgt) {
335 kfree(vm);
336 return -ENOMEM;
337 }
338
339 INIT_LIST_HEAD(&vm->pgd_list);
340 vm->dev = dev;
341 vm->refcount = 1;
342 vm->pgt_bits = pgt_bits - 12;
343
344 ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
345 block >> 12);
346 if (ret) {
347 kfree(vm);
348 return ret;
349 }
350
351 *pvm = vm;
352 return 0;
353}
354
355static int
356nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
357{
358 struct nouveau_vm_pgd *vpgd;
359 int i;
360
361 if (!pgd)
362 return 0;
363
364 vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
365 if (!vpgd)
366 return -ENOMEM;
367
368 nouveau_gpuobj_ref(pgd, &vpgd->obj);
369
370 mutex_lock(&vm->mm->mutex);
371 for (i = vm->fpde; i <= vm->lpde; i++)
372 vm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
373 list_add(&vpgd->head, &vm->pgd_list);
374 mutex_unlock(&vm->mm->mutex);
375 return 0;
376}
377
378static void
379nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
380{
381 struct nouveau_vm_pgd *vpgd, *tmp;
382
383 if (!pgd)
384 return;
385
386 mutex_lock(&vm->mm->mutex);
387 list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
388 if (vpgd->obj != pgd)
389 continue;
390
391 list_del(&vpgd->head);
392 nouveau_gpuobj_ref(NULL, &vpgd->obj);
393 kfree(vpgd);
394 }
395 mutex_unlock(&vm->mm->mutex);
396}
397
398static void
399nouveau_vm_del(struct nouveau_vm *vm)
400{
401 struct nouveau_vm_pgd *vpgd, *tmp;
402
403 list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
404 nouveau_vm_unlink(vm, vpgd->obj);
405 }
406 WARN_ON(nouveau_mm_fini(&vm->mm) != 0);
407
408 kfree(vm->pgt);
409 kfree(vm);
410}
411
412int
413nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr,
414 struct nouveau_gpuobj *pgd)
415{
416 struct nouveau_vm *vm;
417 int ret;
418
419 vm = ref;
420 if (vm) {
421 ret = nouveau_vm_link(vm, pgd);
422 if (ret)
423 return ret;
424
425 vm->refcount++;
426 }
427
428 vm = *ptr;
429 *ptr = ref;
430
431 if (vm) {
432 nouveau_vm_unlink(vm, pgd);
433
434 if (--vm->refcount == 0)
435 nouveau_vm_del(vm);
436 }
437
438 return 0;
439}
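nouveau_vm_map_pgt() drops vm->mm->mutex around the possibly sleeping nouveau_gpuobj_new(), then re-checks the refcount in case another thread filled the PDE in the meantime; nouveau_vm_unmap_pgt() drops it the same way around the final nouveau_gpuobj_ref(NULL, ...). A compact userspace model of that pattern (names are illustrative; the caller is assumed to hold the lock on entry):

    #include <pthread.h>
    #include <stdlib.h>

    struct slot {
            void *obj;
            unsigned int refcount;
    };

    /* Called with *lock held; may drop and retake it, as the driver does. */
    static int fill_slot(pthread_mutex_t *lock, struct slot *s, size_t size)
    {
            void *obj;

            pthread_mutex_unlock(lock);     /* allocation may sleep */
            obj = malloc(size);
            pthread_mutex_lock(lock);
            if (!obj)
                    return -1;

            if (s->refcount++) {            /* someone beat us to it */
                    pthread_mutex_unlock(lock);
                    free(obj);              /* driver: gpuobj_ref(NULL, &pgt) */
                    pthread_mutex_lock(lock);
                    return 0;
            }

            s->obj = obj;
            return 0;
    }
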
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/nouveau_vm.h
new file mode 100644
index 000000000000..e1193515771b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.h
@@ -0,0 +1,113 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#ifndef __NOUVEAU_VM_H__
26#define __NOUVEAU_VM_H__
27
28#include "drmP.h"
29
30#include "nouveau_drv.h"
31#include "nouveau_mm.h"
32
33struct nouveau_vm_pgt {
34 struct nouveau_gpuobj *obj[2];
35 u32 refcount[2];
36};
37
38struct nouveau_vm_pgd {
39 struct list_head head;
40 struct nouveau_gpuobj *obj;
41};
42
43struct nouveau_vma {
44 struct nouveau_vm *vm;
45 struct nouveau_mm_node *node;
46 u64 offset;
47 u32 access;
48};
49
50struct nouveau_vm {
51 struct drm_device *dev;
52 struct nouveau_mm *mm;
53 int refcount;
54
55 struct list_head pgd_list;
56 atomic_t pgraph_refs;
57 atomic_t pcrypt_refs;
58
59 struct nouveau_vm_pgt *pgt;
60 u32 fpde;
61 u32 lpde;
62
63 u32 pgt_bits;
64 u8 spg_shift;
65 u8 lpg_shift;
66
67 void (*map_pgt)(struct nouveau_gpuobj *pgd, u32 pde,
68 struct nouveau_gpuobj *pgt[2]);
69 void (*map)(struct nouveau_vma *, struct nouveau_gpuobj *,
70 struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
71 void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *,
72 u32 pte, dma_addr_t *, u32 cnt);
73 void (*unmap)(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt);
74 void (*flush)(struct nouveau_vm *);
75};
76
77/* nouveau_vm.c */
78int nouveau_vm_new(struct drm_device *, u64 offset, u64 length, u64 mm_offset,
79 struct nouveau_vm **);
80int nouveau_vm_ref(struct nouveau_vm *, struct nouveau_vm **,
81 struct nouveau_gpuobj *pgd);
82int nouveau_vm_get(struct nouveau_vm *, u64 size, u32 page_shift,
83 u32 access, struct nouveau_vma *);
84void nouveau_vm_put(struct nouveau_vma *);
85void nouveau_vm_map(struct nouveau_vma *, struct nouveau_vram *);
86void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_vram *);
87void nouveau_vm_unmap(struct nouveau_vma *);
88void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
89void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
90 dma_addr_t *);
91
92/* nv50_vm.c */
93void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
94 struct nouveau_gpuobj *pgt[2]);
95void nv50_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
96 struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
97void nv50_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
98 u32 pte, dma_addr_t *, u32 cnt);
99void nv50_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
100void nv50_vm_flush(struct nouveau_vm *);
101void nv50_vm_flush_engine(struct drm_device *, int engine);
102
103/* nvc0_vm.c */
104void nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
105 struct nouveau_gpuobj *pgt[2]);
106void nvc0_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
107 struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
108void nvc0_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
109 u32 pte, dma_addr_t *, u32 cnt);
110void nvc0_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
111void nvc0_vm_flush(struct nouveau_vm *);
112
113#endif
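The pde/pte arithmetic repeated in nouveau_vm_map_at(), nouveau_vm_map_sg() and nouveau_vm_unmap_at() splits a page index into a page-directory entry and an offset within that page table. A worked standalone example with NV50's small-page numbers (page_shift 12, vm->pgt_bits = 29 - 12 = 17); the driver additionally subtracts vm->fpde to index its pgt array and shifts pte right by (type - 12) for large pages:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t addr = 0x12345678000ULL;   /* arbitrary GPU VA */
            uint32_t page_shift = 12, pgt_bits = 17;
            uint32_t page = addr >> page_shift;             /* 0x12345678 */
            uint32_t pde = page >> pgt_bits;                /* 0x091a */
            uint32_t pte = page & ((1u << pgt_bits) - 1);   /* 0x5678 */

            printf("pde 0x%04x pte 0x%04x\n", pde, pte);
            return 0;
    }
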
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index 40e180741629..297505eb98d5 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -551,7 +551,10 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
551 if (dev_priv->card_type >= NV_30) 551 if (dev_priv->card_type >= NV_30)
552 regp->gpio_ext = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT); 552 regp->gpio_ext = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT);
553 553
554 regp->crtc_cfg = NV_PCRTC_CONFIG_START_ADDRESS_HSYNC; 554 if (dev_priv->card_type >= NV_10)
555 regp->crtc_cfg = NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC;
556 else
557 regp->crtc_cfg = NV04_PCRTC_CONFIG_START_ADDRESS_HSYNC;
555 558
556 /* Some misc regs */ 559 /* Some misc regs */
557 if (dev_priv->card_type == NV_40) { 560 if (dev_priv->card_type == NV_40) {
@@ -669,6 +672,7 @@ static void nv_crtc_prepare(struct drm_crtc *crtc)
669 if (nv_two_heads(dev)) 672 if (nv_two_heads(dev))
670 NVSetOwner(dev, nv_crtc->index); 673 NVSetOwner(dev, nv_crtc->index);
671 674
675 drm_vblank_pre_modeset(dev, nv_crtc->index);
672 funcs->dpms(crtc, DRM_MODE_DPMS_OFF); 676 funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
673 677
674 NVBlankScreen(dev, nv_crtc->index, true); 678 NVBlankScreen(dev, nv_crtc->index, true);
@@ -701,6 +705,7 @@ static void nv_crtc_commit(struct drm_crtc *crtc)
701#endif 705#endif
702 706
703 funcs->dpms(crtc, DRM_MODE_DPMS_ON); 707 funcs->dpms(crtc, DRM_MODE_DPMS_ON);
708 drm_vblank_post_modeset(dev, nv_crtc->index);
704} 709}
705 710
706static void nv_crtc_destroy(struct drm_crtc *crtc) 711static void nv_crtc_destroy(struct drm_crtc *crtc)
@@ -986,6 +991,7 @@ static const struct drm_crtc_funcs nv04_crtc_funcs = {
986 .cursor_move = nv04_crtc_cursor_move, 991 .cursor_move = nv04_crtc_cursor_move,
987 .gamma_set = nv_crtc_gamma_set, 992 .gamma_set = nv_crtc_gamma_set,
988 .set_config = drm_crtc_helper_set_config, 993 .set_config = drm_crtc_helper_set_config,
994 .page_flip = nouveau_crtc_page_flip,
989 .destroy = nv_crtc_destroy, 995 .destroy = nv_crtc_destroy,
990}; 996};
991 997
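drm_vblank_pre_modeset() and drm_vblank_post_modeset() now bracket the interval where the CRTC is blanked, letting the DRM core keep the vblank counter monotonic across the gap. Reduced to its shape (a sketch, not the driver's exact hook bodies):

    static void modeset_with_vblank_bookkeeping(struct drm_device *dev, int head)
    {
            drm_vblank_pre_modeset(dev, head);  /* counter may stop: save it */
            /* ... DPMS off, blank, program timings, DPMS on (elided) ... */
            drm_vblank_post_modeset(dev, head); /* counter resumes: re-sync */
    }
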
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
index ba6423f2ffcc..e000455e06d0 100644
--- a/drivers/gpu/drm/nouveau/nv04_dac.c
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -74,14 +74,14 @@ static int sample_load_twice(struct drm_device *dev, bool sense[2])
74 * use a 10ms timeout (guards against crtc being inactive, in 74 * use a 10ms timeout (guards against crtc being inactive, in
75 * which case blank state would never change) 75 * which case blank state would never change)
76 */ 76 */
77 if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR, 77 if (!nouveau_wait_eq(dev, 10000000, NV_PRMCIO_INP0__COLOR,
78 0x00000001, 0x00000000)) 78 0x00000001, 0x00000000))
79 return -EBUSY; 79 return -EBUSY;
80 if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR, 80 if (!nouveau_wait_eq(dev, 10000000, NV_PRMCIO_INP0__COLOR,
81 0x00000001, 0x00000001)) 81 0x00000001, 0x00000001))
82 return -EBUSY; 82 return -EBUSY;
83 if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR, 83 if (!nouveau_wait_eq(dev, 10000000, NV_PRMCIO_INP0__COLOR,
84 0x00000001, 0x00000000)) 84 0x00000001, 0x00000000))
85 return -EBUSY; 85 return -EBUSY;
86 86
87 udelay(100); 87 udelay(100);
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
index 9e28cf772e3c..1715e1464b7d 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.c
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -32,6 +32,9 @@
32#include "nouveau_encoder.h" 32#include "nouveau_encoder.h"
33#include "nouveau_connector.h" 33#include "nouveau_connector.h"
34 34
35static void nv04_vblank_crtc0_isr(struct drm_device *);
36static void nv04_vblank_crtc1_isr(struct drm_device *);
37
35static void 38static void
36nv04_display_store_initial_head_owner(struct drm_device *dev) 39nv04_display_store_initial_head_owner(struct drm_device *dev)
37{ 40{
@@ -197,6 +200,8 @@ nv04_display_create(struct drm_device *dev)
197 func->save(encoder); 200 func->save(encoder);
198 } 201 }
199 202
203 nouveau_irq_register(dev, 24, nv04_vblank_crtc0_isr);
204 nouveau_irq_register(dev, 25, nv04_vblank_crtc1_isr);
200 return 0; 205 return 0;
201} 206}
202 207
@@ -208,6 +213,9 @@ nv04_display_destroy(struct drm_device *dev)
208 213
209 NV_DEBUG_KMS(dev, "\n"); 214 NV_DEBUG_KMS(dev, "\n");
210 215
216 nouveau_irq_unregister(dev, 24);
217 nouveau_irq_unregister(dev, 25);
218
211 /* Turn every CRTC off. */ 219 /* Turn every CRTC off. */
212 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 220 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
213 struct drm_mode_set modeset = { 221 struct drm_mode_set modeset = {
@@ -258,3 +266,16 @@ nv04_display_init(struct drm_device *dev)
258 return 0; 266 return 0;
259} 267}
260 268
269static void
270nv04_vblank_crtc0_isr(struct drm_device *dev)
271{
272 nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK);
273 drm_handle_vblank(dev, 0);
274}
275
276static void
277nv04_vblank_crtc1_isr(struct drm_device *dev)
278{
279 nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK);
280 drm_handle_vblank(dev, 1);
281}
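Both ISRs follow the same ack-then-dispatch shape: clear the vblank bit in the CRTC's interrupt-status register, then let the DRM core process the event. A generic sketch (the intstat parameter stands in for NV_CRTC0_INTSTAT / NV_CRTC1_INTSTAT):

    static void vblank_isr(struct drm_device *dev, int head, uint32_t intstat)
    {
            nv_wr32(dev, intstat, NV_CRTC_INTR_VBLANK); /* ack at the source */
            drm_handle_vblank(dev, head);               /* dispatch to DRM */
    }
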
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 33e4c9388bc1..7a1189371096 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -28,52 +28,39 @@
28#include "nouveau_ramht.h" 28#include "nouveau_ramht.h"
29#include "nouveau_fbcon.h" 29#include "nouveau_fbcon.h"
30 30
31void 31int
32nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) 32nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
33{ 33{
34 struct nouveau_fbdev *nfbdev = info->par; 34 struct nouveau_fbdev *nfbdev = info->par;
35 struct drm_device *dev = nfbdev->dev; 35 struct drm_device *dev = nfbdev->dev;
36 struct drm_nouveau_private *dev_priv = dev->dev_private; 36 struct drm_nouveau_private *dev_priv = dev->dev_private;
37 struct nouveau_channel *chan = dev_priv->channel; 37 struct nouveau_channel *chan = dev_priv->channel;
38 int ret;
38 39
39 if (info->state != FBINFO_STATE_RUNNING) 40 ret = RING_SPACE(chan, 4);
40 return; 41 if (ret)
41 42 return ret;
42 if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 4)) {
43 nouveau_fbcon_gpu_lockup(info);
44 }
45
46 if (info->flags & FBINFO_HWACCEL_DISABLED) {
47 cfb_copyarea(info, region);
48 return;
49 }
50 43
51 BEGIN_RING(chan, NvSubImageBlit, 0x0300, 3); 44 BEGIN_RING(chan, NvSubImageBlit, 0x0300, 3);
52 OUT_RING(chan, (region->sy << 16) | region->sx); 45 OUT_RING(chan, (region->sy << 16) | region->sx);
53 OUT_RING(chan, (region->dy << 16) | region->dx); 46 OUT_RING(chan, (region->dy << 16) | region->dx);
54 OUT_RING(chan, (region->height << 16) | region->width); 47 OUT_RING(chan, (region->height << 16) | region->width);
55 FIRE_RING(chan); 48 FIRE_RING(chan);
49 return 0;
56} 50}
57 51
58void 52int
59nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) 53nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
60{ 54{
61 struct nouveau_fbdev *nfbdev = info->par; 55 struct nouveau_fbdev *nfbdev = info->par;
62 struct drm_device *dev = nfbdev->dev; 56 struct drm_device *dev = nfbdev->dev;
63 struct drm_nouveau_private *dev_priv = dev->dev_private; 57 struct drm_nouveau_private *dev_priv = dev->dev_private;
64 struct nouveau_channel *chan = dev_priv->channel; 58 struct nouveau_channel *chan = dev_priv->channel;
59 int ret;
65 60
66 if (info->state != FBINFO_STATE_RUNNING) 61 ret = RING_SPACE(chan, 7);
67 return; 62 if (ret)
68 63 return ret;
69 if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 7)) {
70 nouveau_fbcon_gpu_lockup(info);
71 }
72
73 if (info->flags & FBINFO_HWACCEL_DISABLED) {
74 cfb_fillrect(info, rect);
75 return;
76 }
77 64
78 BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1); 65 BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1);
79 OUT_RING(chan, (rect->rop != ROP_COPY) ? 1 : 3); 66 OUT_RING(chan, (rect->rop != ROP_COPY) ? 1 : 3);
@@ -87,9 +74,10 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
87 OUT_RING(chan, (rect->dx << 16) | rect->dy); 74 OUT_RING(chan, (rect->dx << 16) | rect->dy);
88 OUT_RING(chan, (rect->width << 16) | rect->height); 75 OUT_RING(chan, (rect->width << 16) | rect->height);
89 FIRE_RING(chan); 76 FIRE_RING(chan);
77 return 0;
90} 78}
91 79
92void 80int
93nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) 81nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
94{ 82{
95 struct nouveau_fbdev *nfbdev = info->par; 83 struct nouveau_fbdev *nfbdev = info->par;
@@ -101,23 +89,14 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 	uint32_t dsize;
 	uint32_t width;
 	uint32_t *data = (uint32_t *)image->data;
+	int ret;
 
-	if (info->state != FBINFO_STATE_RUNNING)
-		return;
-
-	if (image->depth != 1) {
-		cfb_imageblit(info, image);
-		return;
-	}
-
-	if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 8)) {
-		nouveau_fbcon_gpu_lockup(info);
-	}
+	if (image->depth != 1)
+		return -ENODEV;
 
-	if (info->flags & FBINFO_HWACCEL_DISABLED) {
-		cfb_imageblit(info, image);
-		return;
-	}
+	ret = RING_SPACE(chan, 8);
+	if (ret)
+		return ret;
 
 	width = ALIGN(image->width, 8);
 	dsize = ALIGN(width * image->height, 32) >> 5;
@@ -144,11 +123,9 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 	while (dsize) {
 		int iter_len = dsize > 128 ? 128 : dsize;
 
-		if (RING_SPACE(chan, iter_len + 1)) {
-			nouveau_fbcon_gpu_lockup(info);
-			cfb_imageblit(info, image);
-			return;
-		}
+		ret = RING_SPACE(chan, iter_len + 1);
+		if (ret)
+			return ret;
 
 		BEGIN_RING(chan, NvSubGdiRect, 0x0c00, iter_len);
 		OUT_RINGp(chan, data, iter_len);
@@ -157,22 +134,7 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 	}
 
 	FIRE_RING(chan);
-}
-
-static int
-nv04_fbcon_grobj_new(struct drm_device *dev, int class, uint32_t handle)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *obj = NULL;
-	int ret;
-
-	ret = nouveau_gpuobj_gr_new(dev_priv->channel, class, &obj);
-	if (ret)
-		return ret;
-
-	ret = nouveau_ramht_insert(dev_priv->channel, handle, obj);
-	nouveau_gpuobj_ref(NULL, &obj);
-	return ret;
+	return 0;
 }
 
 int
@@ -214,29 +176,31 @@ nv04_fbcon_accel_init(struct fb_info *info)
 		return -EINVAL;
 	}
 
-	ret = nv04_fbcon_grobj_new(dev, dev_priv->card_type >= NV_10 ?
-				   0x0062 : 0x0042, NvCtxSurf2D);
+	ret = nouveau_gpuobj_gr_new(chan, NvCtxSurf2D,
+				    dev_priv->card_type >= NV_10 ?
+				    0x0062 : 0x0042);
 	if (ret)
 		return ret;
 
-	ret = nv04_fbcon_grobj_new(dev, 0x0019, NvClipRect);
+	ret = nouveau_gpuobj_gr_new(chan, NvClipRect, 0x0019);
 	if (ret)
 		return ret;
 
-	ret = nv04_fbcon_grobj_new(dev, 0x0043, NvRop);
+	ret = nouveau_gpuobj_gr_new(chan, NvRop, 0x0043);
 	if (ret)
 		return ret;
 
-	ret = nv04_fbcon_grobj_new(dev, 0x0044, NvImagePatt);
+	ret = nouveau_gpuobj_gr_new(chan, NvImagePatt, 0x0044);
 	if (ret)
 		return ret;
 
-	ret = nv04_fbcon_grobj_new(dev, 0x004a, NvGdiRect);
+	ret = nouveau_gpuobj_gr_new(chan, NvGdiRect, 0x004a);
 	if (ret)
 		return ret;
 
-	ret = nv04_fbcon_grobj_new(dev, dev_priv->chipset >= 0x11 ?
-				   0x009f : 0x005f, NvImageBlit);
+	ret = nouveau_gpuobj_gr_new(chan, NvImageBlit,
+				    dev_priv->chipset >= 0x11 ?
+				    0x009f : 0x005f);
 	if (ret)
 		return ret;
 
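
The three fbcon hooks above now report failure instead of degrading silently: RING_SPACE() errors and unsupported depths propagate to the caller as -errno, and the FBINFO_STATE_RUNNING / FBINFO_HWACCEL_DISABLED checks and cfb_*() fallbacks they used to open-code presumably move into the shared nouveau_fbcon layer. A minimal sketch of what such a wrapper could look like (the wrapper name and its exact fallback policy are assumptions, not part of this hunk):

	static void
	nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
	{
		int ret = -ENODEV;

		/* Try the accelerated path only while the console is live. */
		if (info->state == FBINFO_STATE_RUNNING &&
		    !(info->flags & FBINFO_HWACCEL_DISABLED))
			ret = nv04_fbcon_fillrect(info, rect);

		if (ret == 0)
			return;
		if (ret != -ENODEV)
			nouveau_fbcon_gpu_lockup(info);	/* ring-space failure */
		cfb_fillrect(info, rect);		/* software fallback */
	}

One wrapper per hook keeps the lockup bookkeeping in a single place instead of three copies.
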
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c
index 708293b7ddcd..f89d104698df 100644
--- a/drivers/gpu/drm/nouveau/nv04_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv04_fifo.c
@@ -28,6 +28,7 @@
28#include "drm.h" 28#include "drm.h"
29#include "nouveau_drv.h" 29#include "nouveau_drv.h"
30#include "nouveau_ramht.h" 30#include "nouveau_ramht.h"
31#include "nouveau_util.h"
31 32
32#define NV04_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV04_RAMFC__SIZE)) 33#define NV04_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV04_RAMFC__SIZE))
33#define NV04_RAMFC__SIZE 32 34#define NV04_RAMFC__SIZE 32
@@ -128,6 +129,11 @@ nv04_fifo_create_context(struct nouveau_channel *chan)
 	if (ret)
 		return ret;
 
+	chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
+			     NV03_USER(chan->id), PAGE_SIZE);
+	if (!chan->user)
+		return -ENOMEM;
+
 	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
 
 	/* Setup initial state */
@@ -151,10 +157,31 @@ void
 nv04_fifo_destroy_context(struct nouveau_channel *chan)
 {
 	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+	unsigned long flags;
 
-	nv_wr32(dev, NV04_PFIFO_MODE,
-		nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	pfifo->reassign(dev, false);
 
+	/* Unload the context if it's the currently active one */
+	if (pfifo->channel_id(dev) == chan->id) {
+		pfifo->disable(dev);
+		pfifo->unload_context(dev);
+		pfifo->enable(dev);
+	}
+
+	/* Keep it from being rescheduled */
+	nv_mask(dev, NV04_PFIFO_MODE, 1 << chan->id, 0);
+
+	pfifo->reassign(dev, true);
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+	/* Free the channel resources */
+	if (chan->user) {
+		iounmap(chan->user);
+		chan->user = NULL;
+	}
 	nouveau_gpuobj_ref(NULL, &chan->ramfc);
 }
 
@@ -208,7 +235,7 @@ nv04_fifo_unload_context(struct drm_device *dev)
 	if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
 		return 0;
 
-	chan = dev_priv->fifos[chid];
+	chan = dev_priv->channels.ptr[chid];
 	if (!chan) {
 		NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
 		return -EINVAL;
@@ -267,6 +294,7 @@ nv04_fifo_init_ramxx(struct drm_device *dev)
 static void
 nv04_fifo_init_intr(struct drm_device *dev)
 {
+	nouveau_irq_register(dev, 8, nv04_fifo_isr);
 	nv_wr32(dev, 0x002100, 0xffffffff);
 	nv_wr32(dev, 0x002140, 0xffffffff);
 }
@@ -289,7 +317,7 @@ nv04_fifo_init(struct drm_device *dev)
289 pfifo->reassign(dev, true); 317 pfifo->reassign(dev, true);
290 318
291 for (i = 0; i < dev_priv->engine.fifo.channels; i++) { 319 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
292 if (dev_priv->fifos[i]) { 320 if (dev_priv->channels.ptr[i]) {
293 uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE); 321 uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
294 nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i)); 322 nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
295 } 323 }
@@ -298,3 +326,207 @@ nv04_fifo_init(struct drm_device *dev)
 	return 0;
 }
 
+void
+nv04_fifo_fini(struct drm_device *dev)
+{
+	nv_wr32(dev, 0x2140, 0x00000000);
+	nouveau_irq_unregister(dev, 8);
+}
+
+static bool
+nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan = NULL;
+	struct nouveau_gpuobj *obj;
+	unsigned long flags;
+	const int subc = (addr >> 13) & 0x7;
+	const int mthd = addr & 0x1ffc;
+	bool handled = false;
+	u32 engine;
+
+	spin_lock_irqsave(&dev_priv->channels.lock, flags);
+	if (likely(chid >= 0 && chid < dev_priv->engine.fifo.channels))
+		chan = dev_priv->channels.ptr[chid];
+	if (unlikely(!chan))
+		goto out;
+
+	switch (mthd) {
+	case 0x0000: /* bind object to subchannel */
+		obj = nouveau_ramht_find(chan, data);
+		if (unlikely(!obj || obj->engine != NVOBJ_ENGINE_SW))
+			break;
+
+		chan->sw_subchannel[subc] = obj->class;
+		engine = 0x0000000f << (subc * 4);
+
+		nv_mask(dev, NV04_PFIFO_CACHE1_ENGINE, engine, 0x00000000);
+		handled = true;
+		break;
+	default:
+		engine = nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE);
+		if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
+			break;
+
+		if (!nouveau_gpuobj_mthd_call(chan, chan->sw_subchannel[subc],
+					      mthd, data))
+			handled = true;
+		break;
+	}
+
+out:
+	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+	return handled;
+}
+
+void
+nv04_fifo_isr(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_engine *engine = &dev_priv->engine;
+	uint32_t status, reassign;
+	int cnt = 0;
+
+	reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
+	while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
+		uint32_t chid, get;
+
+		nv_wr32(dev, NV03_PFIFO_CACHES, 0);
+
+		chid = engine->fifo.channel_id(dev);
+		get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);
+
+		if (status & NV_PFIFO_INTR_CACHE_ERROR) {
+			uint32_t mthd, data;
+			int ptr;
+
+			/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
+			 * wrapping on my G80 chips, but CACHE1 isn't big
+			 * enough for this much data.. Tests show that it
+			 * wraps around to the start at GET=0x800.. No clue
+			 * as to why..
+			 */
+			ptr = (get & 0x7ff) >> 2;
+
+			if (dev_priv->card_type < NV_40) {
+				mthd = nv_rd32(dev,
+					NV04_PFIFO_CACHE1_METHOD(ptr));
+				data = nv_rd32(dev,
+					NV04_PFIFO_CACHE1_DATA(ptr));
+			} else {
+				mthd = nv_rd32(dev,
+					NV40_PFIFO_CACHE1_METHOD(ptr));
+				data = nv_rd32(dev,
+					NV40_PFIFO_CACHE1_DATA(ptr));
+			}
+
+			if (!nouveau_fifo_swmthd(dev, chid, mthd, data)) {
+				NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
+					"Mthd 0x%04x Data 0x%08x\n",
+					chid, (mthd >> 13) & 7, mthd & 0x1ffc,
+					data);
+			}
+
+			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
+			nv_wr32(dev, NV03_PFIFO_INTR_0,
+				NV_PFIFO_INTR_CACHE_ERROR);
+
+			nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
+				nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
+			nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
+			nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
+				nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
+			nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
+
+			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
+				nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
+			nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
+
+			status &= ~NV_PFIFO_INTR_CACHE_ERROR;
+		}
+
+		if (status & NV_PFIFO_INTR_DMA_PUSHER) {
+			u32 dma_get = nv_rd32(dev, 0x003244);
+			u32 dma_put = nv_rd32(dev, 0x003240);
+			u32 push = nv_rd32(dev, 0x003220);
+			u32 state = nv_rd32(dev, 0x003228);
+
+			if (dev_priv->card_type == NV_50) {
+				u32 ho_get = nv_rd32(dev, 0x003328);
+				u32 ho_put = nv_rd32(dev, 0x003320);
+				u32 ib_get = nv_rd32(dev, 0x003334);
+				u32 ib_put = nv_rd32(dev, 0x003330);
+
+				if (nouveau_ratelimit())
+					NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
+						"Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
+						"State 0x%08x Push 0x%08x\n",
+						chid, ho_get, dma_get, ho_put,
+						dma_put, ib_get, ib_put, state,
+						push);
+
+				/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
+				nv_wr32(dev, 0x003364, 0x00000000);
+				if (dma_get != dma_put || ho_get != ho_put) {
+					nv_wr32(dev, 0x003244, dma_put);
+					nv_wr32(dev, 0x003328, ho_put);
+				} else
+				if (ib_get != ib_put) {
+					nv_wr32(dev, 0x003334, ib_put);
+				}
+			} else {
+				NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
+					"Put 0x%08x State 0x%08x Push 0x%08x\n",
+					chid, dma_get, dma_put, state, push);
+
+				if (dma_get != dma_put)
+					nv_wr32(dev, 0x003244, dma_put);
+			}
+
+			nv_wr32(dev, 0x003228, 0x00000000);
+			nv_wr32(dev, 0x003220, 0x00000001);
+			nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
+			status &= ~NV_PFIFO_INTR_DMA_PUSHER;
+		}
+
+		if (status & NV_PFIFO_INTR_SEMAPHORE) {
+			uint32_t sem;
+
+			status &= ~NV_PFIFO_INTR_SEMAPHORE;
+			nv_wr32(dev, NV03_PFIFO_INTR_0,
+				NV_PFIFO_INTR_SEMAPHORE);
+
+			sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
+			nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
+
+			nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
+			nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
+		}
+
+		if (dev_priv->card_type == NV_50) {
+			if (status & 0x00000010) {
+				nv50_fb_vm_trap(dev, 1, "PFIFO_BAR_FAULT");
+				status &= ~0x00000010;
+				nv_wr32(dev, 0x002100, 0x00000010);
+			}
+		}
+
+		if (status) {
+			if (nouveau_ratelimit())
+				NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
+					status, chid);
+			nv_wr32(dev, NV03_PFIFO_INTR_0, status);
+			status = 0;
+		}
+
+		nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
+	}
+
+	if (status) {
+		NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
+		nv_wr32(dev, 0x2140, 0);
+		nv_wr32(dev, 0x140, 0);
+	}
+
+	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
+}
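
nv04_fifo_init_intr() now registers nv04_fifo_isr with a shared interrupt core and nv04_fifo_fini() unregisters it; bit 8 of NV_PMC_INTR_0 is the PFIFO pending bit, as the final write in the ISR suggests. The shared IRQ code itself is not part of this excerpt, so the dispatcher below is only an assumed shape (the irq_handler[] table name is illustrative):

	/* Assumed shape of the shared top-level handler, for orientation. */
	static irqreturn_t
	nouveau_irq_handler(int irq, void *arg)
	{
		struct drm_device *dev = arg;
		struct drm_nouveau_private *dev_priv = dev->dev_private;
		u32 stat = nv_rd32(dev, NV03_PMC_INTR_0);
		int i;

		for (i = 0; stat && i < 32; i++) {
			if (!(stat & (1 << i)) || !dev_priv->irq_handler[i])
				continue;
			dev_priv->irq_handler[i](dev);	/* e.g. nv04_fifo_isr */
			stat &= ~(1 << i);
		}

		return IRQ_HANDLED;
	}
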
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
index c8973421b635..af75015068d6 100644
--- a/drivers/gpu/drm/nouveau/nv04_graph.c
+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
@@ -26,6 +26,11 @@
26#include "drm.h" 26#include "drm.h"
27#include "nouveau_drm.h" 27#include "nouveau_drm.h"
28#include "nouveau_drv.h" 28#include "nouveau_drv.h"
29#include "nouveau_hw.h"
30#include "nouveau_util.h"
31
32static int nv04_graph_register(struct drm_device *dev);
33static void nv04_graph_isr(struct drm_device *dev);
29 34
30static uint32_t nv04_graph_ctx_regs[] = { 35static uint32_t nv04_graph_ctx_regs[] = {
31 0x0040053c, 36 0x0040053c,
@@ -357,10 +362,10 @@ nv04_graph_channel(struct drm_device *dev)
 	if (chid >= dev_priv->engine.fifo.channels)
 		return NULL;
 
-	return dev_priv->fifos[chid];
+	return dev_priv->channels.ptr[chid];
 }
 
-void
+static void
 nv04_graph_context_switch(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -368,7 +373,6 @@ nv04_graph_context_switch(struct drm_device *dev)
 	struct nouveau_channel *chan = NULL;
 	int chid;
 
-	pgraph->fifo_access(dev, false);
 	nouveau_wait_for_idle(dev);
 
 	/* If previous context is valid, we need to save it */
@@ -376,11 +380,9 @@ nv04_graph_context_switch(struct drm_device *dev)
 
 	/* Load context for next channel */
 	chid = dev_priv->engine.fifo.channel_id(dev);
-	chan = dev_priv->fifos[chid];
+	chan = dev_priv->channels.ptr[chid];
 	if (chan)
 		nv04_graph_load_context(chan);
-
-	pgraph->fifo_access(dev, true);
 }
 
 static uint32_t *ctx_reg(struct graph_state *ctx, uint32_t reg)
@@ -412,10 +414,25 @@ int nv04_graph_create_context(struct nouveau_channel *chan)
 
 void nv04_graph_destroy_context(struct nouveau_channel *chan)
 {
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 	struct graph_state *pgraph_ctx = chan->pgraph_ctx;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	pgraph->fifo_access(dev, false);
+
+	/* Unload the context if it's the currently active one */
+	if (pgraph->channel(dev) == chan)
+		pgraph->unload_context(dev);
 
+	/* Free the context resources */
 	kfree(pgraph_ctx);
 	chan->pgraph_ctx = NULL;
+
+	pgraph->fifo_access(dev, true);
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 }
 
 int nv04_graph_load_context(struct nouveau_channel *chan)
@@ -468,13 +485,19 @@ int nv04_graph_init(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	uint32_t tmp;
+	int ret;
 
 	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
 			~NV_PMC_ENABLE_PGRAPH);
 	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
 			 NV_PMC_ENABLE_PGRAPH);
 
+	ret = nv04_graph_register(dev);
+	if (ret)
+		return ret;
+
 	/* Enable PGRAPH interrupts */
+	nouveau_irq_register(dev, 12, nv04_graph_isr);
 	nv_wr32(dev, NV03_PGRAPH_INTR, 0xFFFFFFFF);
 	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
 
@@ -510,6 +533,8 @@ int nv04_graph_init(struct drm_device *dev)
 
 void nv04_graph_takedown(struct drm_device *dev)
 {
+	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
+	nouveau_irq_unregister(dev, 12);
 }
 
 void
@@ -524,13 +549,27 @@ nv04_graph_fifo_access(struct drm_device *dev, bool enabled)
 }
 
 static int
-nv04_graph_mthd_set_ref(struct nouveau_channel *chan, int grclass,
-			int mthd, uint32_t data)
+nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
+			u32 class, u32 mthd, u32 data)
 {
 	atomic_set(&chan->fence.last_sequence_irq, data);
 	return 0;
 }
 
+int
+nv04_graph_mthd_page_flip(struct nouveau_channel *chan,
+			  u32 class, u32 mthd, u32 data)
+{
+	struct drm_device *dev = chan->dev;
+	struct nouveau_page_flip_state s;
+
+	if (!nouveau_finish_page_flip(chan, &s))
+		nv_set_crtc_base(dev, s.crtc,
+				 s.offset + s.y * s.pitch + s.x * s.bpp / 8);
+
+	return 0;
+}
+
 /*
  * Software methods, why they are needed, and how they all work:
  *
@@ -606,12 +645,12 @@ nv04_graph_mthd_set_ref(struct nouveau_channel *chan, int grclass,
  */
 
 static void
-nv04_graph_set_ctx1(struct nouveau_channel *chan, uint32_t mask, uint32_t value)
+nv04_graph_set_ctx1(struct nouveau_channel *chan, u32 mask, u32 value)
 {
 	struct drm_device *dev = chan->dev;
-	uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
+	u32 instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
 	int subc = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
-	uint32_t tmp;
+	u32 tmp;
 
 	tmp = nv_ri32(dev, instance);
 	tmp &= ~mask;
@@ -623,11 +662,11 @@ nv04_graph_set_ctx1(struct nouveau_channel *chan, uint32_t mask, uint32_t value)
 }
 
 static void
-nv04_graph_set_ctx_val(struct nouveau_channel *chan, uint32_t mask, uint32_t value)
+nv04_graph_set_ctx_val(struct nouveau_channel *chan, u32 mask, u32 value)
 {
 	struct drm_device *dev = chan->dev;
-	uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
-	uint32_t tmp, ctx1;
+	u32 instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
+	u32 tmp, ctx1;
 	int class, op, valid = 1;
 
 	ctx1 = nv_ri32(dev, instance);
@@ -672,13 +711,13 @@ nv04_graph_set_ctx_val(struct nouveau_channel *chan, uint32_t mask, uint32_t val
 }
 
 static int
-nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_set_operation(struct nouveau_channel *chan,
+			      u32 class, u32 mthd, u32 data)
 {
 	if (data > 5)
 		return 1;
 	/* Old versions of the objects only accept first three operations. */
-	if (data > 2 && grclass < 0x40)
+	if (data > 2 && class < 0x40)
 		return 1;
 	nv04_graph_set_ctx1(chan, 0x00038000, data << 15);
 	/* changing operation changes set of objects needed for validation */
@@ -687,8 +726,8 @@ nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
 }
 
 static int
-nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan,
+			      u32 class, u32 mthd, u32 data)
 {
 	uint32_t min = data & 0xffff, max;
 	uint32_t w = data >> 16;
@@ -706,8 +745,8 @@ nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan, int grclass,
 }
 
 static int
-nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan,
+			      u32 class, u32 mthd, u32 data)
 {
 	uint32_t min = data & 0xffff, max;
 	uint32_t w = data >> 16;
@@ -725,8 +764,8 @@ nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan, int grclass,
 }
 
 static int
-nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan, int grclass,
-			    int mthd, uint32_t data)
+nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan,
+			    u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -742,8 +781,8 @@ nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan, int grclass,
 }
 
 static int
-nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan, int grclass,
-				    int mthd, uint32_t data)
+nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan,
+				    u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -763,8 +802,8 @@ nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan, int grclass,
 }
 
 static int
-nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan, int grclass,
-			       int mthd, uint32_t data)
+nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan,
+			       u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -778,8 +817,8 @@ nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan, int grclass,
 }
 
 static int
-nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan, int grclass,
-			       int mthd, uint32_t data)
+nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan,
+			       u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -793,8 +832,8 @@ nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan, int grclass,
 }
 
 static int
-nv04_graph_mthd_bind_rop(struct nouveau_channel *chan, int grclass,
-			 int mthd, uint32_t data)
+nv04_graph_mthd_bind_rop(struct nouveau_channel *chan,
+			 u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -808,8 +847,8 @@ nv04_graph_mthd_bind_rop(struct nouveau_channel *chan, int grclass,
 }
 
 static int
-nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan, int grclass,
-			   int mthd, uint32_t data)
+nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan,
+			   u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -823,8 +862,8 @@ nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan, int grclass,
 }
 
 static int
-nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan, int grclass,
-			   int mthd, uint32_t data)
+nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan,
+			   u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -838,8 +877,8 @@ nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan, int grclass,
 }
 
 static int
-nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan,
+			      u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -853,8 +892,8 @@ nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan, int grclass,
 }
 
 static int
-nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan,
+			      u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -868,8 +907,8 @@ nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan, int grclass,
 }
 
 static int
-nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan, int grclass,
-				int mthd, uint32_t data)
+nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan,
+				u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -883,8 +922,8 @@ nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan, int grclass,
 }
 
 static int
-nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan, int grclass,
-			       int mthd, uint32_t data)
+nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan,
+			       u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -898,8 +937,8 @@ nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan, int grclass,
 }
 
 static int
-nv04_graph_mthd_bind_clip(struct nouveau_channel *chan, int grclass,
-			  int mthd, uint32_t data)
+nv04_graph_mthd_bind_clip(struct nouveau_channel *chan,
+			  u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -913,8 +952,8 @@ nv04_graph_mthd_bind_clip(struct nouveau_channel *chan, int grclass,
 }
 
 static int
-nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan, int grclass,
-			    int mthd, uint32_t data)
+nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan,
+			    u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -930,194 +969,346 @@ nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan, int grclass,
 	return 1;
 }
 
-static struct nouveau_pgraph_object_method nv04_graph_mthds_sw[] = {
-	{ 0x0150, nv04_graph_mthd_set_ref },
-	{}
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_gdirect[] = {
-	{ 0x0184, nv04_graph_mthd_bind_nv01_patt },
-	{ 0x0188, nv04_graph_mthd_bind_rop },
-	{ 0x018c, nv04_graph_mthd_bind_beta1 },
-	{ 0x0190, nv04_graph_mthd_bind_surf_dst },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_gdirect[] = {
-	{ 0x0188, nv04_graph_mthd_bind_nv04_patt },
-	{ 0x018c, nv04_graph_mthd_bind_rop },
-	{ 0x0190, nv04_graph_mthd_bind_beta1 },
-	{ 0x0194, nv04_graph_mthd_bind_beta4 },
-	{ 0x0198, nv04_graph_mthd_bind_surf2d },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_imageblit[] = {
-	{ 0x0184, nv04_graph_mthd_bind_chroma },
-	{ 0x0188, nv04_graph_mthd_bind_clip },
-	{ 0x018c, nv04_graph_mthd_bind_nv01_patt },
-	{ 0x0190, nv04_graph_mthd_bind_rop },
-	{ 0x0194, nv04_graph_mthd_bind_beta1 },
-	{ 0x0198, nv04_graph_mthd_bind_surf_dst },
-	{ 0x019c, nv04_graph_mthd_bind_surf_src },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_imageblit_ifc[] = {
-	{ 0x0184, nv04_graph_mthd_bind_chroma },
-	{ 0x0188, nv04_graph_mthd_bind_clip },
-	{ 0x018c, nv04_graph_mthd_bind_nv04_patt },
-	{ 0x0190, nv04_graph_mthd_bind_rop },
-	{ 0x0194, nv04_graph_mthd_bind_beta1 },
-	{ 0x0198, nv04_graph_mthd_bind_beta4 },
-	{ 0x019c, nv04_graph_mthd_bind_surf2d },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_iifc[] = {
-	{ 0x0188, nv04_graph_mthd_bind_chroma },
-	{ 0x018c, nv04_graph_mthd_bind_clip },
-	{ 0x0190, nv04_graph_mthd_bind_nv04_patt },
-	{ 0x0194, nv04_graph_mthd_bind_rop },
-	{ 0x0198, nv04_graph_mthd_bind_beta1 },
-	{ 0x019c, nv04_graph_mthd_bind_beta4 },
-	{ 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf },
-	{ 0x03e4, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_ifc[] = {
-	{ 0x0184, nv04_graph_mthd_bind_chroma },
-	{ 0x0188, nv04_graph_mthd_bind_clip },
-	{ 0x018c, nv04_graph_mthd_bind_nv01_patt },
-	{ 0x0190, nv04_graph_mthd_bind_rop },
-	{ 0x0194, nv04_graph_mthd_bind_beta1 },
-	{ 0x0198, nv04_graph_mthd_bind_surf_dst },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_sifc[] = {
-	{ 0x0184, nv04_graph_mthd_bind_chroma },
-	{ 0x0188, nv04_graph_mthd_bind_nv01_patt },
-	{ 0x018c, nv04_graph_mthd_bind_rop },
-	{ 0x0190, nv04_graph_mthd_bind_beta1 },
-	{ 0x0194, nv04_graph_mthd_bind_surf_dst },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_sifc[] = {
-	{ 0x0184, nv04_graph_mthd_bind_chroma },
-	{ 0x0188, nv04_graph_mthd_bind_nv04_patt },
-	{ 0x018c, nv04_graph_mthd_bind_rop },
-	{ 0x0190, nv04_graph_mthd_bind_beta1 },
-	{ 0x0194, nv04_graph_mthd_bind_beta4 },
-	{ 0x0198, nv04_graph_mthd_bind_surf2d },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_sifm[] = {
-	{ 0x0188, nv04_graph_mthd_bind_nv01_patt },
-	{ 0x018c, nv04_graph_mthd_bind_rop },
-	{ 0x0190, nv04_graph_mthd_bind_beta1 },
-	{ 0x0194, nv04_graph_mthd_bind_surf_dst },
-	{ 0x0304, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_sifm[] = {
-	{ 0x0188, nv04_graph_mthd_bind_nv04_patt },
-	{ 0x018c, nv04_graph_mthd_bind_rop },
-	{ 0x0190, nv04_graph_mthd_bind_beta1 },
-	{ 0x0194, nv04_graph_mthd_bind_beta4 },
-	{ 0x0198, nv04_graph_mthd_bind_surf2d_swzsurf },
-	{ 0x0304, nv04_graph_mthd_set_operation },
-	{},
-};
+static int
+nv04_graph_register(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
 
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_shape[] = {
-	{ 0x0184, nv04_graph_mthd_bind_clip },
-	{ 0x0188, nv04_graph_mthd_bind_nv01_patt },
-	{ 0x018c, nv04_graph_mthd_bind_rop },
-	{ 0x0190, nv04_graph_mthd_bind_beta1 },
-	{ 0x0194, nv04_graph_mthd_bind_surf_dst },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
-	{},
-};
+	if (dev_priv->engine.graph.registered)
+		return 0;
 
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_shape[] = {
-	{ 0x0184, nv04_graph_mthd_bind_clip },
-	{ 0x0188, nv04_graph_mthd_bind_nv04_patt },
-	{ 0x018c, nv04_graph_mthd_bind_rop },
-	{ 0x0190, nv04_graph_mthd_bind_beta1 },
-	{ 0x0194, nv04_graph_mthd_bind_beta4 },
-	{ 0x0198, nv04_graph_mthd_bind_surf2d },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
-	{},
+	/* dvd subpicture */
+	NVOBJ_CLASS(dev, 0x0038, GR);
+
+	/* m2mf */
+	NVOBJ_CLASS(dev, 0x0039, GR);
+
+	/* nv03 gdirect */
+	NVOBJ_CLASS(dev, 0x004b, GR);
+	NVOBJ_MTHD (dev, 0x004b, 0x0184, nv04_graph_mthd_bind_nv01_patt);
+	NVOBJ_MTHD (dev, 0x004b, 0x0188, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x004b, 0x018c, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x004b, 0x0190, nv04_graph_mthd_bind_surf_dst);
+	NVOBJ_MTHD (dev, 0x004b, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv04 gdirect */
+	NVOBJ_CLASS(dev, 0x004a, GR);
+	NVOBJ_MTHD (dev, 0x004a, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+	NVOBJ_MTHD (dev, 0x004a, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x004a, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x004a, 0x0194, nv04_graph_mthd_bind_beta4);
+	NVOBJ_MTHD (dev, 0x004a, 0x0198, nv04_graph_mthd_bind_surf2d);
+	NVOBJ_MTHD (dev, 0x004a, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv01 imageblit */
+	NVOBJ_CLASS(dev, 0x001f, GR);
+	NVOBJ_MTHD (dev, 0x001f, 0x0184, nv04_graph_mthd_bind_chroma);
+	NVOBJ_MTHD (dev, 0x001f, 0x0188, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x001f, 0x018c, nv04_graph_mthd_bind_nv01_patt);
+	NVOBJ_MTHD (dev, 0x001f, 0x0190, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x001f, 0x0194, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x001f, 0x0198, nv04_graph_mthd_bind_surf_dst);
+	NVOBJ_MTHD (dev, 0x001f, 0x019c, nv04_graph_mthd_bind_surf_src);
+	NVOBJ_MTHD (dev, 0x001f, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv04 imageblit */
+	NVOBJ_CLASS(dev, 0x005f, GR);
+	NVOBJ_MTHD (dev, 0x005f, 0x0184, nv04_graph_mthd_bind_chroma);
+	NVOBJ_MTHD (dev, 0x005f, 0x0188, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x005f, 0x018c, nv04_graph_mthd_bind_nv04_patt);
+	NVOBJ_MTHD (dev, 0x005f, 0x0190, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x005f, 0x0194, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x005f, 0x0198, nv04_graph_mthd_bind_beta4);
+	NVOBJ_MTHD (dev, 0x005f, 0x019c, nv04_graph_mthd_bind_surf2d);
+	NVOBJ_MTHD (dev, 0x005f, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv04 iifc */
+	NVOBJ_CLASS(dev, 0x0060, GR);
+	NVOBJ_MTHD (dev, 0x0060, 0x0188, nv04_graph_mthd_bind_chroma);
+	NVOBJ_MTHD (dev, 0x0060, 0x018c, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x0060, 0x0190, nv04_graph_mthd_bind_nv04_patt);
+	NVOBJ_MTHD (dev, 0x0060, 0x0194, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x0060, 0x0198, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x0060, 0x019c, nv04_graph_mthd_bind_beta4);
+	NVOBJ_MTHD (dev, 0x0060, 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf);
+	NVOBJ_MTHD (dev, 0x0060, 0x03e4, nv04_graph_mthd_set_operation);
+
+	/* nv05 iifc */
+	NVOBJ_CLASS(dev, 0x0064, GR);
+
+	/* nv01 ifc */
+	NVOBJ_CLASS(dev, 0x0021, GR);
+	NVOBJ_MTHD (dev, 0x0021, 0x0184, nv04_graph_mthd_bind_chroma);
+	NVOBJ_MTHD (dev, 0x0021, 0x0188, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x0021, 0x018c, nv04_graph_mthd_bind_nv01_patt);
+	NVOBJ_MTHD (dev, 0x0021, 0x0190, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x0021, 0x0194, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x0021, 0x0198, nv04_graph_mthd_bind_surf_dst);
+	NVOBJ_MTHD (dev, 0x0021, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv04 ifc */
+	NVOBJ_CLASS(dev, 0x0061, GR);
+	NVOBJ_MTHD (dev, 0x0061, 0x0184, nv04_graph_mthd_bind_chroma);
+	NVOBJ_MTHD (dev, 0x0061, 0x0188, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x0061, 0x018c, nv04_graph_mthd_bind_nv04_patt);
+	NVOBJ_MTHD (dev, 0x0061, 0x0190, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x0061, 0x0194, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x0061, 0x0198, nv04_graph_mthd_bind_beta4);
+	NVOBJ_MTHD (dev, 0x0061, 0x019c, nv04_graph_mthd_bind_surf2d);
+	NVOBJ_MTHD (dev, 0x0061, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv05 ifc */
+	NVOBJ_CLASS(dev, 0x0065, GR);
+
+	/* nv03 sifc */
+	NVOBJ_CLASS(dev, 0x0036, GR);
+	NVOBJ_MTHD (dev, 0x0036, 0x0184, nv04_graph_mthd_bind_chroma);
+	NVOBJ_MTHD (dev, 0x0036, 0x0188, nv04_graph_mthd_bind_nv01_patt);
+	NVOBJ_MTHD (dev, 0x0036, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x0036, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x0036, 0x0194, nv04_graph_mthd_bind_surf_dst);
+	NVOBJ_MTHD (dev, 0x0036, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv04 sifc */
+	NVOBJ_CLASS(dev, 0x0076, GR);
+	NVOBJ_MTHD (dev, 0x0076, 0x0184, nv04_graph_mthd_bind_chroma);
+	NVOBJ_MTHD (dev, 0x0076, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+	NVOBJ_MTHD (dev, 0x0076, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x0076, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x0076, 0x0194, nv04_graph_mthd_bind_beta4);
+	NVOBJ_MTHD (dev, 0x0076, 0x0198, nv04_graph_mthd_bind_surf2d);
+	NVOBJ_MTHD (dev, 0x0076, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv05 sifc */
+	NVOBJ_CLASS(dev, 0x0066, GR);
+
+	/* nv03 sifm */
+	NVOBJ_CLASS(dev, 0x0037, GR);
+	NVOBJ_MTHD (dev, 0x0037, 0x0188, nv04_graph_mthd_bind_nv01_patt);
+	NVOBJ_MTHD (dev, 0x0037, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x0037, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x0037, 0x0194, nv04_graph_mthd_bind_surf_dst);
+	NVOBJ_MTHD (dev, 0x0037, 0x0304, nv04_graph_mthd_set_operation);
+
+	/* nv04 sifm */
+	NVOBJ_CLASS(dev, 0x0077, GR);
+	NVOBJ_MTHD (dev, 0x0077, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+	NVOBJ_MTHD (dev, 0x0077, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x0077, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x0077, 0x0194, nv04_graph_mthd_bind_beta4);
+	NVOBJ_MTHD (dev, 0x0077, 0x0198, nv04_graph_mthd_bind_surf2d_swzsurf);
+	NVOBJ_MTHD (dev, 0x0077, 0x0304, nv04_graph_mthd_set_operation);
+
+	/* null */
+	NVOBJ_CLASS(dev, 0x0030, GR);
+
+	/* surf2d */
+	NVOBJ_CLASS(dev, 0x0042, GR);
+
+	/* rop */
+	NVOBJ_CLASS(dev, 0x0043, GR);
+
+	/* beta1 */
+	NVOBJ_CLASS(dev, 0x0012, GR);
+
+	/* beta4 */
+	NVOBJ_CLASS(dev, 0x0072, GR);
+
+	/* cliprect */
+	NVOBJ_CLASS(dev, 0x0019, GR);
+
+	/* nv01 pattern */
+	NVOBJ_CLASS(dev, 0x0018, GR);
+
+	/* nv04 pattern */
+	NVOBJ_CLASS(dev, 0x0044, GR);
+
+	/* swzsurf */
+	NVOBJ_CLASS(dev, 0x0052, GR);
+
+	/* surf3d */
+	NVOBJ_CLASS(dev, 0x0053, GR);
+	NVOBJ_MTHD (dev, 0x0053, 0x02f8, nv04_graph_mthd_surf3d_clip_h);
+	NVOBJ_MTHD (dev, 0x0053, 0x02fc, nv04_graph_mthd_surf3d_clip_v);
+
+	/* nv03 tex_tri */
+	NVOBJ_CLASS(dev, 0x0048, GR);
+	NVOBJ_MTHD (dev, 0x0048, 0x0188, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x0048, 0x018c, nv04_graph_mthd_bind_surf_color);
+	NVOBJ_MTHD (dev, 0x0048, 0x0190, nv04_graph_mthd_bind_surf_zeta);
+
+	/* tex_tri */
+	NVOBJ_CLASS(dev, 0x0054, GR);
+
+	/* multitex_tri */
+	NVOBJ_CLASS(dev, 0x0055, GR);
+
+	/* nv01 chroma */
+	NVOBJ_CLASS(dev, 0x0017, GR);
+
+	/* nv04 chroma */
+	NVOBJ_CLASS(dev, 0x0057, GR);
+
+	/* surf_dst */
+	NVOBJ_CLASS(dev, 0x0058, GR);
+
+	/* surf_src */
+	NVOBJ_CLASS(dev, 0x0059, GR);
+
+	/* surf_color */
+	NVOBJ_CLASS(dev, 0x005a, GR);
+
+	/* surf_zeta */
+	NVOBJ_CLASS(dev, 0x005b, GR);
+
+	/* nv01 line */
+	NVOBJ_CLASS(dev, 0x001c, GR);
+	NVOBJ_MTHD (dev, 0x001c, 0x0184, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x001c, 0x0188, nv04_graph_mthd_bind_nv01_patt);
+	NVOBJ_MTHD (dev, 0x001c, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x001c, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x001c, 0x0194, nv04_graph_mthd_bind_surf_dst);
+	NVOBJ_MTHD (dev, 0x001c, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv04 line */
+	NVOBJ_CLASS(dev, 0x005c, GR);
+	NVOBJ_MTHD (dev, 0x005c, 0x0184, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x005c, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+	NVOBJ_MTHD (dev, 0x005c, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x005c, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x005c, 0x0194, nv04_graph_mthd_bind_beta4);
+	NVOBJ_MTHD (dev, 0x005c, 0x0198, nv04_graph_mthd_bind_surf2d);
+	NVOBJ_MTHD (dev, 0x005c, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv01 tri */
+	NVOBJ_CLASS(dev, 0x001d, GR);
+	NVOBJ_MTHD (dev, 0x001d, 0x0184, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x001d, 0x0188, nv04_graph_mthd_bind_nv01_patt);
+	NVOBJ_MTHD (dev, 0x001d, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x001d, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x001d, 0x0194, nv04_graph_mthd_bind_surf_dst);
+	NVOBJ_MTHD (dev, 0x001d, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv04 tri */
+	NVOBJ_CLASS(dev, 0x005d, GR);
+	NVOBJ_MTHD (dev, 0x005d, 0x0184, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x005d, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+	NVOBJ_MTHD (dev, 0x005d, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x005d, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x005d, 0x0194, nv04_graph_mthd_bind_beta4);
+	NVOBJ_MTHD (dev, 0x005d, 0x0198, nv04_graph_mthd_bind_surf2d);
+	NVOBJ_MTHD (dev, 0x005d, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv01 rect */
+	NVOBJ_CLASS(dev, 0x001e, GR);
+	NVOBJ_MTHD (dev, 0x001e, 0x0184, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x001e, 0x0188, nv04_graph_mthd_bind_nv01_patt);
+	NVOBJ_MTHD (dev, 0x001e, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x001e, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x001e, 0x0194, nv04_graph_mthd_bind_surf_dst);
+	NVOBJ_MTHD (dev, 0x001e, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv04 rect */
+	NVOBJ_CLASS(dev, 0x005e, GR);
+	NVOBJ_MTHD (dev, 0x005e, 0x0184, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x005e, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+	NVOBJ_MTHD (dev, 0x005e, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x005e, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x005e, 0x0194, nv04_graph_mthd_bind_beta4);
+	NVOBJ_MTHD (dev, 0x005e, 0x0198, nv04_graph_mthd_bind_surf2d);
+	NVOBJ_MTHD (dev, 0x005e, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nvsw */
+	NVOBJ_CLASS(dev, 0x506e, SW);
+	NVOBJ_MTHD (dev, 0x506e, 0x0150, nv04_graph_mthd_set_ref);
+	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
+
+	dev_priv->engine.graph.registered = true;
+	return 0;
 };
 
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_tex_tri[] = {
-	{ 0x0188, nv04_graph_mthd_bind_clip },
-	{ 0x018c, nv04_graph_mthd_bind_surf_color },
-	{ 0x0190, nv04_graph_mthd_bind_surf_zeta },
-	{},
+static struct nouveau_bitfield nv04_graph_intr[] = {
+	{ NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
+	{}
 };
 
-static struct nouveau_pgraph_object_method nv04_graph_mthds_surf3d[] = {
-	{ 0x02f8, nv04_graph_mthd_surf3d_clip_h },
-	{ 0x02fc, nv04_graph_mthd_surf3d_clip_v },
-	{},
+static struct nouveau_bitfield nv04_graph_nstatus[] =
+{
+	{ NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
+	{ NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
+	{ NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
+	{ NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" },
+	{}
 };
 
-struct nouveau_pgraph_object_class nv04_graph_grclass[] = {
-	{ 0x0038, false, NULL }, /* dvd subpicture */
-	{ 0x0039, false, NULL }, /* m2mf */
-	{ 0x004b, false, nv04_graph_mthds_nv03_gdirect }, /* nv03 gdirect */
-	{ 0x004a, false, nv04_graph_mthds_nv04_gdirect }, /* nv04 gdirect */
-	{ 0x001f, false, nv04_graph_mthds_nv01_imageblit }, /* nv01 imageblit */
-	{ 0x005f, false, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 imageblit */
-	{ 0x0060, false, nv04_graph_mthds_nv04_iifc }, /* nv04 iifc */
-	{ 0x0064, false, NULL }, /* nv05 iifc */
-	{ 0x0021, false, nv04_graph_mthds_nv01_ifc }, /* nv01 ifc */
-	{ 0x0061, false, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 ifc */
-	{ 0x0065, false, NULL }, /* nv05 ifc */
-	{ 0x0036, false, nv04_graph_mthds_nv03_sifc }, /* nv03 sifc */
-	{ 0x0076, false, nv04_graph_mthds_nv04_sifc }, /* nv04 sifc */
-	{ 0x0066, false, NULL }, /* nv05 sifc */
-	{ 0x0037, false, nv04_graph_mthds_nv03_sifm }, /* nv03 sifm */
-	{ 0x0077, false, nv04_graph_mthds_nv04_sifm }, /* nv04 sifm */
-	{ 0x0030, false, NULL }, /* null */
-	{ 0x0042, false, NULL }, /* surf2d */
-	{ 0x0043, false, NULL }, /* rop */
-	{ 0x0012, false, NULL }, /* beta1 */
-	{ 0x0072, false, NULL }, /* beta4 */
-	{ 0x0019, false, NULL }, /* cliprect */
-	{ 0x0018, false, NULL }, /* nv01 pattern */
-	{ 0x0044, false, NULL }, /* nv04 pattern */
-	{ 0x0052, false, NULL }, /* swzsurf */
-	{ 0x0053, false, nv04_graph_mthds_surf3d }, /* surf3d */
-	{ 0x0048, false, nv04_graph_mthds_nv03_tex_tri }, /* nv03 tex_tri */
-	{ 0x0054, false, NULL }, /* tex_tri */
-	{ 0x0055, false, NULL }, /* multitex_tri */
-	{ 0x0017, false, NULL }, /* nv01 chroma */
-	{ 0x0057, false, NULL }, /* nv04 chroma */
-	{ 0x0058, false, NULL }, /* surf_dst */
-	{ 0x0059, false, NULL }, /* surf_src */
-	{ 0x005a, false, NULL }, /* surf_color */
-	{ 0x005b, false, NULL }, /* surf_zeta */
-	{ 0x001c, false, nv04_graph_mthds_nv01_shape }, /* nv01 line */
-	{ 0x005c, false, nv04_graph_mthds_nv04_shape }, /* nv04 line */
-	{ 0x001d, false, nv04_graph_mthds_nv01_shape }, /* nv01 tri */
-	{ 0x005d, false, nv04_graph_mthds_nv04_shape }, /* nv04 tri */
-	{ 0x001e, false, nv04_graph_mthds_nv01_shape }, /* nv01 rect */
-	{ 0x005e, false, nv04_graph_mthds_nv04_shape }, /* nv04 rect */
-	{ 0x506e, true, nv04_graph_mthds_sw },
+struct nouveau_bitfield nv04_graph_nsource[] =
+{
+	{ NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" },
+	{ NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" },
+	{ NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" },
+	{ NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" },
+	{ NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" },
+	{ NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" },
+	{ NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" },
+	{ NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" },
+	{ NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, "DMA_W_PROTECTION" },
+	{ NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" },
+	{ NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, "PATCH_EXCEPTION" },
+	{ NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" },
+	{ NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" },
+	{ NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" },
+	{ NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" },
+	{ NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" },
+	{ NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
+	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" },
+	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" },
 	{}
 };
 
+static void
+nv04_graph_isr(struct drm_device *dev)
+{
+	u32 stat;
+
+	while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
+		u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
+		u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
+		u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
+		u32 chid = (addr & 0x0f000000) >> 24;
+		u32 subc = (addr & 0x0000e000) >> 13;
+		u32 mthd = (addr & 0x00001ffc);
+		u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
+		u32 class = nv_rd32(dev, 0x400180 + subc * 4) & 0xff;
+		u32 show = stat;
+
+		if (stat & NV_PGRAPH_INTR_NOTIFY) {
+			if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
+				if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
+					show &= ~NV_PGRAPH_INTR_NOTIFY;
+			}
+		}
+
+		if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
+			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
+			stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+			show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+			nv04_graph_context_switch(dev);
+		}
+
+		nv_wr32(dev, NV03_PGRAPH_INTR, stat);
+		nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
+
+		if (show && nouveau_ratelimit()) {
+			NV_INFO(dev, "PGRAPH -");
+			nouveau_bitfield_print(nv04_graph_intr, show);
+			printk(" nsource:");
+			nouveau_bitfield_print(nv04_graph_nsource, nsource);
+			printk(" nstatus:");
+			nouveau_bitfield_print(nv04_graph_nstatus, nstatus);
+			printk("\n");
+			NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
+				     "mthd 0x%04x data 0x%08x\n",
+				chid, subc, class, mthd, data);
+		}
+	}
+}
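
With nv04_graph_register(), object classes and their software methods are declared through NVOBJ_CLASS()/NVOBJ_MTHD() instead of the static nouveau_pgraph_object_method tables, and the new ISR pretty-prints error state through nouveau_bitfield tables. The decode helper lives in nouveau_util.c, which is not shown in this excerpt; a plausible minimal form, matching the empty-entry terminator used by the tables above:

	struct nouveau_bitfield {
		u32 mask;
		const char *name;
	};

	void
	nouveau_bitfield_print(const struct nouveau_bitfield *bf, u32 value)
	{
		while (bf->name) {
			if (value & bf->mask) {
				printk(" %s", bf->name);
				value &= ~bf->mask;
			}
			bf++;
		}
		if (value)
			printk(" (unknown bits 0x%08x)", value);
	}
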
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c
index 0b5ae297abde..b8e3edb5c063 100644
--- a/drivers/gpu/drm/nouveau/nv04_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv04_instmem.c
@@ -98,42 +98,66 @@ nv04_instmem_takedown(struct drm_device *dev)
 }
 
 int
-nv04_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
-		      uint32_t *sz)
+nv04_instmem_suspend(struct drm_device *dev)
 {
 	return 0;
 }
 
 void
-nv04_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
-{
-}
-
-int
-nv04_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+nv04_instmem_resume(struct drm_device *dev)
 {
-	return 0;
 }
 
 int
-nv04_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+nv04_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
 {
+	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
+	struct drm_mm_node *ramin = NULL;
+
+	do {
+		if (drm_mm_pre_get(&dev_priv->ramin_heap))
+			return -ENOMEM;
+
+		spin_lock(&dev_priv->ramin_lock);
+		ramin = drm_mm_search_free(&dev_priv->ramin_heap, size, align, 0);
+		if (ramin == NULL) {
+			spin_unlock(&dev_priv->ramin_lock);
+			return -ENOMEM;
+		}
+
+		ramin = drm_mm_get_block_atomic(ramin, size, align);
+		spin_unlock(&dev_priv->ramin_lock);
+	} while (ramin == NULL);
+
+	gpuobj->node = ramin;
+	gpuobj->vinst = ramin->start;
 	return 0;
 }
 
 void
-nv04_instmem_flush(struct drm_device *dev)
+nv04_instmem_put(struct nouveau_gpuobj *gpuobj)
 {
+	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
+
+	spin_lock(&dev_priv->ramin_lock);
+	drm_mm_put_block(gpuobj->node);
+	gpuobj->node = NULL;
+	spin_unlock(&dev_priv->ramin_lock);
 }
 
 int
-nv04_instmem_suspend(struct drm_device *dev)
+nv04_instmem_map(struct nouveau_gpuobj *gpuobj)
 {
+	gpuobj->pinst = gpuobj->vinst;
 	return 0;
 }
 
 void
-nv04_instmem_resume(struct drm_device *dev)
+nv04_instmem_unmap(struct nouveau_gpuobj *gpuobj)
 {
 }
 
+void
+nv04_instmem_flush(struct drm_device *dev)
+{
+}
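
The new allocator pairs drm_mm_pre_get(), which tops up the drm_mm free-node cache and may sleep, with drm_mm_get_block_atomic(), which can then run safely under ramin_lock; the loop only repeats when the atomic grab loses a race. A usage sketch (the 4 KiB size and 16-byte alignment are arbitrary example values):

	/* Carve an instance-memory block for a gpuobj, map it, release it. */
	ret = nv04_instmem_get(gpuobj, 0x1000, 16);
	if (ret)
		return ret;

	nv04_instmem_map(gpuobj);	/* on NV04 pinst simply mirrors vinst */
	/* ... use gpuobj ... */
	nv04_instmem_unmap(gpuobj);
	nv04_instmem_put(gpuobj);
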
diff --git a/drivers/gpu/drm/nouveau/nv10_fb.c b/drivers/gpu/drm/nouveau/nv10_fb.c
index cc5cda44e501..f78181a59b4a 100644
--- a/drivers/gpu/drm/nouveau/nv10_fb.c
+++ b/drivers/gpu/drm/nouveau/nv10_fb.c
@@ -3,23 +3,109 @@
3#include "nouveau_drv.h" 3#include "nouveau_drv.h"
4#include "nouveau_drm.h" 4#include "nouveau_drm.h"
5 5
6static struct drm_mm_node *
7nv20_fb_alloc_tag(struct drm_device *dev, uint32_t size)
8{
9 struct drm_nouveau_private *dev_priv = dev->dev_private;
10 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
11 struct drm_mm_node *mem;
12 int ret;
13
14 ret = drm_mm_pre_get(&pfb->tag_heap);
15 if (ret)
16 return NULL;
17
18 spin_lock(&dev_priv->tile.lock);
19 mem = drm_mm_search_free(&pfb->tag_heap, size, 0, 0);
20 if (mem)
21 mem = drm_mm_get_block_atomic(mem, size, 0);
22 spin_unlock(&dev_priv->tile.lock);
23
24 return mem;
25}
26
27static void
28nv20_fb_free_tag(struct drm_device *dev, struct drm_mm_node *mem)
29{
30 struct drm_nouveau_private *dev_priv = dev->dev_private;
31
32 spin_lock(&dev_priv->tile.lock);
33 drm_mm_put_block(mem);
34 spin_unlock(&dev_priv->tile.lock);
35}
36
37void
38nv10_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr,
39 uint32_t size, uint32_t pitch, uint32_t flags)
40{
41 struct drm_nouveau_private *dev_priv = dev->dev_private;
42 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
43 int bpp = (flags & NOUVEAU_GEM_TILE_32BPP ? 32 : 16);
44
45 tile->addr = addr;
46 tile->limit = max(1u, addr + size) - 1;
47 tile->pitch = pitch;
48
49 if (dev_priv->card_type == NV_20) {
50 if (flags & NOUVEAU_GEM_TILE_ZETA) {
51 /*
52 * Allocate some of the on-die tag memory,
53 * used to store Z compression meta-data (most
54 * likely just a bitmap determining if a given
55 * tile is compressed or not).
56 */
57 tile->tag_mem = nv20_fb_alloc_tag(dev, size / 256);
58
59 if (tile->tag_mem) {
60 /* Enable Z compression */
61 if (dev_priv->chipset >= 0x25)
62 tile->zcomp = tile->tag_mem->start |
63 (bpp == 16 ?
64 NV25_PFB_ZCOMP_MODE_16 :
65 NV25_PFB_ZCOMP_MODE_32);
66 else
67 tile->zcomp = tile->tag_mem->start |
68 NV20_PFB_ZCOMP_EN |
69 (bpp == 16 ? 0 :
70 NV20_PFB_ZCOMP_MODE_32);
71 }
72
73 tile->addr |= 3;
74 } else {
75 tile->addr |= 1;
76 }
77
78 } else {
79 tile->addr |= 1 << 31;
80 }
81}
82
6void 83void
7nv10_fb_set_region_tiling(struct drm_device *dev, int i, uint32_t addr, 84nv10_fb_free_tile_region(struct drm_device *dev, int i)
8 uint32_t size, uint32_t pitch)
9{ 85{
10 struct drm_nouveau_private *dev_priv = dev->dev_private; 86 struct drm_nouveau_private *dev_priv = dev->dev_private;
11 uint32_t limit = max(1u, addr + size) - 1; 87 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
12 88
13 if (pitch) { 89 if (tile->tag_mem) {
14 if (dev_priv->card_type >= NV_20) 90 nv20_fb_free_tag(dev, tile->tag_mem);
15 addr |= 1; 91 tile->tag_mem = NULL;
16 else
17 addr |= 1 << 31;
18 } 92 }
19 93
20 nv_wr32(dev, NV10_PFB_TLIMIT(i), limit); 94 tile->addr = tile->limit = tile->pitch = tile->zcomp = 0;
21 nv_wr32(dev, NV10_PFB_TSIZE(i), pitch); 95}
22 nv_wr32(dev, NV10_PFB_TILE(i), addr); 96
97void
98nv10_fb_set_tile_region(struct drm_device *dev, int i)
99{
100 struct drm_nouveau_private *dev_priv = dev->dev_private;
101 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
102
103 nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit);
104 nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch);
105 nv_wr32(dev, NV10_PFB_TILE(i), tile->addr);
106
107 if (dev_priv->card_type == NV_20)
108 nv_wr32(dev, NV20_PFB_ZCOMP(i), tile->zcomp);
23} 109}
24 110
25int 111int
@@ -31,9 +117,14 @@ nv10_fb_init(struct drm_device *dev)
31 117
32 pfb->num_tiles = NV10_PFB_TILE__SIZE; 118 pfb->num_tiles = NV10_PFB_TILE__SIZE;
33 119
120 if (dev_priv->card_type == NV_20)
121 drm_mm_init(&pfb->tag_heap, 0,
122 (dev_priv->chipset >= 0x25 ?
123 64 * 1024 : 32 * 1024));
124
34 /* Turn all the tiling regions off. */ 125 /* Turn all the tiling regions off. */
35 for (i = 0; i < pfb->num_tiles; i++) 126 for (i = 0; i < pfb->num_tiles; i++)
36		pfb->set_region_tiling(dev, i, 0, 0, 0);
127		pfb->set_tile_region(dev, i);
37 128
38 return 0; 129 return 0;
39} 130}
@@ -41,4 +132,13 @@ nv10_fb_init(struct drm_device *dev)
41void 132void
42nv10_fb_takedown(struct drm_device *dev) 133nv10_fb_takedown(struct drm_device *dev)
43{ 134{
135 struct drm_nouveau_private *dev_priv = dev->dev_private;
136 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
137 int i;
138
139 for (i = 0; i < pfb->num_tiles; i++)
140 pfb->free_tile_region(dev, i);
141
142 if (dev_priv->card_type == NV_20)
143 drm_mm_takedown(&pfb->tag_heap);
44} 144}
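
The 256:1 ratio in nv20_fb_alloc_tag(dev, size / 256) pairs with the tag-heap sizes chosen in nv10_fb_init(). A rough worked example (the surface size is made up for illustration):

	/* One byte of tag memory covers 256 bytes of zeta surface, so a
	 * 16 MiB depth buffer needs 16777216 / 256 = 65536 bytes = 64 KiB
	 * of tags -- the entire heap drm_mm_init()'d above on chipset
	 * >= 0x25, and twice the 32 KiB heap of earlier NV20 parts. */
	uint32_t tag_bytes = size / 256;
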
diff --git a/drivers/gpu/drm/nouveau/nv10_fifo.c b/drivers/gpu/drm/nouveau/nv10_fifo.c
index f1b03ad58fd5..d2ecbff4bee1 100644
--- a/drivers/gpu/drm/nouveau/nv10_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv10_fifo.c
@@ -53,6 +53,11 @@ nv10_fifo_create_context(struct nouveau_channel *chan)
53 if (ret) 53 if (ret)
54 return ret; 54 return ret;
55 55
56 chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
57 NV03_USER(chan->id), PAGE_SIZE);
58 if (!chan->user)
59 return -ENOMEM;
60
56 /* Fill entries that are seen filled in dumps of nvidia driver just 61 /* Fill entries that are seen filled in dumps of nvidia driver just
57 * after channel's is put into DMA mode 62 * after channel's is put into DMA mode
58 */ 63 */
@@ -73,17 +78,6 @@ nv10_fifo_create_context(struct nouveau_channel *chan)
73 return 0; 78 return 0;
74} 79}
75 80
76void
77nv10_fifo_destroy_context(struct nouveau_channel *chan)
78{
79 struct drm_device *dev = chan->dev;
80
81 nv_wr32(dev, NV04_PFIFO_MODE,
82 nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
83
84 nouveau_gpuobj_ref(NULL, &chan->ramfc);
85}
86
87static void 81static void
88nv10_fifo_do_load_context(struct drm_device *dev, int chid) 82nv10_fifo_do_load_context(struct drm_device *dev, int chid)
89{ 83{
@@ -219,6 +213,7 @@ nv10_fifo_init_ramxx(struct drm_device *dev)
219static void 213static void
220nv10_fifo_init_intr(struct drm_device *dev) 214nv10_fifo_init_intr(struct drm_device *dev)
221{ 215{
216 nouveau_irq_register(dev, 8, nv04_fifo_isr);
222 nv_wr32(dev, 0x002100, 0xffffffff); 217 nv_wr32(dev, 0x002100, 0xffffffff);
223 nv_wr32(dev, 0x002140, 0xffffffff); 218 nv_wr32(dev, 0x002140, 0xffffffff);
224} 219}
@@ -241,7 +236,7 @@ nv10_fifo_init(struct drm_device *dev)
241 pfifo->reassign(dev, true); 236 pfifo->reassign(dev, true);
242 237
243 for (i = 0; i < dev_priv->engine.fifo.channels; i++) { 238 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
244		if (dev_priv->fifos[i]) {
239		if (dev_priv->channels.ptr[i]) {
245 uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE); 240 uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
246 nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i)); 241 nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
247 } 242 }
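
nv10_fifo_create_context() now maps the channel's USER control window itself and fails context creation if the BAR0 mapping cannot be established. A sketch of the pairing this implies (the unmap side is an assumption here -- the matching iounmap() is not in the hunks shown and presumably lives in the common channel-teardown path):

	/* create: map the per-channel USER window (from the hunk above) */
	chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
			     NV03_USER(chan->id), PAGE_SIZE);
	if (!chan->user)
		return -ENOMEM;

	/* destroy: assumed counterpart, not shown in this diff */
	if (chan->user) {
		iounmap(chan->user);
		chan->user = NULL;
	}
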
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c
index 8e68c9731159..8c92edb7bbcd 100644
--- a/drivers/gpu/drm/nouveau/nv10_graph.c
+++ b/drivers/gpu/drm/nouveau/nv10_graph.c
@@ -26,6 +26,10 @@
26#include "drm.h" 26#include "drm.h"
27#include "nouveau_drm.h" 27#include "nouveau_drm.h"
28#include "nouveau_drv.h" 28#include "nouveau_drv.h"
29#include "nouveau_util.h"
30
31static int nv10_graph_register(struct drm_device *);
32static void nv10_graph_isr(struct drm_device *);
29 33
30#define NV10_FIFO_NUMBER 32 34#define NV10_FIFO_NUMBER 32
31 35
@@ -786,15 +790,13 @@ nv10_graph_unload_context(struct drm_device *dev)
786 return 0; 790 return 0;
787} 791}
788 792
789void
793static void
790nv10_graph_context_switch(struct drm_device *dev) 794nv10_graph_context_switch(struct drm_device *dev)
791{ 795{
792 struct drm_nouveau_private *dev_priv = dev->dev_private; 796 struct drm_nouveau_private *dev_priv = dev->dev_private;
793 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
794 struct nouveau_channel *chan = NULL; 797 struct nouveau_channel *chan = NULL;
795 int chid; 798 int chid;
796 799
797 pgraph->fifo_access(dev, false);
798 nouveau_wait_for_idle(dev); 800 nouveau_wait_for_idle(dev);
799 801
800 /* If previous context is valid, we need to save it */ 802 /* If previous context is valid, we need to save it */
@@ -802,11 +804,9 @@ nv10_graph_context_switch(struct drm_device *dev)
802 804
803 /* Load context for next channel */ 805 /* Load context for next channel */
804 chid = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f; 806 chid = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
805	chan = dev_priv->fifos[chid];
807	chan = dev_priv->channels.ptr[chid];
806 if (chan && chan->pgraph_ctx) 808 if (chan && chan->pgraph_ctx)
807 nv10_graph_load_context(chan); 809 nv10_graph_load_context(chan);
808
809 pgraph->fifo_access(dev, true);
810} 810}
811 811
812#define NV_WRITE_CTX(reg, val) do { \ 812#define NV_WRITE_CTX(reg, val) do { \
@@ -833,7 +833,7 @@ nv10_graph_channel(struct drm_device *dev)
833 if (chid >= dev_priv->engine.fifo.channels) 833 if (chid >= dev_priv->engine.fifo.channels)
834 return NULL; 834 return NULL;
835 835
836	return dev_priv->fifos[chid];
836	return dev_priv->channels.ptr[chid];
837} 837}
838 838
839int nv10_graph_create_context(struct nouveau_channel *chan) 839int nv10_graph_create_context(struct nouveau_channel *chan)
@@ -875,37 +875,54 @@ int nv10_graph_create_context(struct nouveau_channel *chan)
875 875
876void nv10_graph_destroy_context(struct nouveau_channel *chan) 876void nv10_graph_destroy_context(struct nouveau_channel *chan)
877{ 877{
878 struct drm_device *dev = chan->dev;
879 struct drm_nouveau_private *dev_priv = dev->dev_private;
880 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
878 struct graph_state *pgraph_ctx = chan->pgraph_ctx; 881 struct graph_state *pgraph_ctx = chan->pgraph_ctx;
882 unsigned long flags;
883
884 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
885 pgraph->fifo_access(dev, false);
886
887 /* Unload the context if it's the currently active one */
888 if (pgraph->channel(dev) == chan)
889 pgraph->unload_context(dev);
879 890
891 /* Free the context resources */
880 kfree(pgraph_ctx); 892 kfree(pgraph_ctx);
881 chan->pgraph_ctx = NULL; 893 chan->pgraph_ctx = NULL;
894
895 pgraph->fifo_access(dev, true);
896 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
882} 897}
883 898
884void 899void
885nv10_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
886			     uint32_t size, uint32_t pitch)
900nv10_graph_set_tile_region(struct drm_device *dev, int i)
887{ 901{
888	uint32_t limit = max(1u, addr + size) - 1;
889
890	if (pitch)
891		addr |= 1 << 31;
902	struct drm_nouveau_private *dev_priv = dev->dev_private;
903	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
892 904
893	nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), limit);
894	nv_wr32(dev, NV10_PGRAPH_TSIZE(i), pitch);
895	nv_wr32(dev, NV10_PGRAPH_TILE(i), addr);
905	nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), tile->limit);
906	nv_wr32(dev, NV10_PGRAPH_TSIZE(i), tile->pitch);
907	nv_wr32(dev, NV10_PGRAPH_TILE(i), tile->addr);
896} 908}
897 909
898int nv10_graph_init(struct drm_device *dev) 910int nv10_graph_init(struct drm_device *dev)
899{ 911{
900 struct drm_nouveau_private *dev_priv = dev->dev_private; 912 struct drm_nouveau_private *dev_priv = dev->dev_private;
901 uint32_t tmp; 913 uint32_t tmp;
902	int i;
914	int ret, i;
903 915
904 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & 916 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
905 ~NV_PMC_ENABLE_PGRAPH); 917 ~NV_PMC_ENABLE_PGRAPH);
906 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | 918 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
907 NV_PMC_ENABLE_PGRAPH); 919 NV_PMC_ENABLE_PGRAPH);
908 920
921 ret = nv10_graph_register(dev);
922 if (ret)
923 return ret;
924
925 nouveau_irq_register(dev, 12, nv10_graph_isr);
909 nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); 926 nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
910 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); 927 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
911 928
@@ -928,7 +945,7 @@ int nv10_graph_init(struct drm_device *dev)
928 945
929 /* Turn all the tiling regions off. */ 946 /* Turn all the tiling regions off. */
930 for (i = 0; i < NV10_PFB_TILE__SIZE; i++) 947 for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
931		nv10_graph_set_region_tiling(dev, i, 0, 0, 0);
948		nv10_graph_set_tile_region(dev, i);
932 949
933 nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000); 950 nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000);
934 nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000); 951 nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000);
@@ -948,17 +965,17 @@ int nv10_graph_init(struct drm_device *dev)
948 965
949void nv10_graph_takedown(struct drm_device *dev) 966void nv10_graph_takedown(struct drm_device *dev)
950{ 967{
968 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
969 nouveau_irq_unregister(dev, 12);
951} 970}
952 971
953static int 972static int
954nv17_graph_mthd_lma_window(struct nouveau_channel *chan, int grclass,
955			   int mthd, uint32_t data)
973nv17_graph_mthd_lma_window(struct nouveau_channel *chan,
974			   u32 class, u32 mthd, u32 data)
956{ 975{
957 struct drm_device *dev = chan->dev; 976 struct drm_device *dev = chan->dev;
958 struct graph_state *ctx = chan->pgraph_ctx; 977 struct graph_state *ctx = chan->pgraph_ctx;
959 struct pipe_state *pipe = &ctx->pipe_state; 978 struct pipe_state *pipe = &ctx->pipe_state;
960 struct drm_nouveau_private *dev_priv = dev->dev_private;
961 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
962 uint32_t pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3]; 979 uint32_t pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3];
963 uint32_t xfmode0, xfmode1; 980 uint32_t xfmode0, xfmode1;
964 int i; 981 int i;
@@ -1025,18 +1042,14 @@ nv17_graph_mthd_lma_window(struct nouveau_channel *chan, int grclass,
1025 1042
1026 nouveau_wait_for_idle(dev); 1043 nouveau_wait_for_idle(dev);
1027 1044
1028 pgraph->fifo_access(dev, true);
1029
1030 return 0; 1045 return 0;
1031} 1046}
1032 1047
1033static int 1048static int
1034nv17_graph_mthd_lma_enable(struct nouveau_channel *chan, int grclass,
1035			   int mthd, uint32_t data)
1049nv17_graph_mthd_lma_enable(struct nouveau_channel *chan,
1050			   u32 class, u32 mthd, u32 data)
1036{ 1051{
1037 struct drm_device *dev = chan->dev; 1052 struct drm_device *dev = chan->dev;
1038 struct drm_nouveau_private *dev_priv = dev->dev_private;
1039 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
1040 1053
1041 nouveau_wait_for_idle(dev); 1054 nouveau_wait_for_idle(dev);
1042 1055
@@ -1045,40 +1058,118 @@ nv17_graph_mthd_lma_enable(struct nouveau_channel *chan, int grclass,
1045 nv_wr32(dev, 0x004006b0, 1058 nv_wr32(dev, 0x004006b0,
1046 nv_rd32(dev, 0x004006b0) | 0x8 << 24); 1059 nv_rd32(dev, 0x004006b0) | 0x8 << 24);
1047 1060
1048	pgraph->fifo_access(dev, true);
1061	return 0;
1062}
1063
1064static int
1065nv10_graph_register(struct drm_device *dev)
1066{
1067 struct drm_nouveau_private *dev_priv = dev->dev_private;
1068
1069 if (dev_priv->engine.graph.registered)
1070 return 0;
1071
1072 NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
1073 NVOBJ_CLASS(dev, 0x0030, GR); /* null */
1074 NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
1075 NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
1076 NVOBJ_CLASS(dev, 0x005f, GR); /* imageblit */
1077 NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
1078 NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
1079 NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
1080 NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
1081 NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
1082 NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
1083 NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
1084 NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
1085 NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
1086 NVOBJ_CLASS(dev, 0x0052, GR); /* swzsurf */
1087 NVOBJ_CLASS(dev, 0x0093, GR); /* surf3d */
1088 NVOBJ_CLASS(dev, 0x0094, GR); /* tex_tri */
1089 NVOBJ_CLASS(dev, 0x0095, GR); /* multitex_tri */
1090
1091 /* celcius */
1092 if (dev_priv->chipset <= 0x10) {
1093 NVOBJ_CLASS(dev, 0x0056, GR);
1094 } else
1095 if (dev_priv->chipset < 0x17 || dev_priv->chipset == 0x1a) {
1096 NVOBJ_CLASS(dev, 0x0096, GR);
1097 } else {
1098 NVOBJ_CLASS(dev, 0x0099, GR);
1099 NVOBJ_MTHD (dev, 0x0099, 0x1638, nv17_graph_mthd_lma_window);
1100 NVOBJ_MTHD (dev, 0x0099, 0x163c, nv17_graph_mthd_lma_window);
1101 NVOBJ_MTHD (dev, 0x0099, 0x1640, nv17_graph_mthd_lma_window);
1102 NVOBJ_MTHD (dev, 0x0099, 0x1644, nv17_graph_mthd_lma_window);
1103 NVOBJ_MTHD (dev, 0x0099, 0x1658, nv17_graph_mthd_lma_enable);
1104 }
1049 1105
1106 /* nvsw */
1107 NVOBJ_CLASS(dev, 0x506e, SW);
1108 NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
1109
1110 dev_priv->engine.graph.registered = true;
1050 return 0; 1111 return 0;
1051} 1112}
1052 1113
1053static struct nouveau_pgraph_object_method nv17_graph_celsius_mthds[] = {
1054	{ 0x1638, nv17_graph_mthd_lma_window },
1055	{ 0x163c, nv17_graph_mthd_lma_window },
1056	{ 0x1640, nv17_graph_mthd_lma_window },
1057	{ 0x1644, nv17_graph_mthd_lma_window },
1058	{ 0x1658, nv17_graph_mthd_lma_enable },
1114struct nouveau_bitfield nv10_graph_intr[] = {
1115	{ NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
1116	{ NV_PGRAPH_INTR_ERROR,  "ERROR"  },
1059	{} 1117	{}
1060}; 1118};
1061 1119
1062struct nouveau_pgraph_object_class nv10_graph_grclass[] = {
1063	{ 0x0030, false, NULL }, /* null */
1064	{ 0x0039, false, NULL }, /* m2mf */
1065	{ 0x004a, false, NULL }, /* gdirect */
1066	{ 0x005f, false, NULL }, /* imageblit */
1067	{ 0x009f, false, NULL }, /* imageblit (nv12) */
1068	{ 0x008a, false, NULL }, /* ifc */
1069	{ 0x0089, false, NULL }, /* sifm */
1070	{ 0x0062, false, NULL }, /* surf2d */
1071	{ 0x0043, false, NULL }, /* rop */
1072	{ 0x0012, false, NULL }, /* beta1 */
1073	{ 0x0072, false, NULL }, /* beta4 */
1074	{ 0x0019, false, NULL }, /* cliprect */
1075	{ 0x0044, false, NULL }, /* pattern */
1076	{ 0x0052, false, NULL }, /* swzsurf */
1077	{ 0x0093, false, NULL }, /* surf3d */
1078	{ 0x0094, false, NULL }, /* tex_tri */
1079	{ 0x0095, false, NULL }, /* multitex_tri */
1080	{ 0x0056, false, NULL }, /* celcius (nv10) */
1081	{ 0x0096, false, NULL }, /* celcius (nv11) */
1082	{ 0x0099, false, nv17_graph_celsius_mthds }, /* celcius (nv17) */
1120struct nouveau_bitfield nv10_graph_nstatus[] =
1121{
1122	{ NV10_PGRAPH_NSTATUS_STATE_IN_USE,       "STATE_IN_USE" },
1123	{ NV10_PGRAPH_NSTATUS_INVALID_STATE,      "INVALID_STATE" },
1124	{ NV10_PGRAPH_NSTATUS_BAD_ARGUMENT,       "BAD_ARGUMENT" },
1125	{ NV10_PGRAPH_NSTATUS_PROTECTION_FAULT,   "PROTECTION_FAULT" },
1083	{} 1126	{}
1084}; 1127};
1128
1129static void
1130nv10_graph_isr(struct drm_device *dev)
1131{
1132 u32 stat;
1133
1134 while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
1135 u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
1136 u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
1137 u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
1138 u32 chid = (addr & 0x01f00000) >> 20;
1139 u32 subc = (addr & 0x00070000) >> 16;
1140 u32 mthd = (addr & 0x00001ffc);
1141 u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
1142 u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xfff;
1143 u32 show = stat;
1144
1145 if (stat & NV_PGRAPH_INTR_ERROR) {
1146 if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
1147 if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
1148 show &= ~NV_PGRAPH_INTR_ERROR;
1149 }
1150 }
1151
1152 if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
1153 nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
1154 stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
1155 show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
1156 nv10_graph_context_switch(dev);
1157 }
1158
1159 nv_wr32(dev, NV03_PGRAPH_INTR, stat);
1160 nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
1161
1162 if (show && nouveau_ratelimit()) {
1163 NV_INFO(dev, "PGRAPH -");
1164 nouveau_bitfield_print(nv10_graph_intr, show);
1165 printk(" nsource:");
1166 nouveau_bitfield_print(nv04_graph_nsource, nsource);
1167 printk(" nstatus:");
1168 nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
1169 printk("\n");
1170 NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
1171 "mthd 0x%04x data 0x%08x\n",
1172 chid, subc, class, mthd, data);
1173 }
1174 }
1175}
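
nv10_graph_isr() decodes NV04_PGRAPH_TRAPPED_ADDR into channel, subchannel, and method fields before dispatching. The same decode, restated as a standalone helper for clarity (illustrative only, not part of the patch):

	static inline void
	pgraph_decode_trapped_addr(u32 addr, u32 *chid, u32 *subc, u32 *mthd)
	{
		*chid = (addr & 0x01f00000) >> 20;	/* channel id, bits 20-24 */
		*subc = (addr & 0x00070000) >> 16;	/* subchannel, bits 16-18 */
		*mthd = (addr & 0x00001ffc);		/* method offset, word aligned */
	}
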
diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c
index 12ab9cd56eca..8464b76798d5 100644
--- a/drivers/gpu/drm/nouveau/nv20_graph.c
+++ b/drivers/gpu/drm/nouveau/nv20_graph.c
@@ -32,6 +32,10 @@
32#define NV34_GRCTX_SIZE (18140) 32#define NV34_GRCTX_SIZE (18140)
33#define NV35_36_GRCTX_SIZE (22396) 33#define NV35_36_GRCTX_SIZE (22396)
34 34
35static int nv20_graph_register(struct drm_device *);
36static int nv30_graph_register(struct drm_device *);
37static void nv20_graph_isr(struct drm_device *);
38
35static void 39static void
36nv20_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) 40nv20_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
37{ 41{
@@ -425,9 +429,21 @@ nv20_graph_destroy_context(struct nouveau_channel *chan)
425 struct drm_device *dev = chan->dev; 429 struct drm_device *dev = chan->dev;
426 struct drm_nouveau_private *dev_priv = dev->dev_private; 430 struct drm_nouveau_private *dev_priv = dev->dev_private;
427 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; 431 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
432 unsigned long flags;
428 433
429	nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
434	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
435 pgraph->fifo_access(dev, false);
436
437 /* Unload the context if it's the currently active one */
438 if (pgraph->channel(dev) == chan)
439 pgraph->unload_context(dev);
440
441 pgraph->fifo_access(dev, true);
442 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
443
444 /* Free the context resources */
430 nv_wo32(pgraph->ctx_table, chan->id * 4, 0); 445 nv_wo32(pgraph->ctx_table, chan->id * 4, 0);
446 nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
431} 447}
432 448
433int 449int
@@ -496,24 +512,27 @@ nv20_graph_rdi(struct drm_device *dev)
496} 512}
497 513
498void 514void
499nv20_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
500			     uint32_t size, uint32_t pitch)
515nv20_graph_set_tile_region(struct drm_device *dev, int i)
501{ 516{
502	uint32_t limit = max(1u, addr + size) - 1;
503
504	if (pitch)
505		addr |= 1;
517	struct drm_nouveau_private *dev_priv = dev->dev_private;
518	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
506 519
507	nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
508	nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
509	nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
520	nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
521	nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
522	nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
510 523
511	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i); 524	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i);
512	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, limit);
525	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->limit);
513	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i); 526	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i);
514	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, pitch);
527	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->pitch);
515	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i); 528	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
516	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, addr);
529	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->addr);
530
531 if (dev_priv->card_type == NV_20) {
532 nv_wr32(dev, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
533 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i);
534 nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->zcomp);
535 }
517} 536}
518 537
519int 538int
@@ -560,6 +579,13 @@ nv20_graph_init(struct drm_device *dev)
560 579
561 nv20_graph_rdi(dev); 580 nv20_graph_rdi(dev);
562 581
582 ret = nv20_graph_register(dev);
583 if (ret) {
584 nouveau_gpuobj_ref(NULL, &pgraph->ctx_table);
585 return ret;
586 }
587
588 nouveau_irq_register(dev, 12, nv20_graph_isr);
563 nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); 589 nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
564 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); 590 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
565 591
@@ -571,16 +597,17 @@ nv20_graph_init(struct drm_device *dev)
571 nv_wr32(dev, 0x40009C , 0x00000040); 597 nv_wr32(dev, 0x40009C , 0x00000040);
572 598
573 if (dev_priv->chipset >= 0x25) { 599 if (dev_priv->chipset >= 0x25) {
574		nv_wr32(dev, 0x400890, 0x00080000);
600		nv_wr32(dev, 0x400890, 0x00a8cfff);
575		nv_wr32(dev, 0x400610, 0x304B1FB6); 601		nv_wr32(dev, 0x400610, 0x304B1FB6);
576		nv_wr32(dev, 0x400B80, 0x18B82880);
602		nv_wr32(dev, 0x400B80, 0x1cbd3883);
577		nv_wr32(dev, 0x400B84, 0x44000000); 603		nv_wr32(dev, 0x400B84, 0x44000000);
578		nv_wr32(dev, 0x400098, 0x40000080); 604		nv_wr32(dev, 0x400098, 0x40000080);
579		nv_wr32(dev, 0x400B88, 0x000000ff); 605		nv_wr32(dev, 0x400B88, 0x000000ff);
606
580	} else { 607	} else {
581		nv_wr32(dev, 0x400880, 0x00080000); /* 0x0008c7df */
608		nv_wr32(dev, 0x400880, 0x0008c7df);
582		nv_wr32(dev, 0x400094, 0x00000005); 609		nv_wr32(dev, 0x400094, 0x00000005);
583		nv_wr32(dev, 0x400B80, 0x45CAA208); /* 0x45eae20e */
610		nv_wr32(dev, 0x400B80, 0x45eae20e);
584 nv_wr32(dev, 0x400B84, 0x24000000); 611 nv_wr32(dev, 0x400B84, 0x24000000);
585 nv_wr32(dev, 0x400098, 0x00000040); 612 nv_wr32(dev, 0x400098, 0x00000040);
586 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00038); 613 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00038);
@@ -591,14 +618,8 @@ nv20_graph_init(struct drm_device *dev)
591 618
592 /* Turn all the tiling regions off. */ 619 /* Turn all the tiling regions off. */
593 for (i = 0; i < NV10_PFB_TILE__SIZE; i++) 620 for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
594		nv20_graph_set_region_tiling(dev, i, 0, 0, 0);
621		nv20_graph_set_tile_region(dev, i);
595 622
596 for (i = 0; i < 8; i++) {
597 nv_wr32(dev, 0x400980 + i * 4, nv_rd32(dev, 0x100300 + i * 4));
598 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0090 + i * 4);
599 nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
600 nv_rd32(dev, 0x100300 + i * 4));
601 }
602 nv_wr32(dev, 0x4009a0, nv_rd32(dev, 0x100324)); 623 nv_wr32(dev, 0x4009a0, nv_rd32(dev, 0x100324));
603 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA000C); 624 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA000C);
604 nv_wr32(dev, NV10_PGRAPH_RDI_DATA, nv_rd32(dev, 0x100324)); 625 nv_wr32(dev, NV10_PGRAPH_RDI_DATA, nv_rd32(dev, 0x100324));
@@ -642,6 +663,9 @@ nv20_graph_takedown(struct drm_device *dev)
642 struct drm_nouveau_private *dev_priv = dev->dev_private; 663 struct drm_nouveau_private *dev_priv = dev->dev_private;
643 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; 664 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
644 665
666 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
667 nouveau_irq_unregister(dev, 12);
668
645 nouveau_gpuobj_ref(NULL, &pgraph->ctx_table); 669 nouveau_gpuobj_ref(NULL, &pgraph->ctx_table);
646} 670}
647 671
@@ -684,9 +708,16 @@ nv30_graph_init(struct drm_device *dev)
684 return ret; 708 return ret;
685 } 709 }
686 710
711 ret = nv30_graph_register(dev);
712 if (ret) {
713 nouveau_gpuobj_ref(NULL, &pgraph->ctx_table);
714 return ret;
715 }
716
687 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, 717 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE,
688 pgraph->ctx_table->pinst >> 4); 718 pgraph->ctx_table->pinst >> 4);
689 719
720 nouveau_irq_register(dev, 12, nv20_graph_isr);
690 nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); 721 nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
691 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); 722 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
692 723
@@ -724,7 +755,7 @@ nv30_graph_init(struct drm_device *dev)
724 755
725 /* Turn all the tiling regions off. */ 756 /* Turn all the tiling regions off. */
726 for (i = 0; i < NV10_PFB_TILE__SIZE; i++) 757 for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
727		nv20_graph_set_region_tiling(dev, i, 0, 0, 0);
758		nv20_graph_set_tile_region(dev, i);
728 759
729 nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100); 760 nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
730 nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF); 761 nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF);
@@ -744,46 +775,125 @@ nv30_graph_init(struct drm_device *dev)
744 return 0; 775 return 0;
745} 776}
746 777
747struct nouveau_pgraph_object_class nv20_graph_grclass[] = {
748	{ 0x0030, false, NULL }, /* null */
749	{ 0x0039, false, NULL }, /* m2mf */
750	{ 0x004a, false, NULL }, /* gdirect */
751	{ 0x009f, false, NULL }, /* imageblit (nv12) */
752	{ 0x008a, false, NULL }, /* ifc */
753	{ 0x0089, false, NULL }, /* sifm */
754	{ 0x0062, false, NULL }, /* surf2d */
755	{ 0x0043, false, NULL }, /* rop */
756	{ 0x0012, false, NULL }, /* beta1 */
757	{ 0x0072, false, NULL }, /* beta4 */
758	{ 0x0019, false, NULL }, /* cliprect */
759	{ 0x0044, false, NULL }, /* pattern */
760	{ 0x009e, false, NULL }, /* swzsurf */
761	{ 0x0096, false, NULL }, /* celcius */
762	{ 0x0097, false, NULL }, /* kelvin (nv20) */
763	{ 0x0597, false, NULL }, /* kelvin (nv25) */
764	{}
765};
766
767struct nouveau_pgraph_object_class nv30_graph_grclass[] = {
768	{ 0x0030, false, NULL }, /* null */
769	{ 0x0039, false, NULL }, /* m2mf */
770	{ 0x004a, false, NULL }, /* gdirect */
771	{ 0x009f, false, NULL }, /* imageblit (nv12) */
772	{ 0x008a, false, NULL }, /* ifc */
773	{ 0x038a, false, NULL }, /* ifc (nv30) */
774	{ 0x0089, false, NULL }, /* sifm */
775	{ 0x0389, false, NULL }, /* sifm (nv30) */
776	{ 0x0062, false, NULL }, /* surf2d */
777	{ 0x0362, false, NULL }, /* surf2d (nv30) */
778	{ 0x0043, false, NULL }, /* rop */
779	{ 0x0012, false, NULL }, /* beta1 */
780	{ 0x0072, false, NULL }, /* beta4 */
781	{ 0x0019, false, NULL }, /* cliprect */
782	{ 0x0044, false, NULL }, /* pattern */
783	{ 0x039e, false, NULL }, /* swzsurf */
784	{ 0x0397, false, NULL }, /* rankine (nv30) */
785	{ 0x0497, false, NULL }, /* rankine (nv35) */
786	{ 0x0697, false, NULL }, /* rankine (nv34) */
787	{}
788};
778static int
779nv20_graph_register(struct drm_device *dev)
780{
781	struct drm_nouveau_private *dev_priv = dev->dev_private;
782
783	if (dev_priv->engine.graph.registered)
784		return 0;
785
786	NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
787	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
788	NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
789	NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
790	NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
791	NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
792	NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
793	NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
794	NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
795	NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
796	NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
797	NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
798	NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
799	NVOBJ_CLASS(dev, 0x009e, GR); /* swzsurf */
800	NVOBJ_CLASS(dev, 0x0096, GR); /* celcius */
801
802	/* kelvin */
803	if (dev_priv->chipset < 0x25)
804		NVOBJ_CLASS(dev, 0x0097, GR);
805	else
806		NVOBJ_CLASS(dev, 0x0597, GR);
807
808	/* nvsw */
809	NVOBJ_CLASS(dev, 0x506e, SW);
810	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
811
812	dev_priv->engine.graph.registered = true;
813	return 0;
814}
815
816static int
817nv30_graph_register(struct drm_device *dev)
818{
819	struct drm_nouveau_private *dev_priv = dev->dev_private;
789 820
821 if (dev_priv->engine.graph.registered)
822 return 0;
823
824 NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
825 NVOBJ_CLASS(dev, 0x0030, GR); /* null */
826 NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
827 NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
828 NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
829 NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
830 NVOBJ_CLASS(dev, 0x038a, GR); /* ifc (nv30) */
831 NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
832 NVOBJ_CLASS(dev, 0x0389, GR); /* sifm (nv30) */
833 NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
834 NVOBJ_CLASS(dev, 0x0362, GR); /* surf2d (nv30) */
835 NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
836 NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
837 NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
838 NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
839 NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
840 NVOBJ_CLASS(dev, 0x039e, GR); /* swzsurf */
841
842 /* rankine */
843 if (0x00000003 & (1 << (dev_priv->chipset & 0x0f)))
844 NVOBJ_CLASS(dev, 0x0397, GR);
845 else
846 if (0x00000010 & (1 << (dev_priv->chipset & 0x0f)))
847 NVOBJ_CLASS(dev, 0x0697, GR);
848 else
849 if (0x000001e0 & (1 << (dev_priv->chipset & 0x0f)))
850 NVOBJ_CLASS(dev, 0x0497, GR);
851
852 /* nvsw */
853 NVOBJ_CLASS(dev, 0x506e, SW);
854 NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
855
856 dev_priv->engine.graph.registered = true;
857 return 0;
858}
859
860static void
861nv20_graph_isr(struct drm_device *dev)
862{
863 u32 stat;
864
865 while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
866 u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
867 u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
868 u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
869 u32 chid = (addr & 0x01f00000) >> 20;
870 u32 subc = (addr & 0x00070000) >> 16;
871 u32 mthd = (addr & 0x00001ffc);
872 u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
873 u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xfff;
874 u32 show = stat;
875
876 if (stat & NV_PGRAPH_INTR_ERROR) {
877 if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
878 if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
879 show &= ~NV_PGRAPH_INTR_ERROR;
880 }
881 }
882
883 nv_wr32(dev, NV03_PGRAPH_INTR, stat);
884 nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
885
886 if (show && nouveau_ratelimit()) {
887 NV_INFO(dev, "PGRAPH -");
888 nouveau_bitfield_print(nv10_graph_intr, show);
889 printk(" nsource:");
890 nouveau_bitfield_print(nv04_graph_nsource, nsource);
891 printk(" nstatus:");
892 nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
893 printk("\n");
894 NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
895 "mthd 0x%04x data 0x%08x\n",
896 chid, subc, class, mthd, data);
897 }
898 }
899}
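
The rankine selection in nv30_graph_register() uses a bitmask as a set-membership test on the low nibble of the chipset id. Restated as an illustrative helper (not patch code):

	static inline bool chipset_nibble_in(u32 mask, int chipset)
	{
		return mask & (1 << (chipset & 0x0f));
	}

	/* 0x00000003 -> nibbles 0,1     : 0x30, 0x31             get class 0x0397
	 * 0x00000010 -> nibble  4       : 0x34                   gets class 0x0697
	 * 0x000001e0 -> nibbles 5,6,7,8 : 0x35, 0x36, 0x37, 0x38 get class 0x0497 */
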
diff --git a/drivers/gpu/drm/nouveau/nv30_fb.c b/drivers/gpu/drm/nouveau/nv30_fb.c
index 4a3f2f095128..e0135f0e2144 100644
--- a/drivers/gpu/drm/nouveau/nv30_fb.c
+++ b/drivers/gpu/drm/nouveau/nv30_fb.c
@@ -29,6 +29,27 @@
29#include "nouveau_drv.h" 29#include "nouveau_drv.h"
30#include "nouveau_drm.h" 30#include "nouveau_drm.h"
31 31
32void
33nv30_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr,
34 uint32_t size, uint32_t pitch, uint32_t flags)
35{
36 struct drm_nouveau_private *dev_priv = dev->dev_private;
37 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
38
39 tile->addr = addr | 1;
40 tile->limit = max(1u, addr + size) - 1;
41 tile->pitch = pitch;
42}
43
44void
45nv30_fb_free_tile_region(struct drm_device *dev, int i)
46{
47 struct drm_nouveau_private *dev_priv = dev->dev_private;
48 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
49
50 tile->addr = tile->limit = tile->pitch = 0;
51}
52
32static int 53static int
33calc_bias(struct drm_device *dev, int k, int i, int j) 54calc_bias(struct drm_device *dev, int k, int i, int j)
34{ 55{
@@ -65,7 +86,7 @@ nv30_fb_init(struct drm_device *dev)
65 86
66 /* Turn all the tiling regions off. */ 87 /* Turn all the tiling regions off. */
67 for (i = 0; i < pfb->num_tiles; i++) 88 for (i = 0; i < pfb->num_tiles; i++)
68		pfb->set_region_tiling(dev, i, 0, 0, 0);
89		pfb->set_tile_region(dev, i);
69 90
70 /* Init the memory timing regs at 0x10037c/0x1003ac */ 91 /* Init the memory timing regs at 0x10037c/0x1003ac */
71 if (dev_priv->chipset == 0x30 || 92 if (dev_priv->chipset == 0x30 ||
diff --git a/drivers/gpu/drm/nouveau/nv40_fb.c b/drivers/gpu/drm/nouveau/nv40_fb.c
index 3cd07d8d5bd7..f3d9c0505f7b 100644
--- a/drivers/gpu/drm/nouveau/nv40_fb.c
+++ b/drivers/gpu/drm/nouveau/nv40_fb.c
@@ -4,26 +4,22 @@
4#include "nouveau_drm.h" 4#include "nouveau_drm.h"
5 5
6void 6void
7nv40_fb_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
8		      uint32_t size, uint32_t pitch)
7nv40_fb_set_tile_region(struct drm_device *dev, int i)
9{ 8{
10	struct drm_nouveau_private *dev_priv = dev->dev_private; 9	struct drm_nouveau_private *dev_priv = dev->dev_private;
11	uint32_t limit = max(1u, addr + size) - 1;
12
13	if (pitch)
14		addr |= 1;
10	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
15 11
16	switch (dev_priv->chipset) { 12	switch (dev_priv->chipset) {
17	case 0x40: 13	case 0x40:
18		nv_wr32(dev, NV10_PFB_TLIMIT(i), limit);
19		nv_wr32(dev, NV10_PFB_TSIZE(i), pitch);
20		nv_wr32(dev, NV10_PFB_TILE(i), addr);
14		nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit);
15		nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch);
16		nv_wr32(dev, NV10_PFB_TILE(i), tile->addr);
21		break; 17		break;
22 18
23	default: 19	default:
24		nv_wr32(dev, NV40_PFB_TLIMIT(i), limit);
25		nv_wr32(dev, NV40_PFB_TSIZE(i), pitch);
26		nv_wr32(dev, NV40_PFB_TILE(i), addr);
20		nv_wr32(dev, NV40_PFB_TLIMIT(i), tile->limit);
21		nv_wr32(dev, NV40_PFB_TSIZE(i), tile->pitch);
22		nv_wr32(dev, NV40_PFB_TILE(i), tile->addr);
27 break; 23 break;
28 } 24 }
29} 25}
@@ -64,7 +60,7 @@ nv40_fb_init(struct drm_device *dev)
64 60
65 /* Turn all the tiling regions off. */ 61 /* Turn all the tiling regions off. */
66 for (i = 0; i < pfb->num_tiles; i++) 62 for (i = 0; i < pfb->num_tiles; i++)
67		pfb->set_region_tiling(dev, i, 0, 0, 0);
63		pfb->set_tile_region(dev, i);
68 64
69 return 0; 65 return 0;
70} 66}
diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c
index d337b8b28cdd..49b9a35a9cd6 100644
--- a/drivers/gpu/drm/nouveau/nv40_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv40_fifo.c
@@ -47,6 +47,11 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
47 if (ret) 47 if (ret)
48 return ret; 48 return ret;
49 49
50 chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
51 NV40_USER(chan->id), PAGE_SIZE);
52 if (!chan->user)
53 return -ENOMEM;
54
50 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 55 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
51 56
52 nv_wi32(dev, fc + 0, chan->pushbuf_base); 57 nv_wi32(dev, fc + 0, chan->pushbuf_base);
@@ -59,7 +64,6 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
59 NV_PFIFO_CACHE1_BIG_ENDIAN | 64 NV_PFIFO_CACHE1_BIG_ENDIAN |
60#endif 65#endif
61 0x30000000 /* no idea.. */); 66 0x30000000 /* no idea.. */);
62 nv_wi32(dev, fc + 56, chan->ramin_grctx->pinst >> 4);
63 nv_wi32(dev, fc + 60, 0x0001FFFF); 67 nv_wi32(dev, fc + 60, 0x0001FFFF);
64 68
65 /* enable the fifo dma operation */ 69 /* enable the fifo dma operation */
@@ -70,17 +74,6 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
70 return 0; 74 return 0;
71} 75}
72 76
73void
74nv40_fifo_destroy_context(struct nouveau_channel *chan)
75{
76 struct drm_device *dev = chan->dev;
77
78 nv_wr32(dev, NV04_PFIFO_MODE,
79 nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
80
81 nouveau_gpuobj_ref(NULL, &chan->ramfc);
82}
83
84static void 77static void
85nv40_fifo_do_load_context(struct drm_device *dev, int chid) 78nv40_fifo_do_load_context(struct drm_device *dev, int chid)
86{ 79{
@@ -279,6 +272,7 @@ nv40_fifo_init_ramxx(struct drm_device *dev)
279static void 272static void
280nv40_fifo_init_intr(struct drm_device *dev) 273nv40_fifo_init_intr(struct drm_device *dev)
281{ 274{
275 nouveau_irq_register(dev, 8, nv04_fifo_isr);
282 nv_wr32(dev, 0x002100, 0xffffffff); 276 nv_wr32(dev, 0x002100, 0xffffffff);
283 nv_wr32(dev, 0x002140, 0xffffffff); 277 nv_wr32(dev, 0x002140, 0xffffffff);
284} 278}
@@ -301,7 +295,7 @@ nv40_fifo_init(struct drm_device *dev)
301 pfifo->reassign(dev, true); 295 pfifo->reassign(dev, true);
302 296
303 for (i = 0; i < dev_priv->engine.fifo.channels; i++) { 297 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
304		if (dev_priv->fifos[i]) {
298		if (dev_priv->channels.ptr[i]) {
305 uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE); 299 uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
306 nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i)); 300 nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
307 } 301 }
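
Note the removed nv_wi32(dev, fc + 56, ...) write in the create-context hunk: RAMFC offset 56 is 0x38, the same slot that nv40_graph_create_context() now initializes itself (see the nv40_graph.c hunk below), presumably because the grctx object need not exist yet when the FIFO context is created.

	/* fc + 56 == fc + 0x38: the grctx pointer slot in RAMFC, now written
	 * from nv40_graph_create_context() instead of here. */
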
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 7ee1b91569b8..19ef92a0375a 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -29,6 +29,9 @@
29#include "nouveau_drv.h" 29#include "nouveau_drv.h"
30#include "nouveau_grctx.h" 30#include "nouveau_grctx.h"
31 31
32static int nv40_graph_register(struct drm_device *);
33static void nv40_graph_isr(struct drm_device *);
34
32struct nouveau_channel * 35struct nouveau_channel *
33nv40_graph_channel(struct drm_device *dev) 36nv40_graph_channel(struct drm_device *dev)
34{ 37{
@@ -42,7 +45,7 @@ nv40_graph_channel(struct drm_device *dev)
42 inst = (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) << 4; 45 inst = (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) << 4;
43 46
44 for (i = 0; i < dev_priv->engine.fifo.channels; i++) { 47 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
45		struct nouveau_channel *chan = dev_priv->fifos[i];
48		struct nouveau_channel *chan = dev_priv->channels.ptr[i];
46 49
47 if (chan && chan->ramin_grctx && 50 if (chan && chan->ramin_grctx &&
48 chan->ramin_grctx->pinst == inst) 51 chan->ramin_grctx->pinst == inst)
@@ -59,6 +62,7 @@ nv40_graph_create_context(struct nouveau_channel *chan)
59 struct drm_nouveau_private *dev_priv = dev->dev_private; 62 struct drm_nouveau_private *dev_priv = dev->dev_private;
60 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; 63 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
61 struct nouveau_grctx ctx = {}; 64 struct nouveau_grctx ctx = {};
65 unsigned long flags;
62 int ret; 66 int ret;
63 67
64 ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 16, 68 ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 16,
@@ -73,12 +77,39 @@ nv40_graph_create_context(struct nouveau_channel *chan)
73 nv40_grctx_init(&ctx); 77 nv40_grctx_init(&ctx);
74 78
75 nv_wo32(chan->ramin_grctx, 0, chan->ramin_grctx->pinst); 79 nv_wo32(chan->ramin_grctx, 0, chan->ramin_grctx->pinst);
80
81 /* init grctx pointer in ramfc, and on PFIFO if channel is
82 * already active there
83 */
84 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
85 nv_wo32(chan->ramfc, 0x38, chan->ramin_grctx->pinst >> 4);
86 nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
87 if ((nv_rd32(dev, 0x003204) & 0x0000001f) == chan->id)
88 nv_wr32(dev, 0x0032e0, chan->ramin_grctx->pinst >> 4);
89 nv_mask(dev, 0x002500, 0x00000001, 0x00000001);
90 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
76 return 0; 91 return 0;
77} 92}
78 93
79void 94void
80nv40_graph_destroy_context(struct nouveau_channel *chan) 95nv40_graph_destroy_context(struct nouveau_channel *chan)
81{ 96{
97 struct drm_device *dev = chan->dev;
98 struct drm_nouveau_private *dev_priv = dev->dev_private;
99 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
100 unsigned long flags;
101
102 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
103 pgraph->fifo_access(dev, false);
104
105 /* Unload the context if it's the currently active one */
106 if (pgraph->channel(dev) == chan)
107 pgraph->unload_context(dev);
108
109 pgraph->fifo_access(dev, true);
110 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
111
112 /* Free the context resources */
82 nouveau_gpuobj_ref(NULL, &chan->ramin_grctx); 113 nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
83} 114}
84 115
@@ -174,43 +205,39 @@ nv40_graph_unload_context(struct drm_device *dev)
174} 205}
175 206
176void 207void
177nv40_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
178			    uint32_t size, uint32_t pitch)
208nv40_graph_set_tile_region(struct drm_device *dev, int i)
179{ 209{
180	struct drm_nouveau_private *dev_priv = dev->dev_private; 210	struct drm_nouveau_private *dev_priv = dev->dev_private;
181	uint32_t limit = max(1u, addr + size) - 1;
182
183	if (pitch)
184		addr |= 1;
211	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
185 212
186	switch (dev_priv->chipset) { 213	switch (dev_priv->chipset) {
187	case 0x44: 214	case 0x44:
188	case 0x4a: 215	case 0x4a:
189	case 0x4e: 216	case 0x4e:
190		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
191		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
192		nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
217		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
218		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
219		nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
193		break; 220		break;
194 221
195	case 0x46: 222	case 0x46:
196	case 0x47: 223	case 0x47:
197	case 0x49: 224	case 0x49:
198	case 0x4b: 225	case 0x4b:
199		nv_wr32(dev, NV47_PGRAPH_TSIZE(i), pitch);
200		nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), limit);
201		nv_wr32(dev, NV47_PGRAPH_TILE(i), addr);
202		nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch);
203		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit);
204		nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr);
226		nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch);
227		nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit);
228		nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr);
229		nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
230		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
231		nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
205		break; 232		break;
206 233
207	default: 234	default:
208		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
209		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
210		nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
211		nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch);
212		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit);
213		nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr);
235		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
236		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
237		nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
238		nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
239		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
240		nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
214 break; 241 break;
215 } 242 }
216} 243}
@@ -232,7 +259,7 @@ nv40_graph_init(struct drm_device *dev)
232 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; 259 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
233 struct nouveau_grctx ctx = {}; 260 struct nouveau_grctx ctx = {};
234 uint32_t vramsz, *cp; 261 uint32_t vramsz, *cp;
235	int i, j;
262	int ret, i, j;
236 263
237 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & 264 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
238 ~NV_PMC_ENABLE_PGRAPH); 265 ~NV_PMC_ENABLE_PGRAPH);
@@ -256,9 +283,14 @@ nv40_graph_init(struct drm_device *dev)
256 283
257 kfree(cp); 284 kfree(cp);
258 285
286 ret = nv40_graph_register(dev);
287 if (ret)
288 return ret;
289
259 /* No context present currently */ 290 /* No context present currently */
260 nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000); 291 nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
261 292
293 nouveau_irq_register(dev, 12, nv40_graph_isr);
262 nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); 294 nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
263 nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF); 295 nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);
264 296
@@ -347,7 +379,7 @@ nv40_graph_init(struct drm_device *dev)
347 379
348 /* Turn all the tiling regions off. */ 380 /* Turn all the tiling regions off. */
349 for (i = 0; i < pfb->num_tiles; i++) 381 for (i = 0; i < pfb->num_tiles; i++)
350		nv40_graph_set_region_tiling(dev, i, 0, 0, 0);
382		nv40_graph_set_tile_region(dev, i);
351 383
352 /* begin RAM config */ 384 /* begin RAM config */
353 vramsz = pci_resource_len(dev->pdev, 0) - 1; 385 vramsz = pci_resource_len(dev->pdev, 0) - 1;
@@ -390,26 +422,111 @@ nv40_graph_init(struct drm_device *dev)
390 422
391void nv40_graph_takedown(struct drm_device *dev) 423void nv40_graph_takedown(struct drm_device *dev)
392{ 424{
425 nouveau_irq_unregister(dev, 12);
393} 426}
394 427
395struct nouveau_pgraph_object_class nv40_graph_grclass[] = {
396	{ 0x0030, false, NULL }, /* null */
397	{ 0x0039, false, NULL }, /* m2mf */
398	{ 0x004a, false, NULL }, /* gdirect */
399	{ 0x009f, false, NULL }, /* imageblit (nv12) */
400	{ 0x008a, false, NULL }, /* ifc */
401	{ 0x0089, false, NULL }, /* sifm */
402	{ 0x3089, false, NULL }, /* sifm (nv40) */
403	{ 0x0062, false, NULL }, /* surf2d */
404	{ 0x3062, false, NULL }, /* surf2d (nv40) */
405	{ 0x0043, false, NULL }, /* rop */
406	{ 0x0012, false, NULL }, /* beta1 */
407	{ 0x0072, false, NULL }, /* beta4 */
408	{ 0x0019, false, NULL }, /* cliprect */
409	{ 0x0044, false, NULL }, /* pattern */
410	{ 0x309e, false, NULL }, /* swzsurf */
411	{ 0x4097, false, NULL }, /* curie (nv40) */
412	{ 0x4497, false, NULL }, /* curie (nv44) */
413	{}
414};
428static int
429nv40_graph_register(struct drm_device *dev)
430{
431	struct drm_nouveau_private *dev_priv = dev->dev_private;
432
433	if (dev_priv->engine.graph.registered)
434		return 0;
415 435
436 NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
437 NVOBJ_CLASS(dev, 0x0030, GR); /* null */
438 NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
439 NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
440 NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
441 NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
442 NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
443 NVOBJ_CLASS(dev, 0x3089, GR); /* sifm (nv40) */
444 NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
445 NVOBJ_CLASS(dev, 0x3062, GR); /* surf2d (nv40) */
446 NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
447 NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
448 NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
449 NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
450 NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
451 NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */
452
453 /* curie */
454 if (dev_priv->chipset >= 0x60 ||
455 0x00005450 & (1 << (dev_priv->chipset & 0x0f)))
456 NVOBJ_CLASS(dev, 0x4497, GR);
457 else
458 NVOBJ_CLASS(dev, 0x4097, GR);
459
460 /* nvsw */
461 NVOBJ_CLASS(dev, 0x506e, SW);
462 NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
463
464 dev_priv->engine.graph.registered = true;
465 return 0;
466}
467
468static int
469nv40_graph_isr_chid(struct drm_device *dev, u32 inst)
470{
471 struct drm_nouveau_private *dev_priv = dev->dev_private;
472 struct nouveau_channel *chan;
473 unsigned long flags;
474 int i;
475
476 spin_lock_irqsave(&dev_priv->channels.lock, flags);
477 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
478 chan = dev_priv->channels.ptr[i];
479 if (!chan || !chan->ramin_grctx)
480 continue;
481
482 if (inst == chan->ramin_grctx->pinst)
483 break;
484 }
485 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
486 return i;
487}
488
489static void
490nv40_graph_isr(struct drm_device *dev)
491{
492 u32 stat;
493
494 while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
495 u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
496 u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
497 u32 inst = (nv_rd32(dev, 0x40032c) & 0x000fffff) << 4;
498 u32 chid = nv40_graph_isr_chid(dev, inst);
499 u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
500 u32 subc = (addr & 0x00070000) >> 16;
501 u32 mthd = (addr & 0x00001ffc);
502 u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
503 u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xffff;
504 u32 show = stat;
505
506 if (stat & NV_PGRAPH_INTR_ERROR) {
507 if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
508 if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
509 show &= ~NV_PGRAPH_INTR_ERROR;
510 } else
511 if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
512 nv_mask(dev, 0x402000, 0, 0);
513 }
514 }
515
516 nv_wr32(dev, NV03_PGRAPH_INTR, stat);
517 nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
518
519 if (show && nouveau_ratelimit()) {
520 NV_INFO(dev, "PGRAPH -");
521 nouveau_bitfield_print(nv10_graph_intr, show);
522 printk(" nsource:");
523 nouveau_bitfield_print(nv04_graph_nsource, nsource);
524 printk(" nstatus:");
525 nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
526 printk("\n");
527 NV_INFO(dev, "PGRAPH - ch %d (0x%08x) subc %d "
528 "class 0x%04x mthd 0x%04x data 0x%08x\n",
529 chid, inst, subc, class, mthd, data);
530 }
531 }
532}
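
The curie class choice in nv40_graph_register() packs another nibble-membership test. Expanded (illustrative only):

	/* 0x00005450 = bits 4, 6, 10, 12, 14, i.e. low nibbles 0x4, 0x6,
	 * 0xa, 0xc, 0xe: chipsets 0x44, 0x46, 0x4a, 0x4c and 0x4e -- plus
	 * everything >= 0x60 -- register class 0x4497; all other NV40-family
	 * chipsets register class 0x4097. */
	bool use_4497 = dev_priv->chipset >= 0x60 ||
			(0x00005450 & (1 << (dev_priv->chipset & 0x0f)));
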
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 56476d0c6de8..9023c4dbb449 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -115,15 +115,16 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
115 OUT_RING(evo, 0); 115 OUT_RING(evo, 0);
116 BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1); 116 BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
117 if (dev_priv->chipset != 0x50) 117 if (dev_priv->chipset != 0x50)
118			if (nv_crtc->fb.tile_flags == 0x7a00)
118			if (nv_crtc->fb.tile_flags == 0x7a00 ||
119			    nv_crtc->fb.tile_flags == 0xfe00)
119				OUT_RING(evo, NvEvoFB32); 120				OUT_RING(evo, NvEvoFB32);
120			else 121			else
121			if (nv_crtc->fb.tile_flags == 0x7000) 122			if (nv_crtc->fb.tile_flags == 0x7000)
122				OUT_RING(evo, NvEvoFB16); 123				OUT_RING(evo, NvEvoFB16);
123			else 124			else
124				OUT_RING(evo, NvEvoVRAM);
125				OUT_RING(evo, NvEvoVRAM_LP);
125		else 126		else
126			OUT_RING(evo, NvEvoVRAM);
127			OUT_RING(evo, NvEvoVRAM_LP);
127 } 128 }
128 129
129 nv_crtc->fb.blanked = blanked; 130 nv_crtc->fb.blanked = blanked;
@@ -345,7 +346,6 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
345 uint32_t buffer_handle, uint32_t width, uint32_t height) 346 uint32_t buffer_handle, uint32_t width, uint32_t height)
346{ 347{
347 struct drm_device *dev = crtc->dev; 348 struct drm_device *dev = crtc->dev;
348 struct drm_nouveau_private *dev_priv = dev->dev_private;
349 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 349 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
350 struct nouveau_bo *cursor = NULL; 350 struct nouveau_bo *cursor = NULL;
351 struct drm_gem_object *gem; 351 struct drm_gem_object *gem;
@@ -374,8 +374,7 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
374 374
375 nouveau_bo_unmap(cursor); 375 nouveau_bo_unmap(cursor);
376 376
377	nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset -
378					    dev_priv->vm_vram_base);
377	nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.mem.start << PAGE_SHIFT);
379 nv_crtc->cursor.show(nv_crtc, true); 378 nv_crtc->cursor.show(nv_crtc, true);
380 379
381out: 380out:
@@ -437,6 +436,7 @@ static const struct drm_crtc_funcs nv50_crtc_funcs = {
437 .cursor_move = nv50_crtc_cursor_move, 436 .cursor_move = nv50_crtc_cursor_move,
438 .gamma_set = nv50_crtc_gamma_set, 437 .gamma_set = nv50_crtc_gamma_set,
439 .set_config = drm_crtc_helper_set_config, 438 .set_config = drm_crtc_helper_set_config,
439 .page_flip = nouveau_crtc_page_flip,
440 .destroy = nv50_crtc_destroy, 440 .destroy = nv50_crtc_destroy,
441}; 441};
442 442
@@ -453,6 +453,7 @@ nv50_crtc_prepare(struct drm_crtc *crtc)
453 453
454 NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); 454 NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
455 455
456 drm_vblank_pre_modeset(dev, nv_crtc->index);
456 nv50_crtc_blank(nv_crtc, true); 457 nv50_crtc_blank(nv_crtc, true);
457} 458}
458 459
@@ -468,6 +469,7 @@ nv50_crtc_commit(struct drm_crtc *crtc)
468 NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); 469 NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
469 470
470 nv50_crtc_blank(nv_crtc, false); 471 nv50_crtc_blank(nv_crtc, false);
472 drm_vblank_post_modeset(dev, nv_crtc->index);
471 473
472 ret = RING_SPACE(evo, 2); 474 ret = RING_SPACE(evo, 2);
473 if (ret) { 475 if (ret) {
@@ -545,7 +547,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
545 return -EINVAL; 547 return -EINVAL;
546 } 548 }
547 549
548	nv_crtc->fb.offset = fb->nvbo->bo.offset - dev_priv->vm_vram_base;
550	nv_crtc->fb.offset = fb->nvbo->bo.mem.start << PAGE_SHIFT;
549 nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo); 551 nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo);
550 nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8; 552 nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8;
551 if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) { 553 if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) {
@@ -554,13 +556,14 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
554 return ret; 556 return ret;
555 557
556 BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_DMA), 1); 558 BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_DMA), 1);
557		if (nv_crtc->fb.tile_flags == 0x7a00)
559		if (nv_crtc->fb.tile_flags == 0x7a00 ||
560 nv_crtc->fb.tile_flags == 0xfe00)
558 OUT_RING(evo, NvEvoFB32); 561 OUT_RING(evo, NvEvoFB32);
559 else 562 else
560 if (nv_crtc->fb.tile_flags == 0x7000) 563 if (nv_crtc->fb.tile_flags == 0x7000)
561 OUT_RING(evo, NvEvoFB16); 564 OUT_RING(evo, NvEvoFB16);
562 else 565 else
563			OUT_RING(evo, NvEvoVRAM);
566			OUT_RING(evo, NvEvoVRAM_LP);
564 } 567 }
565 568
566 ret = RING_SPACE(evo, 12); 569 ret = RING_SPACE(evo, 12);
@@ -574,8 +577,10 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
574 if (!nv_crtc->fb.tile_flags) { 577 if (!nv_crtc->fb.tile_flags) {
575 OUT_RING(evo, drm_fb->pitch | (1 << 20)); 578 OUT_RING(evo, drm_fb->pitch | (1 << 20));
576 } else { 579 } else {
577		OUT_RING(evo, ((drm_fb->pitch / 4) << 4) |
578			      fb->nvbo->tile_mode);
580		u32 tile_mode = fb->nvbo->tile_mode;
581		if (dev_priv->card_type >= NV_C0)
582 tile_mode >>= 4;
583 OUT_RING(evo, ((drm_fb->pitch / 4) << 4) | tile_mode);
579 } 584 }
580 if (dev_priv->chipset == 0x50) 585 if (dev_priv->chipset == 0x50)
581 OUT_RING(evo, (nv_crtc->fb.tile_flags << 8) | format); 586 OUT_RING(evo, (nv_crtc->fb.tile_flags << 8) | format);
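
For tiled scanout surfaces the EVO pitch word packs pitch/4 into the upper bits with the buffer's tile mode in the low nibble. A worked example with made-up numbers: a 32bpp surface with pitch = 5120 bytes and tile_mode = 4 gives ((5120 / 4) << 4) | 4 = (0x500 << 4) | 4 = 0x5004. The new tile_mode >>= 4 on NV_C0+ apparently compensates for Fermi keeping its tile mode pre-shifted in the bo field; the encoding itself is not spelled out in this diff.

	u32 pitch_word = ((drm_fb->pitch / 4) << 4) | tile_mode;	/* e.g. 0x5004 */
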
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index f624c611ddea..7cc94ed9ed95 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -33,6 +33,8 @@
33#include "nouveau_ramht.h" 33#include "nouveau_ramht.h"
34#include "drm_crtc_helper.h" 34#include "drm_crtc_helper.h"
35 35
36static void nv50_display_isr(struct drm_device *);
37
36static inline int 38static inline int
37nv50_sor_nr(struct drm_device *dev) 39nv50_sor_nr(struct drm_device *dev)
38{ 40{
@@ -46,159 +48,6 @@ nv50_sor_nr(struct drm_device *dev)
46 return 4; 48 return 4;
47} 49}
48 50
49static void
50nv50_evo_channel_del(struct nouveau_channel **pchan)
51{
52 struct nouveau_channel *chan = *pchan;
53
54 if (!chan)
55 return;
56 *pchan = NULL;
57
58 nouveau_gpuobj_channel_takedown(chan);
59 nouveau_bo_unmap(chan->pushbuf_bo);
60 nouveau_bo_ref(NULL, &chan->pushbuf_bo);
61
62 if (chan->user)
63 iounmap(chan->user);
64
65 kfree(chan);
66}
67
68static int
69nv50_evo_dmaobj_new(struct nouveau_channel *evo, uint32_t class, uint32_t name,
70 uint32_t tile_flags, uint32_t magic_flags,
71 uint32_t offset, uint32_t limit)
72{
73 struct drm_nouveau_private *dev_priv = evo->dev->dev_private;
74 struct drm_device *dev = evo->dev;
75 struct nouveau_gpuobj *obj = NULL;
76 int ret;
77
78 ret = nouveau_gpuobj_new(dev, evo, 6*4, 32, 0, &obj);
79 if (ret)
80 return ret;
81 obj->engine = NVOBJ_ENGINE_DISPLAY;
82
83 nv_wo32(obj, 0, (tile_flags << 22) | (magic_flags << 16) | class);
84 nv_wo32(obj, 4, limit);
85 nv_wo32(obj, 8, offset);
86 nv_wo32(obj, 12, 0x00000000);
87 nv_wo32(obj, 16, 0x00000000);
88 if (dev_priv->card_type < NV_C0)
89 nv_wo32(obj, 20, 0x00010000);
90 else
91 nv_wo32(obj, 20, 0x00020000);
92 dev_priv->engine.instmem.flush(dev);
93
94 ret = nouveau_ramht_insert(evo, name, obj);
95 nouveau_gpuobj_ref(NULL, &obj);
96 if (ret) {
97 return ret;
98 }
99
100 return 0;
101}
102
103static int
104nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan)
105{
106 struct drm_nouveau_private *dev_priv = dev->dev_private;
107 struct nouveau_gpuobj *ramht = NULL;
108 struct nouveau_channel *chan;
109 int ret;
110
111 chan = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL);
112 if (!chan)
113 return -ENOMEM;
114 *pchan = chan;
115
116 chan->id = -1;
117 chan->dev = dev;
118 chan->user_get = 4;
119 chan->user_put = 0;
120
121 ret = nouveau_gpuobj_new(dev, NULL, 32768, 0x1000,
122 NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin);
123 if (ret) {
124 NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret);
125 nv50_evo_channel_del(pchan);
126 return ret;
127 }
128
129 ret = drm_mm_init(&chan->ramin_heap, 0, 32768);
130 if (ret) {
131 NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret);
132 nv50_evo_channel_del(pchan);
133 return ret;
134 }
135
136 ret = nouveau_gpuobj_new(dev, chan, 4096, 16, 0, &ramht);
137 if (ret) {
138 NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret);
139 nv50_evo_channel_del(pchan);
140 return ret;
141 }
142
143 ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
144 nouveau_gpuobj_ref(NULL, &ramht);
145 if (ret) {
146 nv50_evo_channel_del(pchan);
147 return ret;
148 }
149
150 if (dev_priv->chipset != 0x50) {
151 ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB16, 0x70, 0x19,
152 0, 0xffffffff);
153 if (ret) {
154 nv50_evo_channel_del(pchan);
155 return ret;
156 }
157
158
159 ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB32, 0x7a, 0x19,
160 0, 0xffffffff);
161 if (ret) {
162 nv50_evo_channel_del(pchan);
163 return ret;
164 }
165 }
166
167 ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoVRAM, 0, 0x19,
168 0, dev_priv->vram_size);
169 if (ret) {
170 nv50_evo_channel_del(pchan);
171 return ret;
172 }
173
174 ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
175 false, true, &chan->pushbuf_bo);
176 if (ret == 0)
177 ret = nouveau_bo_pin(chan->pushbuf_bo, TTM_PL_FLAG_VRAM);
178 if (ret) {
179 NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret);
180 nv50_evo_channel_del(pchan);
181 return ret;
182 }
183
184 ret = nouveau_bo_map(chan->pushbuf_bo);
185 if (ret) {
186 NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret);
187 nv50_evo_channel_del(pchan);
188 return ret;
189 }
190
191 chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
192 NV50_PDISPLAY_USER(0), PAGE_SIZE);
193 if (!chan->user) {
194 NV_ERROR(dev, "Error mapping EVO control regs.\n");
195 nv50_evo_channel_del(pchan);
196 return -ENOMEM;
197 }
198
199 return 0;
200}
201
202int 51int
203nv50_display_early_init(struct drm_device *dev) 52nv50_display_early_init(struct drm_device *dev)
204{ 53{
@@ -214,17 +63,16 @@ int
214nv50_display_init(struct drm_device *dev) 63nv50_display_init(struct drm_device *dev)
215{ 64{
216 struct drm_nouveau_private *dev_priv = dev->dev_private; 65 struct drm_nouveau_private *dev_priv = dev->dev_private;
217 struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
218 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; 66 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
219 struct nouveau_channel *evo = dev_priv->evo;
220 struct drm_connector *connector; 67 struct drm_connector *connector;
221 uint32_t val, ram_amount; 68 struct nouveau_channel *evo;
222 uint64_t start;
223 int ret, i; 69 int ret, i;
70 u32 val;
224 71
225 NV_DEBUG_KMS(dev, "\n"); 72 NV_DEBUG_KMS(dev, "\n");
226 73
227 nv_wr32(dev, 0x00610184, nv_rd32(dev, 0x00614004)); 74 nv_wr32(dev, 0x00610184, nv_rd32(dev, 0x00614004));
75
228 /* 76 /*
229 * I think the 0x006101XX range is some kind of main control area 77 * I think the 0x006101XX range is some kind of main control area
230 * that enables things. 78 * that enables things.
@@ -240,16 +88,19 @@ nv50_display_init(struct drm_device *dev)
240 val = nv_rd32(dev, 0x0061610c + (i * 0x800)); 88 val = nv_rd32(dev, 0x0061610c + (i * 0x800));
241 nv_wr32(dev, 0x0061019c + (i * 0x10), val); 89 nv_wr32(dev, 0x0061019c + (i * 0x10), val);
242 } 90 }
91
243 /* DAC */ 92 /* DAC */
244 for (i = 0; i < 3; i++) { 93 for (i = 0; i < 3; i++) {
245 val = nv_rd32(dev, 0x0061a000 + (i * 0x800)); 94 val = nv_rd32(dev, 0x0061a000 + (i * 0x800));
246 nv_wr32(dev, 0x006101d0 + (i * 0x04), val); 95 nv_wr32(dev, 0x006101d0 + (i * 0x04), val);
247 } 96 }
97
248 /* SOR */ 98 /* SOR */
249 for (i = 0; i < nv50_sor_nr(dev); i++) { 99 for (i = 0; i < nv50_sor_nr(dev); i++) {
250 val = nv_rd32(dev, 0x0061c000 + (i * 0x800)); 100 val = nv_rd32(dev, 0x0061c000 + (i * 0x800));
251 nv_wr32(dev, 0x006101e0 + (i * 0x04), val); 101 nv_wr32(dev, 0x006101e0 + (i * 0x04), val);
252 } 102 }
103
253 /* EXT */ 104 /* EXT */
254 for (i = 0; i < 3; i++) { 105 for (i = 0; i < 3; i++) {
255 val = nv_rd32(dev, 0x0061e000 + (i * 0x800)); 106 val = nv_rd32(dev, 0x0061e000 + (i * 0x800));
@@ -262,17 +113,6 @@ nv50_display_init(struct drm_device *dev)
262 nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL1(i), 0x00000001); 113 nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL1(i), 0x00000001);
263 } 114 }
264 115
265 /* This used to be in crtc unblank, but seems out of place there. */
266 nv_wr32(dev, NV50_PDISPLAY_UNK_380, 0);
267 /* RAM is clamped to 256 MiB. */
268 ram_amount = dev_priv->vram_size;
269 NV_DEBUG_KMS(dev, "ram_amount %d\n", ram_amount);
270 if (ram_amount > 256*1024*1024)
271 ram_amount = 256*1024*1024;
272 nv_wr32(dev, NV50_PDISPLAY_RAM_AMOUNT, ram_amount - 1);
273 nv_wr32(dev, NV50_PDISPLAY_UNK_388, 0x150000);
274 nv_wr32(dev, NV50_PDISPLAY_UNK_38C, 0);
275
276 /* The precise purpose is unknown, i suspect it has something to do 116 /* The precise purpose is unknown, i suspect it has something to do
277 * with text mode. 117 * with text mode.
278 */ 118 */
@@ -287,37 +127,6 @@ nv50_display_init(struct drm_device *dev)
287 } 127 }
288 } 128 }
289 129
290 /* taken from nv bug #12637, attempts to un-wedge the hw if it's
291 * stuck in some unspecified state
292 */
293 start = ptimer->read(dev);
294 nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x2b00);
295 while ((val = nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0))) & 0x1e0000) {
296 if ((val & 0x9f0000) == 0x20000)
297 nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
298 val | 0x800000);
299
300 if ((val & 0x3f0000) == 0x30000)
301 nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
302 val | 0x200000);
303
304 if (ptimer->read(dev) - start > 1000000000ULL) {
305 NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) != 0\n");
306 NV_ERROR(dev, "0x610200 = 0x%08x\n", val);
307 return -EBUSY;
308 }
309 }
310
311 nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, NV50_PDISPLAY_CTRL_STATE_ENABLE);
312 nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x1000b03);
313 if (!nv_wait(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
314 0x40000000, 0x40000000)) {
315 NV_ERROR(dev, "timeout: (0x610200 & 0x40000000) == 0x40000000\n");
316 NV_ERROR(dev, "0x610200 = 0x%08x\n",
317 nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)));
318 return -EBUSY;
319 }
320
321 for (i = 0; i < 2; i++) { 130 for (i = 0; i < 2; i++) {
322 nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000); 131 nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000);
323 if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 132 if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
@@ -341,39 +150,31 @@ nv50_display_init(struct drm_device *dev)
341 } 150 }
342 } 151 }
343 152
344 nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9); 153 nv_wr32(dev, NV50_PDISPLAY_PIO_CTRL, 0x00000000);
154 nv_mask(dev, NV50_PDISPLAY_INTR_0, 0x00000000, 0x00000000);
155 nv_wr32(dev, NV50_PDISPLAY_INTR_EN_0, 0x00000000);
156 nv_mask(dev, NV50_PDISPLAY_INTR_1, 0x00000000, 0x00000000);
157 nv_wr32(dev, NV50_PDISPLAY_INTR_EN_1,
158 NV50_PDISPLAY_INTR_EN_1_CLK_UNK10 |
159 NV50_PDISPLAY_INTR_EN_1_CLK_UNK20 |
160 NV50_PDISPLAY_INTR_EN_1_CLK_UNK40);
161
162 /* enable hotplug interrupts */
163 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
164 struct nouveau_connector *conn = nouveau_connector(connector);
345 165
346 /* initialise fifo */ 166 if (conn->dcb->gpio_tag == 0xff)
347 nv_wr32(dev, NV50_PDISPLAY_CHANNEL_DMA_CB(0), 167 continue;
348 ((evo->pushbuf_bo->bo.mem.start << PAGE_SHIFT) >> 8) | 168
349 NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM | 169 pgpio->irq_enable(dev, conn->dcb->gpio_tag, true);
350 NV50_PDISPLAY_CHANNEL_DMA_CB_VALID);
351 nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK2(0), 0x00010000);
352 nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK3(0), 0x00000002);
353 if (!nv_wait(dev, 0x610200, 0x80000000, 0x00000000)) {
354 NV_ERROR(dev, "timeout: (0x610200 & 0x80000000) == 0\n");
355 NV_ERROR(dev, "0x610200 = 0x%08x\n", nv_rd32(dev, 0x610200));
356 return -EBUSY;
357 } 170 }
358 nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 171
359 (nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)) & ~0x00000003) | 172 ret = nv50_evo_init(dev);
360 NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED);
361 nv_wr32(dev, NV50_PDISPLAY_USER_PUT(0), 0);
362 nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x01000003 |
363 NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED);
364 nv_wr32(dev, 0x610300, nv_rd32(dev, 0x610300) & ~1);
365
366 evo->dma.max = (4096/4) - 2;
367 evo->dma.put = 0;
368 evo->dma.cur = evo->dma.put;
369 evo->dma.free = evo->dma.max - evo->dma.cur;
370
371 ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS);
372 if (ret) 173 if (ret)
373 return ret; 174 return ret;
175 evo = dev_priv->evo;
374 176
375 for (i = 0; i < NOUVEAU_DMA_SKIPS; i++) 177 nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9);
376 OUT_RING(evo, 0);
377 178
378 ret = RING_SPACE(evo, 11); 179 ret = RING_SPACE(evo, 11);
379 if (ret) 180 if (ret)
@@ -393,21 +194,6 @@ nv50_display_init(struct drm_device *dev)
393 if (!nv_wait(dev, 0x640004, 0xffffffff, evo->dma.put << 2)) 194 if (!nv_wait(dev, 0x640004, 0xffffffff, evo->dma.put << 2))
394 NV_ERROR(dev, "evo pushbuf stalled\n"); 195 NV_ERROR(dev, "evo pushbuf stalled\n");
395 196
396 /* enable clock change interrupts. */
397 nv_wr32(dev, 0x610028, 0x00010001);
398 nv_wr32(dev, NV50_PDISPLAY_INTR_EN, (NV50_PDISPLAY_INTR_EN_CLK_UNK10 |
399 NV50_PDISPLAY_INTR_EN_CLK_UNK20 |
400 NV50_PDISPLAY_INTR_EN_CLK_UNK40));
401
402 /* enable hotplug interrupts */
403 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
404 struct nouveau_connector *conn = nouveau_connector(connector);
405
406 if (conn->dcb->gpio_tag == 0xff)
407 continue;
408
409 pgpio->irq_enable(dev, conn->dcb->gpio_tag, true);
410 }
411 197
412 return 0; 198 return 0;
413} 199}
@@ -452,13 +238,7 @@ static int nv50_display_disable(struct drm_device *dev)
452 } 238 }
453 } 239 }
454 240
455 nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0); 241 nv50_evo_fini(dev);
456 nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, 0);
457 if (!nv_wait(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x1e0000, 0)) {
458 NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) == 0\n");
459 NV_ERROR(dev, "0x610200 = 0x%08x\n",
460 nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)));
461 }
462 242
463 for (i = 0; i < 3; i++) { 243 for (i = 0; i < 3; i++) {
464 if (!nv_wait(dev, NV50_PDISPLAY_SOR_DPMS_STATE(i), 244 if (!nv_wait(dev, NV50_PDISPLAY_SOR_DPMS_STATE(i),
@@ -470,7 +250,7 @@ static int nv50_display_disable(struct drm_device *dev)
470 } 250 }
471 251
472 /* disable interrupts. */ 252 /* disable interrupts. */
473 nv_wr32(dev, NV50_PDISPLAY_INTR_EN, 0x00000000); 253 nv_wr32(dev, NV50_PDISPLAY_INTR_EN_1, 0x00000000);
474 254
475 /* disable hotplug interrupts */ 255 /* disable hotplug interrupts */
476 nv_wr32(dev, 0xe054, 0xffffffff); 256 nv_wr32(dev, 0xe054, 0xffffffff);
@@ -508,13 +288,6 @@ int nv50_display_create(struct drm_device *dev)
508 288
509 dev->mode_config.fb_base = dev_priv->fb_phys; 289 dev->mode_config.fb_base = dev_priv->fb_phys;
510 290
511 /* Create EVO channel */
512 ret = nv50_evo_channel_new(dev, &dev_priv->evo);
513 if (ret) {
514 NV_ERROR(dev, "Error creating EVO channel: %d\n", ret);
515 return ret;
516 }
517
518 /* Create CRTC objects */ 291 /* Create CRTC objects */
519 for (i = 0; i < 2; i++) 292 for (i = 0; i < 2; i++)
520 nv50_crtc_create(dev, i); 293 nv50_crtc_create(dev, i);
@@ -557,6 +330,9 @@ int nv50_display_create(struct drm_device *dev)
557 } 330 }
558 } 331 }
559 332
333 INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
334 nouveau_irq_register(dev, 26, nv50_display_isr);
335
560 ret = nv50_display_init(dev); 336 ret = nv50_display_init(dev);
561 if (ret) { 337 if (ret) {
562 nv50_display_destroy(dev); 338 nv50_display_destroy(dev);
@@ -569,14 +345,12 @@ int nv50_display_create(struct drm_device *dev)
569void 345void
570nv50_display_destroy(struct drm_device *dev) 346nv50_display_destroy(struct drm_device *dev)
571{ 347{
572 struct drm_nouveau_private *dev_priv = dev->dev_private;
573
574 NV_DEBUG_KMS(dev, "\n"); 348 NV_DEBUG_KMS(dev, "\n");
575 349
576 drm_mode_config_cleanup(dev); 350 drm_mode_config_cleanup(dev);
577 351
578 nv50_display_disable(dev); 352 nv50_display_disable(dev);
579 nv50_evo_channel_del(&dev_priv->evo); 353 nouveau_irq_unregister(dev, 26);
580} 354}
581 355
582static u16 356static u16
@@ -660,32 +434,32 @@ static void
660nv50_display_vblank_crtc_handler(struct drm_device *dev, int crtc) 434nv50_display_vblank_crtc_handler(struct drm_device *dev, int crtc)
661{ 435{
662 struct drm_nouveau_private *dev_priv = dev->dev_private; 436 struct drm_nouveau_private *dev_priv = dev->dev_private;
663 struct nouveau_channel *chan; 437 struct nouveau_channel *chan, *tmp;
664 struct list_head *entry, *tmp;
665 438
666 list_for_each_safe(entry, tmp, &dev_priv->vbl_waiting) { 439 list_for_each_entry_safe(chan, tmp, &dev_priv->vbl_waiting,
667 chan = list_entry(entry, struct nouveau_channel, nvsw.vbl_wait); 440 nvsw.vbl_wait) {
441 if (chan->nvsw.vblsem_head != crtc)
442 continue;
668 443
669 nouveau_bo_wr32(chan->notifier_bo, chan->nvsw.vblsem_offset, 444 nouveau_bo_wr32(chan->notifier_bo, chan->nvsw.vblsem_offset,
670 chan->nvsw.vblsem_rval); 445 chan->nvsw.vblsem_rval);
671 list_del(&chan->nvsw.vbl_wait); 446 list_del(&chan->nvsw.vbl_wait);
447 drm_vblank_put(dev, crtc);
672 } 448 }
449
450 drm_handle_vblank(dev, crtc);
673} 451}
674 452
675static void 453static void
676nv50_display_vblank_handler(struct drm_device *dev, uint32_t intr) 454nv50_display_vblank_handler(struct drm_device *dev, uint32_t intr)
677{ 455{
678 intr &= NV50_PDISPLAY_INTR_1_VBLANK_CRTC;
679
680 if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_0) 456 if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_0)
681 nv50_display_vblank_crtc_handler(dev, 0); 457 nv50_display_vblank_crtc_handler(dev, 0);
682 458
683 if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_1) 459 if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_1)
684 nv50_display_vblank_crtc_handler(dev, 1); 460 nv50_display_vblank_crtc_handler(dev, 1);
685 461
686 nv_wr32(dev, NV50_PDISPLAY_INTR_EN, nv_rd32(dev, 462 nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_VBLANK_CRTC);
687 NV50_PDISPLAY_INTR_EN) & ~intr);
688 nv_wr32(dev, NV50_PDISPLAY_INTR_1, intr);
689} 463}
690 464
691static void 465static void
@@ -1011,108 +785,31 @@ nv50_display_irq_handler_bh(struct work_struct *work)
1011static void 785static void
1012nv50_display_error_handler(struct drm_device *dev) 786nv50_display_error_handler(struct drm_device *dev)
1013{ 787{
1014 uint32_t addr, data; 788 u32 channels = (nv_rd32(dev, NV50_PDISPLAY_INTR_0) & 0x001f0000) >> 16;
1015 789 u32 addr, data;
1016 nv_wr32(dev, NV50_PDISPLAY_INTR_0, 0x00010000); 790 int chid;
1017 addr = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_ADDR);
1018 data = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_DATA);
1019
1020 NV_ERROR(dev, "EvoCh %d Mthd 0x%04x Data 0x%08x (0x%04x 0x%02x)\n",
1021 0, addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf);
1022
1023 nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR, 0x90000000);
1024}
1025
1026void
1027nv50_display_irq_hotplug_bh(struct work_struct *work)
1028{
1029 struct drm_nouveau_private *dev_priv =
1030 container_of(work, struct drm_nouveau_private, hpd_work);
1031 struct drm_device *dev = dev_priv->dev;
1032 struct drm_connector *connector;
1033 const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
1034 uint32_t unplug_mask, plug_mask, change_mask;
1035 uint32_t hpd0, hpd1;
1036
1037 spin_lock_irq(&dev_priv->hpd_state.lock);
1038 hpd0 = dev_priv->hpd_state.hpd0_bits;
1039 dev_priv->hpd_state.hpd0_bits = 0;
1040 hpd1 = dev_priv->hpd_state.hpd1_bits;
1041 dev_priv->hpd_state.hpd1_bits = 0;
1042 spin_unlock_irq(&dev_priv->hpd_state.lock);
1043
1044 hpd0 &= nv_rd32(dev, 0xe050);
1045 if (dev_priv->chipset >= 0x90)
1046 hpd1 &= nv_rd32(dev, 0xe070);
1047
1048 plug_mask = (hpd0 & 0x0000ffff) | (hpd1 << 16);
1049 unplug_mask = (hpd0 >> 16) | (hpd1 & 0xffff0000);
1050 change_mask = plug_mask | unplug_mask;
1051
1052 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1053 struct drm_encoder_helper_funcs *helper;
1054 struct nouveau_connector *nv_connector =
1055 nouveau_connector(connector);
1056 struct nouveau_encoder *nv_encoder;
1057 struct dcb_gpio_entry *gpio;
1058 uint32_t reg;
1059 bool plugged;
1060
1061 if (!nv_connector->dcb)
1062 continue;
1063 791
1064 gpio = nouveau_bios_gpio_entry(dev, nv_connector->dcb->gpio_tag); 792 for (chid = 0; chid < 5; chid++) {
1065 if (!gpio || !(change_mask & (1 << gpio->line))) 793 if (!(channels & (1 << chid)))
1066 continue; 794 continue;
1067 795
1068 reg = nv_rd32(dev, gpio_reg[gpio->line >> 3]); 796 nv_wr32(dev, NV50_PDISPLAY_INTR_0, 0x00010000 << chid);
1069 plugged = !!(reg & (4 << ((gpio->line & 7) << 2))); 797 addr = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_ADDR(chid));
1070 NV_INFO(dev, "%splugged %s\n", plugged ? "" : "un", 798 data = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_DATA(chid));
1071 drm_get_connector_name(connector)) ; 799 NV_ERROR(dev, "EvoCh %d Mthd 0x%04x Data 0x%08x "
1072 800 "(0x%04x 0x%02x)\n", chid,
1073 if (!connector->encoder || !connector->encoder->crtc || 801 addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf);
1074 !connector->encoder->crtc->enabled)
1075 continue;
1076 nv_encoder = nouveau_encoder(connector->encoder);
1077 helper = connector->encoder->helper_private;
1078
1079 if (nv_encoder->dcb->type != OUTPUT_DP)
1080 continue;
1081 802
1082 if (plugged) 803 nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR(chid), 0x90000000);
1083 helper->dpms(connector->encoder, DRM_MODE_DPMS_ON);
1084 else
1085 helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF);
1086 } 804 }
1087
1088 drm_helper_hpd_irq_event(dev);
1089} 805}
1090 806
1091void 807static void
1092nv50_display_irq_handler(struct drm_device *dev) 808nv50_display_isr(struct drm_device *dev)
1093{ 809{
1094 struct drm_nouveau_private *dev_priv = dev->dev_private; 810 struct drm_nouveau_private *dev_priv = dev->dev_private;
1095 uint32_t delayed = 0; 811 uint32_t delayed = 0;
1096 812
1097 if (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG) {
1098 uint32_t hpd0_bits, hpd1_bits = 0;
1099
1100 hpd0_bits = nv_rd32(dev, 0xe054);
1101 nv_wr32(dev, 0xe054, hpd0_bits);
1102
1103 if (dev_priv->chipset >= 0x90) {
1104 hpd1_bits = nv_rd32(dev, 0xe074);
1105 nv_wr32(dev, 0xe074, hpd1_bits);
1106 }
1107
1108 spin_lock(&dev_priv->hpd_state.lock);
1109 dev_priv->hpd_state.hpd0_bits |= hpd0_bits;
1110 dev_priv->hpd_state.hpd1_bits |= hpd1_bits;
1111 spin_unlock(&dev_priv->hpd_state.lock);
1112
1113 queue_work(dev_priv->wq, &dev_priv->hpd_work);
1114 }
1115
1116 while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) { 813 while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) {
1117 uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0); 814 uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0);
1118 uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1); 815 uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1);
@@ -1123,9 +820,9 @@ nv50_display_irq_handler(struct drm_device *dev)
1123 if (!intr0 && !(intr1 & ~delayed)) 820 if (!intr0 && !(intr1 & ~delayed))
1124 break; 821 break;
1125 822
1126 if (intr0 & 0x00010000) { 823 if (intr0 & 0x001f0000) {
1127 nv50_display_error_handler(dev); 824 nv50_display_error_handler(dev);
1128 intr0 &= ~0x00010000; 825 intr0 &= ~0x001f0000;
1129 } 826 }
1130 827
1131 if (intr1 & NV50_PDISPLAY_INTR_1_VBLANK_CRTC) { 828 if (intr1 & NV50_PDISPLAY_INTR_1_VBLANK_CRTC) {
@@ -1156,4 +853,3 @@ nv50_display_irq_handler(struct drm_device *dev)
1156 } 853 }
1157 } 854 }
1158} 855}
1159
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
index c551f0b85ee0..f0e30b78ef6b 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.h
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -35,9 +35,7 @@
35#include "nouveau_crtc.h" 35#include "nouveau_crtc.h"
36#include "nv50_evo.h" 36#include "nv50_evo.h"
37 37
38void nv50_display_irq_handler(struct drm_device *dev);
39void nv50_display_irq_handler_bh(struct work_struct *work); 38void nv50_display_irq_handler_bh(struct work_struct *work);
40void nv50_display_irq_hotplug_bh(struct work_struct *work);
41int nv50_display_early_init(struct drm_device *dev); 39int nv50_display_early_init(struct drm_device *dev);
42void nv50_display_late_takedown(struct drm_device *dev); 40void nv50_display_late_takedown(struct drm_device *dev);
43int nv50_display_create(struct drm_device *dev); 41int nv50_display_create(struct drm_device *dev);
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
new file mode 100644
index 000000000000..14e24e906ee8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_evo.c
@@ -0,0 +1,345 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26
27#include "nouveau_drv.h"
28#include "nouveau_dma.h"
29#include "nouveau_ramht.h"
30
31static void
32nv50_evo_channel_del(struct nouveau_channel **pevo)
33{
34 struct drm_nouveau_private *dev_priv;
35 struct nouveau_channel *evo = *pevo;
36
37 if (!evo)
38 return;
39 *pevo = NULL;
40
41 dev_priv = evo->dev->dev_private;
42 dev_priv->evo_alloc &= ~(1 << evo->id);
43
44 nouveau_gpuobj_channel_takedown(evo);
45 nouveau_bo_unmap(evo->pushbuf_bo);
46 nouveau_bo_ref(NULL, &evo->pushbuf_bo);
47
48 if (evo->user)
49 iounmap(evo->user);
50
51 kfree(evo);
52}
53
54int
55nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 class, u32 name,
56 u32 tile_flags, u32 magic_flags, u32 offset, u32 limit,
57 u32 flags5)
58{
59 struct drm_nouveau_private *dev_priv = evo->dev->dev_private;
60 struct drm_device *dev = evo->dev;
61 struct nouveau_gpuobj *obj = NULL;
62 int ret;
63
64 ret = nouveau_gpuobj_new(dev, dev_priv->evo, 6*4, 32, 0, &obj);
65 if (ret)
66 return ret;
67 obj->engine = NVOBJ_ENGINE_DISPLAY;
68
69 nv_wo32(obj, 0, (tile_flags << 22) | (magic_flags << 16) | class);
70 nv_wo32(obj, 4, limit);
71 nv_wo32(obj, 8, offset);
72 nv_wo32(obj, 12, 0x00000000);
73 nv_wo32(obj, 16, 0x00000000);
74 nv_wo32(obj, 20, flags5);
75 dev_priv->engine.instmem.flush(dev);
76
77 ret = nouveau_ramht_insert(evo, name, obj);
78 nouveau_gpuobj_ref(NULL, &obj);
79 if (ret) {
80 return ret;
81 }
82
83 return 0;
84}
85
86static int
87nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pevo)
88{
89 struct drm_nouveau_private *dev_priv = dev->dev_private;
90 struct nouveau_channel *evo;
91 int ret;
92
93 evo = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL);
94 if (!evo)
95 return -ENOMEM;
96 *pevo = evo;
97
98 for (evo->id = 0; evo->id < 5; evo->id++) {
99 if (dev_priv->evo_alloc & (1 << evo->id))
100 continue;
101
102 dev_priv->evo_alloc |= (1 << evo->id);
103 break;
104 }
105
106 if (evo->id == 5) {
107 kfree(evo);
108 return -ENODEV;
109 }
110
111 evo->dev = dev;
112 evo->user_get = 4;
113 evo->user_put = 0;
114
115 ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
116 false, true, &evo->pushbuf_bo);
117 if (ret == 0)
118 ret = nouveau_bo_pin(evo->pushbuf_bo, TTM_PL_FLAG_VRAM);
119 if (ret) {
120 NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret);
121 nv50_evo_channel_del(pevo);
122 return ret;
123 }
124
125 ret = nouveau_bo_map(evo->pushbuf_bo);
126 if (ret) {
127 NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret);
128 nv50_evo_channel_del(pevo);
129 return ret;
130 }
131
132 evo->user = ioremap(pci_resource_start(dev->pdev, 0) +
133 NV50_PDISPLAY_USER(evo->id), PAGE_SIZE);
134 if (!evo->user) {
135 NV_ERROR(dev, "Error mapping EVO control regs.\n");
136 nv50_evo_channel_del(pevo);
137 return -ENOMEM;
138 }
139
140 /* bind primary evo channel's ramht to the channel */
141 if (dev_priv->evo && evo != dev_priv->evo)
142 nouveau_ramht_ref(dev_priv->evo->ramht, &evo->ramht, NULL);
143
144 return 0;
145}
146
147static int
148nv50_evo_channel_init(struct nouveau_channel *evo)
149{
150 struct drm_device *dev = evo->dev;
151 int id = evo->id, ret, i;
152 u64 pushbuf = evo->pushbuf_bo->bo.mem.start << PAGE_SHIFT;
153 u32 tmp;
154
155 tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id));
156 if ((tmp & 0x009f0000) == 0x00020000)
157 nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00800000);
158
159 tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id));
160 if ((tmp & 0x003f0000) == 0x00030000)
161 nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00600000);
162
163 /* initialise fifo */
164 nv_wr32(dev, NV50_PDISPLAY_EVO_DMA_CB(id), pushbuf >> 8 |
165 NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM |
166 NV50_PDISPLAY_EVO_DMA_CB_VALID);
167 nv_wr32(dev, NV50_PDISPLAY_EVO_UNK2(id), 0x00010000);
168 nv_wr32(dev, NV50_PDISPLAY_EVO_HASH_TAG(id), id);
169 nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), NV50_PDISPLAY_EVO_CTRL_DMA,
170 NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);
171
172 nv_wr32(dev, NV50_PDISPLAY_USER_PUT(id), 0x00000000);
173 nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x01000003 |
174 NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);
175 if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x80000000, 0x00000000)) {
176 NV_ERROR(dev, "EvoCh %d init timeout: 0x%08x\n", id,
177 nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)));
178 return -EBUSY;
179 }
180
181 /* enable error reporting on the channel */
182 nv_mask(dev, 0x610028, 0x00000000, 0x00010001 << id);
183
184 evo->dma.max = (4096/4) - 2;
185 evo->dma.put = 0;
186 evo->dma.cur = evo->dma.put;
187 evo->dma.free = evo->dma.max - evo->dma.cur;
188
189 ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS);
190 if (ret)
191 return ret;
192
193 for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
194 OUT_RING(evo, 0);
195
196 return 0;
197}
198
199static void
200nv50_evo_channel_fini(struct nouveau_channel *evo)
201{
202 struct drm_device *dev = evo->dev;
203 int id = evo->id;
204
205 nv_mask(dev, 0x610028, 0x00010001 << id, 0x00000000);
206 nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x00001010, 0x00001000);
207 nv_wr32(dev, NV50_PDISPLAY_INTR_0, (1 << id));
208 nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x00000003, 0x00000000);
209 if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x001e0000, 0x00000000)) {
210 NV_ERROR(dev, "EvoCh %d takedown timeout: 0x%08x\n", id,
211 nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)));
212 }
213}
214
215static int
216nv50_evo_create(struct drm_device *dev)
217{
218 struct drm_nouveau_private *dev_priv = dev->dev_private;
219 struct nouveau_gpuobj *ramht = NULL;
220 struct nouveau_channel *evo;
221 int ret;
222
223 /* create primary evo channel, the one we use for modesetting
224 * purporses
225 */
226 ret = nv50_evo_channel_new(dev, &dev_priv->evo);
227 if (ret)
228 return ret;
229 evo = dev_priv->evo;
230
231 /* setup object management on it, any other evo channel will
232 * use this also as there's no per-channel support on the
233 * hardware
234 */
235 ret = nouveau_gpuobj_new(dev, NULL, 32768, 65536,
236 NVOBJ_FLAG_ZERO_ALLOC, &evo->ramin);
237 if (ret) {
238 NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret);
239 nv50_evo_channel_del(&dev_priv->evo);
240 return ret;
241 }
242
243 ret = drm_mm_init(&evo->ramin_heap, 0, 32768);
244 if (ret) {
245 NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret);
246 nv50_evo_channel_del(&dev_priv->evo);
247 return ret;
248 }
249
250 ret = nouveau_gpuobj_new(dev, evo, 4096, 16, 0, &ramht);
251 if (ret) {
252 NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret);
253 nv50_evo_channel_del(&dev_priv->evo);
254 return ret;
255 }
256
257 ret = nouveau_ramht_new(dev, ramht, &evo->ramht);
258 nouveau_gpuobj_ref(NULL, &ramht);
259 if (ret) {
260 nv50_evo_channel_del(&dev_priv->evo);
261 return ret;
262 }
263
264 /* create some default objects for the scanout memtypes we support */
265 if (dev_priv->card_type >= NV_C0) {
266 ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB32, 0xfe, 0x19,
267 0, 0xffffffff, 0x00000000);
268 if (ret) {
269 nv50_evo_channel_del(&dev_priv->evo);
270 return ret;
271 }
272
273 ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM, 0, 0x19,
274 0, dev_priv->vram_size, 0x00020000);
275 if (ret) {
276 nv50_evo_channel_del(&dev_priv->evo);
277 return ret;
278 }
279
280 ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM_LP, 0, 0x19,
281 0, dev_priv->vram_size, 0x00000000);
282 if (ret) {
283 nv50_evo_channel_del(&dev_priv->evo);
284 return ret;
285 }
286 } else
287 if (dev_priv->chipset != 0x50) {
288 ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB16, 0x70, 0x19,
289 0, 0xffffffff, 0x00010000);
290 if (ret) {
291 nv50_evo_channel_del(&dev_priv->evo);
292 return ret;
293 }
294
295
296 ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB32, 0x7a, 0x19,
297 0, 0xffffffff, 0x00010000);
298 if (ret) {
299 nv50_evo_channel_del(&dev_priv->evo);
300 return ret;
301 }
302
303 ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM, 0, 0x19,
304 0, dev_priv->vram_size, 0x00010000);
305 if (ret) {
306 nv50_evo_channel_del(&dev_priv->evo);
307 return ret;
308 }
309
310 ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM_LP, 0, 0x19,
311 0, dev_priv->vram_size, 0x00010000);
312 if (ret) {
313 nv50_evo_channel_del(&dev_priv->evo);
314 return ret;
315 }
316 }
317
318 return 0;
319}
320
321int
322nv50_evo_init(struct drm_device *dev)
323{
324 struct drm_nouveau_private *dev_priv = dev->dev_private;
325 int ret;
326
327 if (!dev_priv->evo) {
328 ret = nv50_evo_create(dev);
329 if (ret)
330 return ret;
331 }
332
333 return nv50_evo_channel_init(dev_priv->evo);
334}
335
336void
337nv50_evo_fini(struct drm_device *dev)
338{
339 struct drm_nouveau_private *dev_priv = dev->dev_private;
340
341 if (dev_priv->evo) {
342 nv50_evo_channel_fini(dev_priv->evo);
343 nv50_evo_channel_del(&dev_priv->evo);
344 }
345}
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.h b/drivers/gpu/drm/nouveau/nv50_evo.h
index aae13343bcec..aa4f0d3cea8e 100644
--- a/drivers/gpu/drm/nouveau/nv50_evo.h
+++ b/drivers/gpu/drm/nouveau/nv50_evo.h
@@ -24,6 +24,15 @@
24 * 24 *
25 */ 25 */
26 26
27#ifndef __NV50_EVO_H__
28#define __NV50_EVO_H__
29
30int nv50_evo_init(struct drm_device *dev);
31void nv50_evo_fini(struct drm_device *dev);
32int nv50_evo_dmaobj_new(struct nouveau_channel *, u32 class, u32 name,
33 u32 tile_flags, u32 magic_flags,
34 u32 offset, u32 limit);
35
27#define NV50_EVO_UPDATE 0x00000080 36#define NV50_EVO_UPDATE 0x00000080
28#define NV50_EVO_UNK84 0x00000084 37#define NV50_EVO_UNK84 0x00000084
29#define NV50_EVO_UNK84_NOTIFY 0x40000000 38#define NV50_EVO_UNK84_NOTIFY 0x40000000
@@ -111,3 +120,4 @@
111#define NV50_EVO_CRTC_SCALE_RES1 0x000008d8 120#define NV50_EVO_CRTC_SCALE_RES1 0x000008d8
112#define NV50_EVO_CRTC_SCALE_RES2 0x000008dc 121#define NV50_EVO_CRTC_SCALE_RES2 0x000008dc
113 122
123#endif
diff --git a/drivers/gpu/drm/nouveau/nv50_fb.c b/drivers/gpu/drm/nouveau/nv50_fb.c
index cd1988b15d2c..50290dea0ac4 100644
--- a/drivers/gpu/drm/nouveau/nv50_fb.c
+++ b/drivers/gpu/drm/nouveau/nv50_fb.c
@@ -3,30 +3,75 @@
3#include "nouveau_drv.h" 3#include "nouveau_drv.h"
4#include "nouveau_drm.h" 4#include "nouveau_drm.h"
5 5
6struct nv50_fb_priv {
7 struct page *r100c08_page;
8 dma_addr_t r100c08;
9};
10
11static int
12nv50_fb_create(struct drm_device *dev)
13{
14 struct drm_nouveau_private *dev_priv = dev->dev_private;
15 struct nv50_fb_priv *priv;
16
17 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
18 if (!priv)
19 return -ENOMEM;
20
21 priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
22 if (!priv->r100c08_page) {
23 kfree(priv);
24 return -ENOMEM;
25 }
26
27 priv->r100c08 = pci_map_page(dev->pdev, priv->r100c08_page, 0,
28 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
29 if (pci_dma_mapping_error(dev->pdev, priv->r100c08)) {
30 __free_page(priv->r100c08_page);
31 kfree(priv);
32 return -EFAULT;
33 }
34
35 dev_priv->engine.fb.priv = priv;
36 return 0;
37}
38
6int 39int
7nv50_fb_init(struct drm_device *dev) 40nv50_fb_init(struct drm_device *dev)
8{ 41{
9 struct drm_nouveau_private *dev_priv = dev->dev_private; 42 struct drm_nouveau_private *dev_priv = dev->dev_private;
43 struct nv50_fb_priv *priv;
44 int ret;
45
46 if (!dev_priv->engine.fb.priv) {
47 ret = nv50_fb_create(dev);
48 if (ret)
49 return ret;
50 }
51 priv = dev_priv->engine.fb.priv;
10 52
11 /* Not a clue what this is exactly. Without pointing it at a 53 /* Not a clue what this is exactly. Without pointing it at a
12 * scratch page, VRAM->GART blits with M2MF (as in DDX DFS) 54 * scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
13 * cause IOMMU "read from address 0" errors (rh#561267) 55 * cause IOMMU "read from address 0" errors (rh#561267)
14 */ 56 */
15 nv_wr32(dev, 0x100c08, dev_priv->gart_info.sg_dummy_bus >> 8); 57 nv_wr32(dev, 0x100c08, priv->r100c08 >> 8);
16 58
17 /* This is needed to get meaningful information from 100c90 59 /* This is needed to get meaningful information from 100c90
18 * on traps. No idea what these values mean exactly. */ 60 * on traps. No idea what these values mean exactly. */
19 switch (dev_priv->chipset) { 61 switch (dev_priv->chipset) {
20 case 0x50: 62 case 0x50:
21 nv_wr32(dev, 0x100c90, 0x0707ff); 63 nv_wr32(dev, 0x100c90, 0x000707ff);
22 break; 64 break;
23 case 0xa3: 65 case 0xa3:
24 case 0xa5: 66 case 0xa5:
25 case 0xa8: 67 case 0xa8:
26 nv_wr32(dev, 0x100c90, 0x0d0fff); 68 nv_wr32(dev, 0x100c90, 0x000d0fff);
69 break;
70 case 0xaf:
71 nv_wr32(dev, 0x100c90, 0x089d1fff);
27 break; 72 break;
28 default: 73 default:
29 nv_wr32(dev, 0x100c90, 0x1d07ff); 74 nv_wr32(dev, 0x100c90, 0x001d07ff);
30 break; 75 break;
31 } 76 }
32 77
@@ -36,12 +81,25 @@ nv50_fb_init(struct drm_device *dev)
36void 81void
37nv50_fb_takedown(struct drm_device *dev) 82nv50_fb_takedown(struct drm_device *dev)
38{ 83{
84 struct drm_nouveau_private *dev_priv = dev->dev_private;
85 struct nv50_fb_priv *priv;
86
87 priv = dev_priv->engine.fb.priv;
88 if (!priv)
89 return;
90 dev_priv->engine.fb.priv = NULL;
91
92 pci_unmap_page(dev->pdev, priv->r100c08, PAGE_SIZE,
93 PCI_DMA_BIDIRECTIONAL);
94 __free_page(priv->r100c08_page);
95 kfree(priv);
39} 96}
40 97
41void 98void
42nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name) 99nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name)
43{ 100{
44 struct drm_nouveau_private *dev_priv = dev->dev_private; 101 struct drm_nouveau_private *dev_priv = dev->dev_private;
102 unsigned long flags;
45 u32 trap[6], idx, chinst; 103 u32 trap[6], idx, chinst;
46 int i, ch; 104 int i, ch;
47 105
@@ -60,8 +118,10 @@ nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name)
60 return; 118 return;
61 119
62 chinst = (trap[2] << 16) | trap[1]; 120 chinst = (trap[2] << 16) | trap[1];
121
122 spin_lock_irqsave(&dev_priv->channels.lock, flags);
63 for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) { 123 for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) {
64 struct nouveau_channel *chan = dev_priv->fifos[ch]; 124 struct nouveau_channel *chan = dev_priv->channels.ptr[ch];
65 125
66 if (!chan || !chan->ramin) 126 if (!chan || !chan->ramin)
67 continue; 127 continue;
@@ -69,6 +129,7 @@ nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name)
69 if (chinst == chan->ramin->vinst >> 12) 129 if (chinst == chan->ramin->vinst >> 12)
70 break; 130 break;
71 } 131 }
132 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
72 133
73 NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x " 134 NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x "
74 "channel %d (0x%08x)\n", 135 "channel %d (0x%08x)\n",
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 6dcf048eddbc..791ded1c5c6d 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -1,29 +1,46 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
1#include "drmP.h" 25#include "drmP.h"
2#include "nouveau_drv.h" 26#include "nouveau_drv.h"
3#include "nouveau_dma.h" 27#include "nouveau_dma.h"
4#include "nouveau_ramht.h" 28#include "nouveau_ramht.h"
5#include "nouveau_fbcon.h" 29#include "nouveau_fbcon.h"
30#include "nouveau_mm.h"
6 31
7void 32int
8nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) 33nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
9{ 34{
10 struct nouveau_fbdev *nfbdev = info->par; 35 struct nouveau_fbdev *nfbdev = info->par;
11 struct drm_device *dev = nfbdev->dev; 36 struct drm_device *dev = nfbdev->dev;
12 struct drm_nouveau_private *dev_priv = dev->dev_private; 37 struct drm_nouveau_private *dev_priv = dev->dev_private;
13 struct nouveau_channel *chan = dev_priv->channel; 38 struct nouveau_channel *chan = dev_priv->channel;
39 int ret;
14 40
15 if (info->state != FBINFO_STATE_RUNNING) 41 ret = RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11);
16 return; 42 if (ret)
17 43 return ret;
18 if (!(info->flags & FBINFO_HWACCEL_DISABLED) &&
19 RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11)) {
20 nouveau_fbcon_gpu_lockup(info);
21 }
22
23 if (info->flags & FBINFO_HWACCEL_DISABLED) {
24 cfb_fillrect(info, rect);
25 return;
26 }
27 44
28 if (rect->rop != ROP_COPY) { 45 if (rect->rop != ROP_COPY) {
29 BEGIN_RING(chan, NvSub2D, 0x02ac, 1); 46 BEGIN_RING(chan, NvSub2D, 0x02ac, 1);
@@ -45,27 +62,21 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
45 OUT_RING(chan, 3); 62 OUT_RING(chan, 3);
46 } 63 }
47 FIRE_RING(chan); 64 FIRE_RING(chan);
65 return 0;
48} 66}
49 67
50void 68int
51nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) 69nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
52{ 70{
53 struct nouveau_fbdev *nfbdev = info->par; 71 struct nouveau_fbdev *nfbdev = info->par;
54 struct drm_device *dev = nfbdev->dev; 72 struct drm_device *dev = nfbdev->dev;
55 struct drm_nouveau_private *dev_priv = dev->dev_private; 73 struct drm_nouveau_private *dev_priv = dev->dev_private;
56 struct nouveau_channel *chan = dev_priv->channel; 74 struct nouveau_channel *chan = dev_priv->channel;
75 int ret;
57 76
58 if (info->state != FBINFO_STATE_RUNNING) 77 ret = RING_SPACE(chan, 12);
59 return; 78 if (ret)
60 79 return ret;
61 if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 12)) {
62 nouveau_fbcon_gpu_lockup(info);
63 }
64
65 if (info->flags & FBINFO_HWACCEL_DISABLED) {
66 cfb_copyarea(info, region);
67 return;
68 }
69 80
70 BEGIN_RING(chan, NvSub2D, 0x0110, 1); 81 BEGIN_RING(chan, NvSub2D, 0x0110, 1);
71 OUT_RING(chan, 0); 82 OUT_RING(chan, 0);
@@ -80,9 +91,10 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
80 OUT_RING(chan, 0); 91 OUT_RING(chan, 0);
81 OUT_RING(chan, region->sy); 92 OUT_RING(chan, region->sy);
82 FIRE_RING(chan); 93 FIRE_RING(chan);
94 return 0;
83} 95}
84 96
85void 97int
86nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) 98nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
87{ 99{
88 struct nouveau_fbdev *nfbdev = info->par; 100 struct nouveau_fbdev *nfbdev = info->par;
@@ -92,23 +104,14 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
92 uint32_t width, dwords, *data = (uint32_t *)image->data; 104 uint32_t width, dwords, *data = (uint32_t *)image->data;
93 uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel)); 105 uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
94 uint32_t *palette = info->pseudo_palette; 106 uint32_t *palette = info->pseudo_palette;
107 int ret;
95 108
96 if (info->state != FBINFO_STATE_RUNNING) 109 if (image->depth != 1)
97 return; 110 return -ENODEV;
98
99 if (image->depth != 1) {
100 cfb_imageblit(info, image);
101 return;
102 }
103 111
104 if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 11)) { 112 ret = RING_SPACE(chan, 11);
105 nouveau_fbcon_gpu_lockup(info); 113 if (ret)
106 } 114 return ret;
107
108 if (info->flags & FBINFO_HWACCEL_DISABLED) {
109 cfb_imageblit(info, image);
110 return;
111 }
112 115
113 width = ALIGN(image->width, 32); 116 width = ALIGN(image->width, 32);
114 dwords = (width * image->height) >> 5; 117 dwords = (width * image->height) >> 5;
@@ -134,11 +137,9 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
134 while (dwords) { 137 while (dwords) {
135 int push = dwords > 2047 ? 2047 : dwords; 138 int push = dwords > 2047 ? 2047 : dwords;
136 139
137 if (RING_SPACE(chan, push + 1)) { 140 ret = RING_SPACE(chan, push + 1);
138 nouveau_fbcon_gpu_lockup(info); 141 if (ret)
139 cfb_imageblit(info, image); 142 return ret;
140 return;
141 }
142 143
143 dwords -= push; 144 dwords -= push;
144 145
@@ -148,6 +149,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
148 } 149 }
149 150
150 FIRE_RING(chan); 151 FIRE_RING(chan);
152 return 0;
151} 153}
152 154
153int 155int
@@ -157,12 +159,9 @@ nv50_fbcon_accel_init(struct fb_info *info)
157 struct drm_device *dev = nfbdev->dev; 159 struct drm_device *dev = nfbdev->dev;
158 struct drm_nouveau_private *dev_priv = dev->dev_private; 160 struct drm_nouveau_private *dev_priv = dev->dev_private;
159 struct nouveau_channel *chan = dev_priv->channel; 161 struct nouveau_channel *chan = dev_priv->channel;
160 struct nouveau_gpuobj *eng2d = NULL; 162 struct nouveau_bo *nvbo = nfbdev->nouveau_fb.nvbo;
161 uint64_t fb;
162 int ret, format; 163 int ret, format;
163 164
164 fb = info->fix.smem_start - dev_priv->fb_phys + dev_priv->vm_vram_base;
165
166 switch (info->var.bits_per_pixel) { 165 switch (info->var.bits_per_pixel) {
167 case 8: 166 case 8:
168 format = 0xf3; 167 format = 0xf3;
@@ -190,12 +189,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
190 return -EINVAL; 189 return -EINVAL;
191 } 190 }
192 191
193 ret = nouveau_gpuobj_gr_new(dev_priv->channel, 0x502d, &eng2d); 192 ret = nouveau_gpuobj_gr_new(dev_priv->channel, Nv2D, 0x502d);
194 if (ret)
195 return ret;
196
197 ret = nouveau_ramht_insert(dev_priv->channel, Nv2D, eng2d);
198 nouveau_gpuobj_ref(NULL, &eng2d);
199 if (ret) 193 if (ret)
200 return ret; 194 return ret;
201 195
@@ -253,8 +247,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
253 OUT_RING(chan, info->fix.line_length); 247 OUT_RING(chan, info->fix.line_length);
254 OUT_RING(chan, info->var.xres_virtual); 248 OUT_RING(chan, info->var.xres_virtual);
255 OUT_RING(chan, info->var.yres_virtual); 249 OUT_RING(chan, info->var.yres_virtual);
256 OUT_RING(chan, upper_32_bits(fb)); 250 OUT_RING(chan, upper_32_bits(nvbo->vma.offset));
257 OUT_RING(chan, lower_32_bits(fb)); 251 OUT_RING(chan, lower_32_bits(nvbo->vma.offset));
258 BEGIN_RING(chan, NvSub2D, 0x0230, 2); 252 BEGIN_RING(chan, NvSub2D, 0x0230, 2);
259 OUT_RING(chan, format); 253 OUT_RING(chan, format);
260 OUT_RING(chan, 1); 254 OUT_RING(chan, 1);
@@ -262,8 +256,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
262 OUT_RING(chan, info->fix.line_length); 256 OUT_RING(chan, info->fix.line_length);
263 OUT_RING(chan, info->var.xres_virtual); 257 OUT_RING(chan, info->var.xres_virtual);
264 OUT_RING(chan, info->var.yres_virtual); 258 OUT_RING(chan, info->var.yres_virtual);
265 OUT_RING(chan, upper_32_bits(fb)); 259 OUT_RING(chan, upper_32_bits(nvbo->vma.offset));
266 OUT_RING(chan, lower_32_bits(fb)); 260 OUT_RING(chan, lower_32_bits(nvbo->vma.offset));
267 261
268 return 0; 262 return 0;
269} 263}
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index 1da65bd60c10..8dd04c5dac67 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -28,6 +28,7 @@
28#include "drm.h" 28#include "drm.h"
29#include "nouveau_drv.h" 29#include "nouveau_drv.h"
30#include "nouveau_ramht.h" 30#include "nouveau_ramht.h"
31#include "nouveau_vm.h"
31 32
32static void 33static void
33nv50_fifo_playlist_update(struct drm_device *dev) 34nv50_fifo_playlist_update(struct drm_device *dev)
@@ -44,7 +45,8 @@ nv50_fifo_playlist_update(struct drm_device *dev)
44 45
45 /* We never schedule channel 0 or 127 */ 46 /* We never schedule channel 0 or 127 */
46 for (i = 1, nr = 0; i < 127; i++) { 47 for (i = 1, nr = 0; i < 127; i++) {
47 if (dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc) { 48 if (dev_priv->channels.ptr[i] &&
49 dev_priv->channels.ptr[i]->ramfc) {
48 nv_wo32(cur, (nr * 4), i); 50 nv_wo32(cur, (nr * 4), i);
49 nr++; 51 nr++;
50 } 52 }
@@ -60,7 +62,7 @@ static void
60nv50_fifo_channel_enable(struct drm_device *dev, int channel) 62nv50_fifo_channel_enable(struct drm_device *dev, int channel)
61{ 63{
62 struct drm_nouveau_private *dev_priv = dev->dev_private; 64 struct drm_nouveau_private *dev_priv = dev->dev_private;
63 struct nouveau_channel *chan = dev_priv->fifos[channel]; 65 struct nouveau_channel *chan = dev_priv->channels.ptr[channel];
64 uint32_t inst; 66 uint32_t inst;
65 67
66 NV_DEBUG(dev, "ch%d\n", channel); 68 NV_DEBUG(dev, "ch%d\n", channel);
@@ -105,6 +107,7 @@ nv50_fifo_init_intr(struct drm_device *dev)
105{ 107{
106 NV_DEBUG(dev, "\n"); 108 NV_DEBUG(dev, "\n");
107 109
110 nouveau_irq_register(dev, 8, nv04_fifo_isr);
108 nv_wr32(dev, NV03_PFIFO_INTR_0, 0xFFFFFFFF); 111 nv_wr32(dev, NV03_PFIFO_INTR_0, 0xFFFFFFFF);
109 nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF); 112 nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF);
110} 113}
@@ -118,7 +121,7 @@ nv50_fifo_init_context_table(struct drm_device *dev)
118 NV_DEBUG(dev, "\n"); 121 NV_DEBUG(dev, "\n");
119 122
120 for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++) { 123 for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++) {
121 if (dev_priv->fifos[i]) 124 if (dev_priv->channels.ptr[i])
122 nv50_fifo_channel_enable(dev, i); 125 nv50_fifo_channel_enable(dev, i);
123 else 126 else
124 nv50_fifo_channel_disable(dev, i); 127 nv50_fifo_channel_disable(dev, i);
@@ -206,6 +209,9 @@ nv50_fifo_takedown(struct drm_device *dev)
206 if (!pfifo->playlist[0]) 209 if (!pfifo->playlist[0])
207 return; 210 return;
208 211
212 nv_wr32(dev, 0x2140, 0x00000000);
213 nouveau_irq_unregister(dev, 8);
214
209 nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]); 215 nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]);
210 nouveau_gpuobj_ref(NULL, &pfifo->playlist[1]); 216 nouveau_gpuobj_ref(NULL, &pfifo->playlist[1]);
211} 217}
@@ -256,6 +262,11 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
256 } 262 }
257 ramfc = chan->ramfc; 263 ramfc = chan->ramfc;
258 264
265 chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
266 NV50_USER(chan->id), PAGE_SIZE);
267 if (!chan->user)
268 return -ENOMEM;
269
259 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 270 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
260 271
261 nv_wo32(ramfc, 0x48, chan->pushbuf->cinst >> 4); 272 nv_wo32(ramfc, 0x48, chan->pushbuf->cinst >> 4);
@@ -291,10 +302,23 @@ void
291nv50_fifo_destroy_context(struct nouveau_channel *chan) 302nv50_fifo_destroy_context(struct nouveau_channel *chan)
292{ 303{
293 struct drm_device *dev = chan->dev; 304 struct drm_device *dev = chan->dev;
305 struct drm_nouveau_private *dev_priv = dev->dev_private;
306 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
294 struct nouveau_gpuobj *ramfc = NULL; 307 struct nouveau_gpuobj *ramfc = NULL;
308 unsigned long flags;
295 309
296 NV_DEBUG(dev, "ch%d\n", chan->id); 310 NV_DEBUG(dev, "ch%d\n", chan->id);
297 311
312 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
313 pfifo->reassign(dev, false);
314
315 /* Unload the context if it's the currently active one */
316 if (pfifo->channel_id(dev) == chan->id) {
317 pfifo->disable(dev);
318 pfifo->unload_context(dev);
319 pfifo->enable(dev);
320 }
321
298 /* This will ensure the channel is seen as disabled. */ 322 /* This will ensure the channel is seen as disabled. */
299 nouveau_gpuobj_ref(chan->ramfc, &ramfc); 323 nouveau_gpuobj_ref(chan->ramfc, &ramfc);
300 nouveau_gpuobj_ref(NULL, &chan->ramfc); 324 nouveau_gpuobj_ref(NULL, &chan->ramfc);
@@ -305,6 +329,14 @@ nv50_fifo_destroy_context(struct nouveau_channel *chan)
305 nv50_fifo_channel_disable(dev, 127); 329 nv50_fifo_channel_disable(dev, 127);
306 nv50_fifo_playlist_update(dev); 330 nv50_fifo_playlist_update(dev);
307 331
332 pfifo->reassign(dev, true);
333 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
334
335 /* Free the channel resources */
336 if (chan->user) {
337 iounmap(chan->user);
338 chan->user = NULL;
339 }
308 nouveau_gpuobj_ref(NULL, &ramfc); 340 nouveau_gpuobj_ref(NULL, &ramfc);
309 nouveau_gpuobj_ref(NULL, &chan->cache); 341 nouveau_gpuobj_ref(NULL, &chan->cache);
310} 342}
@@ -392,7 +424,7 @@ nv50_fifo_unload_context(struct drm_device *dev)
392 if (chid < 1 || chid >= dev_priv->engine.fifo.channels - 1) 424 if (chid < 1 || chid >= dev_priv->engine.fifo.channels - 1)
393 return 0; 425 return 0;
394 426
395 chan = dev_priv->fifos[chid]; 427 chan = dev_priv->channels.ptr[chid];
396 if (!chan) { 428 if (!chan) {
397 NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid); 429 NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
398 return -EINVAL; 430 return -EINVAL;
@@ -467,5 +499,5 @@ nv50_fifo_unload_context(struct drm_device *dev)
467void 499void
468nv50_fifo_tlb_flush(struct drm_device *dev) 500nv50_fifo_tlb_flush(struct drm_device *dev)
469{ 501{
470 nv50_vm_flush(dev, 5); 502 nv50_vm_flush_engine(dev, 5);
471} 503}
diff --git a/drivers/gpu/drm/nouveau/nv50_gpio.c b/drivers/gpu/drm/nouveau/nv50_gpio.c
index b2fab2bf3d61..6b149c0cc06d 100644
--- a/drivers/gpu/drm/nouveau/nv50_gpio.c
+++ b/drivers/gpu/drm/nouveau/nv50_gpio.c
@@ -26,6 +26,28 @@
26#include "nouveau_drv.h" 26#include "nouveau_drv.h"
27#include "nouveau_hw.h" 27#include "nouveau_hw.h"
28 28
29#include "nv50_display.h"
30
31static void nv50_gpio_isr(struct drm_device *dev);
32static void nv50_gpio_isr_bh(struct work_struct *work);
33
34struct nv50_gpio_priv {
35 struct list_head handlers;
36 spinlock_t lock;
37};
38
39struct nv50_gpio_handler {
40 struct drm_device *dev;
41 struct list_head head;
42 struct work_struct work;
43 bool inhibit;
44
45 struct dcb_gpio_entry *gpio;
46
47 void (*handler)(void *data, int state);
48 void *data;
49};
50
29static int 51static int
30nv50_gpio_location(struct dcb_gpio_entry *gpio, uint32_t *reg, uint32_t *shift) 52nv50_gpio_location(struct dcb_gpio_entry *gpio, uint32_t *reg, uint32_t *shift)
31{ 53{
@@ -75,29 +97,123 @@ nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state)
75 return 0; 97 return 0;
76} 98}
77 99
100int
101nv50_gpio_irq_register(struct drm_device *dev, enum dcb_gpio_tag tag,
102 void (*handler)(void *, int), void *data)
103{
104 struct drm_nouveau_private *dev_priv = dev->dev_private;
105 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
106 struct nv50_gpio_priv *priv = pgpio->priv;
107 struct nv50_gpio_handler *gpioh;
108 struct dcb_gpio_entry *gpio;
109 unsigned long flags;
110
111 gpio = nouveau_bios_gpio_entry(dev, tag);
112 if (!gpio)
113 return -ENOENT;
114
115 gpioh = kzalloc(sizeof(*gpioh), GFP_KERNEL);
116 if (!gpioh)
117 return -ENOMEM;
118
119 INIT_WORK(&gpioh->work, nv50_gpio_isr_bh);
120 gpioh->dev = dev;
121 gpioh->gpio = gpio;
122 gpioh->handler = handler;
123 gpioh->data = data;
124
125 spin_lock_irqsave(&priv->lock, flags);
126 list_add(&gpioh->head, &priv->handlers);
127 spin_unlock_irqrestore(&priv->lock, flags);
128 return 0;
129}
130
78void 131void
79nv50_gpio_irq_enable(struct drm_device *dev, enum dcb_gpio_tag tag, bool on) 132nv50_gpio_irq_unregister(struct drm_device *dev, enum dcb_gpio_tag tag,
133 void (*handler)(void *, int), void *data)
80{ 134{
135 struct drm_nouveau_private *dev_priv = dev->dev_private;
136 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
137 struct nv50_gpio_priv *priv = pgpio->priv;
138 struct nv50_gpio_handler *gpioh, *tmp;
81 struct dcb_gpio_entry *gpio; 139 struct dcb_gpio_entry *gpio;
82 u32 reg, mask; 140 unsigned long flags;
83 141
84 gpio = nouveau_bios_gpio_entry(dev, tag); 142 gpio = nouveau_bios_gpio_entry(dev, tag);
85 if (!gpio) { 143 if (!gpio)
86 NV_ERROR(dev, "gpio tag 0x%02x not found\n", tag);
87 return; 144 return;
145
146 spin_lock_irqsave(&priv->lock, flags);
147 list_for_each_entry_safe(gpioh, tmp, &priv->handlers, head) {
148 if (gpioh->gpio != gpio ||
149 gpioh->handler != handler ||
150 gpioh->data != data)
151 continue;
152 list_del(&gpioh->head);
153 kfree(gpioh);
88 } 154 }
155 spin_unlock_irqrestore(&priv->lock, flags);
156}
157
158bool
159nv50_gpio_irq_enable(struct drm_device *dev, enum dcb_gpio_tag tag, bool on)
160{
161 struct dcb_gpio_entry *gpio;
162 u32 reg, mask;
163
164 gpio = nouveau_bios_gpio_entry(dev, tag);
165 if (!gpio)
166 return false;
89 167
90 reg = gpio->line < 16 ? 0xe050 : 0xe070; 168 reg = gpio->line < 16 ? 0xe050 : 0xe070;
91 mask = 0x00010001 << (gpio->line & 0xf); 169 mask = 0x00010001 << (gpio->line & 0xf);
92 170
93 nv_wr32(dev, reg + 4, mask); 171 nv_wr32(dev, reg + 4, mask);
94 nv_mask(dev, reg + 0, mask, on ? mask : 0); 172 reg = nv_mask(dev, reg + 0, mask, on ? mask : 0);
173 return (reg & mask) == mask;
174}
175
176static int
177nv50_gpio_create(struct drm_device *dev)
178{
179 struct drm_nouveau_private *dev_priv = dev->dev_private;
180 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
181 struct nv50_gpio_priv *priv;
182
183 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
184 if (!priv)
185 return -ENOMEM;
186
187 INIT_LIST_HEAD(&priv->handlers);
188 spin_lock_init(&priv->lock);
189 pgpio->priv = priv;
190 return 0;
191}
192
193static void
194nv50_gpio_destroy(struct drm_device *dev)
195{
196 struct drm_nouveau_private *dev_priv = dev->dev_private;
197 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
198
199 kfree(pgpio->priv);
200 pgpio->priv = NULL;
95} 201}
96 202
97int 203int
98nv50_gpio_init(struct drm_device *dev) 204nv50_gpio_init(struct drm_device *dev)
99{ 205{
100 struct drm_nouveau_private *dev_priv = dev->dev_private; 206 struct drm_nouveau_private *dev_priv = dev->dev_private;
207 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
208 struct nv50_gpio_priv *priv;
209 int ret;
210
211 if (!pgpio->priv) {
212 ret = nv50_gpio_create(dev);
213 if (ret)
214 return ret;
215 }
216 priv = pgpio->priv;
101 217
102 /* disable, and ack any pending gpio interrupts */ 218 /* disable, and ack any pending gpio interrupts */
103 nv_wr32(dev, 0xe050, 0x00000000); 219 nv_wr32(dev, 0xe050, 0x00000000);
@@ -107,5 +223,77 @@ nv50_gpio_init(struct drm_device *dev)
107 nv_wr32(dev, 0xe074, 0xffffffff); 223 nv_wr32(dev, 0xe074, 0xffffffff);
108 } 224 }
109 225
226 nouveau_irq_register(dev, 21, nv50_gpio_isr);
110 return 0; 227 return 0;
111} 228}
229
230void
231nv50_gpio_fini(struct drm_device *dev)
232{
233 struct drm_nouveau_private *dev_priv = dev->dev_private;
234
235 nv_wr32(dev, 0xe050, 0x00000000);
236 if (dev_priv->chipset >= 0x90)
237 nv_wr32(dev, 0xe070, 0x00000000);
238 nouveau_irq_unregister(dev, 21);
239
240 nv50_gpio_destroy(dev);
241}
242
243static void
244nv50_gpio_isr_bh(struct work_struct *work)
245{
246 struct nv50_gpio_handler *gpioh =
247 container_of(work, struct nv50_gpio_handler, work);
248 struct drm_nouveau_private *dev_priv = gpioh->dev->dev_private;
249 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
250 struct nv50_gpio_priv *priv = pgpio->priv;
251 unsigned long flags;
252 int state;
253
254 state = pgpio->get(gpioh->dev, gpioh->gpio->tag);
255 if (state < 0)
256 return;
257
258 gpioh->handler(gpioh->data, state);
259
260 spin_lock_irqsave(&priv->lock, flags);
261 gpioh->inhibit = false;
262 spin_unlock_irqrestore(&priv->lock, flags);
263}
264
265static void
266nv50_gpio_isr(struct drm_device *dev)
267{
268 struct drm_nouveau_private *dev_priv = dev->dev_private;
269 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
270 struct nv50_gpio_priv *priv = pgpio->priv;
271 struct nv50_gpio_handler *gpioh;
272 u32 intr0, intr1 = 0;
273 u32 hi, lo, ch;
274
275 intr0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050);
276 if (dev_priv->chipset >= 0x90)
277 intr1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070);
278
279 hi = (intr0 & 0x0000ffff) | (intr1 << 16);
280 lo = (intr0 >> 16) | (intr1 & 0xffff0000);
281 ch = hi | lo;
282
283 nv_wr32(dev, 0xe054, intr0);
284 if (dev_priv->chipset >= 0x90)
285 nv_wr32(dev, 0xe074, intr1);
286
287 spin_lock(&priv->lock);
288 list_for_each_entry(gpioh, &priv->handlers, head) {
289 if (!(ch & (1 << gpioh->gpio->line)))
290 continue;
291
292 if (gpioh->inhibit)
293 continue;
294 gpioh->inhibit = true;
295
296 queue_work(dev_priv->wq, &gpioh->work);
297 }
298 spin_unlock(&priv->lock);
299}
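
Before walking the handler list, the ISR folds the two bank status words into a single 32-bit "line changed" mask: each bank register carries one edge direction per 16-bit half (the hi/lo naming above). A compact sketch of the folding, with made-up input values:

#include <stdint.h>
#include <stdio.h>

/* Fold the per-bank status words into one per-line change mask, as
 * nv50_gpio_isr() does: bank 0 covers lines 0-15, bank 1 lines 16-31. */
static uint32_t fold_gpio_status(uint32_t intr0, uint32_t intr1)
{
	uint32_t hi = (intr0 & 0x0000ffff) | (intr1 << 16);
	uint32_t lo = (intr0 >> 16) | (intr1 & 0xffff0000);

	return hi | lo; /* bit n set => GPIO line n changed state */
}

int main(void)
{
	/* line 3 (one edge) in bank 0, line 20 (other edge) in bank 1 */
	uint32_t ch = fold_gpio_status(1u << 3, 1u << 20);

	printf("change mask 0x%08x\n", ch); /* prints 0x00100008 */
	return 0;
}
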
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 8b669d0af610..2d7ea75a09d4 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -29,6 +29,12 @@
29#include "nouveau_drv.h" 29#include "nouveau_drv.h"
30#include "nouveau_ramht.h" 30#include "nouveau_ramht.h"
31#include "nouveau_grctx.h" 31#include "nouveau_grctx.h"
32#include "nouveau_dma.h"
33#include "nouveau_vm.h"
34#include "nv50_evo.h"
35
36static int nv50_graph_register(struct drm_device *);
37static void nv50_graph_isr(struct drm_device *);
32 38
33static void 39static void
34nv50_graph_init_reset(struct drm_device *dev) 40nv50_graph_init_reset(struct drm_device *dev)
@@ -46,6 +52,7 @@ nv50_graph_init_intr(struct drm_device *dev)
46{ 52{
47 NV_DEBUG(dev, "\n"); 53 NV_DEBUG(dev, "\n");
48 54
55 nouveau_irq_register(dev, 12, nv50_graph_isr);
49 nv_wr32(dev, NV03_PGRAPH_INTR, 0xffffffff); 56 nv_wr32(dev, NV03_PGRAPH_INTR, 0xffffffff);
50 nv_wr32(dev, 0x400138, 0xffffffff); 57 nv_wr32(dev, 0x400138, 0xffffffff);
51 nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xffffffff); 58 nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xffffffff);
@@ -145,12 +152,15 @@ nv50_graph_init(struct drm_device *dev)
145 nv50_graph_init_reset(dev); 152 nv50_graph_init_reset(dev);
146 nv50_graph_init_regs__nv(dev); 153 nv50_graph_init_regs__nv(dev);
147 nv50_graph_init_regs(dev); 154 nv50_graph_init_regs(dev);
148 nv50_graph_init_intr(dev);
149 155
150 ret = nv50_graph_init_ctxctl(dev); 156 ret = nv50_graph_init_ctxctl(dev);
151 if (ret) 157 if (ret)
152 return ret; 158 return ret;
153 159
160 ret = nv50_graph_register(dev);
161 if (ret)
162 return ret;
163 nv50_graph_init_intr(dev);
154 return 0; 164 return 0;
155} 165}
156 166
@@ -158,6 +168,8 @@ void
158nv50_graph_takedown(struct drm_device *dev) 168nv50_graph_takedown(struct drm_device *dev)
159{ 169{
160 NV_DEBUG(dev, "\n"); 170 NV_DEBUG(dev, "\n");
171 nv_wr32(dev, 0x40013c, 0x00000000);
172 nouveau_irq_unregister(dev, 12);
161} 173}
162 174
163void 175void
@@ -190,7 +202,7 @@ nv50_graph_channel(struct drm_device *dev)
190 inst = (inst & NV50_PGRAPH_CTXCTL_CUR_INSTANCE) << 12; 202 inst = (inst & NV50_PGRAPH_CTXCTL_CUR_INSTANCE) << 12;
191 203
192 for (i = 0; i < dev_priv->engine.fifo.channels; i++) { 204 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
193 struct nouveau_channel *chan = dev_priv->fifos[i]; 205 struct nouveau_channel *chan = dev_priv->channels.ptr[i];
194 206
195 if (chan && chan->ramin && chan->ramin->vinst == inst) 207 if (chan && chan->ramin && chan->ramin->vinst == inst)
196 return chan; 208 return chan;
@@ -211,7 +223,7 @@ nv50_graph_create_context(struct nouveau_channel *chan)
211 223
212 NV_DEBUG(dev, "ch%d\n", chan->id); 224 NV_DEBUG(dev, "ch%d\n", chan->id);
213 225
214 ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 0x1000, 226 ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 0,
215 NVOBJ_FLAG_ZERO_ALLOC | 227 NVOBJ_FLAG_ZERO_ALLOC |
216 NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx); 228 NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx);
217 if (ret) 229 if (ret)
@@ -234,6 +246,7 @@ nv50_graph_create_context(struct nouveau_channel *chan)
234 nv_wo32(chan->ramin_grctx, 0x00000, chan->ramin->vinst >> 12); 246 nv_wo32(chan->ramin_grctx, 0x00000, chan->ramin->vinst >> 12);
235 247
236 dev_priv->engine.instmem.flush(dev); 248 dev_priv->engine.instmem.flush(dev);
249 atomic_inc(&chan->vm->pgraph_refs);
237 return 0; 250 return 0;
238} 251}
239 252
@@ -242,18 +255,31 @@ nv50_graph_destroy_context(struct nouveau_channel *chan)
242{ 255{
243 struct drm_device *dev = chan->dev; 256 struct drm_device *dev = chan->dev;
244 struct drm_nouveau_private *dev_priv = dev->dev_private; 257 struct drm_nouveau_private *dev_priv = dev->dev_private;
258 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
245 int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20; 259 int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
260 unsigned long flags;
246 261
247 NV_DEBUG(dev, "ch%d\n", chan->id); 262 NV_DEBUG(dev, "ch%d\n", chan->id);
248 263
249 if (!chan->ramin) 264 if (!chan->ramin)
250 return; 265 return;
251 266
267 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
268 pgraph->fifo_access(dev, false);
269
270 if (pgraph->channel(dev) == chan)
271 pgraph->unload_context(dev);
272
252 for (i = hdr; i < hdr + 24; i += 4) 273 for (i = hdr; i < hdr + 24; i += 4)
253 nv_wo32(chan->ramin, i, 0); 274 nv_wo32(chan->ramin, i, 0);
254 dev_priv->engine.instmem.flush(dev); 275 dev_priv->engine.instmem.flush(dev);
255 276
277 pgraph->fifo_access(dev, true);
278 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
279
256 nouveau_gpuobj_ref(NULL, &chan->ramin_grctx); 280 nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
281
282 atomic_dec(&chan->vm->pgraph_refs);
257} 283}
258 284
259static int 285static int
@@ -306,7 +332,7 @@ nv50_graph_unload_context(struct drm_device *dev)
306 return 0; 332 return 0;
307} 333}
308 334
309void 335static void
310nv50_graph_context_switch(struct drm_device *dev) 336nv50_graph_context_switch(struct drm_device *dev)
311{ 337{
312 uint32_t inst; 338 uint32_t inst;
@@ -322,8 +348,8 @@ nv50_graph_context_switch(struct drm_device *dev)
322} 348}
323 349
324static int 350static int
325nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan, int grclass, 351nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan,
326 int mthd, uint32_t data) 352 u32 class, u32 mthd, u32 data)
327{ 353{
328 struct nouveau_gpuobj *gpuobj; 354 struct nouveau_gpuobj *gpuobj;
329 355
@@ -340,8 +366,8 @@ nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan, int grclass,
340} 366}
341 367
342static int 368static int
343nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan, int grclass, 369nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan,
344 int mthd, uint32_t data) 370 u32 class, u32 mthd, u32 data)
345{ 371{
346 if (nouveau_notifier_offset(chan->nvsw.vblsem, &data)) 372 if (nouveau_notifier_offset(chan->nvsw.vblsem, &data))
347 return -ERANGE; 373 return -ERANGE;
@@ -351,16 +377,16 @@ nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan, int grclass,
351} 377}
352 378
353static int 379static int
354nv50_graph_nvsw_vblsem_release_val(struct nouveau_channel *chan, int grclass, 380nv50_graph_nvsw_vblsem_release_val(struct nouveau_channel *chan,
355 int mthd, uint32_t data) 381 u32 class, u32 mthd, u32 data)
356{ 382{
357 chan->nvsw.vblsem_rval = data; 383 chan->nvsw.vblsem_rval = data;
358 return 0; 384 return 0;
359} 385}
360 386
361static int 387static int
362nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan, int grclass, 388nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan,
363 int mthd, uint32_t data) 389 u32 class, u32 mthd, u32 data)
364{ 390{
365 struct drm_device *dev = chan->dev; 391 struct drm_device *dev = chan->dev;
366 struct drm_nouveau_private *dev_priv = dev->dev_private; 392 struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -368,45 +394,85 @@ nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan, int grclass,
368 if (!chan->nvsw.vblsem || chan->nvsw.vblsem_offset == ~0 || data > 1) 394 if (!chan->nvsw.vblsem || chan->nvsw.vblsem_offset == ~0 || data > 1)
369 return -EINVAL; 395 return -EINVAL;
370 396
371 if (!(nv_rd32(dev, NV50_PDISPLAY_INTR_EN) & 397 drm_vblank_get(dev, data);
372 NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(data))) {
373 nv_wr32(dev, NV50_PDISPLAY_INTR_1,
374 NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(data));
375 nv_wr32(dev, NV50_PDISPLAY_INTR_EN, nv_rd32(dev,
376 NV50_PDISPLAY_INTR_EN) |
377 NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(data));
378 }
379 398
399 chan->nvsw.vblsem_head = data;
380 list_add(&chan->nvsw.vbl_wait, &dev_priv->vbl_waiting); 400 list_add(&chan->nvsw.vbl_wait, &dev_priv->vbl_waiting);
401
381 return 0; 402 return 0;
382} 403}
383 404
384static struct nouveau_pgraph_object_method nv50_graph_nvsw_methods[] = { 405static int
385 { 0x018c, nv50_graph_nvsw_dma_vblsem }, 406nv50_graph_nvsw_mthd_page_flip(struct nouveau_channel *chan,
386 { 0x0400, nv50_graph_nvsw_vblsem_offset }, 407 u32 class, u32 mthd, u32 data)
387 { 0x0404, nv50_graph_nvsw_vblsem_release_val }, 408{
388 { 0x0408, nv50_graph_nvsw_vblsem_release }, 409 struct nouveau_page_flip_state s;
389 {}
390};
391 410
392struct nouveau_pgraph_object_class nv50_graph_grclass[] = { 411 if (!nouveau_finish_page_flip(chan, &s)) {
393 { 0x506e, true, nv50_graph_nvsw_methods }, /* nvsw */ 412 /* XXX - Do something here */
394 { 0x0030, false, NULL }, /* null */ 413 }
395 { 0x5039, false, NULL }, /* m2mf */ 414
396 { 0x502d, false, NULL }, /* 2d */ 415 return 0;
397 { 0x50c0, false, NULL }, /* compute */ 416}
398 { 0x85c0, false, NULL }, /* compute (nva3, nva5, nva8) */ 417
399 { 0x5097, false, NULL }, /* tesla (nv50) */ 418static int
400 { 0x8297, false, NULL }, /* tesla (nv8x/nv9x) */ 419nv50_graph_register(struct drm_device *dev)
401 { 0x8397, false, NULL }, /* tesla (nva0, nvaa, nvac) */ 420{
402 { 0x8597, false, NULL }, /* tesla (nva3, nva5, nva8) */ 421 struct drm_nouveau_private *dev_priv = dev->dev_private;
403 {} 422
404}; 423 if (dev_priv->engine.graph.registered)
424 return 0;
425
426 NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
427 NVOBJ_MTHD (dev, 0x506e, 0x018c, nv50_graph_nvsw_dma_vblsem);
428 NVOBJ_MTHD (dev, 0x506e, 0x0400, nv50_graph_nvsw_vblsem_offset);
429 NVOBJ_MTHD (dev, 0x506e, 0x0404, nv50_graph_nvsw_vblsem_release_val);
430 NVOBJ_MTHD (dev, 0x506e, 0x0408, nv50_graph_nvsw_vblsem_release);
431 NVOBJ_MTHD (dev, 0x506e, 0x0500, nv50_graph_nvsw_mthd_page_flip);
432
433 NVOBJ_CLASS(dev, 0x0030, GR); /* null */
434 NVOBJ_CLASS(dev, 0x5039, GR); /* m2mf */
435 NVOBJ_CLASS(dev, 0x502d, GR); /* 2d */
436
437 /* tesla */
438 if (dev_priv->chipset == 0x50)
439 NVOBJ_CLASS(dev, 0x5097, GR); /* tesla (nv50) */
440 else
441 if (dev_priv->chipset < 0xa0)
442 NVOBJ_CLASS(dev, 0x8297, GR); /* tesla (nv8x/nv9x) */
443 else {
444 switch (dev_priv->chipset) {
445 case 0xa0:
446 case 0xaa:
447 case 0xac:
448 NVOBJ_CLASS(dev, 0x8397, GR);
449 break;
450 case 0xa3:
451 case 0xa5:
452 case 0xa8:
453 NVOBJ_CLASS(dev, 0x8597, GR);
454 break;
455 case 0xaf:
456 NVOBJ_CLASS(dev, 0x8697, GR);
457 break;
458 }
459 }
460
461 /* compute */
462 NVOBJ_CLASS(dev, 0x50c0, GR);
463 if (dev_priv->chipset > 0xa0 &&
464 dev_priv->chipset != 0xaa &&
465 dev_priv->chipset != 0xac)
466 NVOBJ_CLASS(dev, 0x85c0, GR);
467
468 dev_priv->engine.graph.registered = true;
469 return 0;
470}
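
The tesla (3D) class selection above depends only on the chipset id. Condensed into a lookup helper for readability (an editorial sketch, not driver code; class numbers copied verbatim from the switch above, and it assumes an nv50-family chipset):

#include <stdio.h>

/* 0 means "no tesla class for this chipset". */
static unsigned tesla_class_for(unsigned chipset)
{
	if (chipset == 0x50)
		return 0x5097;
	if (chipset < 0xa0)
		return 0x8297; /* nv8x/nv9x */
	switch (chipset) {
	case 0xa0: case 0xaa: case 0xac: return 0x8397;
	case 0xa3: case 0xa5: case 0xa8: return 0x8597;
	case 0xaf:                       return 0x8697;
	}
	return 0;
}

int main(void)
{
	printf("nv86 -> 0x%04x\n", tesla_class_for(0x86)); /* 0x8297 */
	return 0;
}
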
405 471
406void 472void
407nv50_graph_tlb_flush(struct drm_device *dev) 473nv50_graph_tlb_flush(struct drm_device *dev)
408{ 474{
409 nv50_vm_flush(dev, 0); 475 nv50_vm_flush_engine(dev, 0);
410} 476}
411 477
412void 478void
@@ -449,8 +515,535 @@ nv86_graph_tlb_flush(struct drm_device *dev)
449 nv_rd32(dev, 0x400384), nv_rd32(dev, 0x400388)); 515 nv_rd32(dev, 0x400384), nv_rd32(dev, 0x400388));
450 } 516 }
451 517
452 nv50_vm_flush(dev, 0); 518 nv50_vm_flush_engine(dev, 0);
453 519
454 nv_mask(dev, 0x400500, 0x00000001, 0x00000001); 520 nv_mask(dev, 0x400500, 0x00000001, 0x00000001);
455 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); 521 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
456} 522}
523
524static struct nouveau_enum nv50_mp_exec_error_names[] =
525{
526 { 3, "STACK_UNDERFLOW" },
527 { 4, "QUADON_ACTIVE" },
528 { 8, "TIMEOUT" },
529 { 0x10, "INVALID_OPCODE" },
530 { 0x40, "BREAKPOINT" },
531 {}
532};
533
534static struct nouveau_bitfield nv50_graph_trap_m2mf[] = {
535 { 0x00000001, "NOTIFY" },
536 { 0x00000002, "IN" },
537 { 0x00000004, "OUT" },
538 {}
539};
540
541static struct nouveau_bitfield nv50_graph_trap_vfetch[] = {
542 { 0x00000001, "FAULT" },
543 {}
544};
545
546static struct nouveau_bitfield nv50_graph_trap_strmout[] = {
547 { 0x00000001, "FAULT" },
548 {}
549};
550
551static struct nouveau_bitfield nv50_graph_trap_ccache[] = {
552 { 0x00000001, "FAULT" },
553 {}
554};
555
556/* There must be a *lot* of these. Will take some time to gather them up. */
557struct nouveau_enum nv50_data_error_names[] = {
558 { 0x00000003, "INVALID_QUERY_OR_TEXTURE" },
559 { 0x00000004, "INVALID_VALUE" },
560 { 0x00000005, "INVALID_ENUM" },
561 { 0x00000008, "INVALID_OBJECT" },
562 { 0x00000009, "READ_ONLY_OBJECT" },
563 { 0x0000000a, "SUPERVISOR_OBJECT" },
564 { 0x0000000b, "INVALID_ADDRESS_ALIGNMENT" },
565 { 0x0000000c, "INVALID_BITFIELD" },
566 { 0x0000000d, "BEGIN_END_ACTIVE" },
567 { 0x0000000e, "SEMANTIC_COLOR_BACK_OVER_LIMIT" },
568 { 0x0000000f, "VIEWPORT_ID_NEEDS_GP" },
569 { 0x00000010, "RT_DOUBLE_BIND" },
570 { 0x00000011, "RT_TYPES_MISMATCH" },
571 { 0x00000012, "RT_LINEAR_WITH_ZETA" },
572 { 0x00000015, "FP_TOO_FEW_REGS" },
573 { 0x00000016, "ZETA_FORMAT_CSAA_MISMATCH" },
574 { 0x00000017, "RT_LINEAR_WITH_MSAA" },
575 { 0x00000018, "FP_INTERPOLANT_START_OVER_LIMIT" },
576 { 0x00000019, "SEMANTIC_LAYER_OVER_LIMIT" },
577 { 0x0000001a, "RT_INVALID_ALIGNMENT" },
578 { 0x0000001b, "SAMPLER_OVER_LIMIT" },
579 { 0x0000001c, "TEXTURE_OVER_LIMIT" },
580 { 0x0000001e, "GP_TOO_MANY_OUTPUTS" },
581 { 0x0000001f, "RT_BPP128_WITH_MS8" },
582 { 0x00000021, "Z_OUT_OF_BOUNDS" },
583 { 0x00000023, "XY_OUT_OF_BOUNDS" },
584 { 0x00000027, "CP_MORE_PARAMS_THAN_SHARED" },
585 { 0x00000028, "CP_NO_REG_SPACE_STRIPED" },
586 { 0x00000029, "CP_NO_REG_SPACE_PACKED" },
587 { 0x0000002a, "CP_NOT_ENOUGH_WARPS" },
588 { 0x0000002b, "CP_BLOCK_SIZE_MISMATCH" },
589 { 0x0000002c, "CP_NOT_ENOUGH_LOCAL_WARPS" },
590 { 0x0000002d, "CP_NOT_ENOUGH_STACK_WARPS" },
591 { 0x0000002e, "CP_NO_BLOCKDIM_LATCH" },
592 { 0x00000031, "ENG2D_FORMAT_MISMATCH" },
593 { 0x0000003f, "PRIMITIVE_ID_NEEDS_GP" },
594 { 0x00000044, "SEMANTIC_VIEWPORT_OVER_LIMIT" },
595 { 0x00000045, "SEMANTIC_COLOR_FRONT_OVER_LIMIT" },
596 { 0x00000046, "LAYER_ID_NEEDS_GP" },
597 { 0x00000047, "SEMANTIC_CLIP_OVER_LIMIT" },
598 { 0x00000048, "SEMANTIC_PTSZ_OVER_LIMIT" },
599 {}
600};
601
602static struct nouveau_bitfield nv50_graph_intr[] = {
603 { 0x00000001, "NOTIFY" },
604 { 0x00000002, "COMPUTE_QUERY" },
605 { 0x00000010, "ILLEGAL_MTHD" },
606 { 0x00000020, "ILLEGAL_CLASS" },
607 { 0x00000040, "DOUBLE_NOTIFY" },
608 { 0x00001000, "CONTEXT_SWITCH" },
609 { 0x00010000, "BUFFER_NOTIFY" },
610 { 0x00100000, "DATA_ERROR" },
611 { 0x00200000, "TRAP" },
612 { 0x01000000, "SINGLE_STEP" },
613 {}
614};
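
Tables like these are consumed by nouveau_bitfield_print()/nouveau_enum_print() in the error paths below. A minimal sketch of how such a printer walks a table, assuming the { mask, name } layout the initializers imply (the real helper lives elsewhere in the driver):

#include <stdint.h>
#include <stdio.h>

/* Assumed layout, matching the initializers above. */
struct bitfield {
	uint32_t mask;
	const char *name;
};

static const struct bitfield graph_intr[] = {
	{ 0x00000001, "NOTIFY" },
	{ 0x00100000, "DATA_ERROR" },
	{ 0x00200000, "TRAP" },
	{ 0, NULL }
};

/* Roughly what nouveau_bitfield_print() does: name every set bit that
 * has a table entry. */
static void bitfield_print(const struct bitfield *bf, uint32_t value)
{
	for (; bf->name; bf++) {
		if (value & bf->mask)
			printf(" %s", bf->name);
	}
}

int main(void)
{
	bitfield_print(graph_intr, 0x00200001); /* prints " NOTIFY TRAP" */
	printf("\n");
	return 0;
}
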
615
616static void
617nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display)
618{
619 struct drm_nouveau_private *dev_priv = dev->dev_private;
620 uint32_t units = nv_rd32(dev, 0x1540);
621 uint32_t addr, mp10, status, pc, oplow, ophigh;
622 int i;
623 int mps = 0;
624 for (i = 0; i < 4; i++) {
625 if (!(units & 1 << (i+24)))
626 continue;
627 if (dev_priv->chipset < 0xa0)
628 addr = 0x408200 + (tpid << 12) + (i << 7);
629 else
630 addr = 0x408100 + (tpid << 11) + (i << 7);
631 mp10 = nv_rd32(dev, addr + 0x10);
632 status = nv_rd32(dev, addr + 0x14);
633 if (!status)
634 continue;
635 if (display) {
636 nv_rd32(dev, addr + 0x20);
637 pc = nv_rd32(dev, addr + 0x24);
638 oplow = nv_rd32(dev, addr + 0x70);
 639 ophigh = nv_rd32(dev, addr + 0x74);

640 NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - "
641 "TP %d MP %d: ", tpid, i);
642 nouveau_enum_print(nv50_mp_exec_error_names, status);
643 printk(" at %06x warp %d, opcode %08x %08x\n",
644 pc&0xffffff, pc >> 24,
645 oplow, ophigh);
646 }
647 nv_wr32(dev, addr + 0x10, mp10);
648 nv_wr32(dev, addr + 0x14, 0);
649 mps++;
650 }
651 if (!mps && display)
652 NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: "
653 "No MPs claiming errors?\n", tpid);
654}
655
656static void
657nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
658 uint32_t ustatus_new, int display, const char *name)
659{
660 struct drm_nouveau_private *dev_priv = dev->dev_private;
661 int tps = 0;
662 uint32_t units = nv_rd32(dev, 0x1540);
663 int i, r;
664 uint32_t ustatus_addr, ustatus;
665 for (i = 0; i < 16; i++) {
666 if (!(units & (1 << i)))
667 continue;
668 if (dev_priv->chipset < 0xa0)
669 ustatus_addr = ustatus_old + (i << 12);
670 else
671 ustatus_addr = ustatus_new + (i << 11);
672 ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff;
673 if (!ustatus)
674 continue;
675 tps++;
676 switch (type) {
677 case 6: /* texture error... unknown for now */
678 nv50_fb_vm_trap(dev, display, name);
679 if (display) {
680 NV_ERROR(dev, "magic set %d:\n", i);
681 for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
682 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
683 nv_rd32(dev, r));
684 }
685 break;
686 case 7: /* MP error */
687 if (ustatus & 0x00010000) {
688 nv50_pgraph_mp_trap(dev, i, display);
689 ustatus &= ~0x00010000;
690 }
691 break;
692 case 8: /* TPDMA error */
693 {
694 uint32_t e0c = nv_rd32(dev, ustatus_addr + 4);
695 uint32_t e10 = nv_rd32(dev, ustatus_addr + 8);
696 uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc);
697 uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10);
698 uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
699 uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
700 uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
701 nv50_fb_vm_trap(dev, display, name);
702 /* 2d engine destination */
703 if (ustatus & 0x00000010) {
704 if (display) {
705 NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
706 i, e14, e10);
707 NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
708 i, e0c, e18, e1c, e20, e24);
709 }
710 ustatus &= ~0x00000010;
711 }
712 /* Render target */
713 if (ustatus & 0x00000040) {
714 if (display) {
715 NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
716 i, e14, e10);
717 NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
718 i, e0c, e18, e1c, e20, e24);
719 }
720 ustatus &= ~0x00000040;
721 }
722 /* CUDA memory: l[], g[] or stack. */
723 if (ustatus & 0x00000080) {
724 if (display) {
725 if (e18 & 0x80000000) {
726 /* g[] read fault? */
727 NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
728 i, e14, e10 | ((e18 >> 24) & 0x1f));
729 e18 &= ~0x1f000000;
730 } else if (e18 & 0xc) {
731 /* g[] write fault? */
732 NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
733 i, e14, e10 | ((e18 >> 7) & 0x1f));
734 e18 &= ~0x00000f80;
735 } else {
736 NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
737 i, e14, e10);
738 }
739 NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
740 i, e0c, e18, e1c, e20, e24);
741 }
742 ustatus &= ~0x00000080;
743 }
744 }
745 break;
746 }
747 if (ustatus) {
748 if (display)
749 NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
750 }
751 nv_wr32(dev, ustatus_addr, 0xc0000000);
752 }
753
754 if (!tps && display)
755 NV_INFO(dev, "%s - No TPs claiming errors?\n", name);
756}
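
Both branches above compute the same per-TP register address; only the base and stride changed at NVA0. A standalone sketch of the address computation, using the bases the callers pass in (e.g. 0x408900/0x408600 for TEXTURE) as illustration:

#include <stdint.h>
#include <stdio.h>

/* Per-TP unit status address: 4KiB stride before NVA0, 2KiB after. */
static uint32_t tp_ustatus_addr(unsigned chipset, uint32_t old_base,
				uint32_t new_base, int tp)
{
	return chipset < 0xa0 ? old_base + ((uint32_t)tp << 12)
			      : new_base + ((uint32_t)tp << 11);
}

int main(void)
{
	printf("nv50 TP2 TEXTURE ustatus: 0x%06x\n",
	       tp_ustatus_addr(0x50, 0x408900, 0x408600, 2)); /* 0x40a900 */
	return 0;
}
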
757
758static int
759nv50_pgraph_trap_handler(struct drm_device *dev, u32 display, u64 inst, u32 chid)
760{
761 u32 status = nv_rd32(dev, 0x400108);
762 u32 ustatus;
763
764 if (!status && display) {
765 NV_INFO(dev, "PGRAPH - TRAP: no units reporting traps?\n");
766 return 1;
767 }
768
769 /* DISPATCH: Relays commands to other units and handles NOTIFY,
770 * COND, QUERY. If you get a trap from it, the command is still stuck
771 * in DISPATCH and you need to do something about it. */
772 if (status & 0x001) {
773 ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff;
774 if (!ustatus && display) {
775 NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n");
776 }
777
778 nv_wr32(dev, 0x400500, 0x00000000);
779
780 /* Known to be triggered by screwed up NOTIFY and COND... */
781 if (ustatus & 0x00000001) {
782 u32 addr = nv_rd32(dev, 0x400808);
783 u32 subc = (addr & 0x00070000) >> 16;
784 u32 mthd = (addr & 0x00001ffc);
785 u32 datal = nv_rd32(dev, 0x40080c);
786 u32 datah = nv_rd32(dev, 0x400810);
787 u32 class = nv_rd32(dev, 0x400814);
788 u32 r848 = nv_rd32(dev, 0x400848);
789
790 NV_INFO(dev, "PGRAPH - TRAP DISPATCH_FAULT\n");
791 if (display && (addr & 0x80000000)) {
792 NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) "
793 "subc %d class 0x%04x mthd 0x%04x "
794 "data 0x%08x%08x "
795 "400808 0x%08x 400848 0x%08x\n",
796 chid, inst, subc, class, mthd, datah,
797 datal, addr, r848);
798 } else
799 if (display) {
800 NV_INFO(dev, "PGRAPH - no stuck command?\n");
801 }
802
803 nv_wr32(dev, 0x400808, 0);
804 nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3);
805 nv_wr32(dev, 0x400848, 0);
806 ustatus &= ~0x00000001;
807 }
808
809 if (ustatus & 0x00000002) {
810 u32 addr = nv_rd32(dev, 0x40084c);
811 u32 subc = (addr & 0x00070000) >> 16;
812 u32 mthd = (addr & 0x00001ffc);
813 u32 data = nv_rd32(dev, 0x40085c);
814 u32 class = nv_rd32(dev, 0x400814);
815
816 NV_INFO(dev, "PGRAPH - TRAP DISPATCH_QUERY\n");
817 if (display && (addr & 0x80000000)) {
818 NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) "
819 "subc %d class 0x%04x mthd 0x%04x "
820 "data 0x%08x 40084c 0x%08x\n",
821 chid, inst, subc, class, mthd,
822 data, addr);
823 } else
824 if (display) {
825 NV_INFO(dev, "PGRAPH - no stuck command?\n");
826 }
827
828 nv_wr32(dev, 0x40084c, 0);
829 ustatus &= ~0x00000002;
830 }
831
832 if (ustatus && display) {
833 NV_INFO(dev, "PGRAPH - TRAP_DISPATCH (unknown "
834 "0x%08x)\n", ustatus);
835 }
836
837 nv_wr32(dev, 0x400804, 0xc0000000);
838 nv_wr32(dev, 0x400108, 0x001);
839 status &= ~0x001;
840 if (!status)
841 return 0;
842 }
843
844 /* M2MF: Memory to memory copy engine. */
845 if (status & 0x002) {
846 u32 ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff;
847 if (display) {
848 NV_INFO(dev, "PGRAPH - TRAP_M2MF");
849 nouveau_bitfield_print(nv50_graph_trap_m2mf, ustatus);
850 printk("\n");
851 NV_INFO(dev, "PGRAPH - TRAP_M2MF %08x %08x %08x %08x\n",
852 nv_rd32(dev, 0x406804), nv_rd32(dev, 0x406808),
853 nv_rd32(dev, 0x40680c), nv_rd32(dev, 0x406810));
854
855 }
856
857 /* No sane way found yet -- just reset the bugger. */
858 nv_wr32(dev, 0x400040, 2);
859 nv_wr32(dev, 0x400040, 0);
860 nv_wr32(dev, 0x406800, 0xc0000000);
861 nv_wr32(dev, 0x400108, 0x002);
862 status &= ~0x002;
863 }
864
865 /* VFETCH: Fetches data from vertex buffers. */
866 if (status & 0x004) {
867 u32 ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff;
868 if (display) {
869 NV_INFO(dev, "PGRAPH - TRAP_VFETCH");
870 nouveau_bitfield_print(nv50_graph_trap_vfetch, ustatus);
871 printk("\n");
872 NV_INFO(dev, "PGRAPH - TRAP_VFETCH %08x %08x %08x %08x\n",
873 nv_rd32(dev, 0x400c00), nv_rd32(dev, 0x400c08),
874 nv_rd32(dev, 0x400c0c), nv_rd32(dev, 0x400c10));
875 }
876
877 nv_wr32(dev, 0x400c04, 0xc0000000);
878 nv_wr32(dev, 0x400108, 0x004);
879 status &= ~0x004;
880 }
881
882 /* STRMOUT: DirectX streamout / OpenGL transform feedback. */
883 if (status & 0x008) {
884 ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff;
885 if (display) {
886 NV_INFO(dev, "PGRAPH - TRAP_STRMOUT");
887 nouveau_bitfield_print(nv50_graph_trap_strmout, ustatus);
888 printk("\n");
889 NV_INFO(dev, "PGRAPH - TRAP_STRMOUT %08x %08x %08x %08x\n",
890 nv_rd32(dev, 0x401804), nv_rd32(dev, 0x401808),
891 nv_rd32(dev, 0x40180c), nv_rd32(dev, 0x401810));
892
893 }
894
895 /* No sane way found yet -- just reset the bugger. */
896 nv_wr32(dev, 0x400040, 0x80);
897 nv_wr32(dev, 0x400040, 0);
898 nv_wr32(dev, 0x401800, 0xc0000000);
899 nv_wr32(dev, 0x400108, 0x008);
900 status &= ~0x008;
901 }
902
903 /* CCACHE: Handles code and c[] caches and fills them. */
904 if (status & 0x010) {
905 ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff;
906 if (display) {
907 NV_INFO(dev, "PGRAPH - TRAP_CCACHE");
908 nouveau_bitfield_print(nv50_graph_trap_ccache, ustatus);
909 printk("\n");
910 NV_INFO(dev, "PGRAPH - TRAP_CCACHE %08x %08x %08x %08x"
911 " %08x %08x %08x\n",
912 nv_rd32(dev, 0x405800), nv_rd32(dev, 0x405804),
913 nv_rd32(dev, 0x405808), nv_rd32(dev, 0x40580c),
914 nv_rd32(dev, 0x405810), nv_rd32(dev, 0x405814),
915 nv_rd32(dev, 0x40581c));
916
917 }
918
919 nv_wr32(dev, 0x405018, 0xc0000000);
920 nv_wr32(dev, 0x400108, 0x010);
921 status &= ~0x010;
922 }
923
924 /* Unknown, not seen yet... 0x402000 is the only trap status reg
925 * remaining, so try to handle it anyway. Perhaps related to that
926 * unknown DMA slot on tesla? */
927 if (status & 0x20) {
928 ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
929 if (display)
930 NV_INFO(dev, "PGRAPH - TRAP_UNKC04 0x%08x\n", ustatus);
931 nv_wr32(dev, 0x402000, 0xc0000000);
 932 /* no status modification on purpose */
933 }
934
935 /* TEXTURE: CUDA texturing units */
936 if (status & 0x040) {
937 nv50_pgraph_tp_trap(dev, 6, 0x408900, 0x408600, display,
938 "PGRAPH - TRAP_TEXTURE");
939 nv_wr32(dev, 0x400108, 0x040);
940 status &= ~0x040;
941 }
942
943 /* MP: CUDA execution engines. */
944 if (status & 0x080) {
945 nv50_pgraph_tp_trap(dev, 7, 0x408314, 0x40831c, display,
946 "PGRAPH - TRAP_MP");
947 nv_wr32(dev, 0x400108, 0x080);
948 status &= ~0x080;
949 }
950
951 /* TPDMA: Handles TP-initiated uncached memory accesses:
952 * l[], g[], stack, 2d surfaces, render targets. */
953 if (status & 0x100) {
954 nv50_pgraph_tp_trap(dev, 8, 0x408e08, 0x408708, display,
955 "PGRAPH - TRAP_TPDMA");
956 nv_wr32(dev, 0x400108, 0x100);
957 status &= ~0x100;
958 }
959
960 if (status) {
961 if (display)
962 NV_INFO(dev, "PGRAPH - TRAP: unknown 0x%08x\n", status);
963 nv_wr32(dev, 0x400108, status);
964 }
965
966 return 1;
967}
968
969static int
970nv50_graph_isr_chid(struct drm_device *dev, u64 inst)
971{
972 struct drm_nouveau_private *dev_priv = dev->dev_private;
973 struct nouveau_channel *chan;
974 unsigned long flags;
975 int i;
976
977 spin_lock_irqsave(&dev_priv->channels.lock, flags);
978 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
979 chan = dev_priv->channels.ptr[i];
980 if (!chan || !chan->ramin)
981 continue;
982
983 if (inst == chan->ramin->vinst)
984 break;
985 }
986 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
987 return i;
988}
989
990static void
991nv50_graph_isr(struct drm_device *dev)
992{
993 u32 stat;
994
995 while ((stat = nv_rd32(dev, 0x400100))) {
996 u64 inst = (u64)(nv_rd32(dev, 0x40032c) & 0x0fffffff) << 12;
997 u32 chid = nv50_graph_isr_chid(dev, inst);
998 u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
999 u32 subc = (addr & 0x00070000) >> 16;
1000 u32 mthd = (addr & 0x00001ffc);
1001 u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
1002 u32 class = nv_rd32(dev, 0x400814);
1003 u32 show = stat;
1004
1005 if (stat & 0x00000010) {
1006 if (!nouveau_gpuobj_mthd_call2(dev, chid, class,
1007 mthd, data))
1008 show &= ~0x00000010;
1009 }
1010
1011 if (stat & 0x00001000) {
1012 nv_wr32(dev, 0x400500, 0x00000000);
1013 nv_wr32(dev, 0x400100, 0x00001000);
1014 nv_mask(dev, 0x40013c, 0x00001000, 0x00000000);
1015 nv50_graph_context_switch(dev);
1016 stat &= ~0x00001000;
1017 show &= ~0x00001000;
1018 }
1019
1020 show = (show && nouveau_ratelimit()) ? show : 0;
1021
1022 if (show & 0x00100000) {
1023 u32 ecode = nv_rd32(dev, 0x400110);
1024 NV_INFO(dev, "PGRAPH - DATA_ERROR ");
1025 nouveau_enum_print(nv50_data_error_names, ecode);
1026 printk("\n");
1027 }
1028
1029 if (stat & 0x00200000) {
1030 if (!nv50_pgraph_trap_handler(dev, show, inst, chid))
1031 show &= ~0x00200000;
1032 }
1033
1034 nv_wr32(dev, 0x400100, stat);
1035 nv_wr32(dev, 0x400500, 0x00010001);
1036
1037 if (show) {
1038 NV_INFO(dev, "PGRAPH -");
1039 nouveau_bitfield_print(nv50_graph_intr, show);
1040 printk("\n");
1041 NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) subc %d "
1042 "class 0x%04x mthd 0x%04x data 0x%08x\n",
1043 chid, inst, subc, class, mthd, data);
1044 }
1045 }
1046
1047 if (nv_rd32(dev, 0x400824) & (1 << 31))
1048 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
1049}
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index b773229b7647..2e1b1cd19a4b 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -27,14 +27,20 @@
27 27
28#include "drmP.h" 28#include "drmP.h"
29#include "drm.h" 29#include "drm.h"
30
30#include "nouveau_drv.h" 31#include "nouveau_drv.h"
32#include "nouveau_vm.h"
33
34#define BAR1_VM_BASE 0x0020000000ULL
35#define BAR1_VM_SIZE pci_resource_len(dev->pdev, 1)
36#define BAR3_VM_BASE 0x0000000000ULL
37#define BAR3_VM_SIZE pci_resource_len(dev->pdev, 3)
31 38
32struct nv50_instmem_priv { 39struct nv50_instmem_priv {
33 uint32_t save1700[5]; /* 0x1700->0x1710 */ 40 uint32_t save1700[5]; /* 0x1700->0x1710 */
34 41
35 struct nouveau_gpuobj *pramin_pt; 42 struct nouveau_gpuobj *bar1_dmaobj;
36 struct nouveau_gpuobj *pramin_bar; 43 struct nouveau_gpuobj *bar3_dmaobj;
37 struct nouveau_gpuobj *fb_bar;
38}; 44};
39 45
40static void 46static void
@@ -48,6 +54,7 @@ nv50_channel_del(struct nouveau_channel **pchan)
48 return; 54 return;
49 55
50 nouveau_gpuobj_ref(NULL, &chan->ramfc); 56 nouveau_gpuobj_ref(NULL, &chan->ramfc);
57 nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
51 nouveau_gpuobj_ref(NULL, &chan->vm_pd); 58 nouveau_gpuobj_ref(NULL, &chan->vm_pd);
52 if (chan->ramin_heap.free_stack.next) 59 if (chan->ramin_heap.free_stack.next)
53 drm_mm_takedown(&chan->ramin_heap); 60 drm_mm_takedown(&chan->ramin_heap);
@@ -56,14 +63,14 @@ nv50_channel_del(struct nouveau_channel **pchan)
56} 63}
57 64
58static int 65static int
59nv50_channel_new(struct drm_device *dev, u32 size, 66nv50_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm,
60 struct nouveau_channel **pchan) 67 struct nouveau_channel **pchan)
61{ 68{
62 struct drm_nouveau_private *dev_priv = dev->dev_private; 69 struct drm_nouveau_private *dev_priv = dev->dev_private;
63 u32 pgd = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200; 70 u32 pgd = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
64 u32 fc = (dev_priv->chipset == 0x50) ? 0x0000 : 0x4200; 71 u32 fc = (dev_priv->chipset == 0x50) ? 0x0000 : 0x4200;
65 struct nouveau_channel *chan; 72 struct nouveau_channel *chan;
66 int ret; 73 int ret, i;
67 74
68 chan = kzalloc(sizeof(*chan), GFP_KERNEL); 75 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
69 if (!chan) 76 if (!chan)
@@ -92,6 +99,17 @@ nv50_channel_new(struct drm_device *dev, u32 size,
92 return ret; 99 return ret;
93 } 100 }
94 101
102 for (i = 0; i < 0x4000; i += 8) {
103 nv_wo32(chan->vm_pd, i + 0, 0x00000000);
104 nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe);
105 }
106
107 ret = nouveau_vm_ref(vm, &chan->vm, chan->vm_pd);
108 if (ret) {
109 nv50_channel_del(&chan);
110 return ret;
111 }
112
95 ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst == ~0 ? ~0 : 113 ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst == ~0 ? ~0 :
96 chan->ramin->pinst + fc, 114 chan->ramin->pinst + fc,
97 chan->ramin->vinst + fc, 0x100, 115 chan->ramin->vinst + fc, 0x100,
@@ -111,6 +129,7 @@ nv50_instmem_init(struct drm_device *dev)
111 struct drm_nouveau_private *dev_priv = dev->dev_private; 129 struct drm_nouveau_private *dev_priv = dev->dev_private;
112 struct nv50_instmem_priv *priv; 130 struct nv50_instmem_priv *priv;
113 struct nouveau_channel *chan; 131 struct nouveau_channel *chan;
132 struct nouveau_vm *vm;
114 int ret, i; 133 int ret, i;
115 u32 tmp; 134 u32 tmp;
116 135
@@ -127,112 +146,87 @@ nv50_instmem_init(struct drm_device *dev)
127 ret = drm_mm_init(&dev_priv->ramin_heap, 0, dev_priv->ramin_size); 146 ret = drm_mm_init(&dev_priv->ramin_heap, 0, dev_priv->ramin_size);
128 if (ret) { 147 if (ret) {
129 NV_ERROR(dev, "Failed to init RAMIN heap\n"); 148 NV_ERROR(dev, "Failed to init RAMIN heap\n");
130 return -ENOMEM; 149 goto error;
131 } 150 }
132 151
133 /* we need a channel to plug into the hw to control the BARs */ 152 /* BAR3 */
134 ret = nv50_channel_new(dev, 128*1024, &dev_priv->fifos[0]); 153 ret = nouveau_vm_new(dev, BAR3_VM_BASE, BAR3_VM_SIZE, BAR3_VM_BASE,
154 &dev_priv->bar3_vm);
135 if (ret) 155 if (ret)
136 return ret; 156 goto error;
137 chan = dev_priv->fifos[127] = dev_priv->fifos[0];
138 157
139 /* allocate page table for PRAMIN BAR */ 158 ret = nouveau_gpuobj_new(dev, NULL, (BAR3_VM_SIZE >> 12) * 8,
140 ret = nouveau_gpuobj_new(dev, chan, (dev_priv->ramin_size >> 12) * 8, 159 0x1000, NVOBJ_FLAG_DONT_MAP |
141 0x1000, NVOBJ_FLAG_ZERO_ALLOC, 160 NVOBJ_FLAG_ZERO_ALLOC,
142 &priv->pramin_pt); 161 &dev_priv->bar3_vm->pgt[0].obj[0]);
143 if (ret) 162 if (ret)
144 return ret; 163 goto error;
164 dev_priv->bar3_vm->pgt[0].refcount[0] = 1;
145 165
146 nv_wo32(chan->vm_pd, 0x0000, priv->pramin_pt->vinst | 0x63); 166 nv50_instmem_map(dev_priv->bar3_vm->pgt[0].obj[0]);
147 nv_wo32(chan->vm_pd, 0x0004, 0);
148 167
149 /* DMA object for PRAMIN BAR */ 168 ret = nv50_channel_new(dev, 128 * 1024, dev_priv->bar3_vm, &chan);
150 ret = nouveau_gpuobj_new(dev, chan, 6*4, 16, 0, &priv->pramin_bar);
151 if (ret) 169 if (ret)
152 return ret; 170 goto error;
153 nv_wo32(priv->pramin_bar, 0x00, 0x7fc00000); 171 dev_priv->channels.ptr[0] = dev_priv->channels.ptr[127] = chan;
154 nv_wo32(priv->pramin_bar, 0x04, dev_priv->ramin_size - 1); 172
155 nv_wo32(priv->pramin_bar, 0x08, 0x00000000); 173 ret = nv50_gpuobj_dma_new(chan, 0x0000, BAR3_VM_BASE, BAR3_VM_SIZE,
156 nv_wo32(priv->pramin_bar, 0x0c, 0x00000000); 174 NV_MEM_TARGET_VM, NV_MEM_ACCESS_VM,
157 nv_wo32(priv->pramin_bar, 0x10, 0x00000000); 175 NV_MEM_TYPE_VM, NV_MEM_COMP_VM,
158 nv_wo32(priv->pramin_bar, 0x14, 0x00000000); 176 &priv->bar3_dmaobj);
159
160 /* map channel into PRAMIN, gpuobj didn't do it for us */
161 ret = nv50_instmem_bind(dev, chan->ramin);
162 if (ret) 177 if (ret)
163 return ret; 178 goto error;
164 179
165 /* poke regs... */
166 nv_wr32(dev, 0x001704, 0x00000000 | (chan->ramin->vinst >> 12)); 180 nv_wr32(dev, 0x001704, 0x00000000 | (chan->ramin->vinst >> 12));
167 nv_wr32(dev, 0x001704, 0x40000000 | (chan->ramin->vinst >> 12)); 181 nv_wr32(dev, 0x001704, 0x40000000 | (chan->ramin->vinst >> 12));
168 nv_wr32(dev, 0x00170c, 0x80000000 | (priv->pramin_bar->cinst >> 4)); 182 nv_wr32(dev, 0x00170c, 0x80000000 | (priv->bar3_dmaobj->cinst >> 4));
169
170 tmp = nv_ri32(dev, 0);
171 nv_wi32(dev, 0, ~tmp);
172 if (nv_ri32(dev, 0) != ~tmp) {
173 NV_ERROR(dev, "PRAMIN readback failed\n");
174 return -EIO;
175 }
176 nv_wi32(dev, 0, tmp);
177 183
184 dev_priv->engine.instmem.flush(dev);
178 dev_priv->ramin_available = true; 185 dev_priv->ramin_available = true;
179 186
180 /* Determine VM layout */ 187 tmp = nv_ro32(chan->ramin, 0);
181 dev_priv->vm_gart_base = roundup(NV50_VM_BLOCK, NV50_VM_BLOCK); 188 nv_wo32(chan->ramin, 0, ~tmp);
182 dev_priv->vm_gart_size = NV50_VM_BLOCK; 189 if (nv_ro32(chan->ramin, 0) != ~tmp) {
183 190 NV_ERROR(dev, "PRAMIN readback failed\n");
184 dev_priv->vm_vram_base = dev_priv->vm_gart_base + dev_priv->vm_gart_size; 191 ret = -EIO;
185 dev_priv->vm_vram_size = dev_priv->vram_size; 192 goto error;
186 if (dev_priv->vm_vram_size > NV50_VM_MAX_VRAM)
187 dev_priv->vm_vram_size = NV50_VM_MAX_VRAM;
188 dev_priv->vm_vram_size = roundup(dev_priv->vm_vram_size, NV50_VM_BLOCK);
189 dev_priv->vm_vram_pt_nr = dev_priv->vm_vram_size / NV50_VM_BLOCK;
190
191 dev_priv->vm_end = dev_priv->vm_vram_base + dev_priv->vm_vram_size;
192
193 NV_DEBUG(dev, "NV50VM: GART 0x%016llx-0x%016llx\n",
194 dev_priv->vm_gart_base,
195 dev_priv->vm_gart_base + dev_priv->vm_gart_size - 1);
196 NV_DEBUG(dev, "NV50VM: VRAM 0x%016llx-0x%016llx\n",
197 dev_priv->vm_vram_base,
198 dev_priv->vm_vram_base + dev_priv->vm_vram_size - 1);
199
200 /* VRAM page table(s), mapped into VM at +1GiB */
201 for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
202 ret = nouveau_gpuobj_new(dev, NULL, NV50_VM_BLOCK / 0x10000 * 8,
203 0, NVOBJ_FLAG_ZERO_ALLOC,
204 &chan->vm_vram_pt[i]);
205 if (ret) {
206 NV_ERROR(dev, "Error creating VRAM PGT: %d\n", ret);
207 dev_priv->vm_vram_pt_nr = i;
208 return ret;
209 }
210 dev_priv->vm_vram_pt[i] = chan->vm_vram_pt[i];
211
212 nv_wo32(chan->vm_pd, 0x10 + (i*8),
213 chan->vm_vram_pt[i]->vinst | 0x61);
214 nv_wo32(chan->vm_pd, 0x14 + (i*8), 0);
215 } 193 }
194 nv_wo32(chan->ramin, 0, tmp);
216 195
217 /* DMA object for FB BAR */ 196 /* BAR1 */
218 ret = nouveau_gpuobj_new(dev, chan, 6*4, 16, 0, &priv->fb_bar); 197 ret = nouveau_vm_new(dev, BAR1_VM_BASE, BAR1_VM_SIZE, BAR1_VM_BASE, &vm);
219 if (ret) 198 if (ret)
220 return ret; 199 goto error;
221 nv_wo32(priv->fb_bar, 0x00, 0x7fc00000);
222 nv_wo32(priv->fb_bar, 0x04, 0x40000000 +
223 pci_resource_len(dev->pdev, 1) - 1);
224 nv_wo32(priv->fb_bar, 0x08, 0x40000000);
225 nv_wo32(priv->fb_bar, 0x0c, 0x00000000);
226 nv_wo32(priv->fb_bar, 0x10, 0x00000000);
227 nv_wo32(priv->fb_bar, 0x14, 0x00000000);
228 200
229 dev_priv->engine.instmem.flush(dev); 201 ret = nouveau_vm_ref(vm, &dev_priv->bar1_vm, chan->vm_pd);
202 if (ret)
203 goto error;
204 nouveau_vm_ref(NULL, &vm, NULL);
205
206 ret = nv50_gpuobj_dma_new(chan, 0x0000, BAR1_VM_BASE, BAR1_VM_SIZE,
207 NV_MEM_TARGET_VM, NV_MEM_ACCESS_VM,
208 NV_MEM_TYPE_VM, NV_MEM_COMP_VM,
209 &priv->bar1_dmaobj);
210 if (ret)
211 goto error;
230 212
231 nv_wr32(dev, 0x001708, 0x80000000 | (priv->fb_bar->cinst >> 4)); 213 nv_wr32(dev, 0x001708, 0x80000000 | (priv->bar1_dmaobj->cinst >> 4));
232 for (i = 0; i < 8; i++) 214 for (i = 0; i < 8; i++)
233 nv_wr32(dev, 0x1900 + (i*4), 0); 215 nv_wr32(dev, 0x1900 + (i*4), 0);
234 216
217 /* Create shared channel VM, space is reserved at the beginning
218 * to catch "NULL pointer" references
219 */
220 ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0020000000ULL,
221 &dev_priv->chan_vm);
222 if (ret)
223 return ret;
224
235 return 0; 225 return 0;
226
227error:
228 nv50_instmem_takedown(dev);
229 return ret;
236} 230}
237 231
238void 232void
@@ -240,7 +234,7 @@ nv50_instmem_takedown(struct drm_device *dev)
240{ 234{
241 struct drm_nouveau_private *dev_priv = dev->dev_private; 235 struct drm_nouveau_private *dev_priv = dev->dev_private;
242 struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; 236 struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
243 struct nouveau_channel *chan = dev_priv->fifos[0]; 237 struct nouveau_channel *chan = dev_priv->channels.ptr[0];
244 int i; 238 int i;
245 239
246 NV_DEBUG(dev, "\n"); 240 NV_DEBUG(dev, "\n");
@@ -250,23 +244,23 @@ nv50_instmem_takedown(struct drm_device *dev)
250 244
251 dev_priv->ramin_available = false; 245 dev_priv->ramin_available = false;
252 246
253 /* Restore state from before init */ 247 nouveau_vm_ref(NULL, &dev_priv->chan_vm, NULL);
248
254 for (i = 0x1700; i <= 0x1710; i += 4) 249 for (i = 0x1700; i <= 0x1710; i += 4)
255 nv_wr32(dev, i, priv->save1700[(i - 0x1700) / 4]); 250 nv_wr32(dev, i, priv->save1700[(i - 0x1700) / 4]);
256 251
257 nouveau_gpuobj_ref(NULL, &priv->fb_bar); 252 nouveau_gpuobj_ref(NULL, &priv->bar3_dmaobj);
258 nouveau_gpuobj_ref(NULL, &priv->pramin_bar); 253 nouveau_gpuobj_ref(NULL, &priv->bar1_dmaobj);
259 nouveau_gpuobj_ref(NULL, &priv->pramin_pt);
260 254
261 /* Destroy dummy channel */ 255 nouveau_vm_ref(NULL, &dev_priv->bar1_vm, chan->vm_pd);
262 if (chan) { 256 dev_priv->channels.ptr[127] = 0;
263 for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) 257 nv50_channel_del(&dev_priv->channels.ptr[0]);
264 nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]);
265 dev_priv->vm_vram_pt_nr = 0;
266 258
267 nv50_channel_del(&dev_priv->fifos[0]); 259 nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]);
268 dev_priv->fifos[127] = NULL; 260 nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL);
269 } 261
262 if (dev_priv->ramin_heap.free_stack.next)
263 drm_mm_takedown(&dev_priv->ramin_heap);
270 264
271 dev_priv->engine.instmem.priv = NULL; 265 dev_priv->engine.instmem.priv = NULL;
272 kfree(priv); 266 kfree(priv);
@@ -276,16 +270,8 @@ int
276nv50_instmem_suspend(struct drm_device *dev) 270nv50_instmem_suspend(struct drm_device *dev)
277{ 271{
278 struct drm_nouveau_private *dev_priv = dev->dev_private; 272 struct drm_nouveau_private *dev_priv = dev->dev_private;
279 struct nouveau_channel *chan = dev_priv->fifos[0];
280 struct nouveau_gpuobj *ramin = chan->ramin;
281 int i;
282 273
283 ramin->im_backing_suspend = vmalloc(ramin->size); 274 dev_priv->ramin_available = false;
284 if (!ramin->im_backing_suspend)
285 return -ENOMEM;
286
287 for (i = 0; i < ramin->size; i += 4)
288 ramin->im_backing_suspend[i/4] = nv_ri32(dev, i);
289 return 0; 275 return 0;
290} 276}
291 277
@@ -294,146 +280,121 @@ nv50_instmem_resume(struct drm_device *dev)
294{ 280{
295 struct drm_nouveau_private *dev_priv = dev->dev_private; 281 struct drm_nouveau_private *dev_priv = dev->dev_private;
296 struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; 282 struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
297 struct nouveau_channel *chan = dev_priv->fifos[0]; 283 struct nouveau_channel *chan = dev_priv->channels.ptr[0];
298 struct nouveau_gpuobj *ramin = chan->ramin;
299 int i; 284 int i;
300 285
301 dev_priv->ramin_available = false;
302 dev_priv->ramin_base = ~0;
303 for (i = 0; i < ramin->size; i += 4)
304 nv_wo32(ramin, i, ramin->im_backing_suspend[i/4]);
305 dev_priv->ramin_available = true;
306 vfree(ramin->im_backing_suspend);
307 ramin->im_backing_suspend = NULL;
308
309 /* Poke the relevant regs, and pray it works :) */ 286 /* Poke the relevant regs, and pray it works :) */
310 nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12)); 287 nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12));
311 nv_wr32(dev, NV50_PUNK_UNK1710, 0); 288 nv_wr32(dev, NV50_PUNK_UNK1710, 0);
312 nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12) | 289 nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12) |
313 NV50_PUNK_BAR_CFG_BASE_VALID); 290 NV50_PUNK_BAR_CFG_BASE_VALID);
314 nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->cinst >> 4) | 291 nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->bar1_dmaobj->cinst >> 4) |
315 NV50_PUNK_BAR1_CTXDMA_VALID); 292 NV50_PUNK_BAR1_CTXDMA_VALID);
316 nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->cinst >> 4) | 293 nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->bar3_dmaobj->cinst >> 4) |
317 NV50_PUNK_BAR3_CTXDMA_VALID); 294 NV50_PUNK_BAR3_CTXDMA_VALID);
318 295
319 for (i = 0; i < 8; i++) 296 for (i = 0; i < 8; i++)
320 nv_wr32(dev, 0x1900 + (i*4), 0); 297 nv_wr32(dev, 0x1900 + (i*4), 0);
298
299 dev_priv->ramin_available = true;
321} 300}
322 301
302struct nv50_gpuobj_node {
303 struct nouveau_vram *vram;
304 struct nouveau_vma chan_vma;
305 u32 align;
306};
307
308
323int 309int
324nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, 310nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
325 uint32_t *sz)
326{ 311{
312 struct drm_device *dev = gpuobj->dev;
313 struct drm_nouveau_private *dev_priv = dev->dev_private;
314 struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
315 struct nv50_gpuobj_node *node = NULL;
327 int ret; 316 int ret;
328 317
329 if (gpuobj->im_backing) 318 node = kzalloc(sizeof(*node), GFP_KERNEL);
330 return -EINVAL; 319 if (!node)
320 return -ENOMEM;
321 node->align = align;
331 322
332 *sz = ALIGN(*sz, 4096); 323 size = (size + 4095) & ~4095;
333 if (*sz == 0) 324 align = max(align, (u32)4096);
334 return -EINVAL;
335 325
336 ret = nouveau_bo_new(dev, NULL, *sz, 0, TTM_PL_FLAG_VRAM, 0, 0x0000, 326 ret = vram->get(dev, size, align, 0, 0, &node->vram);
337 true, false, &gpuobj->im_backing);
338 if (ret) { 327 if (ret) {
339 NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret); 328 kfree(node);
340 return ret; 329 return ret;
341 } 330 }
342 331
343 ret = nouveau_bo_pin(gpuobj->im_backing, TTM_PL_FLAG_VRAM); 332 gpuobj->vinst = node->vram->offset;
344 if (ret) { 333
345 NV_ERROR(dev, "error pinning PRAMIN backing VRAM: %d\n", ret); 334 if (gpuobj->flags & NVOBJ_FLAG_VM) {
346 nouveau_bo_ref(NULL, &gpuobj->im_backing); 335 ret = nouveau_vm_get(dev_priv->chan_vm, size, 12,
347 return ret; 336 NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS,
337 &node->chan_vma);
338 if (ret) {
339 vram->put(dev, &node->vram);
340 kfree(node);
341 return ret;
342 }
343
344 nouveau_vm_map(&node->chan_vma, node->vram);
345 gpuobj->vinst = node->chan_vma.offset;
348 } 346 }
349 347
350 gpuobj->vinst = gpuobj->im_backing->bo.mem.start << PAGE_SHIFT; 348 gpuobj->size = size;
349 gpuobj->node = node;
351 return 0; 350 return 0;
352} 351}
353 352
354void 353void
355nv50_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) 354nv50_instmem_put(struct nouveau_gpuobj *gpuobj)
356{ 355{
356 struct drm_device *dev = gpuobj->dev;
357 struct drm_nouveau_private *dev_priv = dev->dev_private; 357 struct drm_nouveau_private *dev_priv = dev->dev_private;
358 struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
359 struct nv50_gpuobj_node *node;
360
361 node = gpuobj->node;
362 gpuobj->node = NULL;
358 363
359 if (gpuobj && gpuobj->im_backing) { 364 if (node->chan_vma.node) {
360 if (gpuobj->im_bound) 365 nouveau_vm_unmap(&node->chan_vma);
361 dev_priv->engine.instmem.unbind(dev, gpuobj); 366 nouveau_vm_put(&node->chan_vma);
362 nouveau_bo_unpin(gpuobj->im_backing);
363 nouveau_bo_ref(NULL, &gpuobj->im_backing);
364 gpuobj->im_backing = NULL;
365 } 367 }
368 vram->put(dev, &node->vram);
369 kfree(node);
366} 370}
367 371
368int 372int
369nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) 373nv50_instmem_map(struct nouveau_gpuobj *gpuobj)
370{ 374{
371 struct drm_nouveau_private *dev_priv = dev->dev_private; 375 struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
372 struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; 376 struct nv50_gpuobj_node *node = gpuobj->node;
373 struct nouveau_gpuobj *pramin_pt = priv->pramin_pt; 377 int ret;
374 uint32_t pte, pte_end;
375 uint64_t vram;
376
377 if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
378 return -EINVAL;
379
380 NV_DEBUG(dev, "st=0x%lx sz=0x%lx\n",
381 gpuobj->im_pramin->start, gpuobj->im_pramin->size);
382
383 pte = (gpuobj->im_pramin->start >> 12) << 1;
384 pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
385 vram = gpuobj->vinst;
386
387 NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n",
388 gpuobj->im_pramin->start, pte, pte_end);
389 NV_DEBUG(dev, "first vram page: 0x%010llx\n", gpuobj->vinst);
390
391 vram |= 1;
392 if (dev_priv->vram_sys_base) {
393 vram += dev_priv->vram_sys_base;
394 vram |= 0x30;
395 }
396
397 while (pte < pte_end) {
398 nv_wo32(pramin_pt, (pte * 4) + 0, lower_32_bits(vram));
399 nv_wo32(pramin_pt, (pte * 4) + 4, upper_32_bits(vram));
400 vram += 0x1000;
401 pte += 2;
402 }
403 dev_priv->engine.instmem.flush(dev);
404 378
405 nv50_vm_flush(dev, 6); 379 ret = nouveau_vm_get(dev_priv->bar3_vm, gpuobj->size, 12,
380 NV_MEM_ACCESS_RW, &node->vram->bar_vma);
381 if (ret)
382 return ret;
406 383
407 gpuobj->im_bound = 1; 384 nouveau_vm_map(&node->vram->bar_vma, node->vram);
385 gpuobj->pinst = node->vram->bar_vma.offset;
408 return 0; 386 return 0;
409} 387}
410 388
411int 389void
412nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) 390nv50_instmem_unmap(struct nouveau_gpuobj *gpuobj)
413{ 391{
414 struct drm_nouveau_private *dev_priv = dev->dev_private; 392 struct nv50_gpuobj_node *node = gpuobj->node;
415 struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
416 uint32_t pte, pte_end;
417
418 if (gpuobj->im_bound == 0)
419 return -EINVAL;
420
421 /* can happen during late takedown */
422 if (unlikely(!dev_priv->ramin_available))
423 return 0;
424 393
425 pte = (gpuobj->im_pramin->start >> 12) << 1; 394 if (node->vram->bar_vma.node) {
426 pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte; 395 nouveau_vm_unmap(&node->vram->bar_vma);
427 396 nouveau_vm_put(&node->vram->bar_vma);
428 while (pte < pte_end) {
429 nv_wo32(priv->pramin_pt, (pte * 4) + 0, 0x00000000);
430 nv_wo32(priv->pramin_pt, (pte * 4) + 4, 0x00000000);
431 pte += 2;
432 } 397 }
433 dev_priv->engine.instmem.flush(dev);
434
435 gpuobj->im_bound = 0;
436 return 0;
437} 398}
438 399
439void 400void
@@ -452,11 +413,3 @@ nv84_instmem_flush(struct drm_device *dev)
452 NV_ERROR(dev, "PRAMIN flush timeout\n"); 413 NV_ERROR(dev, "PRAMIN flush timeout\n");
453} 414}
454 415
455void
456nv50_vm_flush(struct drm_device *dev, int engine)
457{
458 nv_wr32(dev, 0x100c80, (engine << 16) | 1);
459 if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
460 NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
461}
462
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c
new file mode 100644
index 000000000000..38e523e10995
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_vm.c
@@ -0,0 +1,180 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26
27#include "nouveau_drv.h"
28#include "nouveau_vm.h"
29
30void
31nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
32 struct nouveau_gpuobj *pgt[2])
33{
34 struct drm_nouveau_private *dev_priv = pgd->dev->dev_private;
35 u64 phys = 0xdeadcafe00000000ULL;
36 u32 coverage = 0;
37
38 if (pgt[0]) {
39 phys = 0x00000003 | pgt[0]->vinst; /* present, 4KiB pages */
40 coverage = (pgt[0]->size >> 3) << 12;
41 } else
42 if (pgt[1]) {
43 phys = 0x00000001 | pgt[1]->vinst; /* present */
44 coverage = (pgt[1]->size >> 3) << 16;
45 }
46
47 if (phys & 1) {
48 if (dev_priv->vram_sys_base) {
49 phys += dev_priv->vram_sys_base;
50 phys |= 0x30;
51 }
52
53 if (coverage <= 32 * 1024 * 1024)
54 phys |= 0x60;
55 else if (coverage <= 64 * 1024 * 1024)
56 phys |= 0x40;
57 else if (coverage < 128 * 1024 * 1024)
58 phys |= 0x20;
59 }
60
61 nv_wo32(pgd, (pde * 8) + 0, lower_32_bits(phys));
62 nv_wo32(pgd, (pde * 8) + 4, upper_32_bits(phys));
63}
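
nv50_vm_map_pgt() sizes a PDE's coverage from the page-table object: each 8-byte PTE maps one 4KiB page, and the 0x60/0x40/0x20 flag encodes the resulting size class. A worked example of that arithmetic (standalone sketch, illustrative sizes):

#include <stdint.h>
#include <stdio.h>

/* Size-class flag selection, following the thresholds above. */
static uint32_t pde_size_flag(uint32_t pgt_bytes)
{
	uint32_t coverage = (pgt_bytes >> 3) << 12; /* bytes of VM covered */

	if (coverage <= 32 * 1024 * 1024)
		return 0x60;
	else if (coverage <= 64 * 1024 * 1024)
		return 0x40;
	else if (coverage < 128 * 1024 * 1024)
		return 0x20;
	return 0x00; /* full 128MiB block */
}

int main(void)
{
	/* 0x4000-byte PGT -> 0x800 PTEs -> 8 MiB coverage -> flag 0x60 */
	printf("flag 0x%02x\n", pde_size_flag(0x4000));
	return 0;
}
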
64
65static inline u64
66nv50_vm_addr(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
67 u64 phys, u32 memtype, u32 target)
68{
69 struct drm_nouveau_private *dev_priv = pgt->dev->dev_private;
70
71 phys |= 1; /* present */
72 phys |= (u64)memtype << 40;
73
74 /* IGPs don't have real VRAM, re-target to stolen system memory */
75 if (target == 0 && dev_priv->vram_sys_base) {
76 phys += dev_priv->vram_sys_base;
77 target = 3;
78 }
79
80 phys |= target << 4;
81
82 if (vma->access & NV_MEM_ACCESS_SYS)
83 phys |= (1 << 6);
84
85 if (!(vma->access & NV_MEM_ACCESS_WO))
86 phys |= (1 << 3);
87
88 return phys;
89}
90
91void
92nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
93 struct nouveau_vram *mem, u32 pte, u32 cnt, u64 phys)
94{
95 u32 block;
96 int i;
97
98 phys = nv50_vm_addr(vma, pgt, phys, mem->memtype, 0);
99 pte <<= 3;
100 cnt <<= 3;
101
102 while (cnt) {
103 u32 offset_h = upper_32_bits(phys);
104 u32 offset_l = lower_32_bits(phys);
105
106 for (i = 7; i >= 0; i--) {
107 block = 1 << (i + 3);
108 if (cnt >= block && !(pte & (block - 1)))
109 break;
110 }
111 offset_l |= (i << 7);
112
113 phys += block << (vma->node->type - 3);
114 cnt -= block;
115
116 while (block) {
117 nv_wo32(pgt, pte + 0, offset_l);
118 nv_wo32(pgt, pte + 4, offset_h);
119 pte += 8;
120 block -= 8;
121 }
122 }
123}
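
The mapping loop coalesces PTEs into the largest naturally aligned power-of-two run (note that pte and cnt have been shifted to byte offsets, 8 bytes per PTE) and records the chosen exponent in bits 7 and up of the low PTE word. A standalone sketch of just the block selection, with an illustrative input:

#include <stdint.h>
#include <stdio.h>

/* Pick the largest run of (1 << i) PTEs, i in 7..0, that is both
 * available and aligned; i = 0 always matches because both byte
 * offsets are multiples of 8. */
static int pick_block(uint32_t pte_off, uint32_t cnt)
{
	int i;

	for (i = 7; i > 0; i--) {
		uint32_t block = 1u << (i + 3); /* run length in bytes */

		if (cnt >= block && !(pte_off & (block - 1)))
			break;
	}
	return i;
}

int main(void)
{
	/* PTE index 8 (byte offset 64), 256 PTEs left: aligned only to
	 * an 8-PTE boundary, so the run is 2^3 = 8 PTEs. */
	printf("exponent %d\n", pick_block(64, 2048)); /* prints 3 */
	return 0;
}
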
124
125void
126nv50_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
127 u32 pte, dma_addr_t *list, u32 cnt)
128{
129 pte <<= 3;
130 while (cnt--) {
131 u64 phys = nv50_vm_addr(vma, pgt, (u64)*list++, 0, 2);
132 nv_wo32(pgt, pte + 0, lower_32_bits(phys));
133 nv_wo32(pgt, pte + 4, upper_32_bits(phys));
134 pte += 8;
135 }
136}
137
138void
139nv50_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
140{
141 pte <<= 3;
142 while (cnt--) {
143 nv_wo32(pgt, pte + 0, 0x00000000);
144 nv_wo32(pgt, pte + 4, 0x00000000);
145 pte += 8;
146 }
147}
148
149void
150nv50_vm_flush(struct nouveau_vm *vm)
151{
152 struct drm_nouveau_private *dev_priv = vm->dev->dev_private;
153 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
154 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
155 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
156 struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt;
157
158 pinstmem->flush(vm->dev);
159
160 /* BAR */
161 if (vm != dev_priv->chan_vm) {
162 nv50_vm_flush_engine(vm->dev, 6);
163 return;
164 }
165
166 pfifo->tlb_flush(vm->dev);
167
168 if (atomic_read(&vm->pgraph_refs))
169 pgraph->tlb_flush(vm->dev);
170 if (atomic_read(&vm->pcrypt_refs))
171 pcrypt->tlb_flush(vm->dev);
172}
173
174void
175nv50_vm_flush_engine(struct drm_device *dev, int engine)
176{
177 nv_wr32(dev, 0x100c80, (engine << 16) | 1);
178 if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
179 NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
180}
diff --git a/drivers/gpu/drm/nouveau/nv50_vram.c b/drivers/gpu/drm/nouveau/nv50_vram.c
new file mode 100644
index 000000000000..58e98ad36347
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_vram.c
@@ -0,0 +1,190 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_mm.h"
28
29static int types[0x80] = {
30 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
31 1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0,
32 1, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2, 2, 2, 2, 2, 0,
33 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
34 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 0, 0,
35 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
36 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 2, 2, 2, 2,
37 1, 0, 2, 0, 1, 0, 2, 0, 1, 1, 2, 2, 1, 1, 0, 0
38};
39
40bool
41nv50_vram_flags_valid(struct drm_device *dev, u32 tile_flags)
42{
43 int type = (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) >> 8;
44
45 if (likely(type < ARRAY_SIZE(types) && types[type]))
46 return true;
47 return false;
48}
49
50void
51nv50_vram_del(struct drm_device *dev, struct nouveau_vram **pvram)
52{
53 struct drm_nouveau_private *dev_priv = dev->dev_private;
54 struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
55 struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
56 struct nouveau_mm *mm = man->priv;
57 struct nouveau_mm_node *this;
58 struct nouveau_vram *vram;
59
60 vram = *pvram;
61 *pvram = NULL;
62 if (unlikely(vram == NULL))
63 return;
64
65 mutex_lock(&mm->mutex);
66 while (!list_empty(&vram->regions)) {
67 this = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry);
68
69 list_del(&this->rl_entry);
70 nouveau_mm_put(mm, this);
71 }
72 mutex_unlock(&mm->mutex);
73
74 kfree(vram);
75}
76
77int
78nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc,
79 u32 type, struct nouveau_vram **pvram)
80{
81 struct drm_nouveau_private *dev_priv = dev->dev_private;
82 struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
83 struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
84 struct nouveau_mm *mm = man->priv;
85 struct nouveau_mm_node *r;
86 struct nouveau_vram *vram;
87 int ret;
88
89 if (!types[type])
90 return -EINVAL;
91 size >>= 12;
92 align >>= 12;
93 size_nc >>= 12;
94
95 vram = kzalloc(sizeof(*vram), GFP_KERNEL);
96 if (!vram)
97 return -ENOMEM;
98
99 INIT_LIST_HEAD(&vram->regions);
100 vram->dev = dev_priv->dev;
101 vram->memtype = type;
102 vram->size = size;
103
104 mutex_lock(&mm->mutex);
105 do {
106 ret = nouveau_mm_get(mm, types[type], size, size_nc, align, &r);
107 if (ret) {
108 mutex_unlock(&mm->mutex);
109 nv50_vram_del(dev, &vram);
110 return ret;
111 }
112
113 list_add_tail(&r->rl_entry, &vram->regions);
114 size -= r->length;
115 } while (size);
116 mutex_unlock(&mm->mutex);
117
118 r = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry);
119 vram->offset = (u64)r->offset << 12;
120 *pvram = vram;
121 return 0;
122}
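
Sizes and alignments are converted to 4 KiB page units (the >> 12 shifts) before the allocator sees them, and the do/while loop lets nouveau_mm_get satisfy one request with several regions when size_nc permits splitting. A rough model of that loop, with a toy allocator standing in for nouveau_mm_get:

#include <stdint.h>
#include <stdio.h>

/* toy stand-in for nouveau_mm_get(): hands out at most 'chunk' pages at once */
static uint32_t fake_mm_get(uint32_t want, uint32_t chunk)
{
	return want < chunk ? want : chunk;
}

int main(void)
{
	uint64_t bytes = 16u << 20;		/* 16 MiB request */
	uint32_t size = bytes >> 12;		/* -> 4096 pages, as in nv50_vram_new */
	uint32_t regions = 0;

	while (size) {
		uint32_t got = fake_mm_get(size, 1500);	/* allocator fragments it */
		size -= got;
		regions++;
	}
	printf("satisfied 16 MiB with %u regions\n", regions);	/* 3 */
	return 0;
}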
123
124static u32
125nv50_vram_rblock(struct drm_device *dev)
126{
127 struct drm_nouveau_private *dev_priv = dev->dev_private;
128 int i, parts, colbits, rowbitsa, rowbitsb, banks;
129 u64 rowsize, predicted;
130 u32 r0, r4, rt, ru, rblock_size;
131
132 r0 = nv_rd32(dev, 0x100200);
133 r4 = nv_rd32(dev, 0x100204);
134 rt = nv_rd32(dev, 0x100250);
135 ru = nv_rd32(dev, 0x001540);
136 NV_DEBUG(dev, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
137
138 for (i = 0, parts = 0; i < 8; i++) {
139 if (ru & (0x00010000 << i))
140 parts++;
141 }
142
143 colbits = (r4 & 0x0000f000) >> 12;
144 rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
145 rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
146 banks = ((r4 & 0x01000000) ? 8 : 4);
147
148 rowsize = parts * banks * (1 << colbits) * 8;
149 predicted = rowsize << rowbitsa;
150 if (r0 & 0x00000004)
151 predicted += rowsize << rowbitsb;
152
153 if (predicted != dev_priv->vram_size) {
154 NV_WARN(dev, "memory controller reports %dMiB VRAM\n",
155 (u32)(dev_priv->vram_size >> 20));
156 NV_WARN(dev, "we calculated %dMiB VRAM\n",
157 (u32)(predicted >> 20));
158 }
159
160 rblock_size = rowsize;
161 if (rt & 1)
162 rblock_size *= 3;
163
164 NV_DEBUG(dev, "rblock %d bytes\n", rblock_size);
165 return rblock_size;
166}
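
In other words: rowsize = parts x banks x 2^colbits x 8 bytes, and the predicted total is that row size shifted by each rank's row bits, the second term counting only when r0 bit 2 reports a second rank. A standalone rerun of the arithmetic on invented register fields (not a dump from real hardware):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* invented example fields, purely to show the decode */
	int parts = 4;		/* popcount of 0x001540 bits 16..23 */
	int colbits = 9;	/* (r4 & 0x0000f000) >> 12 */
	int rowbitsa = 13;	/* ((r4 & 0x000f0000) >> 16) + 8 */
	int rowbitsb = 13;	/* ((r4 & 0x00f00000) >> 20) + 8 */
	int banks = 8;		/* r4 bit 24 ? 8 : 4 */
	int two_ranks = 0;	/* r0 bit 2 */

	uint64_t rowsize = (uint64_t)parts * banks * (1 << colbits) * 8;
	uint64_t predicted = rowsize << rowbitsa;
	if (two_ranks)
		predicted += rowsize << rowbitsb;

	printf("rowsize   %llu bytes\n", (unsigned long long)rowsize);	  /* 131072 */
	printf("predicted %llu MiB\n", (unsigned long long)(predicted >> 20)); /* 1024 */
	return 0;
}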
167
168int
169nv50_vram_init(struct drm_device *dev)
170{
171 struct drm_nouveau_private *dev_priv = dev->dev_private;
172
173 dev_priv->vram_size = nv_rd32(dev, 0x10020c);
174 dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32;
175 dev_priv->vram_size &= 0xffffffff00ULL;
176
177 switch (dev_priv->chipset) {
178 case 0xaa:
179 case 0xac:
180 case 0xaf:
181 dev_priv->vram_sys_base = (u64)nv_rd32(dev, 0x100e10) << 12;
182 dev_priv->vram_rblock_size = 4096;
183 break;
184 default:
185 dev_priv->vram_rblock_size = nv50_vram_rblock(dev);
186 break;
187 }
188
189 return 0;
190}
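
The three-step size decode above deserves a worked example: 0x10020c carries size bits 31:8 in place and recycles its low byte for bits 39:32, so the OR-then-mask assembles a 40-bit byte count. A sketch with an invented register value:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t r = 0x40000001;		/* invented 0x10020c readout */
	uint64_t size = r;

	size |= (size & 0xff) << 32;		/* low byte = size bits 39:32 */
	size &= 0xffffffff00ULL;		/* drop the byte from its old spot */

	printf("vram_size = 0x%010" PRIx64 " (%" PRIu64 " MiB)\n",
	       size, size >> 20);		/* 0x0140000000, 5120 MiB */
	return 0;
}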
diff --git a/drivers/gpu/drm/nouveau/nv84_crypt.c b/drivers/gpu/drm/nouveau/nv84_crypt.c
new file mode 100644
index 000000000000..ec18ae1c3886
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv84_crypt.c
@@ -0,0 +1,140 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_util.h"
28#include "nouveau_vm.h"
29
30static void nv84_crypt_isr(struct drm_device *);
31
32int
33nv84_crypt_create_context(struct nouveau_channel *chan)
34{
35 struct drm_device *dev = chan->dev;
36 struct drm_nouveau_private *dev_priv = dev->dev_private;
37 struct nouveau_gpuobj *ramin = chan->ramin;
38 int ret;
39
40 NV_DEBUG(dev, "ch%d\n", chan->id);
41
42 ret = nouveau_gpuobj_new(dev, chan, 256, 0,
43 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
44 &chan->crypt_ctx);
45 if (ret)
46 return ret;
47
48 nv_wo32(ramin, 0xa0, 0x00190000);
49 nv_wo32(ramin, 0xa4, chan->crypt_ctx->vinst + 0xff);
50 nv_wo32(ramin, 0xa8, chan->crypt_ctx->vinst);
51 nv_wo32(ramin, 0xac, 0);
52 nv_wo32(ramin, 0xb0, 0);
53 nv_wo32(ramin, 0xb4, 0);
54
55 dev_priv->engine.instmem.flush(dev);
56 atomic_inc(&chan->vm->pcrypt_refs);
57 return 0;
58}
59
60void
61nv84_crypt_destroy_context(struct nouveau_channel *chan)
62{
63 struct drm_device *dev = chan->dev;
64 u32 inst;
65
66 if (!chan->crypt_ctx)
67 return;
68
69 inst = (chan->ramin->vinst >> 12);
70 inst |= 0x80000000;
71
72 /* Mark the context as invalid if it is still resident on the
73 * hardware; failing to do so causes issues the next time
74 * PCRYPT is used, unsurprisingly :)
75 */
76 nv_wr32(dev, 0x10200c, 0x00000000);
77 if (nv_rd32(dev, 0x102188) == inst)
78 nv_mask(dev, 0x102188, 0x80000000, 0x00000000);
79 if (nv_rd32(dev, 0x10218c) == inst)
80 nv_mask(dev, 0x10218c, 0x80000000, 0x00000000);
81 nv_wr32(dev, 0x10200c, 0x00000010);
82
83 nouveau_gpuobj_ref(NULL, &chan->crypt_ctx);
84 atomic_dec(&chan->vm->pcrypt_refs);
85}
86
87void
88nv84_crypt_tlb_flush(struct drm_device *dev)
89{
90 nv50_vm_flush_engine(dev, 0x0a);
91}
92
93int
94nv84_crypt_init(struct drm_device *dev)
95{
96 struct drm_nouveau_private *dev_priv = dev->dev_private;
97 struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt;
98
99 if (!pcrypt->registered) {
100 NVOBJ_CLASS(dev, 0x74c1, CRYPT);
101 pcrypt->registered = true;
102 }
103
104 nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
105 nv_mask(dev, 0x000200, 0x00004000, 0x00004000);
106
107 nouveau_irq_register(dev, 14, nv84_crypt_isr);
108 nv_wr32(dev, 0x102130, 0xffffffff);
109 nv_wr32(dev, 0x102140, 0xffffffbf);
110
111 nv_wr32(dev, 0x10200c, 0x00000010);
112 return 0;
113}
114
115void
116nv84_crypt_fini(struct drm_device *dev)
117{
118 nv_wr32(dev, 0x102140, 0x00000000);
119 nouveau_irq_unregister(dev, 14);
120}
121
122static void
123nv84_crypt_isr(struct drm_device *dev)
124{
125 u32 stat = nv_rd32(dev, 0x102130);
126 u32 mthd = nv_rd32(dev, 0x102190);
127 u32 data = nv_rd32(dev, 0x102194);
128 u32 inst = nv_rd32(dev, 0x102188) & 0x7fffffff;
129 int show = nouveau_ratelimit();
130
131 if (show) {
132 NV_INFO(dev, "PCRYPT_INTR: 0x%08x 0x%08x 0x%08x 0x%08x\n",
133 stat, mthd, data, inst);
134 }
135
136 nv_wr32(dev, 0x102130, stat);
137 nv_wr32(dev, 0x10200c, 0x10);
138
139 nv50_fb_vm_trap(dev, show, "PCRYPT");
140}
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
new file mode 100644
index 000000000000..fa5d4c234383
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -0,0 +1,269 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_dma.h"
28#include "nouveau_ramht.h"
29#include "nouveau_fbcon.h"
30#include "nouveau_mm.h"
31
32int
33nvc0_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
34{
35 struct nouveau_fbdev *nfbdev = info->par;
36 struct drm_device *dev = nfbdev->dev;
37 struct drm_nouveau_private *dev_priv = dev->dev_private;
38 struct nouveau_channel *chan = dev_priv->channel;
39 int ret;
40
41 ret = RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11);
42 if (ret)
43 return ret;
44
45 if (rect->rop != ROP_COPY) {
46 BEGIN_NVC0(chan, 2, NvSub2D, 0x02ac, 1);
47 OUT_RING (chan, 1);
48 }
49 BEGIN_NVC0(chan, 2, NvSub2D, 0x0588, 1);
50 if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
51 info->fix.visual == FB_VISUAL_DIRECTCOLOR)
52 OUT_RING (chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
53 else
54 OUT_RING (chan, rect->color);
55 BEGIN_NVC0(chan, 2, NvSub2D, 0x0600, 4);
56 OUT_RING (chan, rect->dx);
57 OUT_RING (chan, rect->dy);
58 OUT_RING (chan, rect->dx + rect->width);
59 OUT_RING (chan, rect->dy + rect->height);
60 if (rect->rop != ROP_COPY) {
61 BEGIN_NVC0(chan, 2, NvSub2D, 0x02ac, 1);
62 OUT_RING (chan, 3);
63 }
64 FIRE_RING(chan);
65 return 0;
66}
67
68int
69nvc0_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
70{
71 struct nouveau_fbdev *nfbdev = info->par;
72 struct drm_device *dev = nfbdev->dev;
73 struct drm_nouveau_private *dev_priv = dev->dev_private;
74 struct nouveau_channel *chan = dev_priv->channel;
75 int ret;
76
77 ret = RING_SPACE(chan, 12);
78 if (ret)
79 return ret;
80
81 BEGIN_NVC0(chan, 2, NvSub2D, 0x0110, 1);
82 OUT_RING (chan, 0);
83 BEGIN_NVC0(chan, 2, NvSub2D, 0x08b0, 4);
84 OUT_RING (chan, region->dx);
85 OUT_RING (chan, region->dy);
86 OUT_RING (chan, region->width);
87 OUT_RING (chan, region->height);
88 BEGIN_NVC0(chan, 2, NvSub2D, 0x08d0, 4);
89 OUT_RING (chan, 0);
90 OUT_RING (chan, region->sx);
91 OUT_RING (chan, 0);
92 OUT_RING (chan, region->sy);
93 FIRE_RING(chan);
94 return 0;
95}
96
97int
98nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
99{
100 struct nouveau_fbdev *nfbdev = info->par;
101 struct drm_device *dev = nfbdev->dev;
102 struct drm_nouveau_private *dev_priv = dev->dev_private;
103 struct nouveau_channel *chan = dev_priv->channel;
104 uint32_t width, dwords, *data = (uint32_t *)image->data;
105 uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
106 uint32_t *palette = info->pseudo_palette;
107 int ret;
108
109 if (image->depth != 1)
110 return -ENODEV;
111
112 ret = RING_SPACE(chan, 11);
113 if (ret)
114 return ret;
115
116 width = ALIGN(image->width, 32);
117 dwords = (width * image->height) >> 5;
118
119 BEGIN_NVC0(chan, 2, NvSub2D, 0x0814, 2);
120 if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
121 info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
122 OUT_RING (chan, palette[image->bg_color] | mask);
123 OUT_RING (chan, palette[image->fg_color] | mask);
124 } else {
125 OUT_RING (chan, image->bg_color);
126 OUT_RING (chan, image->fg_color);
127 }
128 BEGIN_NVC0(chan, 2, NvSub2D, 0x0838, 2);
129 OUT_RING (chan, image->width);
130 OUT_RING (chan, image->height);
131 BEGIN_NVC0(chan, 2, NvSub2D, 0x0850, 4);
132 OUT_RING (chan, 0);
133 OUT_RING (chan, image->dx);
134 OUT_RING (chan, 0);
135 OUT_RING (chan, image->dy);
136
137 while (dwords) {
138 int push = dwords > 2047 ? 2047 : dwords;
139
140 ret = RING_SPACE(chan, push + 1);
141 if (ret)
142 return ret;
143
144 dwords -= push;
145
146 BEGIN_NVC0(chan, 6, NvSub2D, 0x0860, push);
147 OUT_RINGp(chan, data, push);
148 data += push;
149 }
150
151 FIRE_RING(chan);
152 return 0;
153}
154
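
The upload math in nvc0_fbcon_imageblit is compact: the 1bpp glyph is padded to a 32-pixel multiple, so one dword covers 32 pixels and dwords = aligned_width * height / 32, sent in pushes of at most 2047 dwords. A quick standalone check of that arithmetic:

#include <stdint.h>
#include <stdio.h>

#define ALIGN32(x) (((x) + 31) & ~31u)

int main(void)
{
	uint32_t w = 10, h = 16;		/* a typical console glyph */
	uint32_t width = ALIGN32(w);		/* 32 */
	uint32_t dwords = (width * h) >> 5;	/* 16 dwords of 1bpp data */

	while (dwords) {
		uint32_t push = dwords > 2047 ? 2047 : dwords;
		printf("push %u dwords\n", push);	/* one push of 16 */
		dwords -= push;
	}
	return 0;
}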
155int
156nvc0_fbcon_accel_init(struct fb_info *info)
157{
158 struct nouveau_fbdev *nfbdev = info->par;
159 struct drm_device *dev = nfbdev->dev;
160 struct drm_nouveau_private *dev_priv = dev->dev_private;
161 struct nouveau_channel *chan = dev_priv->channel;
162 struct nouveau_bo *nvbo = nfbdev->nouveau_fb.nvbo;
163 int ret, format;
164
165 ret = nouveau_gpuobj_gr_new(chan, 0x902d, 0x902d);
166 if (ret)
167 return ret;
168
169 switch (info->var.bits_per_pixel) {
170 case 8:
171 format = 0xf3;
172 break;
173 case 15:
174 format = 0xf8;
175 break;
176 case 16:
177 format = 0xe8;
178 break;
179 case 32:
180 switch (info->var.transp.length) {
181 case 0: /* depth 24 */
182 case 8: /* depth 32, just use 24.. */
183 format = 0xe6;
184 break;
185 case 2: /* depth 30 */
186 format = 0xd1;
187 break;
188 default:
189 return -EINVAL;
190 }
191 break;
192 default:
193 return -EINVAL;
194 }
195
196 ret = RING_SPACE(chan, 60);
197 if (ret) {
198 WARN_ON(1);
199 nouveau_fbcon_gpu_lockup(info);
200 return ret;
201 }
202
203 BEGIN_NVC0(chan, 2, NvSub2D, 0x0000, 1);
204 OUT_RING (chan, 0x0000902d);
205 BEGIN_NVC0(chan, 2, NvSub2D, 0x0104, 2);
206 OUT_RING (chan, upper_32_bits(chan->notifier_bo->bo.offset));
207 OUT_RING (chan, lower_32_bits(chan->notifier_bo->bo.offset));
208 BEGIN_NVC0(chan, 2, NvSub2D, 0x0290, 1);
209 OUT_RING (chan, 0);
210 BEGIN_NVC0(chan, 2, NvSub2D, 0x0888, 1);
211 OUT_RING (chan, 1);
212 BEGIN_NVC0(chan, 2, NvSub2D, 0x02ac, 1);
213 OUT_RING (chan, 3);
214 BEGIN_NVC0(chan, 2, NvSub2D, 0x02a0, 1);
215 OUT_RING (chan, 0x55);
216 BEGIN_NVC0(chan, 2, NvSub2D, 0x08c0, 4);
217 OUT_RING (chan, 0);
218 OUT_RING (chan, 1);
219 OUT_RING (chan, 0);
220 OUT_RING (chan, 1);
221 BEGIN_NVC0(chan, 2, NvSub2D, 0x0580, 2);
222 OUT_RING (chan, 4);
223 OUT_RING (chan, format);
224 BEGIN_NVC0(chan, 2, NvSub2D, 0x02e8, 2);
225 OUT_RING (chan, 2);
226 OUT_RING (chan, 1);
227
228 BEGIN_NVC0(chan, 2, NvSub2D, 0x0804, 1);
229 OUT_RING (chan, format);
230 BEGIN_NVC0(chan, 2, NvSub2D, 0x0800, 1);
231 OUT_RING (chan, 1);
232 BEGIN_NVC0(chan, 2, NvSub2D, 0x0808, 3);
233 OUT_RING (chan, 0);
234 OUT_RING (chan, 0);
235 OUT_RING (chan, 1);
236 BEGIN_NVC0(chan, 2, NvSub2D, 0x081c, 1);
237 OUT_RING (chan, 1);
238 BEGIN_NVC0(chan, 2, NvSub2D, 0x0840, 4);
239 OUT_RING (chan, 0);
240 OUT_RING (chan, 1);
241 OUT_RING (chan, 0);
242 OUT_RING (chan, 1);
243 BEGIN_NVC0(chan, 2, NvSub2D, 0x0200, 10);
244 OUT_RING (chan, format);
245 OUT_RING (chan, 1);
246 OUT_RING (chan, 0);
247 OUT_RING (chan, 1);
248 OUT_RING (chan, 0);
249 OUT_RING (chan, info->fix.line_length);
250 OUT_RING (chan, info->var.xres_virtual);
251 OUT_RING (chan, info->var.yres_virtual);
252 OUT_RING (chan, upper_32_bits(nvbo->vma.offset));
253 OUT_RING (chan, lower_32_bits(nvbo->vma.offset));
254 BEGIN_NVC0(chan, 2, NvSub2D, 0x0230, 10);
255 OUT_RING (chan, format);
256 OUT_RING (chan, 1);
257 OUT_RING (chan, 0);
258 OUT_RING (chan, 1);
259 OUT_RING (chan, 0);
260 OUT_RING (chan, info->fix.line_length);
261 OUT_RING (chan, info->var.xres_virtual);
262 OUT_RING (chan, info->var.yres_virtual);
263 OUT_RING (chan, upper_32_bits(nvbo->vma.offset));
264 OUT_RING (chan, lower_32_bits(nvbo->vma.offset));
265 FIRE_RING (chan);
266
267 return 0;
268}
269
diff --git a/drivers/gpu/drm/nouveau/nvc0_fifo.c b/drivers/gpu/drm/nouveau/nvc0_fifo.c
index 890c2b95fbc1..e6f92c541dba 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fifo.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fifo.c
@@ -25,6 +25,49 @@
25#include "drmP.h"
26
27#include "nouveau_drv.h"
28#include "nouveau_mm.h"
29
30static void nvc0_fifo_isr(struct drm_device *);
31
32struct nvc0_fifo_priv {
33 struct nouveau_gpuobj *playlist[2];
34 int cur_playlist;
35 struct nouveau_vma user_vma;
36 int spoon_nr;
37};
38
39struct nvc0_fifo_chan {
40 struct nouveau_bo *user;
41 struct nouveau_gpuobj *ramfc;
42};
43
44static void
45nvc0_fifo_playlist_update(struct drm_device *dev)
46{
47 struct drm_nouveau_private *dev_priv = dev->dev_private;
48 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
49 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
50 struct nvc0_fifo_priv *priv = pfifo->priv;
51 struct nouveau_gpuobj *cur;
52 int i, p;
53
54 cur = priv->playlist[priv->cur_playlist];
55 priv->cur_playlist = !priv->cur_playlist;
56
57 for (i = 0, p = 0; i < 128; i++) {
58 if (!(nv_rd32(dev, 0x3004 + (i * 8)) & 1))
59 continue;
60 nv_wo32(cur, p + 0, i);
61 nv_wo32(cur, p + 4, 0x00000004);
62 p += 8;
63 }
64 pinstmem->flush(dev);
65
66 nv_wr32(dev, 0x002270, cur->vinst >> 12);
67 nv_wr32(dev, 0x002274, 0x01f00000 | (p >> 3));
68 if (!nv_wait(dev, 0x00227c, 0x00100000, 0x00000000))
69 NV_ERROR(dev, "PFIFO - playlist update failed\n");
70}
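
Each playlist entry written above is two dwords, channel ID then 0x00000004, and 0x002274 receives the entry count as p >> 3 (p advanced 8 bytes per entry) under the 0x01f00000 magic. A sketch of the same layout built in plain memory; the enabled-channel set is invented:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* pretend channels 0, 3 and 5 have bit 0 set in their 0x3004 slot */
	int enabled[128] = { [0] = 1, [3] = 1, [5] = 1 };
	uint32_t playlist[256];
	uint32_t p = 0;		/* byte offset, as in the driver */
	int i;

	for (i = 0; i < 128; i++) {
		if (!enabled[i])
			continue;
		playlist[p / 4 + 0] = i;		/* channel id */
		playlist[p / 4 + 1] = 0x00000004;
		p += 8;
	}
	printf("entry0: ch %u flag 0x%08x\n", playlist[0], playlist[1]);
	printf("0x002274 <- 0x%08x\n", 0x01f00000 | (p >> 3));	/* 3 entries */
	return 0;
}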
71
72void
73nvc0_fifo_disable(struct drm_device *dev)
@@ -57,12 +100,135 @@ nvc0_fifo_channel_id(struct drm_device *dev)
100int
101nvc0_fifo_create_context(struct nouveau_channel *chan)
102{
103 struct drm_device *dev = chan->dev;
104 struct drm_nouveau_private *dev_priv = dev->dev_private;
105 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
106 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
107 struct nvc0_fifo_priv *priv = pfifo->priv;
108 struct nvc0_fifo_chan *fifoch;
109 u64 ib_virt, user_vinst;
110 int ret;
111
112 chan->fifo_priv = kzalloc(sizeof(*fifoch), GFP_KERNEL);
113 if (!chan->fifo_priv)
114 return -ENOMEM;
115 fifoch = chan->fifo_priv;
116
117 /* allocate vram for control regs, map into polling area */
118 ret = nouveau_bo_new(dev, NULL, 0x1000, 0, TTM_PL_FLAG_VRAM,
119 0, 0, true, true, &fifoch->user);
120 if (ret)
121 goto error;
122
123 ret = nouveau_bo_pin(fifoch->user, TTM_PL_FLAG_VRAM);
124 if (ret) {
125 nouveau_bo_ref(NULL, &fifoch->user);
126 goto error;
127 }
128
129 user_vinst = fifoch->user->bo.mem.start << PAGE_SHIFT;
130
131 ret = nouveau_bo_map(fifoch->user);
132 if (ret) {
133 nouveau_bo_unpin(fifoch->user);
134 nouveau_bo_ref(NULL, &fifoch->user);
135 goto error;
136 }
137
138 nouveau_vm_map_at(&priv->user_vma, chan->id * 0x1000,
139 fifoch->user->bo.mem.mm_node);
140
141 chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) +
142 priv->user_vma.offset + (chan->id * 0x1000),
143 PAGE_SIZE);
144 if (!chan->user) {
145 ret = -ENOMEM;
146 goto error;
147 }
148
149 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
150
151 /* zero channel regs */
152 nouveau_bo_wr32(fifoch->user, 0x0040/4, 0);
153 nouveau_bo_wr32(fifoch->user, 0x0044/4, 0);
154 nouveau_bo_wr32(fifoch->user, 0x0048/4, 0);
155 nouveau_bo_wr32(fifoch->user, 0x004c/4, 0);
156 nouveau_bo_wr32(fifoch->user, 0x0050/4, 0);
157 nouveau_bo_wr32(fifoch->user, 0x0058/4, 0);
158 nouveau_bo_wr32(fifoch->user, 0x005c/4, 0);
159 nouveau_bo_wr32(fifoch->user, 0x0060/4, 0);
160 nouveau_bo_wr32(fifoch->user, 0x0088/4, 0);
161 nouveau_bo_wr32(fifoch->user, 0x008c/4, 0);
162
163 /* ramfc */
164 ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst,
165 chan->ramin->vinst, 0x100,
166 NVOBJ_FLAG_ZERO_ALLOC, &fifoch->ramfc);
167 if (ret)
168 goto error;
169
170 nv_wo32(fifoch->ramfc, 0x08, lower_32_bits(user_vinst));
171 nv_wo32(fifoch->ramfc, 0x0c, upper_32_bits(user_vinst));
172 nv_wo32(fifoch->ramfc, 0x10, 0x0000face);
173 nv_wo32(fifoch->ramfc, 0x30, 0xfffff902);
174 nv_wo32(fifoch->ramfc, 0x48, lower_32_bits(ib_virt));
175 nv_wo32(fifoch->ramfc, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 |
176 upper_32_bits(ib_virt));
177 nv_wo32(fifoch->ramfc, 0x54, 0x00000002);
178 nv_wo32(fifoch->ramfc, 0x84, 0x20400000);
179 nv_wo32(fifoch->ramfc, 0x94, 0x30000001);
180 nv_wo32(fifoch->ramfc, 0x9c, 0x00000100);
181 nv_wo32(fifoch->ramfc, 0xa4, 0x1f1f1f1f);
182 nv_wo32(fifoch->ramfc, 0xa8, 0x1f1f1f1f);
183 nv_wo32(fifoch->ramfc, 0xac, 0x0000001f);
184 nv_wo32(fifoch->ramfc, 0xb8, 0xf8000000);
185 nv_wo32(fifoch->ramfc, 0xf8, 0x10003080); /* 0x002310 */
186 nv_wo32(fifoch->ramfc, 0xfc, 0x10000010); /* 0x002350 */
187 pinstmem->flush(dev);
188
189 nv_wr32(dev, 0x003000 + (chan->id * 8), 0xc0000000 |
190 (chan->ramin->vinst >> 12));
191 nv_wr32(dev, 0x003004 + (chan->id * 8), 0x001f0001);
192 nvc0_fifo_playlist_update(dev);
193 return 0;
194
195error:
196 pfifo->destroy_context(chan);
197 return ret;
198}
199
200void
201nvc0_fifo_destroy_context(struct nouveau_channel *chan)
202{
203 struct drm_device *dev = chan->dev;
204 struct nvc0_fifo_chan *fifoch;
205
206 nv_mask(dev, 0x003004 + (chan->id * 8), 0x00000001, 0x00000000);
207 nv_wr32(dev, 0x002634, chan->id);
208 if (!nv_wait(dev, 0x002634, 0xffffffff, chan->id))
209 NV_WARN(dev, "0x2634 != chid: 0x%08x\n", nv_rd32(dev, 0x2634));
210
211 nvc0_fifo_playlist_update(dev);
212
213 nv_wr32(dev, 0x003000 + (chan->id * 8), 0x00000000);
214
215 if (chan->user) {
216 iounmap(chan->user);
217 chan->user = NULL;
218 }
219
220 fifoch = chan->fifo_priv;
221 chan->fifo_priv = NULL;
222 if (!fifoch)
223 return;
224
225 nouveau_gpuobj_ref(NULL, &fifoch->ramfc);
226 if (fifoch->user) {
227 nouveau_bo_unmap(fifoch->user);
228 nouveau_bo_unpin(fifoch->user);
229 nouveau_bo_ref(NULL, &fifoch->user);
230 }
231 kfree(fifoch);
232}
233
234int
@@ -77,14 +243,213 @@ nvc0_fifo_unload_context(struct drm_device *dev)
243 return 0;
244}
245
246static void
247nvc0_fifo_destroy(struct drm_device *dev)
248{
249 struct drm_nouveau_private *dev_priv = dev->dev_private;
250 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
251 struct nvc0_fifo_priv *priv;
252
253 priv = pfifo->priv;
254 if (!priv)
255 return;
256
257 nouveau_vm_put(&priv->user_vma);
258 nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
259 nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
260 kfree(priv);
261}
262
263void
264nvc0_fifo_takedown(struct drm_device *dev)
265{
266 nv_wr32(dev, 0x002140, 0x00000000);
267 nvc0_fifo_destroy(dev);
268}
269
270static int
271nvc0_fifo_create(struct drm_device *dev)
272{
273 struct drm_nouveau_private *dev_priv = dev->dev_private;
274 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
275 struct nvc0_fifo_priv *priv;
276 int ret;
277
278 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
279 if (!priv)
280 return -ENOMEM;
281 pfifo->priv = priv;
282
283 ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000, 0,
284 &priv->playlist[0]);
285 if (ret)
286 goto error;
287
288 ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000, 0,
289 &priv->playlist[1]);
290 if (ret)
291 goto error;
292
293 ret = nouveau_vm_get(dev_priv->bar1_vm, pfifo->channels * 0x1000,
294 12, NV_MEM_ACCESS_RW, &priv->user_vma);
295 if (ret)
296 goto error;
297
298 nouveau_irq_register(dev, 8, nvc0_fifo_isr);
299 NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
300 return 0;
301
302error:
303 nvc0_fifo_destroy(dev);
304 return ret;
305}
306
307int
308nvc0_fifo_init(struct drm_device *dev)
309{
310 struct drm_nouveau_private *dev_priv = dev->dev_private;
311 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
312 struct nvc0_fifo_priv *priv;
313 int ret, i;
314
315 if (!pfifo->priv) {
316 ret = nvc0_fifo_create(dev);
317 if (ret)
318 return ret;
319 }
320 priv = pfifo->priv;
321
322 /* reset PFIFO, enable all available PSUBFIFO areas */
323 nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
324 nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
325 nv_wr32(dev, 0x000204, 0xffffffff);
326 nv_wr32(dev, 0x002204, 0xffffffff);
327
328 priv->spoon_nr = hweight32(nv_rd32(dev, 0x002204));
329 NV_DEBUG(dev, "PFIFO: %d subfifo(s)\n", priv->spoon_nr);
330
331 /* assign engines to subfifos */
332 if (priv->spoon_nr >= 3) {
333 nv_wr32(dev, 0x002208, ~(1 << 0)); /* PGRAPH */
334 nv_wr32(dev, 0x00220c, ~(1 << 1)); /* PVP */
335 nv_wr32(dev, 0x002210, ~(1 << 1)); /* PPP */
336 nv_wr32(dev, 0x002214, ~(1 << 1)); /* PBSP */
337 nv_wr32(dev, 0x002218, ~(1 << 2)); /* PCE0 */
338 nv_wr32(dev, 0x00221c, ~(1 << 1)); /* PCE1 */
339 }
340
341 /* PSUBFIFO[n] */
342 for (i = 0; i < 3; i++) {
343 nv_mask(dev, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
344 nv_wr32(dev, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
345 nv_wr32(dev, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTR_EN */
346 }
347
348 nv_mask(dev, 0x002200, 0x00000001, 0x00000001);
349 nv_wr32(dev, 0x002254, 0x10000000 | priv->user_vma.offset >> 12);
350
351 nv_wr32(dev, 0x002a00, 0xffffffff); /* clears PFIFO.INTR bit 30 */
352 nv_wr32(dev, 0x002100, 0xffffffff);
353 nv_wr32(dev, 0x002140, 0xbfffffff);
354 return 0;
355}
356
357struct nouveau_enum nvc0_fifo_fault_unit[] = {
358 { 0, "PGRAPH" },
359 { 3, "PEEPHOLE" },
360 { 4, "BAR1" },
361 { 5, "BAR3" },
362 { 7, "PFIFO" },
363 {}
364};
365
366struct nouveau_enum nvc0_fifo_fault_reason[] = {
367 { 0, "PT_NOT_PRESENT" },
368 { 1, "PT_TOO_SHORT" },
369 { 2, "PAGE_NOT_PRESENT" },
370 { 3, "VM_LIMIT_EXCEEDED" },
371 {}
372};
373
374struct nouveau_bitfield nvc0_fifo_subfifo_intr[] = {
375/* { 0x00008000, "" } seen with null ib push */
376 { 0x00200000, "ILLEGAL_MTHD" },
377 { 0x00800000, "EMPTY_SUBC" },
378 {}
379};
380
381static void
382nvc0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
383{
384 u32 inst = nv_rd32(dev, 0x2800 + (unit * 0x10));
385 u32 valo = nv_rd32(dev, 0x2804 + (unit * 0x10));
386 u32 vahi = nv_rd32(dev, 0x2808 + (unit * 0x10));
387 u32 stat = nv_rd32(dev, 0x280c + (unit * 0x10));
388
389 NV_INFO(dev, "PFIFO: %s fault at 0x%010llx [",
390 (stat & 0x00000080) ? "write" : "read", (u64)vahi << 32 | valo);
391 nouveau_enum_print(nvc0_fifo_fault_reason, stat & 0x0000000f);
392 printk("] from ");
393 nouveau_enum_print(nvc0_fifo_fault_unit, unit);
394 printk(" on channel 0x%010llx\n", (u64)inst << 12);
395}
396
397static void
398nvc0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
399{
400 u32 stat = nv_rd32(dev, 0x040108 + (unit * 0x2000));
401 u32 addr = nv_rd32(dev, 0x0400c0 + (unit * 0x2000));
402 u32 data = nv_rd32(dev, 0x0400c4 + (unit * 0x2000));
403 u32 chid = nv_rd32(dev, 0x040120 + (unit * 0x2000)) & 0x7f;
404 u32 subc = (addr & 0x00070000) >> 16;
405 u32 mthd = (addr & 0x00003ffc);
406
407 NV_INFO(dev, "PSUBFIFO %d:", unit);
408 nouveau_bitfield_print(nvc0_fifo_subfifo_intr, stat);
409 NV_INFO(dev, "PSUBFIFO %d: ch %d subc %d mthd 0x%04x data 0x%08x\n",
410 unit, chid, subc, mthd, data);
411
412 nv_wr32(dev, 0x0400c0 + (unit * 0x2000), 0x80600008);
413 nv_wr32(dev, 0x040108 + (unit * 0x2000), stat);
414}
415
416static void
417nvc0_fifo_isr(struct drm_device *dev)
418{
419 u32 stat = nv_rd32(dev, 0x002100);
420
421 if (stat & 0x10000000) {
422 u32 units = nv_rd32(dev, 0x00259c);
423 u32 u = units;
424
425 while (u) {
426 int i = ffs(u) - 1;
427 nvc0_fifo_isr_vm_fault(dev, i);
428 u &= ~(1 << i);
429 }
430
431 nv_wr32(dev, 0x00259c, units);
432 stat &= ~0x10000000;
433 }
434
435 if (stat & 0x20000000) {
436 u32 units = nv_rd32(dev, 0x0025a0);
437 u32 u = units;
438
439 while (u) {
440 int i = ffs(u) - 1;
441 nvc0_fifo_isr_subfifo_intr(dev, i);
442 u &= ~(1 << i);
443 }
444
445 nv_wr32(dev, 0x0025a0, units);
446 stat &= ~0x20000000;
447 }
448
449 if (stat) {
450 NV_INFO(dev, "PFIFO: unhandled status 0x%08x\n", stat);
451 nv_wr32(dev, 0x002100, stat);
452 }
453
454 nv_wr32(dev, 0x2140, 0);
455}
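
Both interrupt paths above drain a pending-unit mask with the same idiom: ffs() finds the lowest set bit, the unit is serviced, the bit is cleared. Isolated, the loop looks like this:

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int units = 0x00000091;	/* pretend units 0, 4 and 7 faulted */

	while (units) {
		int i = ffs(units) - 1;		/* index of lowest set bit */
		printf("servicing unit %d\n", i);
		units &= ~(1u << i);
	}
	return 0;
}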
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
index 717a5177a8d8..5feacd5d5fa4 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
@@ -22,9 +22,16 @@
22 * Authors: Ben Skeggs
23 */
24
25#include <linux/firmware.h>
26
27#include "drmP.h"
28
29#include "nouveau_drv.h"
30#include "nouveau_mm.h"
31#include "nvc0_graph.h"
32
33static void nvc0_graph_isr(struct drm_device *);
34static int nvc0_graph_unload_context_to(struct drm_device *dev, u64 chan);
35
36void
37nvc0_graph_fifo_access(struct drm_device *dev, bool enabled)
@@ -37,39 +44,735 @@ nvc0_graph_channel(struct drm_device *dev)
44 return NULL;
45}
46
47static int
48nvc0_graph_construct_context(struct nouveau_channel *chan)
49{
50 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
51 struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
52 struct nvc0_graph_chan *grch = chan->pgraph_ctx;
53 struct drm_device *dev = chan->dev;
54 int ret, i;
55 u32 *ctx;
56
57 ctx = kmalloc(priv->grctx_size, GFP_KERNEL);
58 if (!ctx)
59 return -ENOMEM;
60
61 nvc0_graph_load_context(chan);
62
63 nv_wo32(grch->grctx, 0x1c, 1);
64 nv_wo32(grch->grctx, 0x20, 0);
65 nv_wo32(grch->grctx, 0x28, 0);
66 nv_wo32(grch->grctx, 0x2c, 0);
67 dev_priv->engine.instmem.flush(dev);
68
69 ret = nvc0_grctx_generate(chan);
70 if (ret) {
71 kfree(ctx);
72 return ret;
73 }
74
75 ret = nvc0_graph_unload_context_to(dev, chan->ramin->vinst);
76 if (ret) {
77 kfree(ctx);
78 return ret;
79 }
80
81 for (i = 0; i < priv->grctx_size; i += 4)
82 ctx[i / 4] = nv_ro32(grch->grctx, i);
83
84 priv->grctx_vals = ctx;
85 return 0;
86}
87
88static int
89nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
90{
91 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
92 struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
93 struct nvc0_graph_chan *grch = chan->pgraph_ctx;
94 struct drm_device *dev = chan->dev;
95 int i = 0, gpc, tp, ret;
96 u32 magic;
97
98 ret = nouveau_gpuobj_new(dev, NULL, 0x2000, 256, NVOBJ_FLAG_VM,
99 &grch->unk408004);
100 if (ret)
101 return ret;
102
103 ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 256, NVOBJ_FLAG_VM,
104 &grch->unk40800c);
105 if (ret)
106 return ret;
107
108 ret = nouveau_gpuobj_new(dev, NULL, 384 * 1024, 4096, NVOBJ_FLAG_VM,
109 &grch->unk418810);
110 if (ret)
111 return ret;
112
113 ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0, NVOBJ_FLAG_VM,
114 &grch->mmio);
115 if (ret)
116 return ret;
117
118
119 nv_wo32(grch->mmio, i++ * 4, 0x00408004);
120 nv_wo32(grch->mmio, i++ * 4, grch->unk408004->vinst >> 8);
121 nv_wo32(grch->mmio, i++ * 4, 0x00408008);
122 nv_wo32(grch->mmio, i++ * 4, 0x80000018);
123
124 nv_wo32(grch->mmio, i++ * 4, 0x0040800c);
125 nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->vinst >> 8);
126 nv_wo32(grch->mmio, i++ * 4, 0x00408010);
127 nv_wo32(grch->mmio, i++ * 4, 0x80000000);
128
129 nv_wo32(grch->mmio, i++ * 4, 0x00418810);
130 nv_wo32(grch->mmio, i++ * 4, 0x80000000 | grch->unk418810->vinst >> 12);
131 nv_wo32(grch->mmio, i++ * 4, 0x00419848);
132 nv_wo32(grch->mmio, i++ * 4, 0x10000000 | grch->unk418810->vinst >> 12);
133
134 nv_wo32(grch->mmio, i++ * 4, 0x00419004);
135 nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->vinst >> 8);
136 nv_wo32(grch->mmio, i++ * 4, 0x00419008);
137 nv_wo32(grch->mmio, i++ * 4, 0x00000000);
138
139 nv_wo32(grch->mmio, i++ * 4, 0x00418808);
140 nv_wo32(grch->mmio, i++ * 4, grch->unk408004->vinst >> 8);
141 nv_wo32(grch->mmio, i++ * 4, 0x0041880c);
142 nv_wo32(grch->mmio, i++ * 4, 0x80000018);
143
144 magic = 0x02180000;
145 nv_wo32(grch->mmio, i++ * 4, 0x00405830);
146 nv_wo32(grch->mmio, i++ * 4, magic);
147 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
148 for (tp = 0; tp < priv->tp_nr[gpc]; tp++, magic += 0x02fc) {
149 u32 reg = 0x504520 + (gpc * 0x8000) + (tp * 0x0800);
150 nv_wo32(grch->mmio, i++ * 4, reg);
151 nv_wo32(grch->mmio, i++ * 4, magic);
152 }
153 }
154
155 grch->mmio_nr = i / 2;
156 return 0;
157}
158
159int
160nvc0_graph_create_context(struct nouveau_channel *chan)
161{
162 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
163 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
164 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
165 struct nvc0_graph_priv *priv = pgraph->priv;
166 struct nvc0_graph_chan *grch;
167 struct drm_device *dev = chan->dev;
168 struct nouveau_gpuobj *grctx;
169 int ret, i;
170
171 chan->pgraph_ctx = kzalloc(sizeof(*grch), GFP_KERNEL);
172 if (!chan->pgraph_ctx)
173 return -ENOMEM;
174 grch = chan->pgraph_ctx;
175
176 ret = nouveau_gpuobj_new(dev, NULL, priv->grctx_size, 256,
177 NVOBJ_FLAG_VM | NVOBJ_FLAG_ZERO_ALLOC,
178 &grch->grctx);
179 if (ret)
180 goto error;
181 chan->ramin_grctx = grch->grctx;
182 grctx = grch->grctx;
183
184 ret = nvc0_graph_create_context_mmio_list(chan);
185 if (ret)
186 goto error;
187
188 nv_wo32(chan->ramin, 0x0210, lower_32_bits(grctx->vinst) | 4);
189 nv_wo32(chan->ramin, 0x0214, upper_32_bits(grctx->vinst));
190 pinstmem->flush(dev);
191
192 if (!priv->grctx_vals) {
193 ret = nvc0_graph_construct_context(chan);
194 if (ret)
195 goto error;
196 }
197
198 for (i = 0; i < priv->grctx_size; i += 4)
199 nv_wo32(grctx, i, priv->grctx_vals[i / 4]);
200
201 nv_wo32(grctx, 0xf4, 0);
202 nv_wo32(grctx, 0xf8, 0);
203 nv_wo32(grctx, 0x10, grch->mmio_nr);
204 nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->vinst));
205 nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->vinst));
206 nv_wo32(grctx, 0x1c, 1);
207 nv_wo32(grctx, 0x20, 0);
208 nv_wo32(grctx, 0x28, 0);
209 nv_wo32(grctx, 0x2c, 0);
210 pinstmem->flush(dev);
211 return 0;
212
213error:
214 pgraph->destroy_context(chan);
215 return ret;
216}
217
218void
219nvc0_graph_destroy_context(struct nouveau_channel *chan)
220{
221 struct nvc0_graph_chan *grch;
222
223 grch = chan->pgraph_ctx;
224 chan->pgraph_ctx = NULL;
225 if (!grch)
226 return;
227
228 nouveau_gpuobj_ref(NULL, &grch->mmio);
229 nouveau_gpuobj_ref(NULL, &grch->unk418810);
230 nouveau_gpuobj_ref(NULL, &grch->unk40800c);
231 nouveau_gpuobj_ref(NULL, &grch->unk408004);
232 nouveau_gpuobj_ref(NULL, &grch->grctx);
233 chan->ramin_grctx = NULL;
234}
235
236int
237nvc0_graph_load_context(struct nouveau_channel *chan)
238{
239 struct drm_device *dev = chan->dev;
240
241 nv_wr32(dev, 0x409840, 0x00000030);
242 nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
243 nv_wr32(dev, 0x409504, 0x00000003);
244 if (!nv_wait(dev, 0x409800, 0x00000010, 0x00000010))
245 NV_ERROR(dev, "PGRAPH: load_ctx timeout\n");
246
247 return 0;
248}
249
250static int
251nvc0_graph_unload_context_to(struct drm_device *dev, u64 chan)
252{
253 nv_wr32(dev, 0x409840, 0x00000003);
254 nv_wr32(dev, 0x409500, 0x80000000 | chan >> 12);
255 nv_wr32(dev, 0x409504, 0x00000009);
256 if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000000)) {
257 NV_ERROR(dev, "PGRAPH: unload_ctx timeout\n");
258 return -EBUSY;
259 }
260
261 return 0;
262}
263
264int
265nvc0_graph_unload_context(struct drm_device *dev)
266{
267 u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12;
268 return nvc0_graph_unload_context_to(dev, inst);
269}
270
271static void
272nvc0_graph_destroy(struct drm_device *dev)
273{
274 struct drm_nouveau_private *dev_priv = dev->dev_private;
275 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
276 struct nvc0_graph_priv *priv;
277
278 priv = pgraph->priv;
279 if (!priv)
280 return;
281
282 nouveau_irq_unregister(dev, 12);
283
284 nouveau_gpuobj_ref(NULL, &priv->unk4188b8);
285 nouveau_gpuobj_ref(NULL, &priv->unk4188b4);
286
287 /* kfree(NULL) is a no-op, no need to test first */
288 kfree(priv->grctx_vals);
289 kfree(priv);
290}
291
292void
293nvc0_graph_takedown(struct drm_device *dev)
294{
295 nvc0_graph_destroy(dev);
296}
297
298static int
299nvc0_graph_create(struct drm_device *dev)
300{
301 struct drm_nouveau_private *dev_priv = dev->dev_private;
302 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
303 struct nvc0_graph_priv *priv;
304 int ret, gpc, i;
305
306 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
307 if (!priv)
308 return -ENOMEM;
309 pgraph->priv = priv;
310
311 ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b4);
312 if (ret)
313 goto error;
314
315 ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b8);
316 if (ret)
317 goto error;
318
319 for (i = 0; i < 0x1000; i += 4) {
320 nv_wo32(priv->unk4188b4, i, 0x00000010);
321 nv_wo32(priv->unk4188b8, i, 0x00000010);
322 }
323
324 priv->gpc_nr = nv_rd32(dev, 0x409604) & 0x0000001f;
325 priv->rop_nr = (nv_rd32(dev, 0x409604) & 0x001f0000) >> 16;
326 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
327 priv->tp_nr[gpc] = nv_rd32(dev, GPC_UNIT(gpc, 0x2608));
328 priv->tp_total += priv->tp_nr[gpc];
329 }
330
331 /*XXX: these need figuring out... */
332 switch (dev_priv->chipset) {
333 case 0xc0:
334 if (priv->tp_total == 11) { /* 465, 3/4/4/0, 4 */
335 priv->magic_not_rop_nr = 0x07;
336 /* filled values up to tp_total, the rest 0 */
337 priv->magicgpc980[0] = 0x22111000;
338 priv->magicgpc980[1] = 0x00000233;
339 priv->magicgpc980[2] = 0x00000000;
340 priv->magicgpc980[3] = 0x00000000;
341 priv->magicgpc918 = 0x000ba2e9;
342 } else
343 if (priv->tp_total == 14) { /* 470, 3/3/4/4, 5 */
344 priv->magic_not_rop_nr = 0x05;
345 priv->magicgpc980[0] = 0x11110000;
346 priv->magicgpc980[1] = 0x00233222;
347 priv->magicgpc980[2] = 0x00000000;
348 priv->magicgpc980[3] = 0x00000000;
349 priv->magicgpc918 = 0x00092493;
350 } else
351 if (priv->tp_total == 15) { /* 480, 3/4/4/4, 6 */
352 priv->magic_not_rop_nr = 0x06;
353 priv->magicgpc980[0] = 0x11110000;
354 priv->magicgpc980[1] = 0x03332222;
355 priv->magicgpc980[2] = 0x00000000;
356 priv->magicgpc980[3] = 0x00000000;
357 priv->magicgpc918 = 0x00088889;
358 }
359 break;
360 case 0xc3: /* 450, 4/0/0/0, 2 */
361 priv->magic_not_rop_nr = 0x03;
362 priv->magicgpc980[0] = 0x00003210;
363 priv->magicgpc980[1] = 0x00000000;
364 priv->magicgpc980[2] = 0x00000000;
365 priv->magicgpc980[3] = 0x00000000;
366 priv->magicgpc918 = 0x00200000;
367 break;
368 case 0xc4: /* 460, 3/4/0/0, 4 */
369 priv->magic_not_rop_nr = 0x01;
370 priv->magicgpc980[0] = 0x02321100;
371 priv->magicgpc980[1] = 0x00000000;
372 priv->magicgpc980[2] = 0x00000000;
373 priv->magicgpc980[3] = 0x00000000;
374 priv->magicgpc918 = 0x00124925;
375 break;
376 }
377
378 if (!priv->magic_not_rop_nr) {
379 NV_ERROR(dev, "PGRAPH: unknown config: %d/%d/%d/%d, %d\n",
380 priv->tp_nr[0], priv->tp_nr[1], priv->tp_nr[2],
381 priv->tp_nr[3], priv->rop_nr);
382 /* use 0xc3's values... */
383 priv->magic_not_rop_nr = 0x03;
384 priv->magicgpc980[0] = 0x00003210;
385 priv->magicgpc980[1] = 0x00000000;
386 priv->magicgpc980[2] = 0x00000000;
387 priv->magicgpc980[3] = 0x00000000;
388 priv->magicgpc918 = 0x00200000;
389 }
390
391 nouveau_irq_register(dev, 12, nvc0_graph_isr);
392 NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */
393 NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */
394 NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */
395 NVOBJ_CLASS(dev, 0x90c0, GR); /* COMPUTE */
396 return 0;
397
398error:
399 nvc0_graph_destroy(dev);
400 return ret;
401}
402
403static void
404nvc0_graph_init_obj418880(struct drm_device *dev)
405{
406 struct drm_nouveau_private *dev_priv = dev->dev_private;
407 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
408 struct nvc0_graph_priv *priv = pgraph->priv;
409 int i;
410
411 nv_wr32(dev, GPC_BCAST(0x0880), 0x00000000);
412 nv_wr32(dev, GPC_BCAST(0x08a4), 0x00000000);
413 for (i = 0; i < 4; i++)
414 nv_wr32(dev, GPC_BCAST(0x0888) + (i * 4), 0x00000000);
415 nv_wr32(dev, GPC_BCAST(0x08b4), priv->unk4188b4->vinst >> 8);
416 nv_wr32(dev, GPC_BCAST(0x08b8), priv->unk4188b8->vinst >> 8);
417}
418
419static void
420nvc0_graph_init_regs(struct drm_device *dev)
421{
422 nv_wr32(dev, 0x400080, 0x003083c2);
423 nv_wr32(dev, 0x400088, 0x00006fe7);
424 nv_wr32(dev, 0x40008c, 0x00000000);
425 nv_wr32(dev, 0x400090, 0x00000030);
426 nv_wr32(dev, 0x40013c, 0x013901f7);
427 nv_wr32(dev, 0x400140, 0x00000100);
428 nv_wr32(dev, 0x400144, 0x00000000);
429 nv_wr32(dev, 0x400148, 0x00000110);
430 nv_wr32(dev, 0x400138, 0x00000000);
431 nv_wr32(dev, 0x400130, 0x00000000);
432 nv_wr32(dev, 0x400134, 0x00000000);
433 nv_wr32(dev, 0x400124, 0x00000002);
434}
435
436static void
437nvc0_graph_init_gpc_0(struct drm_device *dev)
438{
439 struct drm_nouveau_private *dev_priv = dev->dev_private;
440 struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
441 int gpc;
442
443 /* TP ROP UNKVAL(magic_not_rop_nr)
444 * 450: 4/0/0/0 2 3
445 * 460: 3/4/0/0 4 1
446 * 465: 3/4/4/0 4 7
447 * 470: 3/3/4/4 5 5
448 * 480: 3/4/4/4 6 6
449 *
450 * magicgpc918
451 * 450: 00200000 00000000001000000000000000000000
452 * 460: 00124925 00000000000100100100100100100101
453 * 465: 000ba2e9 00000000000010111010001011101001
454 * 470: 00092493 00000000000010010010010010010011
455 * 480: 00088889 00000000000010001000100010001001
456 *
457 * filled values up to tp_total, remainder 0
458 * 450: 00003210 00000000 00000000 00000000
459 * 460: 02321100 00000000 00000000 00000000
460 * 465: 22111000 00000233 00000000 00000000
461 * 470: 11110000 00233222 00000000 00000000
462 * 480: 11110000 03332222 00000000 00000000
463 */
464 nv_wr32(dev, GPC_BCAST(0x0980), priv->magicgpc980[0]);
465 nv_wr32(dev, GPC_BCAST(0x0984), priv->magicgpc980[1]);
466 nv_wr32(dev, GPC_BCAST(0x0988), priv->magicgpc980[2]);
467 nv_wr32(dev, GPC_BCAST(0x098c), priv->magicgpc980[3]);
468
469 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
470 nv_wr32(dev, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 |
471 priv->tp_nr[gpc]);
472 nv_wr32(dev, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tp_total);
473 nv_wr32(dev, GPC_UNIT(gpc, 0x0918), priv->magicgpc918);
474 }
475
476 nv_wr32(dev, GPC_BCAST(0x1bd4), priv->magicgpc918);
477 nv_wr32(dev, GPC_BCAST(0x08ac), priv->rop_nr);
478}
479
480static void
481nvc0_graph_init_units(struct drm_device *dev)
482{
483 nv_wr32(dev, 0x409c24, 0x000f0000);
484 nv_wr32(dev, 0x404000, 0xc0000000); /* DISPATCH */
485 nv_wr32(dev, 0x404600, 0xc0000000); /* M2MF */
486 nv_wr32(dev, 0x408030, 0xc0000000);
487 nv_wr32(dev, 0x40601c, 0xc0000000);
488 nv_wr32(dev, 0x404490, 0xc0000000); /* MACRO */
489 nv_wr32(dev, 0x406018, 0xc0000000);
490 nv_wr32(dev, 0x405840, 0xc0000000);
491 nv_wr32(dev, 0x405844, 0x00ffffff);
492 nv_mask(dev, 0x419cc0, 0x00000008, 0x00000008);
493 nv_mask(dev, 0x419eb4, 0x00001000, 0x00001000);
494}
495
496static void
497nvc0_graph_init_gpc_1(struct drm_device *dev)
498{
499 struct drm_nouveau_private *dev_priv = dev->dev_private;
500 struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
501 int gpc, tp;
502
503 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
504 nv_wr32(dev, GPC_UNIT(gpc, 0x0420), 0xc0000000);
505 nv_wr32(dev, GPC_UNIT(gpc, 0x0900), 0xc0000000);
506 nv_wr32(dev, GPC_UNIT(gpc, 0x1028), 0xc0000000);
507 nv_wr32(dev, GPC_UNIT(gpc, 0x0824), 0xc0000000);
508 for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
509 nv_wr32(dev, TP_UNIT(gpc, tp, 0x508), 0xffffffff);
510 nv_wr32(dev, TP_UNIT(gpc, tp, 0x50c), 0xffffffff);
511 nv_wr32(dev, TP_UNIT(gpc, tp, 0x224), 0xc0000000);
512 nv_wr32(dev, TP_UNIT(gpc, tp, 0x48c), 0xc0000000);
513 nv_wr32(dev, TP_UNIT(gpc, tp, 0x084), 0xc0000000);
514 nv_wr32(dev, TP_UNIT(gpc, tp, 0xe44), 0x001ffffe);
515 nv_wr32(dev, TP_UNIT(gpc, tp, 0xe4c), 0x0000000f);
516 }
517 nv_wr32(dev, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
518 nv_wr32(dev, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
519 }
520}
521
522static void
523nvc0_graph_init_rop(struct drm_device *dev)
524{
525 struct drm_nouveau_private *dev_priv = dev->dev_private;
526 struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
527 int rop;
528
529 for (rop = 0; rop < priv->rop_nr; rop++) {
530 nv_wr32(dev, ROP_UNIT(rop, 0x144), 0xc0000000);
531 nv_wr32(dev, ROP_UNIT(rop, 0x070), 0xc0000000);
532 nv_wr32(dev, ROP_UNIT(rop, 0x204), 0xffffffff);
533 nv_wr32(dev, ROP_UNIT(rop, 0x208), 0xffffffff);
534 }
535}
536
537static int
538nvc0_fuc_load_fw(struct drm_device *dev, u32 fuc_base,
539 const char *code_fw, const char *data_fw)
540{
541 const struct firmware *fw;
542 char name[32];
543 int ret, i;
544
545 snprintf(name, sizeof(name), "nouveau/%s", data_fw);
546 ret = request_firmware(&fw, name, &dev->pdev->dev);
547 if (ret) {
548 NV_ERROR(dev, "failed to load %s\n", data_fw);
549 return ret;
550 }
551
552 nv_wr32(dev, fuc_base + 0x01c0, 0x01000000);
553 for (i = 0; i < fw->size / 4; i++)
554 nv_wr32(dev, fuc_base + 0x01c4, ((u32 *)fw->data)[i]);
555 release_firmware(fw);
556
557 snprintf(name, sizeof(name), "nouveau/%s", code_fw);
558 ret = request_firmware(&fw, name, &dev->pdev->dev);
559 if (ret) {
560 NV_ERROR(dev, "failed to load %s\n", code_fw);
561 return ret;
562 }
563
564 nv_wr32(dev, fuc_base + 0x0180, 0x01000000);
565 for (i = 0; i < fw->size / 4; i++) {
566 if ((i & 0x3f) == 0)
567 nv_wr32(dev, fuc_base + 0x0188, i >> 6);
568 nv_wr32(dev, fuc_base + 0x0184, ((u32 *)fw->data)[i]);
569 }
570 release_firmware(fw);
571
572 return 0;
573}
574
575static int
576nvc0_graph_init_ctxctl(struct drm_device *dev)
577{
578 struct drm_nouveau_private *dev_priv = dev->dev_private;
579 struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
580 u32 r000260;
581 int ret;
582
583 /* load fuc microcode */
584 r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
585 ret = nvc0_fuc_load_fw(dev, 0x409000, "fuc409c", "fuc409d");
586 if (ret == 0)
587 ret = nvc0_fuc_load_fw(dev, 0x41a000, "fuc41ac", "fuc41ad");
588 nv_wr32(dev, 0x000260, r000260);
589
590 if (ret)
591 return ret;
592
593 /* start both of them running */
594 nv_wr32(dev, 0x409840, 0xffffffff);
595 nv_wr32(dev, 0x41a10c, 0x00000000);
596 nv_wr32(dev, 0x40910c, 0x00000000);
597 nv_wr32(dev, 0x41a100, 0x00000002);
598 nv_wr32(dev, 0x409100, 0x00000002);
599 if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000001))
600 NV_INFO(dev, "0x409800 wait failed\n");
601
602 nv_wr32(dev, 0x409840, 0xffffffff);
603 nv_wr32(dev, 0x409500, 0x7fffffff);
604 nv_wr32(dev, 0x409504, 0x00000021);
605
606 nv_wr32(dev, 0x409840, 0xffffffff);
607 nv_wr32(dev, 0x409500, 0x00000000);
608 nv_wr32(dev, 0x409504, 0x00000010);
609 if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
610 NV_ERROR(dev, "fuc09 req 0x10 timeout\n");
611 return -EBUSY;
612 }
613 priv->grctx_size = nv_rd32(dev, 0x409800);
614
615 nv_wr32(dev, 0x409840, 0xffffffff);
616 nv_wr32(dev, 0x409500, 0x00000000);
617 nv_wr32(dev, 0x409504, 0x00000016);
618 if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
619 NV_ERROR(dev, "fuc09 req 0x16 timeout\n");
620 return -EBUSY;
621 }
622
623 nv_wr32(dev, 0x409840, 0xffffffff);
624 nv_wr32(dev, 0x409500, 0x00000000);
625 nv_wr32(dev, 0x409504, 0x00000025);
626 if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
627 NV_ERROR(dev, "fuc09 req 0x25 timeout\n");
628 return -EBUSY;
629 }
630
631 return 0;
632}
633
634int
635nvc0_graph_init(struct drm_device *dev)
636{
637 struct drm_nouveau_private *dev_priv = dev->dev_private;
638 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
639 struct nvc0_graph_priv *priv;
640 int ret;
641
642 dev_priv->engine.graph.accel_blocked = true;
643
644 switch (dev_priv->chipset) {
645 case 0xc0:
646 case 0xc3:
647 case 0xc4:
648 break;
649 default:
650 NV_ERROR(dev, "PGRAPH: unsupported chipset, please report!\n");
651 if (nouveau_noaccel != 0)
652 return 0;
653 break;
654 }
655
656 nv_mask(dev, 0x000200, 0x18001000, 0x00000000);
657 nv_mask(dev, 0x000200, 0x18001000, 0x18001000);
658
659 if (!pgraph->priv) {
660 ret = nvc0_graph_create(dev);
661 if (ret)
662 return ret;
663 }
664 priv = pgraph->priv;
665
666 nvc0_graph_init_obj418880(dev);
667 nvc0_graph_init_regs(dev);
668 //nvc0_graph_init_unimplemented_magics(dev);
669 nvc0_graph_init_gpc_0(dev);
670 //nvc0_graph_init_unimplemented_c242(dev);
671
672 nv_wr32(dev, 0x400500, 0x00010001);
673 nv_wr32(dev, 0x400100, 0xffffffff);
674 nv_wr32(dev, 0x40013c, 0xffffffff);
675
676 nvc0_graph_init_units(dev);
677 nvc0_graph_init_gpc_1(dev);
678 nvc0_graph_init_rop(dev);
679
680 nv_wr32(dev, 0x400108, 0xffffffff);
681 nv_wr32(dev, 0x400138, 0xffffffff);
682 nv_wr32(dev, 0x400118, 0xffffffff);
683 nv_wr32(dev, 0x400130, 0xffffffff);
684 nv_wr32(dev, 0x40011c, 0xffffffff);
685 nv_wr32(dev, 0x400134, 0xffffffff);
686 nv_wr32(dev, 0x400054, 0x34ce3464);
687
688 ret = nvc0_graph_init_ctxctl(dev);
689 if (ret == 0)
690 dev_priv->engine.graph.accel_blocked = false;
691 return 0;
692}
693
694static int
695nvc0_graph_isr_chid(struct drm_device *dev, u64 inst)
696{
697 struct drm_nouveau_private *dev_priv = dev->dev_private;
698 struct nouveau_channel *chan;
699 unsigned long flags;
700 int i;
701
702 spin_lock_irqsave(&dev_priv->channels.lock, flags);
703 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
704 chan = dev_priv->channels.ptr[i];
705 if (!chan || !chan->ramin)
706 continue;
707
708 if (inst == chan->ramin->vinst)
709 break;
710 }
711 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
712 return i;
713}
714
715static void
716nvc0_graph_isr(struct drm_device *dev)
717{
718 u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12;
719 u32 chid = nvc0_graph_isr_chid(dev, inst);
720 u32 stat = nv_rd32(dev, 0x400100);
721 u32 addr = nv_rd32(dev, 0x400704);
722 u32 mthd = (addr & 0x00003ffc);
723 u32 subc = (addr & 0x00070000) >> 16;
724 u32 data = nv_rd32(dev, 0x400708);
725 u32 code = nv_rd32(dev, 0x400110);
726 u32 class = nv_rd32(dev, 0x404200 + (subc * 4));
727
728 if (stat & 0x00000010) {
729 NV_INFO(dev, "PGRAPH: ILLEGAL_MTHD ch %d [0x%010llx] subc %d "
730 "class 0x%04x mthd 0x%04x data 0x%08x\n",
731 chid, inst, subc, class, mthd, data);
732 nv_wr32(dev, 0x400100, 0x00000010);
733 stat &= ~0x00000010;
734 }
735
736 if (stat & 0x00000020) {
737 NV_INFO(dev, "PGRAPH: ILLEGAL_CLASS ch %d [0x%010llx] subc %d "
738 "class 0x%04x mthd 0x%04x data 0x%08x\n",
739 chid, inst, subc, class, mthd, data);
740 nv_wr32(dev, 0x400100, 0x00000020);
741 stat &= ~0x00000020;
742 }
743
744 if (stat & 0x00100000) {
745 NV_INFO(dev, "PGRAPH: DATA_ERROR [");
746 nouveau_enum_print(nv50_data_error_names, code);
747 printk("] ch %d [0x%010llx] subc %d class 0x%04x "
748 "mthd 0x%04x data 0x%08x\n",
749 chid, inst, subc, class, mthd, data);
750 nv_wr32(dev, 0x400100, 0x00100000);
751 stat &= ~0x00100000;
752 }
753
754 if (stat & 0x00200000) {
755 u32 trap = nv_rd32(dev, 0x400108);
756 NV_INFO(dev, "PGRAPH: TRAP ch %d status 0x%08x\n", chid, trap);
757 nv_wr32(dev, 0x400108, trap);
758 nv_wr32(dev, 0x400100, 0x00200000);
759 stat &= ~0x00200000;
760 }
761
762 if (stat & 0x00080000) {
763 u32 ustat = nv_rd32(dev, 0x409c18);
764
765 NV_INFO(dev, "PGRAPH: CTXCTRL ustat 0x%08x\n", ustat);
766
767 nv_wr32(dev, 0x409c20, ustat);
768 nv_wr32(dev, 0x400100, 0x00080000);
769 stat &= ~0x00080000;
770 }
771
772 if (stat) {
773 NV_INFO(dev, "PGRAPH: unknown stat 0x%08x\n", stat);
774 nv_wr32(dev, 0x400100, stat);
775 }
776
777 nv_wr32(dev, 0x400500, 0x00010001);
778}
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.h b/drivers/gpu/drm/nouveau/nvc0_graph.h
new file mode 100644
index 000000000000..40e26f9c56c4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.h
@@ -0,0 +1,64 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#ifndef __NVC0_GRAPH_H__
26#define __NVC0_GRAPH_H__
27
28#define GPC_MAX 4
29#define TP_MAX 32
30
31#define ROP_BCAST(r) (0x408800 + (r))
32#define ROP_UNIT(u,r) (0x410000 + (u) * 0x400 + (r))
33#define GPC_BCAST(r) (0x418000 + (r))
34#define GPC_UNIT(t,r) (0x500000 + (t) * 0x8000 + (r))
35#define TP_UNIT(t,m,r) (0x504000 + (t) * 0x8000 + (m) * 0x800 + (r))
36
37struct nvc0_graph_priv {
38 u8 gpc_nr;
39 u8 rop_nr;
40 u8 tp_nr[GPC_MAX];
41 u8 tp_total;
42
43 u32 grctx_size;
44 u32 *grctx_vals;
45 struct nouveau_gpuobj *unk4188b4;
46 struct nouveau_gpuobj *unk4188b8;
47
48 u8 magic_not_rop_nr;
49 u32 magicgpc980[4];
50 u32 magicgpc918;
51};
52
53struct nvc0_graph_chan {
54 struct nouveau_gpuobj *grctx;
55 struct nouveau_gpuobj *unk408004; /* 0x418810 too */
56 struct nouveau_gpuobj *unk40800c; /* 0x419004 too */
57 struct nouveau_gpuobj *unk418810; /* 0x419848 too */
58 struct nouveau_gpuobj *mmio;
59 int mmio_nr;
60};
61
62int nvc0_grctx_generate(struct nouveau_channel *);
63
64#endif
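
The unit macros are strided address arithmetic: each GPC owns an 0x8000 window from 0x500000, TPs sit 0x800 apart starting at +0x4000 inside that window, and GPC_BCAST hits the broadcast copy at 0x418000. A few worked addresses:

#include <stdio.h>

#define GPC_BCAST(r)     (0x418000 + (r))
#define GPC_UNIT(t, r)   (0x500000 + (t) * 0x8000 + (r))
#define TP_UNIT(t, m, r) (0x504000 + (t) * 0x8000 + (m) * 0x800 + (r))

int main(void)
{
	/* per-GPC windows are 0x8000 apart; TPs are 0x800 apart at +0x4000 */
	printf("GPC_UNIT(0, 0x2608)  = 0x%06x\n", GPC_UNIT(0, 0x2608));  /* 0x502608 */
	printf("GPC_UNIT(1, 0x2608)  = 0x%06x\n", GPC_UNIT(1, 0x2608));  /* 0x50a608 */
	printf("TP_UNIT(1, 2, 0x224) = 0x%06x\n", TP_UNIT(1, 2, 0x224)); /* 0x50d224 */
	printf("GPC_BCAST(0x0980)    = 0x%06x\n", GPC_BCAST(0x0980));    /* 0x418980 */
	return 0;
}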
diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c
new file mode 100644
index 000000000000..b9e68b2d30aa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_grctx.c
@@ -0,0 +1,2874 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_mm.h"
28#include "nvc0_graph.h"
29
30static void
31nv_icmd(struct drm_device *dev, u32 icmd, u32 data)
32{
33 nv_wr32(dev, 0x400204, data);
34 nv_wr32(dev, 0x400200, icmd);
35 while (nv_rd32(dev, 0x400700) & 2) {}
36}
37
38static void
39nv_mthd(struct drm_device *dev, u32 class, u32 mthd, u32 data)
40{
41 nv_wr32(dev, 0x40448c, data);
42 nv_wr32(dev, 0x404488, 0x80000000 | (mthd << 14) | class);
43}
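
The word nv_mthd writes to 0x404488 packs a full method submission: the class fills bits 15:0, the method offset (a multiple of 4, at most 0x3ffc) lands in bits 27:16 via the << 14 shift, and bit 31 is the trigger. A round-trip encode/decode sketch; the helper name is illustrative:

#include <stdint.h>
#include <stdio.h>

static uint32_t mthd_encode(uint32_t cls, uint32_t mthd)
{
	/* class in bits 15:0; mthd (multiple of 4, <= 0x3ffc) in bits 27:16 */
	return 0x80000000 | (mthd << 14) | cls;
}

int main(void)
{
	uint32_t w = mthd_encode(0x9097, 0x0800);

	printf("0x404488 <- 0x%08x\n", w);		/* 0x82009097 */
	printf("class 0x%04x mthd 0x%04x\n",
	       w & 0xffff, (w >> 14) & 0x3ffc);		/* fields round-trip */
	return 0;
}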
44
45static void
46nvc0_grctx_generate_9097(struct drm_device *dev)
47{
48 nv_mthd(dev, 0x9097, 0x0800, 0x00000000);
49 nv_mthd(dev, 0x9097, 0x0840, 0x00000000);
50 nv_mthd(dev, 0x9097, 0x0880, 0x00000000);
51 nv_mthd(dev, 0x9097, 0x08c0, 0x00000000);
52 nv_mthd(dev, 0x9097, 0x0900, 0x00000000);
53 nv_mthd(dev, 0x9097, 0x0940, 0x00000000);
54 nv_mthd(dev, 0x9097, 0x0980, 0x00000000);
55 nv_mthd(dev, 0x9097, 0x09c0, 0x00000000);
56 nv_mthd(dev, 0x9097, 0x0804, 0x00000000);
57 nv_mthd(dev, 0x9097, 0x0844, 0x00000000);
58 nv_mthd(dev, 0x9097, 0x0884, 0x00000000);
59 nv_mthd(dev, 0x9097, 0x08c4, 0x00000000);
60 nv_mthd(dev, 0x9097, 0x0904, 0x00000000);
61 nv_mthd(dev, 0x9097, 0x0944, 0x00000000);
62 nv_mthd(dev, 0x9097, 0x0984, 0x00000000);
63 nv_mthd(dev, 0x9097, 0x09c4, 0x00000000);
64 nv_mthd(dev, 0x9097, 0x0808, 0x00000400);
65 nv_mthd(dev, 0x9097, 0x0848, 0x00000400);
66 nv_mthd(dev, 0x9097, 0x0888, 0x00000400);
67 nv_mthd(dev, 0x9097, 0x08c8, 0x00000400);
68 nv_mthd(dev, 0x9097, 0x0908, 0x00000400);
69 nv_mthd(dev, 0x9097, 0x0948, 0x00000400);
70 nv_mthd(dev, 0x9097, 0x0988, 0x00000400);
71 nv_mthd(dev, 0x9097, 0x09c8, 0x00000400);
72 nv_mthd(dev, 0x9097, 0x080c, 0x00000300);
73 nv_mthd(dev, 0x9097, 0x084c, 0x00000300);
74 nv_mthd(dev, 0x9097, 0x088c, 0x00000300);
75 nv_mthd(dev, 0x9097, 0x08cc, 0x00000300);
76 nv_mthd(dev, 0x9097, 0x090c, 0x00000300);
77 nv_mthd(dev, 0x9097, 0x094c, 0x00000300);
78 nv_mthd(dev, 0x9097, 0x098c, 0x00000300);
79 nv_mthd(dev, 0x9097, 0x09cc, 0x00000300);
80 nv_mthd(dev, 0x9097, 0x0810, 0x000000cf);
81 nv_mthd(dev, 0x9097, 0x0850, 0x00000000);
82 nv_mthd(dev, 0x9097, 0x0890, 0x00000000);
83 nv_mthd(dev, 0x9097, 0x08d0, 0x00000000);
84 nv_mthd(dev, 0x9097, 0x0910, 0x00000000);
85 nv_mthd(dev, 0x9097, 0x0950, 0x00000000);
86 nv_mthd(dev, 0x9097, 0x0990, 0x00000000);
87 nv_mthd(dev, 0x9097, 0x09d0, 0x00000000);
88 nv_mthd(dev, 0x9097, 0x0814, 0x00000040);
89 nv_mthd(dev, 0x9097, 0x0854, 0x00000040);
90 nv_mthd(dev, 0x9097, 0x0894, 0x00000040);
91 nv_mthd(dev, 0x9097, 0x08d4, 0x00000040);
92 nv_mthd(dev, 0x9097, 0x0914, 0x00000040);
93 nv_mthd(dev, 0x9097, 0x0954, 0x00000040);
94 nv_mthd(dev, 0x9097, 0x0994, 0x00000040);
95 nv_mthd(dev, 0x9097, 0x09d4, 0x00000040);
96 nv_mthd(dev, 0x9097, 0x0818, 0x00000001);
97 nv_mthd(dev, 0x9097, 0x0858, 0x00000001);
98 nv_mthd(dev, 0x9097, 0x0898, 0x00000001);
99 nv_mthd(dev, 0x9097, 0x08d8, 0x00000001);
100 nv_mthd(dev, 0x9097, 0x0918, 0x00000001);
101 nv_mthd(dev, 0x9097, 0x0958, 0x00000001);
102 nv_mthd(dev, 0x9097, 0x0998, 0x00000001);
103 nv_mthd(dev, 0x9097, 0x09d8, 0x00000001);
104 nv_mthd(dev, 0x9097, 0x081c, 0x00000000);
105 nv_mthd(dev, 0x9097, 0x085c, 0x00000000);
106 nv_mthd(dev, 0x9097, 0x089c, 0x00000000);
107 nv_mthd(dev, 0x9097, 0x08dc, 0x00000000);
108 nv_mthd(dev, 0x9097, 0x091c, 0x00000000);
109 nv_mthd(dev, 0x9097, 0x095c, 0x00000000);
110 nv_mthd(dev, 0x9097, 0x099c, 0x00000000);
111 nv_mthd(dev, 0x9097, 0x09dc, 0x00000000);
112 nv_mthd(dev, 0x9097, 0x0820, 0x00000000);
113 nv_mthd(dev, 0x9097, 0x0860, 0x00000000);
114 nv_mthd(dev, 0x9097, 0x08a0, 0x00000000);
115 nv_mthd(dev, 0x9097, 0x08e0, 0x00000000);
116 nv_mthd(dev, 0x9097, 0x0920, 0x00000000);
117 nv_mthd(dev, 0x9097, 0x0960, 0x00000000);
118 nv_mthd(dev, 0x9097, 0x09a0, 0x00000000);
119 nv_mthd(dev, 0x9097, 0x09e0, 0x00000000);
120 nv_mthd(dev, 0x9097, 0x2700, 0x00000000);
121 nv_mthd(dev, 0x9097, 0x2720, 0x00000000);
122 nv_mthd(dev, 0x9097, 0x2740, 0x00000000);
123 nv_mthd(dev, 0x9097, 0x2760, 0x00000000);
124 nv_mthd(dev, 0x9097, 0x2780, 0x00000000);
125 nv_mthd(dev, 0x9097, 0x27a0, 0x00000000);
126 nv_mthd(dev, 0x9097, 0x27c0, 0x00000000);
127 nv_mthd(dev, 0x9097, 0x27e0, 0x00000000);
128 nv_mthd(dev, 0x9097, 0x2704, 0x00000000);
129 nv_mthd(dev, 0x9097, 0x2724, 0x00000000);
130 nv_mthd(dev, 0x9097, 0x2744, 0x00000000);
131 nv_mthd(dev, 0x9097, 0x2764, 0x00000000);
132 nv_mthd(dev, 0x9097, 0x2784, 0x00000000);
133 nv_mthd(dev, 0x9097, 0x27a4, 0x00000000);
134 nv_mthd(dev, 0x9097, 0x27c4, 0x00000000);
135 nv_mthd(dev, 0x9097, 0x27e4, 0x00000000);
136 nv_mthd(dev, 0x9097, 0x2708, 0x00000000);
137 nv_mthd(dev, 0x9097, 0x2728, 0x00000000);
138 nv_mthd(dev, 0x9097, 0x2748, 0x00000000);
139 nv_mthd(dev, 0x9097, 0x2768, 0x00000000);
140 nv_mthd(dev, 0x9097, 0x2788, 0x00000000);
141 nv_mthd(dev, 0x9097, 0x27a8, 0x00000000);
142 nv_mthd(dev, 0x9097, 0x27c8, 0x00000000);
143 nv_mthd(dev, 0x9097, 0x27e8, 0x00000000);
144 nv_mthd(dev, 0x9097, 0x270c, 0x00000000);
145 nv_mthd(dev, 0x9097, 0x272c, 0x00000000);
146 nv_mthd(dev, 0x9097, 0x274c, 0x00000000);
147 nv_mthd(dev, 0x9097, 0x276c, 0x00000000);
148 nv_mthd(dev, 0x9097, 0x278c, 0x00000000);
149 nv_mthd(dev, 0x9097, 0x27ac, 0x00000000);
150 nv_mthd(dev, 0x9097, 0x27cc, 0x00000000);
151 nv_mthd(dev, 0x9097, 0x27ec, 0x00000000);
152 nv_mthd(dev, 0x9097, 0x2710, 0x00014000);
153 nv_mthd(dev, 0x9097, 0x2730, 0x00014000);
154 nv_mthd(dev, 0x9097, 0x2750, 0x00014000);
155 nv_mthd(dev, 0x9097, 0x2770, 0x00014000);
156 nv_mthd(dev, 0x9097, 0x2790, 0x00014000);
157 nv_mthd(dev, 0x9097, 0x27b0, 0x00014000);
158 nv_mthd(dev, 0x9097, 0x27d0, 0x00014000);
159 nv_mthd(dev, 0x9097, 0x27f0, 0x00014000);
160 nv_mthd(dev, 0x9097, 0x2714, 0x00000040);
161 nv_mthd(dev, 0x9097, 0x2734, 0x00000040);
162 nv_mthd(dev, 0x9097, 0x2754, 0x00000040);
163 nv_mthd(dev, 0x9097, 0x2774, 0x00000040);
164 nv_mthd(dev, 0x9097, 0x2794, 0x00000040);
165 nv_mthd(dev, 0x9097, 0x27b4, 0x00000040);
166 nv_mthd(dev, 0x9097, 0x27d4, 0x00000040);
167 nv_mthd(dev, 0x9097, 0x27f4, 0x00000040);
168 nv_mthd(dev, 0x9097, 0x1c00, 0x00000000);
169 nv_mthd(dev, 0x9097, 0x1c10, 0x00000000);
170 nv_mthd(dev, 0x9097, 0x1c20, 0x00000000);
171 nv_mthd(dev, 0x9097, 0x1c30, 0x00000000);
172 nv_mthd(dev, 0x9097, 0x1c40, 0x00000000);
173 nv_mthd(dev, 0x9097, 0x1c50, 0x00000000);
174 nv_mthd(dev, 0x9097, 0x1c60, 0x00000000);
175 nv_mthd(dev, 0x9097, 0x1c70, 0x00000000);
176 nv_mthd(dev, 0x9097, 0x1c80, 0x00000000);
177 nv_mthd(dev, 0x9097, 0x1c90, 0x00000000);
178 nv_mthd(dev, 0x9097, 0x1ca0, 0x00000000);
179 nv_mthd(dev, 0x9097, 0x1cb0, 0x00000000);
180 nv_mthd(dev, 0x9097, 0x1cc0, 0x00000000);
181 nv_mthd(dev, 0x9097, 0x1cd0, 0x00000000);
182 nv_mthd(dev, 0x9097, 0x1ce0, 0x00000000);
183 nv_mthd(dev, 0x9097, 0x1cf0, 0x00000000);
184 nv_mthd(dev, 0x9097, 0x1c04, 0x00000000);
185 nv_mthd(dev, 0x9097, 0x1c14, 0x00000000);
186 nv_mthd(dev, 0x9097, 0x1c24, 0x00000000);
187 nv_mthd(dev, 0x9097, 0x1c34, 0x00000000);
188 nv_mthd(dev, 0x9097, 0x1c44, 0x00000000);
189 nv_mthd(dev, 0x9097, 0x1c54, 0x00000000);
190 nv_mthd(dev, 0x9097, 0x1c64, 0x00000000);
191 nv_mthd(dev, 0x9097, 0x1c74, 0x00000000);
192 nv_mthd(dev, 0x9097, 0x1c84, 0x00000000);
193 nv_mthd(dev, 0x9097, 0x1c94, 0x00000000);
194 nv_mthd(dev, 0x9097, 0x1ca4, 0x00000000);
195 nv_mthd(dev, 0x9097, 0x1cb4, 0x00000000);
196 nv_mthd(dev, 0x9097, 0x1cc4, 0x00000000);
197 nv_mthd(dev, 0x9097, 0x1cd4, 0x00000000);
198 nv_mthd(dev, 0x9097, 0x1ce4, 0x00000000);
199 nv_mthd(dev, 0x9097, 0x1cf4, 0x00000000);
200 nv_mthd(dev, 0x9097, 0x1c08, 0x00000000);
201 nv_mthd(dev, 0x9097, 0x1c18, 0x00000000);
202 nv_mthd(dev, 0x9097, 0x1c28, 0x00000000);
203 nv_mthd(dev, 0x9097, 0x1c38, 0x00000000);
204 nv_mthd(dev, 0x9097, 0x1c48, 0x00000000);
205 nv_mthd(dev, 0x9097, 0x1c58, 0x00000000);
206 nv_mthd(dev, 0x9097, 0x1c68, 0x00000000);
207 nv_mthd(dev, 0x9097, 0x1c78, 0x00000000);
208 nv_mthd(dev, 0x9097, 0x1c88, 0x00000000);
209 nv_mthd(dev, 0x9097, 0x1c98, 0x00000000);
210 nv_mthd(dev, 0x9097, 0x1ca8, 0x00000000);
211 nv_mthd(dev, 0x9097, 0x1cb8, 0x00000000);
212 nv_mthd(dev, 0x9097, 0x1cc8, 0x00000000);
213 nv_mthd(dev, 0x9097, 0x1cd8, 0x00000000);
214 nv_mthd(dev, 0x9097, 0x1ce8, 0x00000000);
215 nv_mthd(dev, 0x9097, 0x1cf8, 0x00000000);
216 nv_mthd(dev, 0x9097, 0x1c0c, 0x00000000);
217 nv_mthd(dev, 0x9097, 0x1c1c, 0x00000000);
218 nv_mthd(dev, 0x9097, 0x1c2c, 0x00000000);
219 nv_mthd(dev, 0x9097, 0x1c3c, 0x00000000);
220 nv_mthd(dev, 0x9097, 0x1c4c, 0x00000000);
221 nv_mthd(dev, 0x9097, 0x1c5c, 0x00000000);
222 nv_mthd(dev, 0x9097, 0x1c6c, 0x00000000);
223 nv_mthd(dev, 0x9097, 0x1c7c, 0x00000000);
224 nv_mthd(dev, 0x9097, 0x1c8c, 0x00000000);
225 nv_mthd(dev, 0x9097, 0x1c9c, 0x00000000);
226 nv_mthd(dev, 0x9097, 0x1cac, 0x00000000);
227 nv_mthd(dev, 0x9097, 0x1cbc, 0x00000000);
228 nv_mthd(dev, 0x9097, 0x1ccc, 0x00000000);
229 nv_mthd(dev, 0x9097, 0x1cdc, 0x00000000);
230 nv_mthd(dev, 0x9097, 0x1cec, 0x00000000);
231 nv_mthd(dev, 0x9097, 0x1cfc, 0x00000000);
232 nv_mthd(dev, 0x9097, 0x1d00, 0x00000000);
233 nv_mthd(dev, 0x9097, 0x1d10, 0x00000000);
234 nv_mthd(dev, 0x9097, 0x1d20, 0x00000000);
235 nv_mthd(dev, 0x9097, 0x1d30, 0x00000000);
236 nv_mthd(dev, 0x9097, 0x1d40, 0x00000000);
237 nv_mthd(dev, 0x9097, 0x1d50, 0x00000000);
238 nv_mthd(dev, 0x9097, 0x1d60, 0x00000000);
239 nv_mthd(dev, 0x9097, 0x1d70, 0x00000000);
240 nv_mthd(dev, 0x9097, 0x1d80, 0x00000000);
241 nv_mthd(dev, 0x9097, 0x1d90, 0x00000000);
242 nv_mthd(dev, 0x9097, 0x1da0, 0x00000000);
243 nv_mthd(dev, 0x9097, 0x1db0, 0x00000000);
244 nv_mthd(dev, 0x9097, 0x1dc0, 0x00000000);
245 nv_mthd(dev, 0x9097, 0x1dd0, 0x00000000);
246 nv_mthd(dev, 0x9097, 0x1de0, 0x00000000);
247 nv_mthd(dev, 0x9097, 0x1df0, 0x00000000);
248 nv_mthd(dev, 0x9097, 0x1d04, 0x00000000);
249 nv_mthd(dev, 0x9097, 0x1d14, 0x00000000);
250 nv_mthd(dev, 0x9097, 0x1d24, 0x00000000);
251 nv_mthd(dev, 0x9097, 0x1d34, 0x00000000);
252 nv_mthd(dev, 0x9097, 0x1d44, 0x00000000);
253 nv_mthd(dev, 0x9097, 0x1d54, 0x00000000);
254 nv_mthd(dev, 0x9097, 0x1d64, 0x00000000);
255 nv_mthd(dev, 0x9097, 0x1d74, 0x00000000);
256 nv_mthd(dev, 0x9097, 0x1d84, 0x00000000);
257 nv_mthd(dev, 0x9097, 0x1d94, 0x00000000);
258 nv_mthd(dev, 0x9097, 0x1da4, 0x00000000);
259 nv_mthd(dev, 0x9097, 0x1db4, 0x00000000);
260 nv_mthd(dev, 0x9097, 0x1dc4, 0x00000000);
261 nv_mthd(dev, 0x9097, 0x1dd4, 0x00000000);
262 nv_mthd(dev, 0x9097, 0x1de4, 0x00000000);
263 nv_mthd(dev, 0x9097, 0x1df4, 0x00000000);
264 nv_mthd(dev, 0x9097, 0x1d08, 0x00000000);
265 nv_mthd(dev, 0x9097, 0x1d18, 0x00000000);
266 nv_mthd(dev, 0x9097, 0x1d28, 0x00000000);
267 nv_mthd(dev, 0x9097, 0x1d38, 0x00000000);
268 nv_mthd(dev, 0x9097, 0x1d48, 0x00000000);
269 nv_mthd(dev, 0x9097, 0x1d58, 0x00000000);
270 nv_mthd(dev, 0x9097, 0x1d68, 0x00000000);
271 nv_mthd(dev, 0x9097, 0x1d78, 0x00000000);
272 nv_mthd(dev, 0x9097, 0x1d88, 0x00000000);
273 nv_mthd(dev, 0x9097, 0x1d98, 0x00000000);
274 nv_mthd(dev, 0x9097, 0x1da8, 0x00000000);
275 nv_mthd(dev, 0x9097, 0x1db8, 0x00000000);
276 nv_mthd(dev, 0x9097, 0x1dc8, 0x00000000);
277 nv_mthd(dev, 0x9097, 0x1dd8, 0x00000000);
278 nv_mthd(dev, 0x9097, 0x1de8, 0x00000000);
279 nv_mthd(dev, 0x9097, 0x1df8, 0x00000000);
280 nv_mthd(dev, 0x9097, 0x1d0c, 0x00000000);
281 nv_mthd(dev, 0x9097, 0x1d1c, 0x00000000);
282 nv_mthd(dev, 0x9097, 0x1d2c, 0x00000000);
283 nv_mthd(dev, 0x9097, 0x1d3c, 0x00000000);
284 nv_mthd(dev, 0x9097, 0x1d4c, 0x00000000);
285 nv_mthd(dev, 0x9097, 0x1d5c, 0x00000000);
286 nv_mthd(dev, 0x9097, 0x1d6c, 0x00000000);
287 nv_mthd(dev, 0x9097, 0x1d7c, 0x00000000);
288 nv_mthd(dev, 0x9097, 0x1d8c, 0x00000000);
289 nv_mthd(dev, 0x9097, 0x1d9c, 0x00000000);
290 nv_mthd(dev, 0x9097, 0x1dac, 0x00000000);
291 nv_mthd(dev, 0x9097, 0x1dbc, 0x00000000);
292 nv_mthd(dev, 0x9097, 0x1dcc, 0x00000000);
293 nv_mthd(dev, 0x9097, 0x1ddc, 0x00000000);
294 nv_mthd(dev, 0x9097, 0x1dec, 0x00000000);
295 nv_mthd(dev, 0x9097, 0x1dfc, 0x00000000);
296 nv_mthd(dev, 0x9097, 0x1f00, 0x00000000);
297 nv_mthd(dev, 0x9097, 0x1f08, 0x00000000);
298 nv_mthd(dev, 0x9097, 0x1f10, 0x00000000);
299 nv_mthd(dev, 0x9097, 0x1f18, 0x00000000);
300 nv_mthd(dev, 0x9097, 0x1f20, 0x00000000);
301 nv_mthd(dev, 0x9097, 0x1f28, 0x00000000);
302 nv_mthd(dev, 0x9097, 0x1f30, 0x00000000);
303 nv_mthd(dev, 0x9097, 0x1f38, 0x00000000);
304 nv_mthd(dev, 0x9097, 0x1f40, 0x00000000);
305 nv_mthd(dev, 0x9097, 0x1f48, 0x00000000);
306 nv_mthd(dev, 0x9097, 0x1f50, 0x00000000);
307 nv_mthd(dev, 0x9097, 0x1f58, 0x00000000);
308 nv_mthd(dev, 0x9097, 0x1f60, 0x00000000);
309 nv_mthd(dev, 0x9097, 0x1f68, 0x00000000);
310 nv_mthd(dev, 0x9097, 0x1f70, 0x00000000);
311 nv_mthd(dev, 0x9097, 0x1f78, 0x00000000);
312 nv_mthd(dev, 0x9097, 0x1f04, 0x00000000);
313 nv_mthd(dev, 0x9097, 0x1f0c, 0x00000000);
314 nv_mthd(dev, 0x9097, 0x1f14, 0x00000000);
315 nv_mthd(dev, 0x9097, 0x1f1c, 0x00000000);
316 nv_mthd(dev, 0x9097, 0x1f24, 0x00000000);
317 nv_mthd(dev, 0x9097, 0x1f2c, 0x00000000);
318 nv_mthd(dev, 0x9097, 0x1f34, 0x00000000);
319 nv_mthd(dev, 0x9097, 0x1f3c, 0x00000000);
320 nv_mthd(dev, 0x9097, 0x1f44, 0x00000000);
321 nv_mthd(dev, 0x9097, 0x1f4c, 0x00000000);
322 nv_mthd(dev, 0x9097, 0x1f54, 0x00000000);
323 nv_mthd(dev, 0x9097, 0x1f5c, 0x00000000);
324 nv_mthd(dev, 0x9097, 0x1f64, 0x00000000);
325 nv_mthd(dev, 0x9097, 0x1f6c, 0x00000000);
326 nv_mthd(dev, 0x9097, 0x1f74, 0x00000000);
327 nv_mthd(dev, 0x9097, 0x1f7c, 0x00000000);
328 nv_mthd(dev, 0x9097, 0x1f80, 0x00000000);
329 nv_mthd(dev, 0x9097, 0x1f88, 0x00000000);
330 nv_mthd(dev, 0x9097, 0x1f90, 0x00000000);
331 nv_mthd(dev, 0x9097, 0x1f98, 0x00000000);
332 nv_mthd(dev, 0x9097, 0x1fa0, 0x00000000);
333 nv_mthd(dev, 0x9097, 0x1fa8, 0x00000000);
334 nv_mthd(dev, 0x9097, 0x1fb0, 0x00000000);
335 nv_mthd(dev, 0x9097, 0x1fb8, 0x00000000);
336 nv_mthd(dev, 0x9097, 0x1fc0, 0x00000000);
337 nv_mthd(dev, 0x9097, 0x1fc8, 0x00000000);
338 nv_mthd(dev, 0x9097, 0x1fd0, 0x00000000);
339 nv_mthd(dev, 0x9097, 0x1fd8, 0x00000000);
340 nv_mthd(dev, 0x9097, 0x1fe0, 0x00000000);
341 nv_mthd(dev, 0x9097, 0x1fe8, 0x00000000);
342 nv_mthd(dev, 0x9097, 0x1ff0, 0x00000000);
343 nv_mthd(dev, 0x9097, 0x1ff8, 0x00000000);
344 nv_mthd(dev, 0x9097, 0x1f84, 0x00000000);
345 nv_mthd(dev, 0x9097, 0x1f8c, 0x00000000);
346 nv_mthd(dev, 0x9097, 0x1f94, 0x00000000);
347 nv_mthd(dev, 0x9097, 0x1f9c, 0x00000000);
348 nv_mthd(dev, 0x9097, 0x1fa4, 0x00000000);
349 nv_mthd(dev, 0x9097, 0x1fac, 0x00000000);
350 nv_mthd(dev, 0x9097, 0x1fb4, 0x00000000);
351 nv_mthd(dev, 0x9097, 0x1fbc, 0x00000000);
352 nv_mthd(dev, 0x9097, 0x1fc4, 0x00000000);
353 nv_mthd(dev, 0x9097, 0x1fcc, 0x00000000);
354 nv_mthd(dev, 0x9097, 0x1fd4, 0x00000000);
355 nv_mthd(dev, 0x9097, 0x1fdc, 0x00000000);
356 nv_mthd(dev, 0x9097, 0x1fe4, 0x00000000);
357 nv_mthd(dev, 0x9097, 0x1fec, 0x00000000);
358 nv_mthd(dev, 0x9097, 0x1ff4, 0x00000000);
359 nv_mthd(dev, 0x9097, 0x1ffc, 0x00000000);
360 nv_mthd(dev, 0x9097, 0x2200, 0x00000022);
361 nv_mthd(dev, 0x9097, 0x2210, 0x00000022);
362 nv_mthd(dev, 0x9097, 0x2220, 0x00000022);
363 nv_mthd(dev, 0x9097, 0x2230, 0x00000022);
364 nv_mthd(dev, 0x9097, 0x2240, 0x00000022);
365 nv_mthd(dev, 0x9097, 0x2000, 0x00000000);
366 nv_mthd(dev, 0x9097, 0x2040, 0x00000011);
367 nv_mthd(dev, 0x9097, 0x2080, 0x00000020);
368 nv_mthd(dev, 0x9097, 0x20c0, 0x00000030);
369 nv_mthd(dev, 0x9097, 0x2100, 0x00000040);
370 nv_mthd(dev, 0x9097, 0x2140, 0x00000051);
371 nv_mthd(dev, 0x9097, 0x200c, 0x00000001);
372 nv_mthd(dev, 0x9097, 0x204c, 0x00000001);
373 nv_mthd(dev, 0x9097, 0x208c, 0x00000001);
374 nv_mthd(dev, 0x9097, 0x20cc, 0x00000001);
375 nv_mthd(dev, 0x9097, 0x210c, 0x00000001);
376 nv_mthd(dev, 0x9097, 0x214c, 0x00000001);
377 nv_mthd(dev, 0x9097, 0x2010, 0x00000000);
378 nv_mthd(dev, 0x9097, 0x2050, 0x00000000);
379 nv_mthd(dev, 0x9097, 0x2090, 0x00000001);
380 nv_mthd(dev, 0x9097, 0x20d0, 0x00000002);
381 nv_mthd(dev, 0x9097, 0x2110, 0x00000003);
382 nv_mthd(dev, 0x9097, 0x2150, 0x00000004);
383 nv_mthd(dev, 0x9097, 0x0380, 0x00000000);
384 nv_mthd(dev, 0x9097, 0x03a0, 0x00000000);
385 nv_mthd(dev, 0x9097, 0x03c0, 0x00000000);
386 nv_mthd(dev, 0x9097, 0x03e0, 0x00000000);
387 nv_mthd(dev, 0x9097, 0x0384, 0x00000000);
388 nv_mthd(dev, 0x9097, 0x03a4, 0x00000000);
389 nv_mthd(dev, 0x9097, 0x03c4, 0x00000000);
390 nv_mthd(dev, 0x9097, 0x03e4, 0x00000000);
391 nv_mthd(dev, 0x9097, 0x0388, 0x00000000);
392 nv_mthd(dev, 0x9097, 0x03a8, 0x00000000);
393 nv_mthd(dev, 0x9097, 0x03c8, 0x00000000);
394 nv_mthd(dev, 0x9097, 0x03e8, 0x00000000);
395 nv_mthd(dev, 0x9097, 0x038c, 0x00000000);
396 nv_mthd(dev, 0x9097, 0x03ac, 0x00000000);
397 nv_mthd(dev, 0x9097, 0x03cc, 0x00000000);
398 nv_mthd(dev, 0x9097, 0x03ec, 0x00000000);
399 nv_mthd(dev, 0x9097, 0x0700, 0x00000000);
400 nv_mthd(dev, 0x9097, 0x0710, 0x00000000);
401 nv_mthd(dev, 0x9097, 0x0720, 0x00000000);
402 nv_mthd(dev, 0x9097, 0x0730, 0x00000000);
403 nv_mthd(dev, 0x9097, 0x0704, 0x00000000);
404 nv_mthd(dev, 0x9097, 0x0714, 0x00000000);
405 nv_mthd(dev, 0x9097, 0x0724, 0x00000000);
406 nv_mthd(dev, 0x9097, 0x0734, 0x00000000);
407 nv_mthd(dev, 0x9097, 0x0708, 0x00000000);
408 nv_mthd(dev, 0x9097, 0x0718, 0x00000000);
409 nv_mthd(dev, 0x9097, 0x0728, 0x00000000);
410 nv_mthd(dev, 0x9097, 0x0738, 0x00000000);
411 nv_mthd(dev, 0x9097, 0x2800, 0x00000000);
412 nv_mthd(dev, 0x9097, 0x2804, 0x00000000);
413 nv_mthd(dev, 0x9097, 0x2808, 0x00000000);
414 nv_mthd(dev, 0x9097, 0x280c, 0x00000000);
415 nv_mthd(dev, 0x9097, 0x2810, 0x00000000);
416 nv_mthd(dev, 0x9097, 0x2814, 0x00000000);
417 nv_mthd(dev, 0x9097, 0x2818, 0x00000000);
418 nv_mthd(dev, 0x9097, 0x281c, 0x00000000);
419 nv_mthd(dev, 0x9097, 0x2820, 0x00000000);
420 nv_mthd(dev, 0x9097, 0x2824, 0x00000000);
421 nv_mthd(dev, 0x9097, 0x2828, 0x00000000);
422 nv_mthd(dev, 0x9097, 0x282c, 0x00000000);
423 nv_mthd(dev, 0x9097, 0x2830, 0x00000000);
424 nv_mthd(dev, 0x9097, 0x2834, 0x00000000);
425 nv_mthd(dev, 0x9097, 0x2838, 0x00000000);
426 nv_mthd(dev, 0x9097, 0x283c, 0x00000000);
427 nv_mthd(dev, 0x9097, 0x2840, 0x00000000);
428 nv_mthd(dev, 0x9097, 0x2844, 0x00000000);
429 nv_mthd(dev, 0x9097, 0x2848, 0x00000000);
430 nv_mthd(dev, 0x9097, 0x284c, 0x00000000);
431 nv_mthd(dev, 0x9097, 0x2850, 0x00000000);
432 nv_mthd(dev, 0x9097, 0x2854, 0x00000000);
433 nv_mthd(dev, 0x9097, 0x2858, 0x00000000);
434 nv_mthd(dev, 0x9097, 0x285c, 0x00000000);
435 nv_mthd(dev, 0x9097, 0x2860, 0x00000000);
436 nv_mthd(dev, 0x9097, 0x2864, 0x00000000);
437 nv_mthd(dev, 0x9097, 0x2868, 0x00000000);
438 nv_mthd(dev, 0x9097, 0x286c, 0x00000000);
439 nv_mthd(dev, 0x9097, 0x2870, 0x00000000);
440 nv_mthd(dev, 0x9097, 0x2874, 0x00000000);
441 nv_mthd(dev, 0x9097, 0x2878, 0x00000000);
442 nv_mthd(dev, 0x9097, 0x287c, 0x00000000);
443 nv_mthd(dev, 0x9097, 0x2880, 0x00000000);
444 nv_mthd(dev, 0x9097, 0x2884, 0x00000000);
445 nv_mthd(dev, 0x9097, 0x2888, 0x00000000);
446 nv_mthd(dev, 0x9097, 0x288c, 0x00000000);
447 nv_mthd(dev, 0x9097, 0x2890, 0x00000000);
448 nv_mthd(dev, 0x9097, 0x2894, 0x00000000);
449 nv_mthd(dev, 0x9097, 0x2898, 0x00000000);
450 nv_mthd(dev, 0x9097, 0x289c, 0x00000000);
451 nv_mthd(dev, 0x9097, 0x28a0, 0x00000000);
452 nv_mthd(dev, 0x9097, 0x28a4, 0x00000000);
453 nv_mthd(dev, 0x9097, 0x28a8, 0x00000000);
454 nv_mthd(dev, 0x9097, 0x28ac, 0x00000000);
455 nv_mthd(dev, 0x9097, 0x28b0, 0x00000000);
456 nv_mthd(dev, 0x9097, 0x28b4, 0x00000000);
457 nv_mthd(dev, 0x9097, 0x28b8, 0x00000000);
458 nv_mthd(dev, 0x9097, 0x28bc, 0x00000000);
459 nv_mthd(dev, 0x9097, 0x28c0, 0x00000000);
460 nv_mthd(dev, 0x9097, 0x28c4, 0x00000000);
461 nv_mthd(dev, 0x9097, 0x28c8, 0x00000000);
462 nv_mthd(dev, 0x9097, 0x28cc, 0x00000000);
463 nv_mthd(dev, 0x9097, 0x28d0, 0x00000000);
464 nv_mthd(dev, 0x9097, 0x28d4, 0x00000000);
465 nv_mthd(dev, 0x9097, 0x28d8, 0x00000000);
466 nv_mthd(dev, 0x9097, 0x28dc, 0x00000000);
467 nv_mthd(dev, 0x9097, 0x28e0, 0x00000000);
468 nv_mthd(dev, 0x9097, 0x28e4, 0x00000000);
469 nv_mthd(dev, 0x9097, 0x28e8, 0x00000000);
470 nv_mthd(dev, 0x9097, 0x28ec, 0x00000000);
471 nv_mthd(dev, 0x9097, 0x28f0, 0x00000000);
472 nv_mthd(dev, 0x9097, 0x28f4, 0x00000000);
473 nv_mthd(dev, 0x9097, 0x28f8, 0x00000000);
474 nv_mthd(dev, 0x9097, 0x28fc, 0x00000000);
475 nv_mthd(dev, 0x9097, 0x2900, 0x00000000);
476 nv_mthd(dev, 0x9097, 0x2904, 0x00000000);
477 nv_mthd(dev, 0x9097, 0x2908, 0x00000000);
478 nv_mthd(dev, 0x9097, 0x290c, 0x00000000);
479 nv_mthd(dev, 0x9097, 0x2910, 0x00000000);
480 nv_mthd(dev, 0x9097, 0x2914, 0x00000000);
481 nv_mthd(dev, 0x9097, 0x2918, 0x00000000);
482 nv_mthd(dev, 0x9097, 0x291c, 0x00000000);
483 nv_mthd(dev, 0x9097, 0x2920, 0x00000000);
484 nv_mthd(dev, 0x9097, 0x2924, 0x00000000);
485 nv_mthd(dev, 0x9097, 0x2928, 0x00000000);
486 nv_mthd(dev, 0x9097, 0x292c, 0x00000000);
487 nv_mthd(dev, 0x9097, 0x2930, 0x00000000);
488 nv_mthd(dev, 0x9097, 0x2934, 0x00000000);
489 nv_mthd(dev, 0x9097, 0x2938, 0x00000000);
490 nv_mthd(dev, 0x9097, 0x293c, 0x00000000);
491 nv_mthd(dev, 0x9097, 0x2940, 0x00000000);
492 nv_mthd(dev, 0x9097, 0x2944, 0x00000000);
493 nv_mthd(dev, 0x9097, 0x2948, 0x00000000);
494 nv_mthd(dev, 0x9097, 0x294c, 0x00000000);
495 nv_mthd(dev, 0x9097, 0x2950, 0x00000000);
496 nv_mthd(dev, 0x9097, 0x2954, 0x00000000);
497 nv_mthd(dev, 0x9097, 0x2958, 0x00000000);
498 nv_mthd(dev, 0x9097, 0x295c, 0x00000000);
499 nv_mthd(dev, 0x9097, 0x2960, 0x00000000);
500 nv_mthd(dev, 0x9097, 0x2964, 0x00000000);
501 nv_mthd(dev, 0x9097, 0x2968, 0x00000000);
502 nv_mthd(dev, 0x9097, 0x296c, 0x00000000);
503 nv_mthd(dev, 0x9097, 0x2970, 0x00000000);
504 nv_mthd(dev, 0x9097, 0x2974, 0x00000000);
505 nv_mthd(dev, 0x9097, 0x2978, 0x00000000);
506 nv_mthd(dev, 0x9097, 0x297c, 0x00000000);
507 nv_mthd(dev, 0x9097, 0x2980, 0x00000000);
508 nv_mthd(dev, 0x9097, 0x2984, 0x00000000);
509 nv_mthd(dev, 0x9097, 0x2988, 0x00000000);
510 nv_mthd(dev, 0x9097, 0x298c, 0x00000000);
511 nv_mthd(dev, 0x9097, 0x2990, 0x00000000);
512 nv_mthd(dev, 0x9097, 0x2994, 0x00000000);
513 nv_mthd(dev, 0x9097, 0x2998, 0x00000000);
514 nv_mthd(dev, 0x9097, 0x299c, 0x00000000);
515 nv_mthd(dev, 0x9097, 0x29a0, 0x00000000);
516 nv_mthd(dev, 0x9097, 0x29a4, 0x00000000);
517 nv_mthd(dev, 0x9097, 0x29a8, 0x00000000);
518 nv_mthd(dev, 0x9097, 0x29ac, 0x00000000);
519 nv_mthd(dev, 0x9097, 0x29b0, 0x00000000);
520 nv_mthd(dev, 0x9097, 0x29b4, 0x00000000);
521 nv_mthd(dev, 0x9097, 0x29b8, 0x00000000);
522 nv_mthd(dev, 0x9097, 0x29bc, 0x00000000);
523 nv_mthd(dev, 0x9097, 0x29c0, 0x00000000);
524 nv_mthd(dev, 0x9097, 0x29c4, 0x00000000);
525 nv_mthd(dev, 0x9097, 0x29c8, 0x00000000);
526 nv_mthd(dev, 0x9097, 0x29cc, 0x00000000);
527 nv_mthd(dev, 0x9097, 0x29d0, 0x00000000);
528 nv_mthd(dev, 0x9097, 0x29d4, 0x00000000);
529 nv_mthd(dev, 0x9097, 0x29d8, 0x00000000);
530 nv_mthd(dev, 0x9097, 0x29dc, 0x00000000);
531 nv_mthd(dev, 0x9097, 0x29e0, 0x00000000);
532 nv_mthd(dev, 0x9097, 0x29e4, 0x00000000);
533 nv_mthd(dev, 0x9097, 0x29e8, 0x00000000);
534 nv_mthd(dev, 0x9097, 0x29ec, 0x00000000);
535 nv_mthd(dev, 0x9097, 0x29f0, 0x00000000);
536 nv_mthd(dev, 0x9097, 0x29f4, 0x00000000);
537 nv_mthd(dev, 0x9097, 0x29f8, 0x00000000);
538 nv_mthd(dev, 0x9097, 0x29fc, 0x00000000);
539 nv_mthd(dev, 0x9097, 0x0a00, 0x00000000);
540 nv_mthd(dev, 0x9097, 0x0a20, 0x00000000);
541 nv_mthd(dev, 0x9097, 0x0a40, 0x00000000);
542 nv_mthd(dev, 0x9097, 0x0a60, 0x00000000);
543 nv_mthd(dev, 0x9097, 0x0a80, 0x00000000);
544 nv_mthd(dev, 0x9097, 0x0aa0, 0x00000000);
545 nv_mthd(dev, 0x9097, 0x0ac0, 0x00000000);
546 nv_mthd(dev, 0x9097, 0x0ae0, 0x00000000);
547 nv_mthd(dev, 0x9097, 0x0b00, 0x00000000);
548 nv_mthd(dev, 0x9097, 0x0b20, 0x00000000);
549 nv_mthd(dev, 0x9097, 0x0b40, 0x00000000);
550 nv_mthd(dev, 0x9097, 0x0b60, 0x00000000);
551 nv_mthd(dev, 0x9097, 0x0b80, 0x00000000);
552 nv_mthd(dev, 0x9097, 0x0ba0, 0x00000000);
553 nv_mthd(dev, 0x9097, 0x0bc0, 0x00000000);
554 nv_mthd(dev, 0x9097, 0x0be0, 0x00000000);
555 nv_mthd(dev, 0x9097, 0x0a04, 0x00000000);
556 nv_mthd(dev, 0x9097, 0x0a24, 0x00000000);
557 nv_mthd(dev, 0x9097, 0x0a44, 0x00000000);
558 nv_mthd(dev, 0x9097, 0x0a64, 0x00000000);
559 nv_mthd(dev, 0x9097, 0x0a84, 0x00000000);
560 nv_mthd(dev, 0x9097, 0x0aa4, 0x00000000);
561 nv_mthd(dev, 0x9097, 0x0ac4, 0x00000000);
562 nv_mthd(dev, 0x9097, 0x0ae4, 0x00000000);
563 nv_mthd(dev, 0x9097, 0x0b04, 0x00000000);
564 nv_mthd(dev, 0x9097, 0x0b24, 0x00000000);
565 nv_mthd(dev, 0x9097, 0x0b44, 0x00000000);
566 nv_mthd(dev, 0x9097, 0x0b64, 0x00000000);
567 nv_mthd(dev, 0x9097, 0x0b84, 0x00000000);
568 nv_mthd(dev, 0x9097, 0x0ba4, 0x00000000);
569 nv_mthd(dev, 0x9097, 0x0bc4, 0x00000000);
570 nv_mthd(dev, 0x9097, 0x0be4, 0x00000000);
571 nv_mthd(dev, 0x9097, 0x0a08, 0x00000000);
572 nv_mthd(dev, 0x9097, 0x0a28, 0x00000000);
573 nv_mthd(dev, 0x9097, 0x0a48, 0x00000000);
574 nv_mthd(dev, 0x9097, 0x0a68, 0x00000000);
575 nv_mthd(dev, 0x9097, 0x0a88, 0x00000000);
576 nv_mthd(dev, 0x9097, 0x0aa8, 0x00000000);
577 nv_mthd(dev, 0x9097, 0x0ac8, 0x00000000);
578 nv_mthd(dev, 0x9097, 0x0ae8, 0x00000000);
579 nv_mthd(dev, 0x9097, 0x0b08, 0x00000000);
580 nv_mthd(dev, 0x9097, 0x0b28, 0x00000000);
581 nv_mthd(dev, 0x9097, 0x0b48, 0x00000000);
582 nv_mthd(dev, 0x9097, 0x0b68, 0x00000000);
583 nv_mthd(dev, 0x9097, 0x0b88, 0x00000000);
584 nv_mthd(dev, 0x9097, 0x0ba8, 0x00000000);
585 nv_mthd(dev, 0x9097, 0x0bc8, 0x00000000);
586 nv_mthd(dev, 0x9097, 0x0be8, 0x00000000);
587 nv_mthd(dev, 0x9097, 0x0a0c, 0x00000000);
588 nv_mthd(dev, 0x9097, 0x0a2c, 0x00000000);
589 nv_mthd(dev, 0x9097, 0x0a4c, 0x00000000);
590 nv_mthd(dev, 0x9097, 0x0a6c, 0x00000000);
591 nv_mthd(dev, 0x9097, 0x0a8c, 0x00000000);
592 nv_mthd(dev, 0x9097, 0x0aac, 0x00000000);
593 nv_mthd(dev, 0x9097, 0x0acc, 0x00000000);
594 nv_mthd(dev, 0x9097, 0x0aec, 0x00000000);
595 nv_mthd(dev, 0x9097, 0x0b0c, 0x00000000);
596 nv_mthd(dev, 0x9097, 0x0b2c, 0x00000000);
597 nv_mthd(dev, 0x9097, 0x0b4c, 0x00000000);
598 nv_mthd(dev, 0x9097, 0x0b6c, 0x00000000);
599 nv_mthd(dev, 0x9097, 0x0b8c, 0x00000000);
600 nv_mthd(dev, 0x9097, 0x0bac, 0x00000000);
601 nv_mthd(dev, 0x9097, 0x0bcc, 0x00000000);
602 nv_mthd(dev, 0x9097, 0x0bec, 0x00000000);
603 nv_mthd(dev, 0x9097, 0x0a10, 0x00000000);
604 nv_mthd(dev, 0x9097, 0x0a30, 0x00000000);
605 nv_mthd(dev, 0x9097, 0x0a50, 0x00000000);
606 nv_mthd(dev, 0x9097, 0x0a70, 0x00000000);
607 nv_mthd(dev, 0x9097, 0x0a90, 0x00000000);
608 nv_mthd(dev, 0x9097, 0x0ab0, 0x00000000);
609 nv_mthd(dev, 0x9097, 0x0ad0, 0x00000000);
610 nv_mthd(dev, 0x9097, 0x0af0, 0x00000000);
611 nv_mthd(dev, 0x9097, 0x0b10, 0x00000000);
612 nv_mthd(dev, 0x9097, 0x0b30, 0x00000000);
613 nv_mthd(dev, 0x9097, 0x0b50, 0x00000000);
614 nv_mthd(dev, 0x9097, 0x0b70, 0x00000000);
615 nv_mthd(dev, 0x9097, 0x0b90, 0x00000000);
616 nv_mthd(dev, 0x9097, 0x0bb0, 0x00000000);
617 nv_mthd(dev, 0x9097, 0x0bd0, 0x00000000);
618 nv_mthd(dev, 0x9097, 0x0bf0, 0x00000000);
619 nv_mthd(dev, 0x9097, 0x0a14, 0x00000000);
620 nv_mthd(dev, 0x9097, 0x0a34, 0x00000000);
621 nv_mthd(dev, 0x9097, 0x0a54, 0x00000000);
622 nv_mthd(dev, 0x9097, 0x0a74, 0x00000000);
623 nv_mthd(dev, 0x9097, 0x0a94, 0x00000000);
624 nv_mthd(dev, 0x9097, 0x0ab4, 0x00000000);
625 nv_mthd(dev, 0x9097, 0x0ad4, 0x00000000);
626 nv_mthd(dev, 0x9097, 0x0af4, 0x00000000);
627 nv_mthd(dev, 0x9097, 0x0b14, 0x00000000);
628 nv_mthd(dev, 0x9097, 0x0b34, 0x00000000);
629 nv_mthd(dev, 0x9097, 0x0b54, 0x00000000);
630 nv_mthd(dev, 0x9097, 0x0b74, 0x00000000);
631 nv_mthd(dev, 0x9097, 0x0b94, 0x00000000);
632 nv_mthd(dev, 0x9097, 0x0bb4, 0x00000000);
633 nv_mthd(dev, 0x9097, 0x0bd4, 0x00000000);
634 nv_mthd(dev, 0x9097, 0x0bf4, 0x00000000);
635 nv_mthd(dev, 0x9097, 0x0c00, 0x00000000);
636 nv_mthd(dev, 0x9097, 0x0c10, 0x00000000);
637 nv_mthd(dev, 0x9097, 0x0c20, 0x00000000);
638 nv_mthd(dev, 0x9097, 0x0c30, 0x00000000);
639 nv_mthd(dev, 0x9097, 0x0c40, 0x00000000);
640 nv_mthd(dev, 0x9097, 0x0c50, 0x00000000);
641 nv_mthd(dev, 0x9097, 0x0c60, 0x00000000);
642 nv_mthd(dev, 0x9097, 0x0c70, 0x00000000);
643 nv_mthd(dev, 0x9097, 0x0c80, 0x00000000);
644 nv_mthd(dev, 0x9097, 0x0c90, 0x00000000);
645 nv_mthd(dev, 0x9097, 0x0ca0, 0x00000000);
646 nv_mthd(dev, 0x9097, 0x0cb0, 0x00000000);
647 nv_mthd(dev, 0x9097, 0x0cc0, 0x00000000);
648 nv_mthd(dev, 0x9097, 0x0cd0, 0x00000000);
649 nv_mthd(dev, 0x9097, 0x0ce0, 0x00000000);
650 nv_mthd(dev, 0x9097, 0x0cf0, 0x00000000);
651 nv_mthd(dev, 0x9097, 0x0c04, 0x00000000);
652 nv_mthd(dev, 0x9097, 0x0c14, 0x00000000);
653 nv_mthd(dev, 0x9097, 0x0c24, 0x00000000);
654 nv_mthd(dev, 0x9097, 0x0c34, 0x00000000);
655 nv_mthd(dev, 0x9097, 0x0c44, 0x00000000);
656 nv_mthd(dev, 0x9097, 0x0c54, 0x00000000);
657 nv_mthd(dev, 0x9097, 0x0c64, 0x00000000);
658 nv_mthd(dev, 0x9097, 0x0c74, 0x00000000);
659 nv_mthd(dev, 0x9097, 0x0c84, 0x00000000);
660 nv_mthd(dev, 0x9097, 0x0c94, 0x00000000);
661 nv_mthd(dev, 0x9097, 0x0ca4, 0x00000000);
662 nv_mthd(dev, 0x9097, 0x0cb4, 0x00000000);
663 nv_mthd(dev, 0x9097, 0x0cc4, 0x00000000);
664 nv_mthd(dev, 0x9097, 0x0cd4, 0x00000000);
665 nv_mthd(dev, 0x9097, 0x0ce4, 0x00000000);
666 nv_mthd(dev, 0x9097, 0x0cf4, 0x00000000);
667 nv_mthd(dev, 0x9097, 0x0c08, 0x00000000);
668 nv_mthd(dev, 0x9097, 0x0c18, 0x00000000);
669 nv_mthd(dev, 0x9097, 0x0c28, 0x00000000);
670 nv_mthd(dev, 0x9097, 0x0c38, 0x00000000);
671 nv_mthd(dev, 0x9097, 0x0c48, 0x00000000);
672 nv_mthd(dev, 0x9097, 0x0c58, 0x00000000);
673 nv_mthd(dev, 0x9097, 0x0c68, 0x00000000);
674 nv_mthd(dev, 0x9097, 0x0c78, 0x00000000);
675 nv_mthd(dev, 0x9097, 0x0c88, 0x00000000);
676 nv_mthd(dev, 0x9097, 0x0c98, 0x00000000);
677 nv_mthd(dev, 0x9097, 0x0ca8, 0x00000000);
678 nv_mthd(dev, 0x9097, 0x0cb8, 0x00000000);
679 nv_mthd(dev, 0x9097, 0x0cc8, 0x00000000);
680 nv_mthd(dev, 0x9097, 0x0cd8, 0x00000000);
681 nv_mthd(dev, 0x9097, 0x0ce8, 0x00000000);
682 nv_mthd(dev, 0x9097, 0x0cf8, 0x00000000);
683 nv_mthd(dev, 0x9097, 0x0c0c, 0x3f800000);
684 nv_mthd(dev, 0x9097, 0x0c1c, 0x3f800000);
685 nv_mthd(dev, 0x9097, 0x0c2c, 0x3f800000);
686 nv_mthd(dev, 0x9097, 0x0c3c, 0x3f800000);
687 nv_mthd(dev, 0x9097, 0x0c4c, 0x3f800000);
688 nv_mthd(dev, 0x9097, 0x0c5c, 0x3f800000);
689 nv_mthd(dev, 0x9097, 0x0c6c, 0x3f800000);
690 nv_mthd(dev, 0x9097, 0x0c7c, 0x3f800000);
691 nv_mthd(dev, 0x9097, 0x0c8c, 0x3f800000);
692 nv_mthd(dev, 0x9097, 0x0c9c, 0x3f800000);
693 nv_mthd(dev, 0x9097, 0x0cac, 0x3f800000);
694 nv_mthd(dev, 0x9097, 0x0cbc, 0x3f800000);
695 nv_mthd(dev, 0x9097, 0x0ccc, 0x3f800000);
696 nv_mthd(dev, 0x9097, 0x0cdc, 0x3f800000);
697 nv_mthd(dev, 0x9097, 0x0cec, 0x3f800000);
698 nv_mthd(dev, 0x9097, 0x0cfc, 0x3f800000);
699 nv_mthd(dev, 0x9097, 0x0d00, 0xffff0000);
700 nv_mthd(dev, 0x9097, 0x0d08, 0xffff0000);
701 nv_mthd(dev, 0x9097, 0x0d10, 0xffff0000);
702 nv_mthd(dev, 0x9097, 0x0d18, 0xffff0000);
703 nv_mthd(dev, 0x9097, 0x0d20, 0xffff0000);
704 nv_mthd(dev, 0x9097, 0x0d28, 0xffff0000);
705 nv_mthd(dev, 0x9097, 0x0d30, 0xffff0000);
706 nv_mthd(dev, 0x9097, 0x0d38, 0xffff0000);
707 nv_mthd(dev, 0x9097, 0x0d04, 0xffff0000);
708 nv_mthd(dev, 0x9097, 0x0d0c, 0xffff0000);
709 nv_mthd(dev, 0x9097, 0x0d14, 0xffff0000);
710 nv_mthd(dev, 0x9097, 0x0d1c, 0xffff0000);
711 nv_mthd(dev, 0x9097, 0x0d24, 0xffff0000);
712 nv_mthd(dev, 0x9097, 0x0d2c, 0xffff0000);
713 nv_mthd(dev, 0x9097, 0x0d34, 0xffff0000);
714 nv_mthd(dev, 0x9097, 0x0d3c, 0xffff0000);
715 nv_mthd(dev, 0x9097, 0x0e00, 0x00000000);
716 nv_mthd(dev, 0x9097, 0x0e10, 0x00000000);
717 nv_mthd(dev, 0x9097, 0x0e20, 0x00000000);
718 nv_mthd(dev, 0x9097, 0x0e30, 0x00000000);
719 nv_mthd(dev, 0x9097, 0x0e40, 0x00000000);
720 nv_mthd(dev, 0x9097, 0x0e50, 0x00000000);
721 nv_mthd(dev, 0x9097, 0x0e60, 0x00000000);
722 nv_mthd(dev, 0x9097, 0x0e70, 0x00000000);
723 nv_mthd(dev, 0x9097, 0x0e80, 0x00000000);
724 nv_mthd(dev, 0x9097, 0x0e90, 0x00000000);
725 nv_mthd(dev, 0x9097, 0x0ea0, 0x00000000);
726 nv_mthd(dev, 0x9097, 0x0eb0, 0x00000000);
727 nv_mthd(dev, 0x9097, 0x0ec0, 0x00000000);
728 nv_mthd(dev, 0x9097, 0x0ed0, 0x00000000);
729 nv_mthd(dev, 0x9097, 0x0ee0, 0x00000000);
730 nv_mthd(dev, 0x9097, 0x0ef0, 0x00000000);
731 nv_mthd(dev, 0x9097, 0x0e04, 0xffff0000);
732 nv_mthd(dev, 0x9097, 0x0e14, 0xffff0000);
733 nv_mthd(dev, 0x9097, 0x0e24, 0xffff0000);
734 nv_mthd(dev, 0x9097, 0x0e34, 0xffff0000);
735 nv_mthd(dev, 0x9097, 0x0e44, 0xffff0000);
736 nv_mthd(dev, 0x9097, 0x0e54, 0xffff0000);
737 nv_mthd(dev, 0x9097, 0x0e64, 0xffff0000);
738 nv_mthd(dev, 0x9097, 0x0e74, 0xffff0000);
739 nv_mthd(dev, 0x9097, 0x0e84, 0xffff0000);
740 nv_mthd(dev, 0x9097, 0x0e94, 0xffff0000);
741 nv_mthd(dev, 0x9097, 0x0ea4, 0xffff0000);
742 nv_mthd(dev, 0x9097, 0x0eb4, 0xffff0000);
743 nv_mthd(dev, 0x9097, 0x0ec4, 0xffff0000);
744 nv_mthd(dev, 0x9097, 0x0ed4, 0xffff0000);
745 nv_mthd(dev, 0x9097, 0x0ee4, 0xffff0000);
746 nv_mthd(dev, 0x9097, 0x0ef4, 0xffff0000);
747 nv_mthd(dev, 0x9097, 0x0e08, 0xffff0000);
748 nv_mthd(dev, 0x9097, 0x0e18, 0xffff0000);
749 nv_mthd(dev, 0x9097, 0x0e28, 0xffff0000);
750 nv_mthd(dev, 0x9097, 0x0e38, 0xffff0000);
751 nv_mthd(dev, 0x9097, 0x0e48, 0xffff0000);
752 nv_mthd(dev, 0x9097, 0x0e58, 0xffff0000);
753 nv_mthd(dev, 0x9097, 0x0e68, 0xffff0000);
754 nv_mthd(dev, 0x9097, 0x0e78, 0xffff0000);
755 nv_mthd(dev, 0x9097, 0x0e88, 0xffff0000);
756 nv_mthd(dev, 0x9097, 0x0e98, 0xffff0000);
757 nv_mthd(dev, 0x9097, 0x0ea8, 0xffff0000);
758 nv_mthd(dev, 0x9097, 0x0eb8, 0xffff0000);
759 nv_mthd(dev, 0x9097, 0x0ec8, 0xffff0000);
760 nv_mthd(dev, 0x9097, 0x0ed8, 0xffff0000);
761 nv_mthd(dev, 0x9097, 0x0ee8, 0xffff0000);
762 nv_mthd(dev, 0x9097, 0x0ef8, 0xffff0000);
763 nv_mthd(dev, 0x9097, 0x0d40, 0x00000000);
764 nv_mthd(dev, 0x9097, 0x0d48, 0x00000000);
765 nv_mthd(dev, 0x9097, 0x0d50, 0x00000000);
766 nv_mthd(dev, 0x9097, 0x0d58, 0x00000000);
767 nv_mthd(dev, 0x9097, 0x0d44, 0x00000000);
768 nv_mthd(dev, 0x9097, 0x0d4c, 0x00000000);
769 nv_mthd(dev, 0x9097, 0x0d54, 0x00000000);
770 nv_mthd(dev, 0x9097, 0x0d5c, 0x00000000);
771 nv_mthd(dev, 0x9097, 0x1e00, 0x00000001);
772 nv_mthd(dev, 0x9097, 0x1e20, 0x00000001);
773 nv_mthd(dev, 0x9097, 0x1e40, 0x00000001);
774 nv_mthd(dev, 0x9097, 0x1e60, 0x00000001);
775 nv_mthd(dev, 0x9097, 0x1e80, 0x00000001);
776 nv_mthd(dev, 0x9097, 0x1ea0, 0x00000001);
777 nv_mthd(dev, 0x9097, 0x1ec0, 0x00000001);
778 nv_mthd(dev, 0x9097, 0x1ee0, 0x00000001);
779 nv_mthd(dev, 0x9097, 0x1e04, 0x00000001);
780 nv_mthd(dev, 0x9097, 0x1e24, 0x00000001);
781 nv_mthd(dev, 0x9097, 0x1e44, 0x00000001);
782 nv_mthd(dev, 0x9097, 0x1e64, 0x00000001);
783 nv_mthd(dev, 0x9097, 0x1e84, 0x00000001);
784 nv_mthd(dev, 0x9097, 0x1ea4, 0x00000001);
785 nv_mthd(dev, 0x9097, 0x1ec4, 0x00000001);
786 nv_mthd(dev, 0x9097, 0x1ee4, 0x00000001);
787 nv_mthd(dev, 0x9097, 0x1e08, 0x00000002);
788 nv_mthd(dev, 0x9097, 0x1e28, 0x00000002);
789 nv_mthd(dev, 0x9097, 0x1e48, 0x00000002);
790 nv_mthd(dev, 0x9097, 0x1e68, 0x00000002);
791 nv_mthd(dev, 0x9097, 0x1e88, 0x00000002);
792 nv_mthd(dev, 0x9097, 0x1ea8, 0x00000002);
793 nv_mthd(dev, 0x9097, 0x1ec8, 0x00000002);
794 nv_mthd(dev, 0x9097, 0x1ee8, 0x00000002);
795 nv_mthd(dev, 0x9097, 0x1e0c, 0x00000001);
796 nv_mthd(dev, 0x9097, 0x1e2c, 0x00000001);
797 nv_mthd(dev, 0x9097, 0x1e4c, 0x00000001);
798 nv_mthd(dev, 0x9097, 0x1e6c, 0x00000001);
799 nv_mthd(dev, 0x9097, 0x1e8c, 0x00000001);
800 nv_mthd(dev, 0x9097, 0x1eac, 0x00000001);
801 nv_mthd(dev, 0x9097, 0x1ecc, 0x00000001);
802 nv_mthd(dev, 0x9097, 0x1eec, 0x00000001);
803 nv_mthd(dev, 0x9097, 0x1e10, 0x00000001);
804 nv_mthd(dev, 0x9097, 0x1e30, 0x00000001);
805 nv_mthd(dev, 0x9097, 0x1e50, 0x00000001);
806 nv_mthd(dev, 0x9097, 0x1e70, 0x00000001);
807 nv_mthd(dev, 0x9097, 0x1e90, 0x00000001);
808 nv_mthd(dev, 0x9097, 0x1eb0, 0x00000001);
809 nv_mthd(dev, 0x9097, 0x1ed0, 0x00000001);
810 nv_mthd(dev, 0x9097, 0x1ef0, 0x00000001);
811 nv_mthd(dev, 0x9097, 0x1e14, 0x00000002);
812 nv_mthd(dev, 0x9097, 0x1e34, 0x00000002);
813 nv_mthd(dev, 0x9097, 0x1e54, 0x00000002);
814 nv_mthd(dev, 0x9097, 0x1e74, 0x00000002);
815 nv_mthd(dev, 0x9097, 0x1e94, 0x00000002);
816 nv_mthd(dev, 0x9097, 0x1eb4, 0x00000002);
817 nv_mthd(dev, 0x9097, 0x1ed4, 0x00000002);
818 nv_mthd(dev, 0x9097, 0x1ef4, 0x00000002);
819 nv_mthd(dev, 0x9097, 0x1e18, 0x00000001);
820 nv_mthd(dev, 0x9097, 0x1e38, 0x00000001);
821 nv_mthd(dev, 0x9097, 0x1e58, 0x00000001);
822 nv_mthd(dev, 0x9097, 0x1e78, 0x00000001);
823 nv_mthd(dev, 0x9097, 0x1e98, 0x00000001);
824 nv_mthd(dev, 0x9097, 0x1eb8, 0x00000001);
825 nv_mthd(dev, 0x9097, 0x1ed8, 0x00000001);
826 nv_mthd(dev, 0x9097, 0x1ef8, 0x00000001);
827 nv_mthd(dev, 0x9097, 0x3400, 0x00000000);
828 nv_mthd(dev, 0x9097, 0x3404, 0x00000000);
829 nv_mthd(dev, 0x9097, 0x3408, 0x00000000);
830 nv_mthd(dev, 0x9097, 0x340c, 0x00000000);
831 nv_mthd(dev, 0x9097, 0x3410, 0x00000000);
832 nv_mthd(dev, 0x9097, 0x3414, 0x00000000);
833 nv_mthd(dev, 0x9097, 0x3418, 0x00000000);
834 nv_mthd(dev, 0x9097, 0x341c, 0x00000000);
835 nv_mthd(dev, 0x9097, 0x3420, 0x00000000);
836 nv_mthd(dev, 0x9097, 0x3424, 0x00000000);
837 nv_mthd(dev, 0x9097, 0x3428, 0x00000000);
838 nv_mthd(dev, 0x9097, 0x342c, 0x00000000);
839 nv_mthd(dev, 0x9097, 0x3430, 0x00000000);
840 nv_mthd(dev, 0x9097, 0x3434, 0x00000000);
841 nv_mthd(dev, 0x9097, 0x3438, 0x00000000);
842 nv_mthd(dev, 0x9097, 0x343c, 0x00000000);
843 nv_mthd(dev, 0x9097, 0x3440, 0x00000000);
844 nv_mthd(dev, 0x9097, 0x3444, 0x00000000);
845 nv_mthd(dev, 0x9097, 0x3448, 0x00000000);
846 nv_mthd(dev, 0x9097, 0x344c, 0x00000000);
847 nv_mthd(dev, 0x9097, 0x3450, 0x00000000);
848 nv_mthd(dev, 0x9097, 0x3454, 0x00000000);
849 nv_mthd(dev, 0x9097, 0x3458, 0x00000000);
850 nv_mthd(dev, 0x9097, 0x345c, 0x00000000);
851 nv_mthd(dev, 0x9097, 0x3460, 0x00000000);
852 nv_mthd(dev, 0x9097, 0x3464, 0x00000000);
853 nv_mthd(dev, 0x9097, 0x3468, 0x00000000);
854 nv_mthd(dev, 0x9097, 0x346c, 0x00000000);
855 nv_mthd(dev, 0x9097, 0x3470, 0x00000000);
856 nv_mthd(dev, 0x9097, 0x3474, 0x00000000);
857 nv_mthd(dev, 0x9097, 0x3478, 0x00000000);
858 nv_mthd(dev, 0x9097, 0x347c, 0x00000000);
859 nv_mthd(dev, 0x9097, 0x3480, 0x00000000);
860 nv_mthd(dev, 0x9097, 0x3484, 0x00000000);
861 nv_mthd(dev, 0x9097, 0x3488, 0x00000000);
862 nv_mthd(dev, 0x9097, 0x348c, 0x00000000);
863 nv_mthd(dev, 0x9097, 0x3490, 0x00000000);
864 nv_mthd(dev, 0x9097, 0x3494, 0x00000000);
865 nv_mthd(dev, 0x9097, 0x3498, 0x00000000);
866 nv_mthd(dev, 0x9097, 0x349c, 0x00000000);
867 nv_mthd(dev, 0x9097, 0x34a0, 0x00000000);
868 nv_mthd(dev, 0x9097, 0x34a4, 0x00000000);
869 nv_mthd(dev, 0x9097, 0x34a8, 0x00000000);
870 nv_mthd(dev, 0x9097, 0x34ac, 0x00000000);
871 nv_mthd(dev, 0x9097, 0x34b0, 0x00000000);
872 nv_mthd(dev, 0x9097, 0x34b4, 0x00000000);
873 nv_mthd(dev, 0x9097, 0x34b8, 0x00000000);
874 nv_mthd(dev, 0x9097, 0x34bc, 0x00000000);
875 nv_mthd(dev, 0x9097, 0x34c0, 0x00000000);
876 nv_mthd(dev, 0x9097, 0x34c4, 0x00000000);
877 nv_mthd(dev, 0x9097, 0x34c8, 0x00000000);
878 nv_mthd(dev, 0x9097, 0x34cc, 0x00000000);
879 nv_mthd(dev, 0x9097, 0x34d0, 0x00000000);
880 nv_mthd(dev, 0x9097, 0x34d4, 0x00000000);
881 nv_mthd(dev, 0x9097, 0x34d8, 0x00000000);
882 nv_mthd(dev, 0x9097, 0x34dc, 0x00000000);
883 nv_mthd(dev, 0x9097, 0x34e0, 0x00000000);
884 nv_mthd(dev, 0x9097, 0x34e4, 0x00000000);
885 nv_mthd(dev, 0x9097, 0x34e8, 0x00000000);
886 nv_mthd(dev, 0x9097, 0x34ec, 0x00000000);
887 nv_mthd(dev, 0x9097, 0x34f0, 0x00000000);
888 nv_mthd(dev, 0x9097, 0x34f4, 0x00000000);
889 nv_mthd(dev, 0x9097, 0x34f8, 0x00000000);
890 nv_mthd(dev, 0x9097, 0x34fc, 0x00000000);
891 nv_mthd(dev, 0x9097, 0x3500, 0x00000000);
892 nv_mthd(dev, 0x9097, 0x3504, 0x00000000);
893 nv_mthd(dev, 0x9097, 0x3508, 0x00000000);
894 nv_mthd(dev, 0x9097, 0x350c, 0x00000000);
895 nv_mthd(dev, 0x9097, 0x3510, 0x00000000);
896 nv_mthd(dev, 0x9097, 0x3514, 0x00000000);
897 nv_mthd(dev, 0x9097, 0x3518, 0x00000000);
898 nv_mthd(dev, 0x9097, 0x351c, 0x00000000);
899 nv_mthd(dev, 0x9097, 0x3520, 0x00000000);
900 nv_mthd(dev, 0x9097, 0x3524, 0x00000000);
901 nv_mthd(dev, 0x9097, 0x3528, 0x00000000);
902 nv_mthd(dev, 0x9097, 0x352c, 0x00000000);
903 nv_mthd(dev, 0x9097, 0x3530, 0x00000000);
904 nv_mthd(dev, 0x9097, 0x3534, 0x00000000);
905 nv_mthd(dev, 0x9097, 0x3538, 0x00000000);
906 nv_mthd(dev, 0x9097, 0x353c, 0x00000000);
907 nv_mthd(dev, 0x9097, 0x3540, 0x00000000);
908 nv_mthd(dev, 0x9097, 0x3544, 0x00000000);
909 nv_mthd(dev, 0x9097, 0x3548, 0x00000000);
910 nv_mthd(dev, 0x9097, 0x354c, 0x00000000);
911 nv_mthd(dev, 0x9097, 0x3550, 0x00000000);
912 nv_mthd(dev, 0x9097, 0x3554, 0x00000000);
913 nv_mthd(dev, 0x9097, 0x3558, 0x00000000);
914 nv_mthd(dev, 0x9097, 0x355c, 0x00000000);
915 nv_mthd(dev, 0x9097, 0x3560, 0x00000000);
916 nv_mthd(dev, 0x9097, 0x3564, 0x00000000);
917 nv_mthd(dev, 0x9097, 0x3568, 0x00000000);
918 nv_mthd(dev, 0x9097, 0x356c, 0x00000000);
919 nv_mthd(dev, 0x9097, 0x3570, 0x00000000);
920 nv_mthd(dev, 0x9097, 0x3574, 0x00000000);
921 nv_mthd(dev, 0x9097, 0x3578, 0x00000000);
922 nv_mthd(dev, 0x9097, 0x357c, 0x00000000);
923 nv_mthd(dev, 0x9097, 0x3580, 0x00000000);
924 nv_mthd(dev, 0x9097, 0x3584, 0x00000000);
925 nv_mthd(dev, 0x9097, 0x3588, 0x00000000);
926 nv_mthd(dev, 0x9097, 0x358c, 0x00000000);
927 nv_mthd(dev, 0x9097, 0x3590, 0x00000000);
928 nv_mthd(dev, 0x9097, 0x3594, 0x00000000);
929 nv_mthd(dev, 0x9097, 0x3598, 0x00000000);
930 nv_mthd(dev, 0x9097, 0x359c, 0x00000000);
931 nv_mthd(dev, 0x9097, 0x35a0, 0x00000000);
932 nv_mthd(dev, 0x9097, 0x35a4, 0x00000000);
933 nv_mthd(dev, 0x9097, 0x35a8, 0x00000000);
934 nv_mthd(dev, 0x9097, 0x35ac, 0x00000000);
935 nv_mthd(dev, 0x9097, 0x35b0, 0x00000000);
936 nv_mthd(dev, 0x9097, 0x35b4, 0x00000000);
937 nv_mthd(dev, 0x9097, 0x35b8, 0x00000000);
938 nv_mthd(dev, 0x9097, 0x35bc, 0x00000000);
939 nv_mthd(dev, 0x9097, 0x35c0, 0x00000000);
940 nv_mthd(dev, 0x9097, 0x35c4, 0x00000000);
941 nv_mthd(dev, 0x9097, 0x35c8, 0x00000000);
942 nv_mthd(dev, 0x9097, 0x35cc, 0x00000000);
943 nv_mthd(dev, 0x9097, 0x35d0, 0x00000000);
944 nv_mthd(dev, 0x9097, 0x35d4, 0x00000000);
945 nv_mthd(dev, 0x9097, 0x35d8, 0x00000000);
946 nv_mthd(dev, 0x9097, 0x35dc, 0x00000000);
947 nv_mthd(dev, 0x9097, 0x35e0, 0x00000000);
948 nv_mthd(dev, 0x9097, 0x35e4, 0x00000000);
949 nv_mthd(dev, 0x9097, 0x35e8, 0x00000000);
950 nv_mthd(dev, 0x9097, 0x35ec, 0x00000000);
951 nv_mthd(dev, 0x9097, 0x35f0, 0x00000000);
952 nv_mthd(dev, 0x9097, 0x35f4, 0x00000000);
953 nv_mthd(dev, 0x9097, 0x35f8, 0x00000000);
954 nv_mthd(dev, 0x9097, 0x35fc, 0x00000000);
955 nv_mthd(dev, 0x9097, 0x030c, 0x00000001);
956 nv_mthd(dev, 0x9097, 0x1944, 0x00000000);
957 nv_mthd(dev, 0x9097, 0x1514, 0x00000000);
958 nv_mthd(dev, 0x9097, 0x0d68, 0x0000ffff);
959 nv_mthd(dev, 0x9097, 0x121c, 0x0fac6881);
960 nv_mthd(dev, 0x9097, 0x0fac, 0x00000001);
961 nv_mthd(dev, 0x9097, 0x1538, 0x00000001);
962 nv_mthd(dev, 0x9097, 0x0fe0, 0x00000000);
963 nv_mthd(dev, 0x9097, 0x0fe4, 0x00000000);
964 nv_mthd(dev, 0x9097, 0x0fe8, 0x00000014);
965 nv_mthd(dev, 0x9097, 0x0fec, 0x00000040);
966 nv_mthd(dev, 0x9097, 0x0ff0, 0x00000000);
967 nv_mthd(dev, 0x9097, 0x179c, 0x00000000);
968 nv_mthd(dev, 0x9097, 0x1228, 0x00000400);
969 nv_mthd(dev, 0x9097, 0x122c, 0x00000300);
970 nv_mthd(dev, 0x9097, 0x1230, 0x00010001);
971 nv_mthd(dev, 0x9097, 0x07f8, 0x00000000);
972 nv_mthd(dev, 0x9097, 0x15b4, 0x00000001);
973 nv_mthd(dev, 0x9097, 0x15cc, 0x00000000);
974 nv_mthd(dev, 0x9097, 0x1534, 0x00000000);
975 nv_mthd(dev, 0x9097, 0x0fb0, 0x00000000);
976 nv_mthd(dev, 0x9097, 0x15d0, 0x00000000);
977 nv_mthd(dev, 0x9097, 0x153c, 0x00000000);
978 nv_mthd(dev, 0x9097, 0x16b4, 0x00000003);
979 nv_mthd(dev, 0x9097, 0x0fbc, 0x0000ffff);
980 nv_mthd(dev, 0x9097, 0x0fc0, 0x0000ffff);
981 nv_mthd(dev, 0x9097, 0x0fc4, 0x0000ffff);
982 nv_mthd(dev, 0x9097, 0x0fc8, 0x0000ffff);
983 nv_mthd(dev, 0x9097, 0x0df8, 0x00000000);
984 nv_mthd(dev, 0x9097, 0x0dfc, 0x00000000);
985 nv_mthd(dev, 0x9097, 0x1948, 0x00000000);
986 nv_mthd(dev, 0x9097, 0x1970, 0x00000001);
987 nv_mthd(dev, 0x9097, 0x161c, 0x000009f0);
988 nv_mthd(dev, 0x9097, 0x0dcc, 0x00000010);
989 nv_mthd(dev, 0x9097, 0x163c, 0x00000000);
990 nv_mthd(dev, 0x9097, 0x15e4, 0x00000000);
991 nv_mthd(dev, 0x9097, 0x1160, 0x25e00040);
992 nv_mthd(dev, 0x9097, 0x1164, 0x25e00040);
993 nv_mthd(dev, 0x9097, 0x1168, 0x25e00040);
994 nv_mthd(dev, 0x9097, 0x116c, 0x25e00040);
995 nv_mthd(dev, 0x9097, 0x1170, 0x25e00040);
996 nv_mthd(dev, 0x9097, 0x1174, 0x25e00040);
997 nv_mthd(dev, 0x9097, 0x1178, 0x25e00040);
998 nv_mthd(dev, 0x9097, 0x117c, 0x25e00040);
999 nv_mthd(dev, 0x9097, 0x1180, 0x25e00040);
1000 nv_mthd(dev, 0x9097, 0x1184, 0x25e00040);
1001 nv_mthd(dev, 0x9097, 0x1188, 0x25e00040);
1002 nv_mthd(dev, 0x9097, 0x118c, 0x25e00040);
1003 nv_mthd(dev, 0x9097, 0x1190, 0x25e00040);
1004 nv_mthd(dev, 0x9097, 0x1194, 0x25e00040);
1005 nv_mthd(dev, 0x9097, 0x1198, 0x25e00040);
1006 nv_mthd(dev, 0x9097, 0x119c, 0x25e00040);
1007 nv_mthd(dev, 0x9097, 0x11a0, 0x25e00040);
1008 nv_mthd(dev, 0x9097, 0x11a4, 0x25e00040);
1009 nv_mthd(dev, 0x9097, 0x11a8, 0x25e00040);
1010 nv_mthd(dev, 0x9097, 0x11ac, 0x25e00040);
1011 nv_mthd(dev, 0x9097, 0x11b0, 0x25e00040);
1012 nv_mthd(dev, 0x9097, 0x11b4, 0x25e00040);
1013 nv_mthd(dev, 0x9097, 0x11b8, 0x25e00040);
1014 nv_mthd(dev, 0x9097, 0x11bc, 0x25e00040);
1015 nv_mthd(dev, 0x9097, 0x11c0, 0x25e00040);
1016 nv_mthd(dev, 0x9097, 0x11c4, 0x25e00040);
1017 nv_mthd(dev, 0x9097, 0x11c8, 0x25e00040);
1018 nv_mthd(dev, 0x9097, 0x11cc, 0x25e00040);
1019 nv_mthd(dev, 0x9097, 0x11d0, 0x25e00040);
1020 nv_mthd(dev, 0x9097, 0x11d4, 0x25e00040);
1021 nv_mthd(dev, 0x9097, 0x11d8, 0x25e00040);
1022 nv_mthd(dev, 0x9097, 0x11dc, 0x25e00040);
1023 nv_mthd(dev, 0x9097, 0x1880, 0x00000000);
1024 nv_mthd(dev, 0x9097, 0x1884, 0x00000000);
1025 nv_mthd(dev, 0x9097, 0x1888, 0x00000000);
1026 nv_mthd(dev, 0x9097, 0x188c, 0x00000000);
1027 nv_mthd(dev, 0x9097, 0x1890, 0x00000000);
1028 nv_mthd(dev, 0x9097, 0x1894, 0x00000000);
1029 nv_mthd(dev, 0x9097, 0x1898, 0x00000000);
1030 nv_mthd(dev, 0x9097, 0x189c, 0x00000000);
1031 nv_mthd(dev, 0x9097, 0x18a0, 0x00000000);
1032 nv_mthd(dev, 0x9097, 0x18a4, 0x00000000);
1033 nv_mthd(dev, 0x9097, 0x18a8, 0x00000000);
1034 nv_mthd(dev, 0x9097, 0x18ac, 0x00000000);
1035 nv_mthd(dev, 0x9097, 0x18b0, 0x00000000);
1036 nv_mthd(dev, 0x9097, 0x18b4, 0x00000000);
1037 nv_mthd(dev, 0x9097, 0x18b8, 0x00000000);
1038 nv_mthd(dev, 0x9097, 0x18bc, 0x00000000);
1039 nv_mthd(dev, 0x9097, 0x18c0, 0x00000000);
1040 nv_mthd(dev, 0x9097, 0x18c4, 0x00000000);
1041 nv_mthd(dev, 0x9097, 0x18c8, 0x00000000);
1042 nv_mthd(dev, 0x9097, 0x18cc, 0x00000000);
1043 nv_mthd(dev, 0x9097, 0x18d0, 0x00000000);
1044 nv_mthd(dev, 0x9097, 0x18d4, 0x00000000);
1045 nv_mthd(dev, 0x9097, 0x18d8, 0x00000000);
1046 nv_mthd(dev, 0x9097, 0x18dc, 0x00000000);
1047 nv_mthd(dev, 0x9097, 0x18e0, 0x00000000);
1048 nv_mthd(dev, 0x9097, 0x18e4, 0x00000000);
1049 nv_mthd(dev, 0x9097, 0x18e8, 0x00000000);
1050 nv_mthd(dev, 0x9097, 0x18ec, 0x00000000);
1051 nv_mthd(dev, 0x9097, 0x18f0, 0x00000000);
1052 nv_mthd(dev, 0x9097, 0x18f4, 0x00000000);
1053 nv_mthd(dev, 0x9097, 0x18f8, 0x00000000);
1054 nv_mthd(dev, 0x9097, 0x18fc, 0x00000000);
1055 nv_mthd(dev, 0x9097, 0x0f84, 0x00000000);
1056 nv_mthd(dev, 0x9097, 0x0f88, 0x00000000);
1057 nv_mthd(dev, 0x9097, 0x17c8, 0x00000000);
1058 nv_mthd(dev, 0x9097, 0x17cc, 0x00000000);
1059 nv_mthd(dev, 0x9097, 0x17d0, 0x000000ff);
1060 nv_mthd(dev, 0x9097, 0x17d4, 0xffffffff);
1061 nv_mthd(dev, 0x9097, 0x17d8, 0x00000002);
1062 nv_mthd(dev, 0x9097, 0x17dc, 0x00000000);
1063 nv_mthd(dev, 0x9097, 0x15f4, 0x00000000);
1064 nv_mthd(dev, 0x9097, 0x15f8, 0x00000000);
1065 nv_mthd(dev, 0x9097, 0x1434, 0x00000000);
1066 nv_mthd(dev, 0x9097, 0x1438, 0x00000000);
1067 nv_mthd(dev, 0x9097, 0x0d74, 0x00000000);
1068 nv_mthd(dev, 0x9097, 0x0dec, 0x00000001);
1069 nv_mthd(dev, 0x9097, 0x13a4, 0x00000000);
1070 nv_mthd(dev, 0x9097, 0x1318, 0x00000001);
1071 nv_mthd(dev, 0x9097, 0x1644, 0x00000000);
1072 nv_mthd(dev, 0x9097, 0x0748, 0x00000000);
1073 nv_mthd(dev, 0x9097, 0x0de8, 0x00000000);
1074 nv_mthd(dev, 0x9097, 0x1648, 0x00000000);
1075 nv_mthd(dev, 0x9097, 0x12a4, 0x00000000);
1076 nv_mthd(dev, 0x9097, 0x1120, 0x00000000);
1077 nv_mthd(dev, 0x9097, 0x1124, 0x00000000);
1078 nv_mthd(dev, 0x9097, 0x1128, 0x00000000);
1079 nv_mthd(dev, 0x9097, 0x112c, 0x00000000);
1080 nv_mthd(dev, 0x9097, 0x1118, 0x00000000);
1081 nv_mthd(dev, 0x9097, 0x164c, 0x00000000);
1082 nv_mthd(dev, 0x9097, 0x1658, 0x00000000);
1083 nv_mthd(dev, 0x9097, 0x1910, 0x00000290);
1084 nv_mthd(dev, 0x9097, 0x1518, 0x00000000);
1085 nv_mthd(dev, 0x9097, 0x165c, 0x00000001);
1086 nv_mthd(dev, 0x9097, 0x1520, 0x00000000);
1087 nv_mthd(dev, 0x9097, 0x1604, 0x00000000);
1088 nv_mthd(dev, 0x9097, 0x1570, 0x00000000);
1089 nv_mthd(dev, 0x9097, 0x13b0, 0x3f800000);
1090 nv_mthd(dev, 0x9097, 0x13b4, 0x3f800000);
1091 nv_mthd(dev, 0x9097, 0x020c, 0x00000000);
1092 nv_mthd(dev, 0x9097, 0x1670, 0x30201000);
1093 nv_mthd(dev, 0x9097, 0x1674, 0x70605040);
1094 nv_mthd(dev, 0x9097, 0x1678, 0xb8a89888);
1095 nv_mthd(dev, 0x9097, 0x167c, 0xf8e8d8c8);
1096 nv_mthd(dev, 0x9097, 0x166c, 0x00000000);
1097 nv_mthd(dev, 0x9097, 0x1680, 0x00ffff00);
1098 nv_mthd(dev, 0x9097, 0x12d0, 0x00000003);
1099 nv_mthd(dev, 0x9097, 0x12d4, 0x00000002);
1100 nv_mthd(dev, 0x9097, 0x1684, 0x00000000);
1101 nv_mthd(dev, 0x9097, 0x1688, 0x00000000);
1102 nv_mthd(dev, 0x9097, 0x0dac, 0x00001b02);
1103 nv_mthd(dev, 0x9097, 0x0db0, 0x00001b02);
1104 nv_mthd(dev, 0x9097, 0x0db4, 0x00000000);
1105 nv_mthd(dev, 0x9097, 0x168c, 0x00000000);
1106 nv_mthd(dev, 0x9097, 0x15bc, 0x00000000);
1107 nv_mthd(dev, 0x9097, 0x156c, 0x00000000);
1108 nv_mthd(dev, 0x9097, 0x187c, 0x00000000);
1109 nv_mthd(dev, 0x9097, 0x1110, 0x00000001);
1110 nv_mthd(dev, 0x9097, 0x0dc0, 0x00000000);
1111 nv_mthd(dev, 0x9097, 0x0dc4, 0x00000000);
1112 nv_mthd(dev, 0x9097, 0x0dc8, 0x00000000);
1113 nv_mthd(dev, 0x9097, 0x1234, 0x00000000);
1114 nv_mthd(dev, 0x9097, 0x1690, 0x00000000);
1115 nv_mthd(dev, 0x9097, 0x12ac, 0x00000001);
1116 nv_mthd(dev, 0x9097, 0x02c4, 0x00000000);
1117 nv_mthd(dev, 0x9097, 0x0790, 0x00000000);
1118 nv_mthd(dev, 0x9097, 0x0794, 0x00000000);
1119 nv_mthd(dev, 0x9097, 0x0798, 0x00000000);
1120 nv_mthd(dev, 0x9097, 0x079c, 0x00000000);
1121 nv_mthd(dev, 0x9097, 0x07a0, 0x00000000);
1122 nv_mthd(dev, 0x9097, 0x077c, 0x00000000);
1123 nv_mthd(dev, 0x9097, 0x1000, 0x00000010);
1124 nv_mthd(dev, 0x9097, 0x10fc, 0x00000000);
1125 nv_mthd(dev, 0x9097, 0x1290, 0x00000000);
1126 nv_mthd(dev, 0x9097, 0x0218, 0x00000010);
1127 nv_mthd(dev, 0x9097, 0x12d8, 0x00000000);
1128 nv_mthd(dev, 0x9097, 0x12dc, 0x00000010);
1129 nv_mthd(dev, 0x9097, 0x0d94, 0x00000001);
1130 nv_mthd(dev, 0x9097, 0x155c, 0x00000000);
1131 nv_mthd(dev, 0x9097, 0x1560, 0x00000000);
1132 nv_mthd(dev, 0x9097, 0x1564, 0x00001fff);
1133 nv_mthd(dev, 0x9097, 0x1574, 0x00000000);
1134 nv_mthd(dev, 0x9097, 0x1578, 0x00000000);
1135 nv_mthd(dev, 0x9097, 0x157c, 0x003fffff);
1136 nv_mthd(dev, 0x9097, 0x1354, 0x00000000);
1137 nv_mthd(dev, 0x9097, 0x1664, 0x00000000);
1138 nv_mthd(dev, 0x9097, 0x1610, 0x00000012);
1139 nv_mthd(dev, 0x9097, 0x1608, 0x00000000);
1140 nv_mthd(dev, 0x9097, 0x160c, 0x00000000);
1141 nv_mthd(dev, 0x9097, 0x162c, 0x00000003);
1142 nv_mthd(dev, 0x9097, 0x0210, 0x00000000);
1143 nv_mthd(dev, 0x9097, 0x0320, 0x00000000);
1144 nv_mthd(dev, 0x9097, 0x0324, 0x3f800000);
1145 nv_mthd(dev, 0x9097, 0x0328, 0x3f800000);
1146 nv_mthd(dev, 0x9097, 0x032c, 0x3f800000);
1147 nv_mthd(dev, 0x9097, 0x0330, 0x3f800000);
1148 nv_mthd(dev, 0x9097, 0x0334, 0x3f800000);
1149 nv_mthd(dev, 0x9097, 0x0338, 0x3f800000);
1150 nv_mthd(dev, 0x9097, 0x0750, 0x00000000);
1151 nv_mthd(dev, 0x9097, 0x0760, 0x39291909);
1152 nv_mthd(dev, 0x9097, 0x0764, 0x79695949);
1153 nv_mthd(dev, 0x9097, 0x0768, 0xb9a99989);
1154 nv_mthd(dev, 0x9097, 0x076c, 0xf9e9d9c9);
1155 nv_mthd(dev, 0x9097, 0x0770, 0x30201000);
1156 nv_mthd(dev, 0x9097, 0x0774, 0x70605040);
1157 nv_mthd(dev, 0x9097, 0x0778, 0x00009080);
1158 nv_mthd(dev, 0x9097, 0x0780, 0x39291909);
1159 nv_mthd(dev, 0x9097, 0x0784, 0x79695949);
1160 nv_mthd(dev, 0x9097, 0x0788, 0xb9a99989);
1161 nv_mthd(dev, 0x9097, 0x078c, 0xf9e9d9c9);
1162 nv_mthd(dev, 0x9097, 0x07d0, 0x30201000);
1163 nv_mthd(dev, 0x9097, 0x07d4, 0x70605040);
1164 nv_mthd(dev, 0x9097, 0x07d8, 0x00009080);
1165 nv_mthd(dev, 0x9097, 0x037c, 0x00000001);
1166 nv_mthd(dev, 0x9097, 0x0740, 0x00000000);
1167 nv_mthd(dev, 0x9097, 0x0744, 0x00000000);
1168 nv_mthd(dev, 0x9097, 0x2600, 0x00000000);
1169 nv_mthd(dev, 0x9097, 0x1918, 0x00000000);
1170 nv_mthd(dev, 0x9097, 0x191c, 0x00000900);
1171 nv_mthd(dev, 0x9097, 0x1920, 0x00000405);
1172 nv_mthd(dev, 0x9097, 0x1308, 0x00000001);
1173 nv_mthd(dev, 0x9097, 0x1924, 0x00000000);
1174 nv_mthd(dev, 0x9097, 0x13ac, 0x00000000);
1175 nv_mthd(dev, 0x9097, 0x192c, 0x00000001);
1176 nv_mthd(dev, 0x9097, 0x193c, 0x00002c1c);
1177 nv_mthd(dev, 0x9097, 0x0d7c, 0x00000000);
1178 nv_mthd(dev, 0x9097, 0x0f8c, 0x00000000);
1179 nv_mthd(dev, 0x9097, 0x02c0, 0x00000001);
1180 nv_mthd(dev, 0x9097, 0x1510, 0x00000000);
1181 nv_mthd(dev, 0x9097, 0x1940, 0x00000000);
1182 nv_mthd(dev, 0x9097, 0x0ff4, 0x00000000);
1183 nv_mthd(dev, 0x9097, 0x0ff8, 0x00000000);
1184 nv_mthd(dev, 0x9097, 0x194c, 0x00000000);
1185 nv_mthd(dev, 0x9097, 0x1950, 0x00000000);
1186 nv_mthd(dev, 0x9097, 0x1968, 0x00000000);
1187 nv_mthd(dev, 0x9097, 0x1590, 0x0000003f);
1188 nv_mthd(dev, 0x9097, 0x07e8, 0x00000000);
1189 nv_mthd(dev, 0x9097, 0x07ec, 0x00000000);
1190 nv_mthd(dev, 0x9097, 0x07f0, 0x00000000);
1191 nv_mthd(dev, 0x9097, 0x07f4, 0x00000000);
1192 nv_mthd(dev, 0x9097, 0x196c, 0x00000011);
1193 nv_mthd(dev, 0x9097, 0x197c, 0x00000000);
1194 nv_mthd(dev, 0x9097, 0x0fcc, 0x00000000);
1195 nv_mthd(dev, 0x9097, 0x0fd0, 0x00000000);
1196 nv_mthd(dev, 0x9097, 0x02d8, 0x00000040);
1197 nv_mthd(dev, 0x9097, 0x1980, 0x00000080);
1198 nv_mthd(dev, 0x9097, 0x1504, 0x00000080);
1199 nv_mthd(dev, 0x9097, 0x1984, 0x00000000);
1200 nv_mthd(dev, 0x9097, 0x0300, 0x00000001);
1201 nv_mthd(dev, 0x9097, 0x13a8, 0x00000000);
1202 nv_mthd(dev, 0x9097, 0x12ec, 0x00000000);
1203 nv_mthd(dev, 0x9097, 0x1310, 0x00000000);
1204 nv_mthd(dev, 0x9097, 0x1314, 0x00000001);
1205 nv_mthd(dev, 0x9097, 0x1380, 0x00000000);
1206 nv_mthd(dev, 0x9097, 0x1384, 0x00000001);
1207 nv_mthd(dev, 0x9097, 0x1388, 0x00000001);
1208 nv_mthd(dev, 0x9097, 0x138c, 0x00000001);
1209 nv_mthd(dev, 0x9097, 0x1390, 0x00000001);
1210 nv_mthd(dev, 0x9097, 0x1394, 0x00000000);
1211 nv_mthd(dev, 0x9097, 0x139c, 0x00000000);
1212 nv_mthd(dev, 0x9097, 0x1398, 0x00000000);
1213 nv_mthd(dev, 0x9097, 0x1594, 0x00000000);
1214 nv_mthd(dev, 0x9097, 0x1598, 0x00000001);
1215 nv_mthd(dev, 0x9097, 0x159c, 0x00000001);
1216 nv_mthd(dev, 0x9097, 0x15a0, 0x00000001);
1217 nv_mthd(dev, 0x9097, 0x15a4, 0x00000001);
1218 nv_mthd(dev, 0x9097, 0x0f54, 0x00000000);
1219 nv_mthd(dev, 0x9097, 0x0f58, 0x00000000);
1220 nv_mthd(dev, 0x9097, 0x0f5c, 0x00000000);
1221 nv_mthd(dev, 0x9097, 0x19bc, 0x00000000);
1222 nv_mthd(dev, 0x9097, 0x0f9c, 0x00000000);
1223 nv_mthd(dev, 0x9097, 0x0fa0, 0x00000000);
1224 nv_mthd(dev, 0x9097, 0x12cc, 0x00000000);
1225 nv_mthd(dev, 0x9097, 0x12e8, 0x00000000);
1226 nv_mthd(dev, 0x9097, 0x130c, 0x00000001);
1227 nv_mthd(dev, 0x9097, 0x1360, 0x00000000);
1228 nv_mthd(dev, 0x9097, 0x1364, 0x00000000);
1229 nv_mthd(dev, 0x9097, 0x1368, 0x00000000);
1230 nv_mthd(dev, 0x9097, 0x136c, 0x00000000);
1231 nv_mthd(dev, 0x9097, 0x1370, 0x00000000);
1232 nv_mthd(dev, 0x9097, 0x1374, 0x00000000);
1233 nv_mthd(dev, 0x9097, 0x1378, 0x00000000);
1234 nv_mthd(dev, 0x9097, 0x137c, 0x00000000);
1235 nv_mthd(dev, 0x9097, 0x133c, 0x00000001);
1236 nv_mthd(dev, 0x9097, 0x1340, 0x00000001);
1237 nv_mthd(dev, 0x9097, 0x1344, 0x00000002);
1238 nv_mthd(dev, 0x9097, 0x1348, 0x00000001);
1239 nv_mthd(dev, 0x9097, 0x134c, 0x00000001);
1240 nv_mthd(dev, 0x9097, 0x1350, 0x00000002);
1241 nv_mthd(dev, 0x9097, 0x1358, 0x00000001);
1242 nv_mthd(dev, 0x9097, 0x12e4, 0x00000000);
1243 nv_mthd(dev, 0x9097, 0x131c, 0x00000000);
1244 nv_mthd(dev, 0x9097, 0x1320, 0x00000000);
1245 nv_mthd(dev, 0x9097, 0x1324, 0x00000000);
1246 nv_mthd(dev, 0x9097, 0x1328, 0x00000000);
1247 nv_mthd(dev, 0x9097, 0x19c0, 0x00000000);
1248 nv_mthd(dev, 0x9097, 0x1140, 0x00000000);
1249 nv_mthd(dev, 0x9097, 0x19c4, 0x00000000);
1250 nv_mthd(dev, 0x9097, 0x19c8, 0x00001500);
1251 nv_mthd(dev, 0x9097, 0x135c, 0x00000000);
1252 nv_mthd(dev, 0x9097, 0x0f90, 0x00000000);
1253 nv_mthd(dev, 0x9097, 0x19e0, 0x00000001);
1254 nv_mthd(dev, 0x9097, 0x19e4, 0x00000001);
1255 nv_mthd(dev, 0x9097, 0x19e8, 0x00000001);
1256 nv_mthd(dev, 0x9097, 0x19ec, 0x00000001);
1257 nv_mthd(dev, 0x9097, 0x19f0, 0x00000001);
1258 nv_mthd(dev, 0x9097, 0x19f4, 0x00000001);
1259 nv_mthd(dev, 0x9097, 0x19f8, 0x00000001);
1260 nv_mthd(dev, 0x9097, 0x19fc, 0x00000001);
1261 nv_mthd(dev, 0x9097, 0x19cc, 0x00000001);
1262 nv_mthd(dev, 0x9097, 0x15b8, 0x00000000);
1263 nv_mthd(dev, 0x9097, 0x1a00, 0x00001111);
1264 nv_mthd(dev, 0x9097, 0x1a04, 0x00000000);
1265 nv_mthd(dev, 0x9097, 0x1a08, 0x00000000);
1266 nv_mthd(dev, 0x9097, 0x1a0c, 0x00000000);
1267 nv_mthd(dev, 0x9097, 0x1a10, 0x00000000);
1268 nv_mthd(dev, 0x9097, 0x1a14, 0x00000000);
1269 nv_mthd(dev, 0x9097, 0x1a18, 0x00000000);
1270 nv_mthd(dev, 0x9097, 0x1a1c, 0x00000000);
1271 nv_mthd(dev, 0x9097, 0x0d6c, 0xffff0000);
1272 nv_mthd(dev, 0x9097, 0x0d70, 0xffff0000);
1273 nv_mthd(dev, 0x9097, 0x10f8, 0x00001010);
1274 nv_mthd(dev, 0x9097, 0x0d80, 0x00000000);
1275 nv_mthd(dev, 0x9097, 0x0d84, 0x00000000);
1276 nv_mthd(dev, 0x9097, 0x0d88, 0x00000000);
1277 nv_mthd(dev, 0x9097, 0x0d8c, 0x00000000);
1278 nv_mthd(dev, 0x9097, 0x0d90, 0x00000000);
1279 nv_mthd(dev, 0x9097, 0x0da0, 0x00000000);
1280 nv_mthd(dev, 0x9097, 0x1508, 0x80000000);
1281 nv_mthd(dev, 0x9097, 0x150c, 0x40000000);
1282 nv_mthd(dev, 0x9097, 0x1668, 0x00000000);
1283 nv_mthd(dev, 0x9097, 0x0318, 0x00000008);
1284 nv_mthd(dev, 0x9097, 0x031c, 0x00000008);
1285 nv_mthd(dev, 0x9097, 0x0d9c, 0x00000001);
1286 nv_mthd(dev, 0x9097, 0x07dc, 0x00000000);
1287 nv_mthd(dev, 0x9097, 0x074c, 0x00000055);
1288 nv_mthd(dev, 0x9097, 0x1420, 0x00000003);
1289 nv_mthd(dev, 0x9097, 0x17bc, 0x00000000);
1290 nv_mthd(dev, 0x9097, 0x17c0, 0x00000000);
1291 nv_mthd(dev, 0x9097, 0x17c4, 0x00000001);
1292 nv_mthd(dev, 0x9097, 0x1008, 0x00000008);
1293 nv_mthd(dev, 0x9097, 0x100c, 0x00000040);
1294 nv_mthd(dev, 0x9097, 0x1010, 0x0000012c);
1295 nv_mthd(dev, 0x9097, 0x0d60, 0x00000040);
1296 nv_mthd(dev, 0x9097, 0x075c, 0x00000003);
1297 nv_mthd(dev, 0x9097, 0x1018, 0x00000020);
1298 nv_mthd(dev, 0x9097, 0x101c, 0x00000001);
1299 nv_mthd(dev, 0x9097, 0x1020, 0x00000020);
1300 nv_mthd(dev, 0x9097, 0x1024, 0x00000001);
1301 nv_mthd(dev, 0x9097, 0x1444, 0x00000000);
1302 nv_mthd(dev, 0x9097, 0x1448, 0x00000000);
1303 nv_mthd(dev, 0x9097, 0x144c, 0x00000000);
1304 nv_mthd(dev, 0x9097, 0x0360, 0x20164010);
1305 nv_mthd(dev, 0x9097, 0x0364, 0x00000020);
1306 nv_mthd(dev, 0x9097, 0x0368, 0x00000000);
1307 nv_mthd(dev, 0x9097, 0x0de4, 0x00000000);
1308 nv_mthd(dev, 0x9097, 0x0204, 0x00000006);
1309 nv_mthd(dev, 0x9097, 0x0208, 0x00000000);
1310 nv_mthd(dev, 0x9097, 0x02cc, 0x003fffff);
1311 nv_mthd(dev, 0x9097, 0x02d0, 0x00000c48);
1312 nv_mthd(dev, 0x9097, 0x1220, 0x00000005);
1313 nv_mthd(dev, 0x9097, 0x0fdc, 0x00000000);
1314 nv_mthd(dev, 0x9097, 0x0f98, 0x00300008);
1315 nv_mthd(dev, 0x9097, 0x1284, 0x04000080);
1316 nv_mthd(dev, 0x9097, 0x1450, 0x00300008);
1317 nv_mthd(dev, 0x9097, 0x1454, 0x04000080);
1318 nv_mthd(dev, 0x9097, 0x0214, 0x00000000);
1319 /* in trace, right after 0x90c0, not here */
1320 nv_mthd(dev, 0x9097, 0x3410, 0x80002006);
1321}
1322
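/* 0x902d is the Fermi 2D class; these look like its default methods,
 * presumably captured from a trace of the binary driver. */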
1323static void
1324nvc0_grctx_generate_902d(struct drm_device *dev)
1325{
1326 nv_mthd(dev, 0x902d, 0x0200, 0x000000cf);
1327 nv_mthd(dev, 0x902d, 0x0204, 0x00000001);
1328 nv_mthd(dev, 0x902d, 0x0208, 0x00000020);
1329 nv_mthd(dev, 0x902d, 0x020c, 0x00000001);
1330 nv_mthd(dev, 0x902d, 0x0210, 0x00000000);
1331 nv_mthd(dev, 0x902d, 0x0214, 0x00000080);
1332 nv_mthd(dev, 0x902d, 0x0218, 0x00000100);
1333 nv_mthd(dev, 0x902d, 0x021c, 0x00000100);
1334 nv_mthd(dev, 0x902d, 0x0220, 0x00000000);
1335 nv_mthd(dev, 0x902d, 0x0224, 0x00000000);
1336 nv_mthd(dev, 0x902d, 0x0230, 0x000000cf);
1337 nv_mthd(dev, 0x902d, 0x0234, 0x00000001);
1338 nv_mthd(dev, 0x902d, 0x0238, 0x00000020);
1339 nv_mthd(dev, 0x902d, 0x023c, 0x00000001);
1340 nv_mthd(dev, 0x902d, 0x0244, 0x00000080);
1341 nv_mthd(dev, 0x902d, 0x0248, 0x00000100);
1342 nv_mthd(dev, 0x902d, 0x024c, 0x00000100);
1343}
1344
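/* 0x9039: M2MF (memory-to-memory format) class defaults */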
1345static void
1346nvc0_grctx_generate_9039(struct drm_device *dev)
1347{
1348 nv_mthd(dev, 0x9039, 0x030c, 0x00000000);
1349 nv_mthd(dev, 0x9039, 0x0310, 0x00000000);
1350 nv_mthd(dev, 0x9039, 0x0314, 0x00000000);
1351 nv_mthd(dev, 0x9039, 0x0320, 0x00000000);
1352 nv_mthd(dev, 0x9039, 0x0238, 0x00000000);
1353 nv_mthd(dev, 0x9039, 0x023c, 0x00000000);
1354 nv_mthd(dev, 0x9039, 0x0318, 0x00000000);
1355 nv_mthd(dev, 0x9039, 0x031c, 0x00000000);
1356}
1357
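/* 0x90c0: Fermi compute class defaults */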
1358static void
1359nvc0_grctx_generate_90c0(struct drm_device *dev)
1360{
1361 nv_mthd(dev, 0x90c0, 0x270c, 0x00000000);
1362 nv_mthd(dev, 0x90c0, 0x272c, 0x00000000);
1363 nv_mthd(dev, 0x90c0, 0x274c, 0x00000000);
1364 nv_mthd(dev, 0x90c0, 0x276c, 0x00000000);
1365 nv_mthd(dev, 0x90c0, 0x278c, 0x00000000);
1366 nv_mthd(dev, 0x90c0, 0x27ac, 0x00000000);
1367 nv_mthd(dev, 0x90c0, 0x27cc, 0x00000000);
1368 nv_mthd(dev, 0x90c0, 0x27ec, 0x00000000);
1369 nv_mthd(dev, 0x90c0, 0x030c, 0x00000001);
1370 nv_mthd(dev, 0x90c0, 0x1944, 0x00000000);
1371 nv_mthd(dev, 0x90c0, 0x0758, 0x00000100);
1372 nv_mthd(dev, 0x90c0, 0x02c4, 0x00000000);
1373 nv_mthd(dev, 0x90c0, 0x0790, 0x00000000);
1374 nv_mthd(dev, 0x90c0, 0x0794, 0x00000000);
1375 nv_mthd(dev, 0x90c0, 0x0798, 0x00000000);
1376 nv_mthd(dev, 0x90c0, 0x079c, 0x00000000);
1377 nv_mthd(dev, 0x90c0, 0x07a0, 0x00000000);
1378 nv_mthd(dev, 0x90c0, 0x077c, 0x00000000);
1379 nv_mthd(dev, 0x90c0, 0x0204, 0x00000000);
1380 nv_mthd(dev, 0x90c0, 0x0208, 0x00000000);
1381 nv_mthd(dev, 0x90c0, 0x020c, 0x00000000);
1382 nv_mthd(dev, 0x90c0, 0x0214, 0x00000000);
1383 nv_mthd(dev, 0x90c0, 0x024c, 0x00000000);
1384 nv_mthd(dev, 0x90c0, 0x0d94, 0x00000001);
1385 nv_mthd(dev, 0x90c0, 0x1608, 0x00000000);
1386 nv_mthd(dev, 0x90c0, 0x160c, 0x00000000);
1387 nv_mthd(dev, 0x90c0, 0x1664, 0x00000000);
1388}
1389
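/* 0x4040xx: dispatch unit context; mostly zeroed, plus a few non-obvious
 * constants and the per-subchannel slots at 0x404200 */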
1390static void
1391nvc0_grctx_generate_dispatch(struct drm_device *dev)
1392{
1393 int i;
1394
1395 nv_wr32(dev, 0x404004, 0x00000000);
1396 nv_wr32(dev, 0x404008, 0x00000000);
1397 nv_wr32(dev, 0x40400c, 0x00000000);
1398 nv_wr32(dev, 0x404010, 0x00000000);
1399 nv_wr32(dev, 0x404014, 0x00000000);
1400 nv_wr32(dev, 0x404018, 0x00000000);
1401 nv_wr32(dev, 0x40401c, 0x00000000);
1402 nv_wr32(dev, 0x404020, 0x00000000);
1403 nv_wr32(dev, 0x404024, 0x00000000);
1404 nv_wr32(dev, 0x404028, 0x00000000);
1405 nv_wr32(dev, 0x40402c, 0x00000000);
1406 nv_wr32(dev, 0x404044, 0x00000000);
1407 nv_wr32(dev, 0x404094, 0x00000000);
1408 nv_wr32(dev, 0x404098, 0x00000000);
1409 nv_wr32(dev, 0x40409c, 0x00000000);
1410 nv_wr32(dev, 0x4040a0, 0x00000000);
1411 nv_wr32(dev, 0x4040a4, 0x00000000);
1412 nv_wr32(dev, 0x4040a8, 0x00000000);
1413 nv_wr32(dev, 0x4040ac, 0x00000000);
1414 nv_wr32(dev, 0x4040b0, 0x00000000);
1415 nv_wr32(dev, 0x4040b4, 0x00000000);
1416 nv_wr32(dev, 0x4040b8, 0x00000000);
1417 nv_wr32(dev, 0x4040bc, 0x00000000);
1418 nv_wr32(dev, 0x4040c0, 0x00000000);
1419 nv_wr32(dev, 0x4040c4, 0x00000000);
1420 nv_wr32(dev, 0x4040c8, 0xf0000087);
1421 nv_wr32(dev, 0x4040d4, 0x00000000);
1422 nv_wr32(dev, 0x4040d8, 0x00000000);
1423 nv_wr32(dev, 0x4040dc, 0x00000000);
1424 nv_wr32(dev, 0x4040e0, 0x00000000);
1425 nv_wr32(dev, 0x4040e4, 0x00000000);
1426 nv_wr32(dev, 0x4040e8, 0x00001000);
1427 nv_wr32(dev, 0x4040f8, 0x00000000);
1428 nv_wr32(dev, 0x404130, 0x00000000);
1429 nv_wr32(dev, 0x404134, 0x00000000);
1430 nv_wr32(dev, 0x404138, 0x20000040);
1431 nv_wr32(dev, 0x404150, 0x0000002e);
1432 nv_wr32(dev, 0x404154, 0x00000400);
1433 nv_wr32(dev, 0x404158, 0x00000200);
1434 nv_wr32(dev, 0x404164, 0x00000055);
1435 nv_wr32(dev, 0x404168, 0x00000000);
1436 nv_wr32(dev, 0x404174, 0x00000000);
1437 nv_wr32(dev, 0x404178, 0x00000000);
1438 nv_wr32(dev, 0x40417c, 0x00000000);
1439 for (i = 0; i < 8; i++)
1440 nv_wr32(dev, 0x404200 + (i * 4), 0x00000000); /* subc */
1441}
1442
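/* 0x4044xx: apparently the macro-method unit */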
1443static void
1444nvc0_grctx_generate_macro(struct drm_device *dev)
1445{
1446 nv_wr32(dev, 0x404404, 0x00000000);
1447 nv_wr32(dev, 0x404408, 0x00000000);
1448 nv_wr32(dev, 0x40440c, 0x00000000);
1449 nv_wr32(dev, 0x404410, 0x00000000);
1450 nv_wr32(dev, 0x404414, 0x00000000);
1451 nv_wr32(dev, 0x404418, 0x00000000);
1452 nv_wr32(dev, 0x40441c, 0x00000000);
1453 nv_wr32(dev, 0x404420, 0x00000000);
1454 nv_wr32(dev, 0x404424, 0x00000000);
1455 nv_wr32(dev, 0x404428, 0x00000000);
1456 nv_wr32(dev, 0x40442c, 0x00000000);
1457 nv_wr32(dev, 0x404430, 0x00000000);
1458 nv_wr32(dev, 0x404434, 0x00000000);
1459 nv_wr32(dev, 0x404438, 0x00000000);
1460 nv_wr32(dev, 0x404460, 0x00000000);
1461 nv_wr32(dev, 0x404464, 0x00000000);
1462 nv_wr32(dev, 0x404468, 0x00ffffff);
1463 nv_wr32(dev, 0x40446c, 0x00000000);
1464 nv_wr32(dev, 0x404480, 0x00000001);
1465 nv_wr32(dev, 0x404498, 0x00000001);
1466}
1467
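/* 0x4046xx: M2MF engine context defaults */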
1468static void
1469nvc0_grctx_generate_m2mf(struct drm_device *dev)
1470{
1471 nv_wr32(dev, 0x404604, 0x00000015);
1472 nv_wr32(dev, 0x404608, 0x00000000);
1473 nv_wr32(dev, 0x40460c, 0x00002e00);
1474 nv_wr32(dev, 0x404610, 0x00000100);
1475 nv_wr32(dev, 0x404618, 0x00000000);
1476 nv_wr32(dev, 0x40461c, 0x00000000);
1477 nv_wr32(dev, 0x404620, 0x00000000);
1478 nv_wr32(dev, 0x404624, 0x00000000);
1479 nv_wr32(dev, 0x404628, 0x00000000);
1480 nv_wr32(dev, 0x40462c, 0x00000000);
1481 nv_wr32(dev, 0x404630, 0x00000000);
1482 nv_wr32(dev, 0x404634, 0x00000000);
1483 nv_wr32(dev, 0x404638, 0x00000004);
1484 nv_wr32(dev, 0x40463c, 0x00000000);
1485 nv_wr32(dev, 0x404640, 0x00000000);
1486 nv_wr32(dev, 0x404644, 0x00000000);
1487 nv_wr32(dev, 0x404648, 0x00000000);
1488 nv_wr32(dev, 0x40464c, 0x00000000);
1489 nv_wr32(dev, 0x404650, 0x00000000);
1490 nv_wr32(dev, 0x404654, 0x00000000);
1491 nv_wr32(dev, 0x404658, 0x00000000);
1492 nv_wr32(dev, 0x40465c, 0x007f0100);
1493 nv_wr32(dev, 0x404660, 0x00000000);
1494 nv_wr32(dev, 0x404664, 0x00000000);
1495 nv_wr32(dev, 0x404668, 0x00000000);
1496 nv_wr32(dev, 0x40466c, 0x00000000);
1497 nv_wr32(dev, 0x404670, 0x00000000);
1498 nv_wr32(dev, 0x404674, 0x00000000);
1499 nv_wr32(dev, 0x404678, 0x00000000);
1500 nv_wr32(dev, 0x40467c, 0x00000002);
1501 nv_wr32(dev, 0x404680, 0x00000000);
1502 nv_wr32(dev, 0x404684, 0x00000000);
1503 nv_wr32(dev, 0x404688, 0x00000000);
1504 nv_wr32(dev, 0x40468c, 0x00000000);
1505 nv_wr32(dev, 0x404690, 0x00000000);
1506 nv_wr32(dev, 0x404694, 0x00000000);
1507 nv_wr32(dev, 0x404698, 0x00000000);
1508 nv_wr32(dev, 0x40469c, 0x00000000);
1509 nv_wr32(dev, 0x4046a0, 0x007f0080);
1510 nv_wr32(dev, 0x4046a4, 0x00000000);
1511 nv_wr32(dev, 0x4046a8, 0x00000000);
1512 nv_wr32(dev, 0x4046ac, 0x00000000);
1513 nv_wr32(dev, 0x4046b0, 0x00000000);
1514 nv_wr32(dev, 0x4046b4, 0x00000000);
1515 nv_wr32(dev, 0x4046b8, 0x00000000);
1516 nv_wr32(dev, 0x4046bc, 0x00000000);
1517 nv_wr32(dev, 0x4046c0, 0x00000000);
1518 nv_wr32(dev, 0x4046c4, 0x00000000);
1519 nv_wr32(dev, 0x4046c8, 0x00000000);
1520 nv_wr32(dev, 0x4046cc, 0x00000000);
1521 nv_wr32(dev, 0x4046d0, 0x00000000);
1522 nv_wr32(dev, 0x4046d4, 0x00000000);
1523 nv_wr32(dev, 0x4046d8, 0x00000000);
1524 nv_wr32(dev, 0x4046dc, 0x00000000);
1525 nv_wr32(dev, 0x4046e0, 0x00000000);
1526 nv_wr32(dev, 0x4046e4, 0x00000000);
1527 nv_wr32(dev, 0x4046e8, 0x00000000);
1528 nv_wr32(dev, 0x4046f0, 0x00000000);
1529 nv_wr32(dev, 0x4046f4, 0x00000000);
1530}
1531
1532static void
1533nvc0_grctx_generate_unk47xx(struct drm_device *dev)
1534{
1535 nv_wr32(dev, 0x404700, 0x00000000);
1536 nv_wr32(dev, 0x404704, 0x00000000);
1537 nv_wr32(dev, 0x404708, 0x00000000);
1538 nv_wr32(dev, 0x40470c, 0x00000000);
1539 nv_wr32(dev, 0x404710, 0x00000000);
1540 nv_wr32(dev, 0x404714, 0x00000000);
1541 nv_wr32(dev, 0x404718, 0x00000000);
1542 nv_wr32(dev, 0x40471c, 0x00000000);
1543 nv_wr32(dev, 0x404720, 0x00000000);
1544 nv_wr32(dev, 0x404724, 0x00000000);
1545 nv_wr32(dev, 0x404728, 0x00000000);
1546 nv_wr32(dev, 0x40472c, 0x00000000);
1547 nv_wr32(dev, 0x404730, 0x00000000);
1548 nv_wr32(dev, 0x404734, 0x00000100);
1549 nv_wr32(dev, 0x404738, 0x00000000);
1550 nv_wr32(dev, 0x40473c, 0x00000000);
1551 nv_wr32(dev, 0x404740, 0x00000000);
1552 nv_wr32(dev, 0x404744, 0x00000000);
1553 nv_wr32(dev, 0x404748, 0x00000000);
1554 nv_wr32(dev, 0x40474c, 0x00000000);
1555 nv_wr32(dev, 0x404750, 0x00000000);
1556 nv_wr32(dev, 0x404754, 0x00000000);
1557}
1558
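/* 0x4058xx/0x405axx: apparently shader-related broadcast state */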
1559static void
1560nvc0_grctx_generate_shaders(struct drm_device *dev)
1561{
1562 nv_wr32(dev, 0x405800, 0x078000bf);
1563 nv_wr32(dev, 0x405830, 0x02180000);
1564 nv_wr32(dev, 0x405834, 0x00000000);
1565 nv_wr32(dev, 0x405838, 0x00000000);
1566 nv_wr32(dev, 0x405854, 0x00000000);
1567 nv_wr32(dev, 0x405870, 0x00000001);
1568 nv_wr32(dev, 0x405874, 0x00000001);
1569 nv_wr32(dev, 0x405878, 0x00000001);
1570 nv_wr32(dev, 0x40587c, 0x00000001);
1571 nv_wr32(dev, 0x405a00, 0x00000000);
1572 nv_wr32(dev, 0x405a04, 0x00000000);
1573 nv_wr32(dev, 0x405a18, 0x00000000);
1574}
1575
1576static void
1577nvc0_grctx_generate_unk60xx(struct drm_device *dev)
1578{
1579 nv_wr32(dev, 0x406020, 0x000103c1);
1580 nv_wr32(dev, 0x406028, 0x00000001);
1581 nv_wr32(dev, 0x40602c, 0x00000001);
1582 nv_wr32(dev, 0x406030, 0x00000001);
1583 nv_wr32(dev, 0x406034, 0x00000001);
1584}
1585
1586static void
1587nvc0_grctx_generate_unk64xx(struct drm_device *dev)
1588{
1589 nv_wr32(dev, 0x4064a8, 0x00000000);
1590 nv_wr32(dev, 0x4064ac, 0x00003fff);
1591 nv_wr32(dev, 0x4064b4, 0x00000000);
1592 nv_wr32(dev, 0x4064b8, 0x00000000);
1593}
1594
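/* 0x4078xx: "TP bus"; note 0x40780c..0x407820 hold the same magic pattern
 * as 0x418b08 and 0x419b00, and all three sets are recomputed per-chip
 * further down in nvc0_grctx_generate() */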
1595static void
1596nvc0_grctx_generate_tpbus(struct drm_device *dev)
1597{
1598 nv_wr32(dev, 0x407804, 0x00000023);
1599 nv_wr32(dev, 0x40780c, 0x0a418820);
1600 nv_wr32(dev, 0x407810, 0x062080e6);
1601 nv_wr32(dev, 0x407814, 0x020398a4);
1602 nv_wr32(dev, 0x407818, 0x0e629062);
1603 nv_wr32(dev, 0x40781c, 0x0a418820);
1604 nv_wr32(dev, 0x407820, 0x000000e6);
1605 nv_wr32(dev, 0x4078bc, 0x00000103);
1606}
1607
1608static void
1609nvc0_grctx_generate_ccache(struct drm_device *dev)
1610{
1611 nv_wr32(dev, 0x408000, 0x00000000);
1612 nv_wr32(dev, 0x408004, 0x00000000);
1613 nv_wr32(dev, 0x408008, 0x00000018);
1614 nv_wr32(dev, 0x40800c, 0x00000000);
1615 nv_wr32(dev, 0x408010, 0x00000000);
1616 nv_wr32(dev, 0x408014, 0x00000069);
1617 nv_wr32(dev, 0x408018, 0xe100e100);
1618 nv_wr32(dev, 0x408064, 0x00000000);
1619}
1620
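/* 0x408xxx: ROP broadcast state; 0x408900 is chipset-dependent */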
1621static void
1622nvc0_grctx_generate_rop(struct drm_device *dev)
1623{
1624 struct drm_nouveau_private *dev_priv = dev->dev_private;
1625
	/* ROPC_BROADCAST */
1627 nv_wr32(dev, 0x408800, 0x02802a3c);
1628 nv_wr32(dev, 0x408804, 0x00000040);
1629 nv_wr32(dev, 0x408808, 0x0003e00d);
1630 switch (dev_priv->chipset) {
1631 case 0xc0:
1632 nv_wr32(dev, 0x408900, 0x0080b801);
1633 break;
1634 case 0xc3:
1635 case 0xc4:
1636 nv_wr32(dev, 0x408900, 0x3080b801);
1637 break;
1638 }
1639 nv_wr32(dev, 0x408904, 0x02000001);
1640 nv_wr32(dev, 0x408908, 0x00c80929);
1641 nv_wr32(dev, 0x40890c, 0x00000000);
1642 nv_wr32(dev, 0x408980, 0x0000011d);
1643}
1644
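/* 0x418xxx: GPC broadcast defaults, including eight copies of an unknown
 * 0x20-byte register set at 0x418a00 */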
1645static void
1646nvc0_grctx_generate_gpc(struct drm_device *dev)
1647{
1648 int i;
1649
	/* GPC_BROADCAST */
1651 nv_wr32(dev, 0x418380, 0x00000016);
1652 nv_wr32(dev, 0x418400, 0x38004e00);
1653 nv_wr32(dev, 0x418404, 0x71e0ffff);
1654 nv_wr32(dev, 0x418408, 0x00000000);
1655 nv_wr32(dev, 0x41840c, 0x00001008);
1656 nv_wr32(dev, 0x418410, 0x0fff0fff);
1657 nv_wr32(dev, 0x418414, 0x00200fff);
1658 nv_wr32(dev, 0x418450, 0x00000000);
1659 nv_wr32(dev, 0x418454, 0x00000000);
1660 nv_wr32(dev, 0x418458, 0x00000000);
1661 nv_wr32(dev, 0x41845c, 0x00000000);
1662 nv_wr32(dev, 0x418460, 0x00000000);
1663 nv_wr32(dev, 0x418464, 0x00000000);
1664 nv_wr32(dev, 0x418468, 0x00000001);
1665 nv_wr32(dev, 0x41846c, 0x00000000);
1666 nv_wr32(dev, 0x418470, 0x00000000);
1667 nv_wr32(dev, 0x418600, 0x0000001f);
1668 nv_wr32(dev, 0x418684, 0x0000000f);
1669 nv_wr32(dev, 0x418700, 0x00000002);
1670 nv_wr32(dev, 0x418704, 0x00000080);
1671 nv_wr32(dev, 0x418708, 0x00000000);
1672 nv_wr32(dev, 0x41870c, 0x07c80000);
1673 nv_wr32(dev, 0x418710, 0x00000000);
1674 nv_wr32(dev, 0x418800, 0x0006860a);
1675 nv_wr32(dev, 0x418808, 0x00000000);
1676 nv_wr32(dev, 0x41880c, 0x00000000);
1677 nv_wr32(dev, 0x418810, 0x00000000);
1678 nv_wr32(dev, 0x418828, 0x00008442);
1679 nv_wr32(dev, 0x418830, 0x00000001);
1680 nv_wr32(dev, 0x4188d8, 0x00000008);
1681 nv_wr32(dev, 0x4188e0, 0x01000000);
1682 nv_wr32(dev, 0x4188e8, 0x00000000);
1683 nv_wr32(dev, 0x4188ec, 0x00000000);
1684 nv_wr32(dev, 0x4188f0, 0x00000000);
1685 nv_wr32(dev, 0x4188f4, 0x00000000);
1686 nv_wr32(dev, 0x4188f8, 0x00000000);
1687 nv_wr32(dev, 0x4188fc, 0x00100000);
1688 nv_wr32(dev, 0x41891c, 0x00ff00ff);
1689 nv_wr32(dev, 0x418924, 0x00000000);
1690 nv_wr32(dev, 0x418928, 0x00ffff00);
1691 nv_wr32(dev, 0x41892c, 0x0000ff00);
1692 for (i = 0; i < 8; i++) {
1693 nv_wr32(dev, 0x418a00 + (i * 0x20), 0x00000000);
1694 nv_wr32(dev, 0x418a04 + (i * 0x20), 0x00000000);
1695 nv_wr32(dev, 0x418a08 + (i * 0x20), 0x00000000);
1696 nv_wr32(dev, 0x418a0c + (i * 0x20), 0x00010000);
1697 nv_wr32(dev, 0x418a10 + (i * 0x20), 0x00000000);
1698 nv_wr32(dev, 0x418a14 + (i * 0x20), 0x00000000);
1699 nv_wr32(dev, 0x418a18 + (i * 0x20), 0x00000000);
1700 }
1701 nv_wr32(dev, 0x418b00, 0x00000000);
1702 nv_wr32(dev, 0x418b08, 0x0a418820);
1703 nv_wr32(dev, 0x418b0c, 0x062080e6);
1704 nv_wr32(dev, 0x418b10, 0x020398a4);
1705 nv_wr32(dev, 0x418b14, 0x0e629062);
1706 nv_wr32(dev, 0x418b18, 0x0a418820);
1707 nv_wr32(dev, 0x418b1c, 0x000000e6);
1708 nv_wr32(dev, 0x418bb8, 0x00000103);
1709 nv_wr32(dev, 0x418c08, 0x00000001);
1710 nv_wr32(dev, 0x418c10, 0x00000000);
1711 nv_wr32(dev, 0x418c14, 0x00000000);
1712 nv_wr32(dev, 0x418c18, 0x00000000);
1713 nv_wr32(dev, 0x418c1c, 0x00000000);
1714 nv_wr32(dev, 0x418c20, 0x00000000);
1715 nv_wr32(dev, 0x418c24, 0x00000000);
1716 nv_wr32(dev, 0x418c28, 0x00000000);
1717 nv_wr32(dev, 0x418c2c, 0x00000000);
1718 nv_wr32(dev, 0x418c80, 0x20200004);
1719 nv_wr32(dev, 0x418c8c, 0x00000001);
1720 nv_wr32(dev, 0x419000, 0x00000780);
1721 nv_wr32(dev, 0x419004, 0x00000000);
1722 nv_wr32(dev, 0x419008, 0x00000000);
1723 nv_wr32(dev, 0x419014, 0x00000004);
1724}
1725
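/* 0x419xxx: TP broadcast within GPC broadcast; a few values differ on
 * non-c0 chipsets */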
1726static void
1727nvc0_grctx_generate_tp(struct drm_device *dev)
1728{
1729 struct drm_nouveau_private *dev_priv = dev->dev_private;
1730
	/* GPC_BROADCAST.TP_BROADCAST */
1732 nv_wr32(dev, 0x419848, 0x00000000);
1733 nv_wr32(dev, 0x419864, 0x0000012a);
1734 nv_wr32(dev, 0x419888, 0x00000000);
1735 nv_wr32(dev, 0x419a00, 0x000001f0);
1736 nv_wr32(dev, 0x419a04, 0x00000001);
1737 nv_wr32(dev, 0x419a08, 0x00000023);
1738 nv_wr32(dev, 0x419a0c, 0x00020000);
1739 nv_wr32(dev, 0x419a10, 0x00000000);
1740 nv_wr32(dev, 0x419a14, 0x00000200);
1741 nv_wr32(dev, 0x419a1c, 0x00000000);
1742 nv_wr32(dev, 0x419a20, 0x00000800);
1743 if (dev_priv->chipset != 0xc0)
		nv_wr32(dev, 0x419ac4, 0x0007f440); /* 0xc3 */
1745 nv_wr32(dev, 0x419b00, 0x0a418820);
1746 nv_wr32(dev, 0x419b04, 0x062080e6);
1747 nv_wr32(dev, 0x419b08, 0x020398a4);
1748 nv_wr32(dev, 0x419b0c, 0x0e629062);
1749 nv_wr32(dev, 0x419b10, 0x0a418820);
1750 nv_wr32(dev, 0x419b14, 0x000000e6);
1751 nv_wr32(dev, 0x419bd0, 0x00900103);
1752 nv_wr32(dev, 0x419be0, 0x00000001);
1753 nv_wr32(dev, 0x419be4, 0x00000000);
1754 nv_wr32(dev, 0x419c00, 0x00000002);
1755 nv_wr32(dev, 0x419c04, 0x00000006);
1756 nv_wr32(dev, 0x419c08, 0x00000002);
1757 nv_wr32(dev, 0x419c20, 0x00000000);
1758 nv_wr32(dev, 0x419cbc, 0x28137606);
1759 nv_wr32(dev, 0x419ce8, 0x00000000);
1760 nv_wr32(dev, 0x419cf4, 0x00000183);
1761 nv_wr32(dev, 0x419d20, 0x02180000);
1762 nv_wr32(dev, 0x419d24, 0x00001fff);
1763 nv_wr32(dev, 0x419e04, 0x00000000);
1764 nv_wr32(dev, 0x419e08, 0x00000000);
1765 nv_wr32(dev, 0x419e0c, 0x00000000);
1766 nv_wr32(dev, 0x419e10, 0x00000002);
1767 nv_wr32(dev, 0x419e44, 0x001beff2);
1768 nv_wr32(dev, 0x419e48, 0x00000000);
1769 nv_wr32(dev, 0x419e4c, 0x0000000f);
1770 nv_wr32(dev, 0x419e50, 0x00000000);
1771 nv_wr32(dev, 0x419e54, 0x00000000);
1772 nv_wr32(dev, 0x419e58, 0x00000000);
1773 nv_wr32(dev, 0x419e5c, 0x00000000);
1774 nv_wr32(dev, 0x419e60, 0x00000000);
1775 nv_wr32(dev, 0x419e64, 0x00000000);
1776 nv_wr32(dev, 0x419e68, 0x00000000);
1777 nv_wr32(dev, 0x419e6c, 0x00000000);
1778 nv_wr32(dev, 0x419e70, 0x00000000);
1779 nv_wr32(dev, 0x419e74, 0x00000000);
1780 nv_wr32(dev, 0x419e78, 0x00000000);
1781 nv_wr32(dev, 0x419e7c, 0x00000000);
1782 nv_wr32(dev, 0x419e80, 0x00000000);
1783 nv_wr32(dev, 0x419e84, 0x00000000);
1784 nv_wr32(dev, 0x419e88, 0x00000000);
1785 nv_wr32(dev, 0x419e8c, 0x00000000);
1786 nv_wr32(dev, 0x419e90, 0x00000000);
1787 nv_wr32(dev, 0x419e98, 0x00000000);
1788 if (dev_priv->chipset != 0xc0)
1789 nv_wr32(dev, 0x419ee0, 0x00011110);
1790 nv_wr32(dev, 0x419f50, 0x00000000);
1791 nv_wr32(dev, 0x419f54, 0x00000000);
1792 if (dev_priv->chipset != 0xc0)
1793 nv_wr32(dev, 0x419f58, 0x00000000);
1794}
1795
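/*
 * Build the initial channel context image: give every PGRAPH unit its
 * default per-context state, replay the channel's fuc "mmio list", then
 * compute the TP/GPC distribution values that depend on the unit counts
 * of the chip at hand.  The raw numbers above were presumably lifted from
 * mmio traces of the binary driver.
 */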
1796int
1797nvc0_grctx_generate(struct nouveau_channel *chan)
1798{
1799 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
1800 struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
1801 struct nvc0_graph_chan *grch = chan->pgraph_ctx;
1802 struct drm_device *dev = chan->dev;
1803 int i, gpc, tp, id;
1804 u32 r000260, tmp;
1805
1806 r000260 = nv_rd32(dev, 0x000260);
1807 nv_wr32(dev, 0x000260, r000260 & ~1);
1808 nv_wr32(dev, 0x400208, 0x00000000);
1809
1810 nvc0_grctx_generate_dispatch(dev);
1811 nvc0_grctx_generate_macro(dev);
1812 nvc0_grctx_generate_m2mf(dev);
1813 nvc0_grctx_generate_unk47xx(dev);
1814 nvc0_grctx_generate_shaders(dev);
1815 nvc0_grctx_generate_unk60xx(dev);
1816 nvc0_grctx_generate_unk64xx(dev);
1817 nvc0_grctx_generate_tpbus(dev);
1818 nvc0_grctx_generate_ccache(dev);
1819 nvc0_grctx_generate_rop(dev);
1820 nvc0_grctx_generate_gpc(dev);
1821 nvc0_grctx_generate_tp(dev);
1822
1823 nv_wr32(dev, 0x404154, 0x00000000);
1824
1825 /* fuc "mmio list" writes */
1826 for (i = 0; i < grch->mmio_nr * 8; i += 8) {
1827 u32 reg = nv_ro32(grch->mmio, i + 0);
1828 nv_wr32(dev, reg, nv_ro32(grch->mmio, i + 4));
1829 }
1830
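	/* number the TPs sequentially, iterating TP-index-major across all
	 * GPCs, and tell each GPC how many TPs it owns */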
1831 for (tp = 0, id = 0; tp < 4; tp++) {
1832 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
			if (tp < priv->tp_nr[gpc]) {
1834 nv_wr32(dev, TP_UNIT(gpc, tp, 0x698), id);
1835 nv_wr32(dev, TP_UNIT(gpc, tp, 0x4e8), id);
1836 nv_wr32(dev, GPC_UNIT(gpc, 0x0c10 + tp * 4), id);
1837 nv_wr32(dev, TP_UNIT(gpc, tp, 0x088), id);
1838 id++;
1839 }
1840
1841 nv_wr32(dev, GPC_UNIT(gpc, 0x0c08), priv->tp_nr[gpc]);
1842 nv_wr32(dev, GPC_UNIT(gpc, 0x0c8c), priv->tp_nr[gpc]);
1843 }
1844 }
1845
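	/* pack the per-GPC TP counts into one word, 4 bits per GPC */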
1846 tmp = 0;
1847 for (i = 0; i < priv->gpc_nr; i++)
1848 tmp |= priv->tp_nr[i] << (i * 4);
1849 nv_wr32(dev, 0x406028, tmp);
1850 nv_wr32(dev, 0x405870, tmp);
1851
1852 nv_wr32(dev, 0x40602c, 0x00000000);
1853 nv_wr32(dev, 0x405874, 0x00000000);
1854 nv_wr32(dev, 0x406030, 0x00000000);
1855 nv_wr32(dev, 0x405878, 0x00000000);
1856 nv_wr32(dev, 0x406034, 0x00000000);
1857 nv_wr32(dev, 0x40587c, 0x00000000);
1858
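	/* build a TP -> GPC assignment table by round-robin over GPCs that
	 * still have unassigned TPs, pad with 0x1f, and upload it to
	 * 0x4060a8+; chipset_tp_max below only has entries for the low
	 * nibbles of c0/c3/c4 */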
1859 if (1) {
1860 const u8 chipset_tp_max[] = { 16, 0, 0, 4, 8 };
1861 u8 max = chipset_tp_max[dev_priv->chipset & 0x0f];
1862 u8 tpnr[GPC_MAX];
1863 u8 data[32];
1864
1865 memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
1866 memset(data, 0x1f, sizeof(data));
1867
1868 gpc = -1;
1869 for (tp = 0; tp < priv->tp_total; tp++) {
1870 do {
1871 gpc = (gpc + 1) % priv->gpc_nr;
1872 } while (!tpnr[gpc]);
1873 tpnr[gpc]--;
1874 data[tp] = gpc;
1875 }
1876
1877 for (i = 0; i < max / 4; i++)
1878 nv_wr32(dev, 0x4060a8 + (i * 4), ((u32 *)data)[i]);
1879 }
1880
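	/* the same assignment packed 6 entries per word, 5 bits each, plus a
	 * second magic built from tp_total shifted up until bit 4 is set
	 * (ntpcv) and the remainders of powers of two modulo it */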
1881 if (1) {
1882 u32 data[6] = {}, data2[2] = {};
1883 u8 tpnr[GPC_MAX];
1884 u8 shift, ntpcv;
1885
1886 /* calculate first set of magics */
1887 memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
1888
		gpc = -1;
		for (tp = 0; tp < priv->tp_total; tp++) {
1890 do {
1891 gpc = (gpc + 1) % priv->gpc_nr;
1892 } while (!tpnr[gpc]);
1893 tpnr[gpc]--;
1894
1895 data[tp / 6] |= gpc << ((tp % 6) * 5);
1896 }
1897
1898 for (; tp < 32; tp++)
1899 data[tp / 6] |= 7 << ((tp % 6) * 5);
1900
1901 /* and the second... */
1902 shift = 0;
1903 ntpcv = priv->tp_total;
1904 while (!(ntpcv & (1 << 4))) {
1905 ntpcv <<= 1;
1906 shift++;
1907 }
1908
1909 data2[0] = (ntpcv << 16);
1910 data2[0] |= (shift << 21);
1911 data2[0] |= (((1 << (0 + 5)) % ntpcv) << 24);
1912 for (i = 1; i < 7; i++)
1913 data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
1914
		/* GPC_BROADCAST */
1916 nv_wr32(dev, 0x418bb8, (priv->tp_total << 8) |
1917 priv->magic_not_rop_nr);
1918 for (i = 0; i < 6; i++)
1919 nv_wr32(dev, 0x418b08 + (i * 4), data[i]);
1920
		/* GPC_BROADCAST.TP_BROADCAST */
1922 nv_wr32(dev, 0x419bd0, (priv->tp_total << 8) |
1923 priv->magic_not_rop_nr |
1924 data2[0]);
1925 nv_wr32(dev, 0x419be4, data2[1]);
1926 for (i = 0; i < 6; i++)
1927 nv_wr32(dev, 0x419b00 + (i * 4), data[i]);
1928
		/* UNK78xx */
1930 nv_wr32(dev, 0x4078bc, (priv->tp_total << 8) |
1931 priv->magic_not_rop_nr);
1932 for (i = 0; i < 6; i++)
1933 nv_wr32(dev, 0x40780c + (i * 4), data[i]);
1934 }
1935
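	/* apparently a tile/screen distribution table: accumulate one bit per
	 * present TP (8 bits per GPC) and write it to 0x406800/0x406c00,
	 * alternating with its complement within tp_mask */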
1936 if (1) {
1937 u32 tp_mask = 0, tp_set = 0;
1938 u8 tpnr[GPC_MAX];
1939
1940 memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
1941 for (gpc = 0; gpc < priv->gpc_nr; gpc++)
1942 tp_mask |= ((1 << priv->tp_nr[gpc]) - 1) << (gpc * 8);
1943
		for (i = 0, gpc = -1; i < 32; i++) {
1946 int ltp = i * (priv->tp_total - 1) / 32;
1947
1948 do {
1949 gpc = (gpc + 1) % priv->gpc_nr;
1950 } while (!tpnr[gpc]);
1951 tp = priv->tp_nr[gpc] - tpnr[gpc]--;
1952
1953 tp_set |= 1 << ((gpc * 8) + tp);
1954
1955 do {
1956 nv_wr32(dev, 0x406800 + (i * 0x20), tp_set);
1957 tp_set ^= tp_mask;
1958 nv_wr32(dev, 0x406c00 + (i * 0x20), tp_set);
1959 tp_set ^= tp_mask;
1960 } while (ltp == (++i * (priv->tp_total - 1) / 32));
1961 i--;
1962 }
1963 }
1964
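	/* undo the 0x400208 write from above, then replay the (traced?) icmd
	 * stream that seeds default object state */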
1965 nv_wr32(dev, 0x400208, 0x80000000);
1966
1967 nv_icmd(dev, 0x00001000, 0x00000004);
1968 nv_icmd(dev, 0x000000a9, 0x0000ffff);
1969 nv_icmd(dev, 0x00000038, 0x0fac6881);
1970 nv_icmd(dev, 0x0000003d, 0x00000001);
1971 nv_icmd(dev, 0x000000e8, 0x00000400);
1972 nv_icmd(dev, 0x000000e9, 0x00000400);
1973 nv_icmd(dev, 0x000000ea, 0x00000400);
1974 nv_icmd(dev, 0x000000eb, 0x00000400);
1975 nv_icmd(dev, 0x000000ec, 0x00000400);
1976 nv_icmd(dev, 0x000000ed, 0x00000400);
1977 nv_icmd(dev, 0x000000ee, 0x00000400);
1978 nv_icmd(dev, 0x000000ef, 0x00000400);
1979 nv_icmd(dev, 0x00000078, 0x00000300);
1980 nv_icmd(dev, 0x00000079, 0x00000300);
1981 nv_icmd(dev, 0x0000007a, 0x00000300);
1982 nv_icmd(dev, 0x0000007b, 0x00000300);
1983 nv_icmd(dev, 0x0000007c, 0x00000300);
1984 nv_icmd(dev, 0x0000007d, 0x00000300);
1985 nv_icmd(dev, 0x0000007e, 0x00000300);
1986 nv_icmd(dev, 0x0000007f, 0x00000300);
1987 nv_icmd(dev, 0x00000050, 0x00000011);
1988 nv_icmd(dev, 0x00000058, 0x00000008);
1989 nv_icmd(dev, 0x00000059, 0x00000008);
1990 nv_icmd(dev, 0x0000005a, 0x00000008);
1991 nv_icmd(dev, 0x0000005b, 0x00000008);
1992 nv_icmd(dev, 0x0000005c, 0x00000008);
1993 nv_icmd(dev, 0x0000005d, 0x00000008);
1994 nv_icmd(dev, 0x0000005e, 0x00000008);
1995 nv_icmd(dev, 0x0000005f, 0x00000008);
1996 nv_icmd(dev, 0x00000208, 0x00000001);
1997 nv_icmd(dev, 0x00000209, 0x00000001);
1998 nv_icmd(dev, 0x0000020a, 0x00000001);
1999 nv_icmd(dev, 0x0000020b, 0x00000001);
2000 nv_icmd(dev, 0x0000020c, 0x00000001);
2001 nv_icmd(dev, 0x0000020d, 0x00000001);
2002 nv_icmd(dev, 0x0000020e, 0x00000001);
2003 nv_icmd(dev, 0x0000020f, 0x00000001);
2004 nv_icmd(dev, 0x00000081, 0x00000001);
2005 nv_icmd(dev, 0x00000085, 0x00000004);
2006 nv_icmd(dev, 0x00000088, 0x00000400);
2007 nv_icmd(dev, 0x00000090, 0x00000300);
2008 nv_icmd(dev, 0x00000098, 0x00001001);
2009 nv_icmd(dev, 0x000000e3, 0x00000001);
2010 nv_icmd(dev, 0x000000da, 0x00000001);
2011 nv_icmd(dev, 0x000000f8, 0x00000003);
2012 nv_icmd(dev, 0x000000fa, 0x00000001);
2013 nv_icmd(dev, 0x0000009f, 0x0000ffff);
2014 nv_icmd(dev, 0x000000a0, 0x0000ffff);
2015 nv_icmd(dev, 0x000000a1, 0x0000ffff);
2016 nv_icmd(dev, 0x000000a2, 0x0000ffff);
2017 nv_icmd(dev, 0x000000b1, 0x00000001);
2018 nv_icmd(dev, 0x000000b2, 0x00000000);
2019 nv_icmd(dev, 0x000000b3, 0x00000000);
2020 nv_icmd(dev, 0x000000b4, 0x00000000);
2021 nv_icmd(dev, 0x000000b5, 0x00000000);
2022 nv_icmd(dev, 0x000000b6, 0x00000000);
2023 nv_icmd(dev, 0x000000b7, 0x00000000);
2024 nv_icmd(dev, 0x000000b8, 0x00000000);
2025 nv_icmd(dev, 0x000000b9, 0x00000000);
2026 nv_icmd(dev, 0x000000ba, 0x00000000);
2027 nv_icmd(dev, 0x000000bb, 0x00000000);
2028 nv_icmd(dev, 0x000000bc, 0x00000000);
2029 nv_icmd(dev, 0x000000bd, 0x00000000);
2030 nv_icmd(dev, 0x000000be, 0x00000000);
2031 nv_icmd(dev, 0x000000bf, 0x00000000);
2032 nv_icmd(dev, 0x000000c0, 0x00000000);
2033 nv_icmd(dev, 0x000000c1, 0x00000000);
2034 nv_icmd(dev, 0x000000c2, 0x00000000);
2035 nv_icmd(dev, 0x000000c3, 0x00000000);
2036 nv_icmd(dev, 0x000000c4, 0x00000000);
2037 nv_icmd(dev, 0x000000c5, 0x00000000);
2038 nv_icmd(dev, 0x000000c6, 0x00000000);
2039 nv_icmd(dev, 0x000000c7, 0x00000000);
2040 nv_icmd(dev, 0x000000c8, 0x00000000);
2041 nv_icmd(dev, 0x000000c9, 0x00000000);
2042 nv_icmd(dev, 0x000000ca, 0x00000000);
2043 nv_icmd(dev, 0x000000cb, 0x00000000);
2044 nv_icmd(dev, 0x000000cc, 0x00000000);
2045 nv_icmd(dev, 0x000000cd, 0x00000000);
2046 nv_icmd(dev, 0x000000ce, 0x00000000);
2047 nv_icmd(dev, 0x000000cf, 0x00000000);
2048 nv_icmd(dev, 0x000000d0, 0x00000000);
2049 nv_icmd(dev, 0x000000d1, 0x00000000);
2050 nv_icmd(dev, 0x000000d2, 0x00000000);
2051 nv_icmd(dev, 0x000000d3, 0x00000000);
2052 nv_icmd(dev, 0x000000d4, 0x00000000);
2053 nv_icmd(dev, 0x000000d5, 0x00000000);
2054 nv_icmd(dev, 0x000000d6, 0x00000000);
2055 nv_icmd(dev, 0x000000d7, 0x00000000);
2056 nv_icmd(dev, 0x000000d8, 0x00000000);
2057 nv_icmd(dev, 0x000000d9, 0x00000000);
2058 nv_icmd(dev, 0x00000210, 0x00000040);
2059 nv_icmd(dev, 0x00000211, 0x00000040);
2060 nv_icmd(dev, 0x00000212, 0x00000040);
2061 nv_icmd(dev, 0x00000213, 0x00000040);
2062 nv_icmd(dev, 0x00000214, 0x00000040);
2063 nv_icmd(dev, 0x00000215, 0x00000040);
2064 nv_icmd(dev, 0x00000216, 0x00000040);
2065 nv_icmd(dev, 0x00000217, 0x00000040);
2066 nv_icmd(dev, 0x00000218, 0x0000c080);
2067 nv_icmd(dev, 0x00000219, 0x0000c080);
2068 nv_icmd(dev, 0x0000021a, 0x0000c080);
2069 nv_icmd(dev, 0x0000021b, 0x0000c080);
2070 nv_icmd(dev, 0x0000021c, 0x0000c080);
2071 nv_icmd(dev, 0x0000021d, 0x0000c080);
2072 nv_icmd(dev, 0x0000021e, 0x0000c080);
2073 nv_icmd(dev, 0x0000021f, 0x0000c080);
2074 nv_icmd(dev, 0x000000ad, 0x0000013e);
2075 nv_icmd(dev, 0x000000e1, 0x00000010);
2076 nv_icmd(dev, 0x00000290, 0x00000000);
2077 nv_icmd(dev, 0x00000291, 0x00000000);
2078 nv_icmd(dev, 0x00000292, 0x00000000);
2079 nv_icmd(dev, 0x00000293, 0x00000000);
2080 nv_icmd(dev, 0x00000294, 0x00000000);
2081 nv_icmd(dev, 0x00000295, 0x00000000);
2082 nv_icmd(dev, 0x00000296, 0x00000000);
2083 nv_icmd(dev, 0x00000297, 0x00000000);
2084 nv_icmd(dev, 0x00000298, 0x00000000);
2085 nv_icmd(dev, 0x00000299, 0x00000000);
2086 nv_icmd(dev, 0x0000029a, 0x00000000);
2087 nv_icmd(dev, 0x0000029b, 0x00000000);
2088 nv_icmd(dev, 0x0000029c, 0x00000000);
2089 nv_icmd(dev, 0x0000029d, 0x00000000);
2090 nv_icmd(dev, 0x0000029e, 0x00000000);
2091 nv_icmd(dev, 0x0000029f, 0x00000000);
2092 nv_icmd(dev, 0x000003b0, 0x00000000);
2093 nv_icmd(dev, 0x000003b1, 0x00000000);
2094 nv_icmd(dev, 0x000003b2, 0x00000000);
2095 nv_icmd(dev, 0x000003b3, 0x00000000);
2096 nv_icmd(dev, 0x000003b4, 0x00000000);
2097 nv_icmd(dev, 0x000003b5, 0x00000000);
2098 nv_icmd(dev, 0x000003b6, 0x00000000);
2099 nv_icmd(dev, 0x000003b7, 0x00000000);
2100 nv_icmd(dev, 0x000003b8, 0x00000000);
2101 nv_icmd(dev, 0x000003b9, 0x00000000);
2102 nv_icmd(dev, 0x000003ba, 0x00000000);
2103 nv_icmd(dev, 0x000003bb, 0x00000000);
2104 nv_icmd(dev, 0x000003bc, 0x00000000);
2105 nv_icmd(dev, 0x000003bd, 0x00000000);
2106 nv_icmd(dev, 0x000003be, 0x00000000);
2107 nv_icmd(dev, 0x000003bf, 0x00000000);
2108 nv_icmd(dev, 0x000002a0, 0x00000000);
2109 nv_icmd(dev, 0x000002a1, 0x00000000);
2110 nv_icmd(dev, 0x000002a2, 0x00000000);
2111 nv_icmd(dev, 0x000002a3, 0x00000000);
2112 nv_icmd(dev, 0x000002a4, 0x00000000);
2113 nv_icmd(dev, 0x000002a5, 0x00000000);
2114 nv_icmd(dev, 0x000002a6, 0x00000000);
2115 nv_icmd(dev, 0x000002a7, 0x00000000);
2116 nv_icmd(dev, 0x000002a8, 0x00000000);
2117 nv_icmd(dev, 0x000002a9, 0x00000000);
2118 nv_icmd(dev, 0x000002aa, 0x00000000);
2119 nv_icmd(dev, 0x000002ab, 0x00000000);
2120 nv_icmd(dev, 0x000002ac, 0x00000000);
2121 nv_icmd(dev, 0x000002ad, 0x00000000);
2122 nv_icmd(dev, 0x000002ae, 0x00000000);
2123 nv_icmd(dev, 0x000002af, 0x00000000);
2124 nv_icmd(dev, 0x00000420, 0x00000000);
2125 nv_icmd(dev, 0x00000421, 0x00000000);
2126 nv_icmd(dev, 0x00000422, 0x00000000);
2127 nv_icmd(dev, 0x00000423, 0x00000000);
2128 nv_icmd(dev, 0x00000424, 0x00000000);
2129 nv_icmd(dev, 0x00000425, 0x00000000);
2130 nv_icmd(dev, 0x00000426, 0x00000000);
2131 nv_icmd(dev, 0x00000427, 0x00000000);
2132 nv_icmd(dev, 0x00000428, 0x00000000);
2133 nv_icmd(dev, 0x00000429, 0x00000000);
2134 nv_icmd(dev, 0x0000042a, 0x00000000);
2135 nv_icmd(dev, 0x0000042b, 0x00000000);
2136 nv_icmd(dev, 0x0000042c, 0x00000000);
2137 nv_icmd(dev, 0x0000042d, 0x00000000);
2138 nv_icmd(dev, 0x0000042e, 0x00000000);
2139 nv_icmd(dev, 0x0000042f, 0x00000000);
2140 nv_icmd(dev, 0x000002b0, 0x00000000);
2141 nv_icmd(dev, 0x000002b1, 0x00000000);
2142 nv_icmd(dev, 0x000002b2, 0x00000000);
2143 nv_icmd(dev, 0x000002b3, 0x00000000);
2144 nv_icmd(dev, 0x000002b4, 0x00000000);
2145 nv_icmd(dev, 0x000002b5, 0x00000000);
2146 nv_icmd(dev, 0x000002b6, 0x00000000);
2147 nv_icmd(dev, 0x000002b7, 0x00000000);
2148 nv_icmd(dev, 0x000002b8, 0x00000000);
2149 nv_icmd(dev, 0x000002b9, 0x00000000);
2150 nv_icmd(dev, 0x000002ba, 0x00000000);
2151 nv_icmd(dev, 0x000002bb, 0x00000000);
2152 nv_icmd(dev, 0x000002bc, 0x00000000);
2153 nv_icmd(dev, 0x000002bd, 0x00000000);
2154 nv_icmd(dev, 0x000002be, 0x00000000);
2155 nv_icmd(dev, 0x000002bf, 0x00000000);
2156 nv_icmd(dev, 0x00000430, 0x00000000);
2157 nv_icmd(dev, 0x00000431, 0x00000000);
2158 nv_icmd(dev, 0x00000432, 0x00000000);
2159 nv_icmd(dev, 0x00000433, 0x00000000);
2160 nv_icmd(dev, 0x00000434, 0x00000000);
2161 nv_icmd(dev, 0x00000435, 0x00000000);
2162 nv_icmd(dev, 0x00000436, 0x00000000);
2163 nv_icmd(dev, 0x00000437, 0x00000000);
2164 nv_icmd(dev, 0x00000438, 0x00000000);
2165 nv_icmd(dev, 0x00000439, 0x00000000);
2166 nv_icmd(dev, 0x0000043a, 0x00000000);
2167 nv_icmd(dev, 0x0000043b, 0x00000000);
2168 nv_icmd(dev, 0x0000043c, 0x00000000);
2169 nv_icmd(dev, 0x0000043d, 0x00000000);
2170 nv_icmd(dev, 0x0000043e, 0x00000000);
2171 nv_icmd(dev, 0x0000043f, 0x00000000);
2172 nv_icmd(dev, 0x000002c0, 0x00000000);
2173 nv_icmd(dev, 0x000002c1, 0x00000000);
2174 nv_icmd(dev, 0x000002c2, 0x00000000);
2175 nv_icmd(dev, 0x000002c3, 0x00000000);
2176 nv_icmd(dev, 0x000002c4, 0x00000000);
2177 nv_icmd(dev, 0x000002c5, 0x00000000);
2178 nv_icmd(dev, 0x000002c6, 0x00000000);
2179 nv_icmd(dev, 0x000002c7, 0x00000000);
2180 nv_icmd(dev, 0x000002c8, 0x00000000);
2181 nv_icmd(dev, 0x000002c9, 0x00000000);
2182 nv_icmd(dev, 0x000002ca, 0x00000000);
2183 nv_icmd(dev, 0x000002cb, 0x00000000);
2184 nv_icmd(dev, 0x000002cc, 0x00000000);
2185 nv_icmd(dev, 0x000002cd, 0x00000000);
2186 nv_icmd(dev, 0x000002ce, 0x00000000);
2187 nv_icmd(dev, 0x000002cf, 0x00000000);
2188 nv_icmd(dev, 0x000004d0, 0x00000000);
2189 nv_icmd(dev, 0x000004d1, 0x00000000);
2190 nv_icmd(dev, 0x000004d2, 0x00000000);
2191 nv_icmd(dev, 0x000004d3, 0x00000000);
2192 nv_icmd(dev, 0x000004d4, 0x00000000);
2193 nv_icmd(dev, 0x000004d5, 0x00000000);
2194 nv_icmd(dev, 0x000004d6, 0x00000000);
2195 nv_icmd(dev, 0x000004d7, 0x00000000);
2196 nv_icmd(dev, 0x000004d8, 0x00000000);
2197 nv_icmd(dev, 0x000004d9, 0x00000000);
2198 nv_icmd(dev, 0x000004da, 0x00000000);
2199 nv_icmd(dev, 0x000004db, 0x00000000);
2200 nv_icmd(dev, 0x000004dc, 0x00000000);
2201 nv_icmd(dev, 0x000004dd, 0x00000000);
2202 nv_icmd(dev, 0x000004de, 0x00000000);
2203 nv_icmd(dev, 0x000004df, 0x00000000);
2204 nv_icmd(dev, 0x00000720, 0x00000000);
2205 nv_icmd(dev, 0x00000721, 0x00000000);
2206 nv_icmd(dev, 0x00000722, 0x00000000);
2207 nv_icmd(dev, 0x00000723, 0x00000000);
2208 nv_icmd(dev, 0x00000724, 0x00000000);
2209 nv_icmd(dev, 0x00000725, 0x00000000);
2210 nv_icmd(dev, 0x00000726, 0x00000000);
2211 nv_icmd(dev, 0x00000727, 0x00000000);
2212 nv_icmd(dev, 0x00000728, 0x00000000);
2213 nv_icmd(dev, 0x00000729, 0x00000000);
2214 nv_icmd(dev, 0x0000072a, 0x00000000);
2215 nv_icmd(dev, 0x0000072b, 0x00000000);
2216 nv_icmd(dev, 0x0000072c, 0x00000000);
2217 nv_icmd(dev, 0x0000072d, 0x00000000);
2218 nv_icmd(dev, 0x0000072e, 0x00000000);
2219 nv_icmd(dev, 0x0000072f, 0x00000000);
2220 nv_icmd(dev, 0x000008c0, 0x00000000);
2221 nv_icmd(dev, 0x000008c1, 0x00000000);
2222 nv_icmd(dev, 0x000008c2, 0x00000000);
2223 nv_icmd(dev, 0x000008c3, 0x00000000);
2224 nv_icmd(dev, 0x000008c4, 0x00000000);
2225 nv_icmd(dev, 0x000008c5, 0x00000000);
2226 nv_icmd(dev, 0x000008c6, 0x00000000);
2227 nv_icmd(dev, 0x000008c7, 0x00000000);
2228 nv_icmd(dev, 0x000008c8, 0x00000000);
2229 nv_icmd(dev, 0x000008c9, 0x00000000);
2230 nv_icmd(dev, 0x000008ca, 0x00000000);
2231 nv_icmd(dev, 0x000008cb, 0x00000000);
2232 nv_icmd(dev, 0x000008cc, 0x00000000);
2233 nv_icmd(dev, 0x000008cd, 0x00000000);
2234 nv_icmd(dev, 0x000008ce, 0x00000000);
2235 nv_icmd(dev, 0x000008cf, 0x00000000);
2236 nv_icmd(dev, 0x00000890, 0x00000000);
2237 nv_icmd(dev, 0x00000891, 0x00000000);
2238 nv_icmd(dev, 0x00000892, 0x00000000);
2239 nv_icmd(dev, 0x00000893, 0x00000000);
2240 nv_icmd(dev, 0x00000894, 0x00000000);
2241 nv_icmd(dev, 0x00000895, 0x00000000);
2242 nv_icmd(dev, 0x00000896, 0x00000000);
2243 nv_icmd(dev, 0x00000897, 0x00000000);
2244 nv_icmd(dev, 0x00000898, 0x00000000);
2245 nv_icmd(dev, 0x00000899, 0x00000000);
2246 nv_icmd(dev, 0x0000089a, 0x00000000);
2247 nv_icmd(dev, 0x0000089b, 0x00000000);
2248 nv_icmd(dev, 0x0000089c, 0x00000000);
2249 nv_icmd(dev, 0x0000089d, 0x00000000);
2250 nv_icmd(dev, 0x0000089e, 0x00000000);
2251 nv_icmd(dev, 0x0000089f, 0x00000000);
2252 nv_icmd(dev, 0x000008e0, 0x00000000);
2253 nv_icmd(dev, 0x000008e1, 0x00000000);
2254 nv_icmd(dev, 0x000008e2, 0x00000000);
2255 nv_icmd(dev, 0x000008e3, 0x00000000);
2256 nv_icmd(dev, 0x000008e4, 0x00000000);
2257 nv_icmd(dev, 0x000008e5, 0x00000000);
2258 nv_icmd(dev, 0x000008e6, 0x00000000);
2259 nv_icmd(dev, 0x000008e7, 0x00000000);
2260 nv_icmd(dev, 0x000008e8, 0x00000000);
2261 nv_icmd(dev, 0x000008e9, 0x00000000);
2262 nv_icmd(dev, 0x000008ea, 0x00000000);
2263 nv_icmd(dev, 0x000008eb, 0x00000000);
2264 nv_icmd(dev, 0x000008ec, 0x00000000);
2265 nv_icmd(dev, 0x000008ed, 0x00000000);
2266 nv_icmd(dev, 0x000008ee, 0x00000000);
2267 nv_icmd(dev, 0x000008ef, 0x00000000);
2268 nv_icmd(dev, 0x000008a0, 0x00000000);
2269 nv_icmd(dev, 0x000008a1, 0x00000000);
2270 nv_icmd(dev, 0x000008a2, 0x00000000);
2271 nv_icmd(dev, 0x000008a3, 0x00000000);
2272 nv_icmd(dev, 0x000008a4, 0x00000000);
2273 nv_icmd(dev, 0x000008a5, 0x00000000);
2274 nv_icmd(dev, 0x000008a6, 0x00000000);
2275 nv_icmd(dev, 0x000008a7, 0x00000000);
2276 nv_icmd(dev, 0x000008a8, 0x00000000);
2277 nv_icmd(dev, 0x000008a9, 0x00000000);
2278 nv_icmd(dev, 0x000008aa, 0x00000000);
2279 nv_icmd(dev, 0x000008ab, 0x00000000);
2280 nv_icmd(dev, 0x000008ac, 0x00000000);
2281 nv_icmd(dev, 0x000008ad, 0x00000000);
2282 nv_icmd(dev, 0x000008ae, 0x00000000);
2283 nv_icmd(dev, 0x000008af, 0x00000000);
2284 nv_icmd(dev, 0x000008f0, 0x00000000);
2285 nv_icmd(dev, 0x000008f1, 0x00000000);
2286 nv_icmd(dev, 0x000008f2, 0x00000000);
2287 nv_icmd(dev, 0x000008f3, 0x00000000);
2288 nv_icmd(dev, 0x000008f4, 0x00000000);
2289 nv_icmd(dev, 0x000008f5, 0x00000000);
2290 nv_icmd(dev, 0x000008f6, 0x00000000);
2291 nv_icmd(dev, 0x000008f7, 0x00000000);
2292 nv_icmd(dev, 0x000008f8, 0x00000000);
2293 nv_icmd(dev, 0x000008f9, 0x00000000);
2294 nv_icmd(dev, 0x000008fa, 0x00000000);
2295 nv_icmd(dev, 0x000008fb, 0x00000000);
2296 nv_icmd(dev, 0x000008fc, 0x00000000);
2297 nv_icmd(dev, 0x000008fd, 0x00000000);
2298 nv_icmd(dev, 0x000008fe, 0x00000000);
2299 nv_icmd(dev, 0x000008ff, 0x00000000);
2300 nv_icmd(dev, 0x0000094c, 0x000000ff);
2301 nv_icmd(dev, 0x0000094d, 0xffffffff);
2302 nv_icmd(dev, 0x0000094e, 0x00000002);
2303 nv_icmd(dev, 0x000002ec, 0x00000001);
2304 nv_icmd(dev, 0x00000303, 0x00000001);
2305 nv_icmd(dev, 0x000002e6, 0x00000001);
2306 nv_icmd(dev, 0x00000466, 0x00000052);
2307 nv_icmd(dev, 0x00000301, 0x3f800000);
2308 nv_icmd(dev, 0x00000304, 0x30201000);
2309 nv_icmd(dev, 0x00000305, 0x70605040);
2310 nv_icmd(dev, 0x00000306, 0xb8a89888);
2311 nv_icmd(dev, 0x00000307, 0xf8e8d8c8);
2312 nv_icmd(dev, 0x0000030a, 0x00ffff00);
2313 nv_icmd(dev, 0x0000030b, 0x0000001a);
2314 nv_icmd(dev, 0x0000030c, 0x00000001);
2315 nv_icmd(dev, 0x00000318, 0x00000001);
2316 nv_icmd(dev, 0x00000340, 0x00000000);
2317 nv_icmd(dev, 0x00000375, 0x00000001);
2318 nv_icmd(dev, 0x00000351, 0x00000100);
2319 nv_icmd(dev, 0x0000037d, 0x00000006);
2320 nv_icmd(dev, 0x000003a0, 0x00000002);
2321 nv_icmd(dev, 0x000003aa, 0x00000001);
2322 nv_icmd(dev, 0x000003a9, 0x00000001);
2323 nv_icmd(dev, 0x00000380, 0x00000001);
2324 nv_icmd(dev, 0x00000360, 0x00000040);
2325 nv_icmd(dev, 0x00000366, 0x00000000);
2326 nv_icmd(dev, 0x00000367, 0x00000000);
2327 nv_icmd(dev, 0x00000368, 0x00001fff);
2328 nv_icmd(dev, 0x00000370, 0x00000000);
2329 nv_icmd(dev, 0x00000371, 0x00000000);
2330 nv_icmd(dev, 0x00000372, 0x003fffff);
2331 nv_icmd(dev, 0x0000037a, 0x00000012);
2332 nv_icmd(dev, 0x000005e0, 0x00000022);
2333 nv_icmd(dev, 0x000005e1, 0x00000022);
2334 nv_icmd(dev, 0x000005e2, 0x00000022);
2335 nv_icmd(dev, 0x000005e3, 0x00000022);
2336 nv_icmd(dev, 0x000005e4, 0x00000022);
2337 nv_icmd(dev, 0x00000619, 0x00000003);
2338 nv_icmd(dev, 0x00000811, 0x00000003);
2339 nv_icmd(dev, 0x00000812, 0x00000004);
2340 nv_icmd(dev, 0x00000813, 0x00000006);
2341 nv_icmd(dev, 0x00000814, 0x00000008);
2342 nv_icmd(dev, 0x00000815, 0x0000000b);
2343 nv_icmd(dev, 0x00000800, 0x00000001);
2344 nv_icmd(dev, 0x00000801, 0x00000001);
2345 nv_icmd(dev, 0x00000802, 0x00000001);
2346 nv_icmd(dev, 0x00000803, 0x00000001);
2347 nv_icmd(dev, 0x00000804, 0x00000001);
2348 nv_icmd(dev, 0x00000805, 0x00000001);
2349 nv_icmd(dev, 0x00000632, 0x00000001);
2350 nv_icmd(dev, 0x00000633, 0x00000002);
2351 nv_icmd(dev, 0x00000634, 0x00000003);
2352 nv_icmd(dev, 0x00000635, 0x00000004);
2353 nv_icmd(dev, 0x00000654, 0x3f800000);
2354 nv_icmd(dev, 0x00000657, 0x3f800000);
2355 nv_icmd(dev, 0x00000655, 0x3f800000);
2356 nv_icmd(dev, 0x00000656, 0x3f800000);
2357 nv_icmd(dev, 0x000006cd, 0x3f800000);
2358 nv_icmd(dev, 0x000007f5, 0x3f800000);
2359 nv_icmd(dev, 0x000007dc, 0x39291909);
2360 nv_icmd(dev, 0x000007dd, 0x79695949);
2361 nv_icmd(dev, 0x000007de, 0xb9a99989);
2362 nv_icmd(dev, 0x000007df, 0xf9e9d9c9);
2363 nv_icmd(dev, 0x000007e8, 0x00003210);
2364 nv_icmd(dev, 0x000007e9, 0x00007654);
2365 nv_icmd(dev, 0x000007ea, 0x00000098);
2366 nv_icmd(dev, 0x000007ec, 0x39291909);
2367 nv_icmd(dev, 0x000007ed, 0x79695949);
2368 nv_icmd(dev, 0x000007ee, 0xb9a99989);
2369 nv_icmd(dev, 0x000007ef, 0xf9e9d9c9);
2370 nv_icmd(dev, 0x000007f0, 0x00003210);
2371 nv_icmd(dev, 0x000007f1, 0x00007654);
2372 nv_icmd(dev, 0x000007f2, 0x00000098);
2373 nv_icmd(dev, 0x000005a5, 0x00000001);
2374 nv_icmd(dev, 0x00000980, 0x00000000);
2375 nv_icmd(dev, 0x00000981, 0x00000000);
2376 nv_icmd(dev, 0x00000982, 0x00000000);
2377 nv_icmd(dev, 0x00000983, 0x00000000);
2378 nv_icmd(dev, 0x00000984, 0x00000000);
2379 nv_icmd(dev, 0x00000985, 0x00000000);
2380 nv_icmd(dev, 0x00000986, 0x00000000);
2381 nv_icmd(dev, 0x00000987, 0x00000000);
2382 nv_icmd(dev, 0x00000988, 0x00000000);
2383 nv_icmd(dev, 0x00000989, 0x00000000);
2384 nv_icmd(dev, 0x0000098a, 0x00000000);
2385 nv_icmd(dev, 0x0000098b, 0x00000000);
2386 nv_icmd(dev, 0x0000098c, 0x00000000);
2387 nv_icmd(dev, 0x0000098d, 0x00000000);
2388 nv_icmd(dev, 0x0000098e, 0x00000000);
2389 nv_icmd(dev, 0x0000098f, 0x00000000);
2390 nv_icmd(dev, 0x00000990, 0x00000000);
2391 nv_icmd(dev, 0x00000991, 0x00000000);
2392 nv_icmd(dev, 0x00000992, 0x00000000);
2393 nv_icmd(dev, 0x00000993, 0x00000000);
2394 nv_icmd(dev, 0x00000994, 0x00000000);
2395 nv_icmd(dev, 0x00000995, 0x00000000);
2396 nv_icmd(dev, 0x00000996, 0x00000000);
2397 nv_icmd(dev, 0x00000997, 0x00000000);
2398 nv_icmd(dev, 0x00000998, 0x00000000);
2399 nv_icmd(dev, 0x00000999, 0x00000000);
2400 nv_icmd(dev, 0x0000099a, 0x00000000);
2401 nv_icmd(dev, 0x0000099b, 0x00000000);
2402 nv_icmd(dev, 0x0000099c, 0x00000000);
2403 nv_icmd(dev, 0x0000099d, 0x00000000);
2404 nv_icmd(dev, 0x0000099e, 0x00000000);
2405 nv_icmd(dev, 0x0000099f, 0x00000000);
2406 nv_icmd(dev, 0x000009a0, 0x00000000);
2407 nv_icmd(dev, 0x000009a1, 0x00000000);
2408 nv_icmd(dev, 0x000009a2, 0x00000000);
2409 nv_icmd(dev, 0x000009a3, 0x00000000);
2410 nv_icmd(dev, 0x000009a4, 0x00000000);
2411 nv_icmd(dev, 0x000009a5, 0x00000000);
2412 nv_icmd(dev, 0x000009a6, 0x00000000);
2413 nv_icmd(dev, 0x000009a7, 0x00000000);
2414 nv_icmd(dev, 0x000009a8, 0x00000000);
2415 nv_icmd(dev, 0x000009a9, 0x00000000);
2416 nv_icmd(dev, 0x000009aa, 0x00000000);
2417 nv_icmd(dev, 0x000009ab, 0x00000000);
2418 nv_icmd(dev, 0x000009ac, 0x00000000);
2419 nv_icmd(dev, 0x000009ad, 0x00000000);
2420 nv_icmd(dev, 0x000009ae, 0x00000000);
2421 nv_icmd(dev, 0x000009af, 0x00000000);
2422 nv_icmd(dev, 0x000009b0, 0x00000000);
2423 nv_icmd(dev, 0x000009b1, 0x00000000);
2424 nv_icmd(dev, 0x000009b2, 0x00000000);
2425 nv_icmd(dev, 0x000009b3, 0x00000000);
2426 nv_icmd(dev, 0x000009b4, 0x00000000);
2427 nv_icmd(dev, 0x000009b5, 0x00000000);
2428 nv_icmd(dev, 0x000009b6, 0x00000000);
2429 nv_icmd(dev, 0x000009b7, 0x00000000);
2430 nv_icmd(dev, 0x000009b8, 0x00000000);
2431 nv_icmd(dev, 0x000009b9, 0x00000000);
2432 nv_icmd(dev, 0x000009ba, 0x00000000);
2433 nv_icmd(dev, 0x000009bb, 0x00000000);
2434 nv_icmd(dev, 0x000009bc, 0x00000000);
2435 nv_icmd(dev, 0x000009bd, 0x00000000);
2436 nv_icmd(dev, 0x000009be, 0x00000000);
2437 nv_icmd(dev, 0x000009bf, 0x00000000);
2438 nv_icmd(dev, 0x000009c0, 0x00000000);
2439 nv_icmd(dev, 0x000009c1, 0x00000000);
2440 nv_icmd(dev, 0x000009c2, 0x00000000);
2441 nv_icmd(dev, 0x000009c3, 0x00000000);
2442 nv_icmd(dev, 0x000009c4, 0x00000000);
2443 nv_icmd(dev, 0x000009c5, 0x00000000);
2444 nv_icmd(dev, 0x000009c6, 0x00000000);
2445 nv_icmd(dev, 0x000009c7, 0x00000000);
2446 nv_icmd(dev, 0x000009c8, 0x00000000);
2447 nv_icmd(dev, 0x000009c9, 0x00000000);
2448 nv_icmd(dev, 0x000009ca, 0x00000000);
2449 nv_icmd(dev, 0x000009cb, 0x00000000);
2450 nv_icmd(dev, 0x000009cc, 0x00000000);
2451 nv_icmd(dev, 0x000009cd, 0x00000000);
2452 nv_icmd(dev, 0x000009ce, 0x00000000);
2453 nv_icmd(dev, 0x000009cf, 0x00000000);
2454 nv_icmd(dev, 0x000009d0, 0x00000000);
2455 nv_icmd(dev, 0x000009d1, 0x00000000);
2456 nv_icmd(dev, 0x000009d2, 0x00000000);
2457 nv_icmd(dev, 0x000009d3, 0x00000000);
2458 nv_icmd(dev, 0x000009d4, 0x00000000);
2459 nv_icmd(dev, 0x000009d5, 0x00000000);
2460 nv_icmd(dev, 0x000009d6, 0x00000000);
2461 nv_icmd(dev, 0x000009d7, 0x00000000);
2462 nv_icmd(dev, 0x000009d8, 0x00000000);
2463 nv_icmd(dev, 0x000009d9, 0x00000000);
2464 nv_icmd(dev, 0x000009da, 0x00000000);
2465 nv_icmd(dev, 0x000009db, 0x00000000);
2466 nv_icmd(dev, 0x000009dc, 0x00000000);
2467 nv_icmd(dev, 0x000009dd, 0x00000000);
2468 nv_icmd(dev, 0x000009de, 0x00000000);
2469 nv_icmd(dev, 0x000009df, 0x00000000);
2470 nv_icmd(dev, 0x000009e0, 0x00000000);
2471 nv_icmd(dev, 0x000009e1, 0x00000000);
2472 nv_icmd(dev, 0x000009e2, 0x00000000);
2473 nv_icmd(dev, 0x000009e3, 0x00000000);
2474 nv_icmd(dev, 0x000009e4, 0x00000000);
2475 nv_icmd(dev, 0x000009e5, 0x00000000);
2476 nv_icmd(dev, 0x000009e6, 0x00000000);
2477 nv_icmd(dev, 0x000009e7, 0x00000000);
2478 nv_icmd(dev, 0x000009e8, 0x00000000);
2479 nv_icmd(dev, 0x000009e9, 0x00000000);
2480 nv_icmd(dev, 0x000009ea, 0x00000000);
2481 nv_icmd(dev, 0x000009eb, 0x00000000);
2482 nv_icmd(dev, 0x000009ec, 0x00000000);
2483 nv_icmd(dev, 0x000009ed, 0x00000000);
2484 nv_icmd(dev, 0x000009ee, 0x00000000);
2485 nv_icmd(dev, 0x000009ef, 0x00000000);
2486 nv_icmd(dev, 0x000009f0, 0x00000000);
2487 nv_icmd(dev, 0x000009f1, 0x00000000);
2488 nv_icmd(dev, 0x000009f2, 0x00000000);
2489 nv_icmd(dev, 0x000009f3, 0x00000000);
2490 nv_icmd(dev, 0x000009f4, 0x00000000);
2491 nv_icmd(dev, 0x000009f5, 0x00000000);
2492 nv_icmd(dev, 0x000009f6, 0x00000000);
2493 nv_icmd(dev, 0x000009f7, 0x00000000);
2494 nv_icmd(dev, 0x000009f8, 0x00000000);
2495 nv_icmd(dev, 0x000009f9, 0x00000000);
2496 nv_icmd(dev, 0x000009fa, 0x00000000);
2497 nv_icmd(dev, 0x000009fb, 0x00000000);
2498 nv_icmd(dev, 0x000009fc, 0x00000000);
2499 nv_icmd(dev, 0x000009fd, 0x00000000);
2500 nv_icmd(dev, 0x000009fe, 0x00000000);
2501 nv_icmd(dev, 0x000009ff, 0x00000000);
2502 nv_icmd(dev, 0x00000468, 0x00000004);
2503 nv_icmd(dev, 0x0000046c, 0x00000001);
2504 nv_icmd(dev, 0x00000470, 0x00000000);
2505 nv_icmd(dev, 0x00000471, 0x00000000);
2506 nv_icmd(dev, 0x00000472, 0x00000000);
2507 nv_icmd(dev, 0x00000473, 0x00000000);
2508 nv_icmd(dev, 0x00000474, 0x00000000);
2509 nv_icmd(dev, 0x00000475, 0x00000000);
2510 nv_icmd(dev, 0x00000476, 0x00000000);
2511 nv_icmd(dev, 0x00000477, 0x00000000);
2512 nv_icmd(dev, 0x00000478, 0x00000000);
2513 nv_icmd(dev, 0x00000479, 0x00000000);
2514 nv_icmd(dev, 0x0000047a, 0x00000000);
2515 nv_icmd(dev, 0x0000047b, 0x00000000);
2516 nv_icmd(dev, 0x0000047c, 0x00000000);
2517 nv_icmd(dev, 0x0000047d, 0x00000000);
2518 nv_icmd(dev, 0x0000047e, 0x00000000);
2519 nv_icmd(dev, 0x0000047f, 0x00000000);
2520 nv_icmd(dev, 0x00000480, 0x00000000);
2521 nv_icmd(dev, 0x00000481, 0x00000000);
2522 nv_icmd(dev, 0x00000482, 0x00000000);
2523 nv_icmd(dev, 0x00000483, 0x00000000);
2524 nv_icmd(dev, 0x00000484, 0x00000000);
2525 nv_icmd(dev, 0x00000485, 0x00000000);
2526 nv_icmd(dev, 0x00000486, 0x00000000);
2527 nv_icmd(dev, 0x00000487, 0x00000000);
2528 nv_icmd(dev, 0x00000488, 0x00000000);
2529 nv_icmd(dev, 0x00000489, 0x00000000);
2530 nv_icmd(dev, 0x0000048a, 0x00000000);
2531 nv_icmd(dev, 0x0000048b, 0x00000000);
2532 nv_icmd(dev, 0x0000048c, 0x00000000);
2533 nv_icmd(dev, 0x0000048d, 0x00000000);
2534 nv_icmd(dev, 0x0000048e, 0x00000000);
2535 nv_icmd(dev, 0x0000048f, 0x00000000);
2536 nv_icmd(dev, 0x00000490, 0x00000000);
2537 nv_icmd(dev, 0x00000491, 0x00000000);
2538 nv_icmd(dev, 0x00000492, 0x00000000);
2539 nv_icmd(dev, 0x00000493, 0x00000000);
2540 nv_icmd(dev, 0x00000494, 0x00000000);
2541 nv_icmd(dev, 0x00000495, 0x00000000);
2542 nv_icmd(dev, 0x00000496, 0x00000000);
2543 nv_icmd(dev, 0x00000497, 0x00000000);
2544 nv_icmd(dev, 0x00000498, 0x00000000);
2545 nv_icmd(dev, 0x00000499, 0x00000000);
2546 nv_icmd(dev, 0x0000049a, 0x00000000);
2547 nv_icmd(dev, 0x0000049b, 0x00000000);
2548 nv_icmd(dev, 0x0000049c, 0x00000000);
2549 nv_icmd(dev, 0x0000049d, 0x00000000);
2550 nv_icmd(dev, 0x0000049e, 0x00000000);
2551 nv_icmd(dev, 0x0000049f, 0x00000000);
2552 nv_icmd(dev, 0x000004a0, 0x00000000);
2553 nv_icmd(dev, 0x000004a1, 0x00000000);
2554 nv_icmd(dev, 0x000004a2, 0x00000000);
2555 nv_icmd(dev, 0x000004a3, 0x00000000);
2556 nv_icmd(dev, 0x000004a4, 0x00000000);
2557 nv_icmd(dev, 0x000004a5, 0x00000000);
2558 nv_icmd(dev, 0x000004a6, 0x00000000);
2559 nv_icmd(dev, 0x000004a7, 0x00000000);
2560 nv_icmd(dev, 0x000004a8, 0x00000000);
2561 nv_icmd(dev, 0x000004a9, 0x00000000);
2562 nv_icmd(dev, 0x000004aa, 0x00000000);
2563 nv_icmd(dev, 0x000004ab, 0x00000000);
2564 nv_icmd(dev, 0x000004ac, 0x00000000);
2565 nv_icmd(dev, 0x000004ad, 0x00000000);
2566 nv_icmd(dev, 0x000004ae, 0x00000000);
2567 nv_icmd(dev, 0x000004af, 0x00000000);
2568 nv_icmd(dev, 0x000004b0, 0x00000000);
2569 nv_icmd(dev, 0x000004b1, 0x00000000);
2570 nv_icmd(dev, 0x000004b2, 0x00000000);
2571 nv_icmd(dev, 0x000004b3, 0x00000000);
2572 nv_icmd(dev, 0x000004b4, 0x00000000);
2573 nv_icmd(dev, 0x000004b5, 0x00000000);
2574 nv_icmd(dev, 0x000004b6, 0x00000000);
2575 nv_icmd(dev, 0x000004b7, 0x00000000);
2576 nv_icmd(dev, 0x000004b8, 0x00000000);
2577 nv_icmd(dev, 0x000004b9, 0x00000000);
2578 nv_icmd(dev, 0x000004ba, 0x00000000);
2579 nv_icmd(dev, 0x000004bb, 0x00000000);
2580 nv_icmd(dev, 0x000004bc, 0x00000000);
2581 nv_icmd(dev, 0x000004bd, 0x00000000);
2582 nv_icmd(dev, 0x000004be, 0x00000000);
2583 nv_icmd(dev, 0x000004bf, 0x00000000);
2584 nv_icmd(dev, 0x000004c0, 0x00000000);
2585 nv_icmd(dev, 0x000004c1, 0x00000000);
2586 nv_icmd(dev, 0x000004c2, 0x00000000);
2587 nv_icmd(dev, 0x000004c3, 0x00000000);
2588 nv_icmd(dev, 0x000004c4, 0x00000000);
2589 nv_icmd(dev, 0x000004c5, 0x00000000);
2590 nv_icmd(dev, 0x000004c6, 0x00000000);
2591 nv_icmd(dev, 0x000004c7, 0x00000000);
2592 nv_icmd(dev, 0x000004c8, 0x00000000);
2593 nv_icmd(dev, 0x000004c9, 0x00000000);
2594 nv_icmd(dev, 0x000004ca, 0x00000000);
2595 nv_icmd(dev, 0x000004cb, 0x00000000);
2596 nv_icmd(dev, 0x000004cc, 0x00000000);
2597 nv_icmd(dev, 0x000004cd, 0x00000000);
2598 nv_icmd(dev, 0x000004ce, 0x00000000);
2599 nv_icmd(dev, 0x000004cf, 0x00000000);
2600 nv_icmd(dev, 0x00000510, 0x3f800000);
2601 nv_icmd(dev, 0x00000511, 0x3f800000);
2602 nv_icmd(dev, 0x00000512, 0x3f800000);
2603 nv_icmd(dev, 0x00000513, 0x3f800000);
2604 nv_icmd(dev, 0x00000514, 0x3f800000);
2605 nv_icmd(dev, 0x00000515, 0x3f800000);
2606 nv_icmd(dev, 0x00000516, 0x3f800000);
2607 nv_icmd(dev, 0x00000517, 0x3f800000);
2608 nv_icmd(dev, 0x00000518, 0x3f800000);
2609 nv_icmd(dev, 0x00000519, 0x3f800000);
2610 nv_icmd(dev, 0x0000051a, 0x3f800000);
2611 nv_icmd(dev, 0x0000051b, 0x3f800000);
2612 nv_icmd(dev, 0x0000051c, 0x3f800000);
2613 nv_icmd(dev, 0x0000051d, 0x3f800000);
2614 nv_icmd(dev, 0x0000051e, 0x3f800000);
2615 nv_icmd(dev, 0x0000051f, 0x3f800000);
2616 nv_icmd(dev, 0x00000520, 0x000002b6);
2617 nv_icmd(dev, 0x00000529, 0x00000001);
2618 nv_icmd(dev, 0x00000530, 0xffff0000);
2619 nv_icmd(dev, 0x00000531, 0xffff0000);
2620 nv_icmd(dev, 0x00000532, 0xffff0000);
2621 nv_icmd(dev, 0x00000533, 0xffff0000);
2622 nv_icmd(dev, 0x00000534, 0xffff0000);
2623 nv_icmd(dev, 0x00000535, 0xffff0000);
2624 nv_icmd(dev, 0x00000536, 0xffff0000);
2625 nv_icmd(dev, 0x00000537, 0xffff0000);
2626 nv_icmd(dev, 0x00000538, 0xffff0000);
2627 nv_icmd(dev, 0x00000539, 0xffff0000);
2628 nv_icmd(dev, 0x0000053a, 0xffff0000);
2629 nv_icmd(dev, 0x0000053b, 0xffff0000);
2630 nv_icmd(dev, 0x0000053c, 0xffff0000);
2631 nv_icmd(dev, 0x0000053d, 0xffff0000);
2632 nv_icmd(dev, 0x0000053e, 0xffff0000);
2633 nv_icmd(dev, 0x0000053f, 0xffff0000);
2634 nv_icmd(dev, 0x00000585, 0x0000003f);
2635 nv_icmd(dev, 0x00000576, 0x00000003);
2636 nv_icmd(dev, 0x00000586, 0x00000040);
2637 nv_icmd(dev, 0x00000582, 0x00000080);
2638 nv_icmd(dev, 0x00000583, 0x00000080);
2639 nv_icmd(dev, 0x000005c2, 0x00000001);
2640 nv_icmd(dev, 0x00000638, 0x00000001);
2641 nv_icmd(dev, 0x00000639, 0x00000001);
2642 nv_icmd(dev, 0x0000063a, 0x00000002);
2643 nv_icmd(dev, 0x0000063b, 0x00000001);
2644 nv_icmd(dev, 0x0000063c, 0x00000001);
2645 nv_icmd(dev, 0x0000063d, 0x00000002);
2646 nv_icmd(dev, 0x0000063e, 0x00000001);
2647 nv_icmd(dev, 0x000008b8, 0x00000001);
2648 nv_icmd(dev, 0x000008b9, 0x00000001);
2649 nv_icmd(dev, 0x000008ba, 0x00000001);
2650 nv_icmd(dev, 0x000008bb, 0x00000001);
2651 nv_icmd(dev, 0x000008bc, 0x00000001);
2652 nv_icmd(dev, 0x000008bd, 0x00000001);
2653 nv_icmd(dev, 0x000008be, 0x00000001);
2654 nv_icmd(dev, 0x000008bf, 0x00000001);
2655 nv_icmd(dev, 0x00000900, 0x00000001);
2656 nv_icmd(dev, 0x00000901, 0x00000001);
2657 nv_icmd(dev, 0x00000902, 0x00000001);
2658 nv_icmd(dev, 0x00000903, 0x00000001);
2659 nv_icmd(dev, 0x00000904, 0x00000001);
2660 nv_icmd(dev, 0x00000905, 0x00000001);
2661 nv_icmd(dev, 0x00000906, 0x00000001);
2662 nv_icmd(dev, 0x00000907, 0x00000001);
2663 nv_icmd(dev, 0x00000908, 0x00000002);
2664 nv_icmd(dev, 0x00000909, 0x00000002);
2665 nv_icmd(dev, 0x0000090a, 0x00000002);
2666 nv_icmd(dev, 0x0000090b, 0x00000002);
2667 nv_icmd(dev, 0x0000090c, 0x00000002);
2668 nv_icmd(dev, 0x0000090d, 0x00000002);
2669 nv_icmd(dev, 0x0000090e, 0x00000002);
2670 nv_icmd(dev, 0x0000090f, 0x00000002);
2671 nv_icmd(dev, 0x00000910, 0x00000001);
2672 nv_icmd(dev, 0x00000911, 0x00000001);
2673 nv_icmd(dev, 0x00000912, 0x00000001);
2674 nv_icmd(dev, 0x00000913, 0x00000001);
2675 nv_icmd(dev, 0x00000914, 0x00000001);
2676 nv_icmd(dev, 0x00000915, 0x00000001);
2677 nv_icmd(dev, 0x00000916, 0x00000001);
2678 nv_icmd(dev, 0x00000917, 0x00000001);
2679 nv_icmd(dev, 0x00000918, 0x00000001);
2680 nv_icmd(dev, 0x00000919, 0x00000001);
2681 nv_icmd(dev, 0x0000091a, 0x00000001);
2682 nv_icmd(dev, 0x0000091b, 0x00000001);
2683 nv_icmd(dev, 0x0000091c, 0x00000001);
2684 nv_icmd(dev, 0x0000091d, 0x00000001);
2685 nv_icmd(dev, 0x0000091e, 0x00000001);
2686 nv_icmd(dev, 0x0000091f, 0x00000001);
2687 nv_icmd(dev, 0x00000920, 0x00000002);
2688 nv_icmd(dev, 0x00000921, 0x00000002);
2689 nv_icmd(dev, 0x00000922, 0x00000002);
2690 nv_icmd(dev, 0x00000923, 0x00000002);
2691 nv_icmd(dev, 0x00000924, 0x00000002);
2692 nv_icmd(dev, 0x00000925, 0x00000002);
2693 nv_icmd(dev, 0x00000926, 0x00000002);
2694 nv_icmd(dev, 0x00000927, 0x00000002);
2695 nv_icmd(dev, 0x00000928, 0x00000001);
2696 nv_icmd(dev, 0x00000929, 0x00000001);
2697 nv_icmd(dev, 0x0000092a, 0x00000001);
2698 nv_icmd(dev, 0x0000092b, 0x00000001);
2699 nv_icmd(dev, 0x0000092c, 0x00000001);
2700 nv_icmd(dev, 0x0000092d, 0x00000001);
2701 nv_icmd(dev, 0x0000092e, 0x00000001);
2702 nv_icmd(dev, 0x0000092f, 0x00000001);
2703 nv_icmd(dev, 0x00000648, 0x00000001);
2704 nv_icmd(dev, 0x00000649, 0x00000001);
2705 nv_icmd(dev, 0x0000064a, 0x00000001);
2706 nv_icmd(dev, 0x0000064b, 0x00000001);
2707 nv_icmd(dev, 0x0000064c, 0x00000001);
2708 nv_icmd(dev, 0x0000064d, 0x00000001);
2709 nv_icmd(dev, 0x0000064e, 0x00000001);
2710 nv_icmd(dev, 0x0000064f, 0x00000001);
2711 nv_icmd(dev, 0x00000650, 0x00000001);
2712 nv_icmd(dev, 0x00000658, 0x0000000f);
2713 nv_icmd(dev, 0x000007ff, 0x0000000a);
2714 nv_icmd(dev, 0x0000066a, 0x40000000);
2715 nv_icmd(dev, 0x0000066b, 0x10000000);
2716 nv_icmd(dev, 0x0000066c, 0xffff0000);
2717 nv_icmd(dev, 0x0000066d, 0xffff0000);
2718 nv_icmd(dev, 0x000007af, 0x00000008);
2719 nv_icmd(dev, 0x000007b0, 0x00000008);
2720 nv_icmd(dev, 0x000007f6, 0x00000001);
2721 nv_icmd(dev, 0x000006b2, 0x00000055);
2722 nv_icmd(dev, 0x000007ad, 0x00000003);
2723 nv_icmd(dev, 0x00000937, 0x00000001);
2724 nv_icmd(dev, 0x00000971, 0x00000008);
2725 nv_icmd(dev, 0x00000972, 0x00000040);
2726 nv_icmd(dev, 0x00000973, 0x0000012c);
2727 nv_icmd(dev, 0x0000097c, 0x00000040);
2728 nv_icmd(dev, 0x00000979, 0x00000003);
2729 nv_icmd(dev, 0x00000975, 0x00000020);
2730 nv_icmd(dev, 0x00000976, 0x00000001);
2731 nv_icmd(dev, 0x00000977, 0x00000020);
2732 nv_icmd(dev, 0x00000978, 0x00000001);
2733 nv_icmd(dev, 0x00000957, 0x00000003);
2734 nv_icmd(dev, 0x0000095e, 0x20164010);
2735 nv_icmd(dev, 0x0000095f, 0x00000020);
2736 nv_icmd(dev, 0x00000683, 0x00000006);
2737 nv_icmd(dev, 0x00000685, 0x003fffff);
2738 nv_icmd(dev, 0x00000687, 0x00000c48);
2739 nv_icmd(dev, 0x000006a0, 0x00000005);
2740 nv_icmd(dev, 0x00000840, 0x00300008);
2741 nv_icmd(dev, 0x00000841, 0x04000080);
2742 nv_icmd(dev, 0x00000842, 0x00300008);
2743 nv_icmd(dev, 0x00000843, 0x04000080);
2744 nv_icmd(dev, 0x00000818, 0x00000000);
2745 nv_icmd(dev, 0x00000819, 0x00000000);
2746 nv_icmd(dev, 0x0000081a, 0x00000000);
2747 nv_icmd(dev, 0x0000081b, 0x00000000);
2748 nv_icmd(dev, 0x0000081c, 0x00000000);
2749 nv_icmd(dev, 0x0000081d, 0x00000000);
2750 nv_icmd(dev, 0x0000081e, 0x00000000);
2751 nv_icmd(dev, 0x0000081f, 0x00000000);
2752 nv_icmd(dev, 0x00000848, 0x00000000);
2753 nv_icmd(dev, 0x00000849, 0x00000000);
2754 nv_icmd(dev, 0x0000084a, 0x00000000);
2755 nv_icmd(dev, 0x0000084b, 0x00000000);
2756 nv_icmd(dev, 0x0000084c, 0x00000000);
2757 nv_icmd(dev, 0x0000084d, 0x00000000);
2758 nv_icmd(dev, 0x0000084e, 0x00000000);
2759 nv_icmd(dev, 0x0000084f, 0x00000000);
2760 nv_icmd(dev, 0x00000850, 0x00000000);
2761 nv_icmd(dev, 0x00000851, 0x00000000);
2762 nv_icmd(dev, 0x00000852, 0x00000000);
2763 nv_icmd(dev, 0x00000853, 0x00000000);
2764 nv_icmd(dev, 0x00000854, 0x00000000);
2765 nv_icmd(dev, 0x00000855, 0x00000000);
2766 nv_icmd(dev, 0x00000856, 0x00000000);
2767 nv_icmd(dev, 0x00000857, 0x00000000);
2768 nv_icmd(dev, 0x00000738, 0x00000000);
2769 nv_icmd(dev, 0x000006aa, 0x00000001);
2770 nv_icmd(dev, 0x000006ab, 0x00000002);
2771 nv_icmd(dev, 0x000006ac, 0x00000080);
2772 nv_icmd(dev, 0x000006ad, 0x00000100);
2773 nv_icmd(dev, 0x000006ae, 0x00000100);
2774 nv_icmd(dev, 0x000006b1, 0x00000011);
2775 nv_icmd(dev, 0x000006bb, 0x000000cf);
2776 nv_icmd(dev, 0x000006ce, 0x2a712488);
2777 nv_icmd(dev, 0x00000739, 0x4085c000);
2778 nv_icmd(dev, 0x0000073a, 0x00000080);
2779 nv_icmd(dev, 0x00000786, 0x80000100);
2780 nv_icmd(dev, 0x0000073c, 0x00010100);
2781 nv_icmd(dev, 0x0000073d, 0x02800000);
2782 nv_icmd(dev, 0x00000787, 0x000000cf);
2783 nv_icmd(dev, 0x0000078c, 0x00000008);
2784 nv_icmd(dev, 0x00000792, 0x00000001);
2785 nv_icmd(dev, 0x00000794, 0x00000001);
2786 nv_icmd(dev, 0x00000795, 0x00000001);
2787 nv_icmd(dev, 0x00000796, 0x00000001);
2788 nv_icmd(dev, 0x00000797, 0x000000cf);
2789 nv_icmd(dev, 0x00000836, 0x00000001);
2790 nv_icmd(dev, 0x0000079a, 0x00000002);
2791 nv_icmd(dev, 0x00000833, 0x04444480);
2792 nv_icmd(dev, 0x000007a1, 0x00000001);
2793 nv_icmd(dev, 0x000007a3, 0x00000001);
2794 nv_icmd(dev, 0x000007a4, 0x00000001);
2795 nv_icmd(dev, 0x000007a5, 0x00000001);
2796 nv_icmd(dev, 0x00000831, 0x00000004);
2797 nv_icmd(dev, 0x0000080c, 0x00000002);
2798 nv_icmd(dev, 0x0000080d, 0x00000100);
2799 nv_icmd(dev, 0x0000080e, 0x00000100);
2800 nv_icmd(dev, 0x0000080f, 0x00000001);
2801 nv_icmd(dev, 0x00000823, 0x00000002);
2802 nv_icmd(dev, 0x00000824, 0x00000100);
2803 nv_icmd(dev, 0x00000825, 0x00000100);
2804 nv_icmd(dev, 0x00000826, 0x00000001);
2805 nv_icmd(dev, 0x0000095d, 0x00000001);
2806 nv_icmd(dev, 0x0000082b, 0x00000004);
2807 nv_icmd(dev, 0x00000942, 0x00010001);
2808 nv_icmd(dev, 0x00000943, 0x00000001);
2809 nv_icmd(dev, 0x00000944, 0x00000022);
2810 nv_icmd(dev, 0x000007c5, 0x00010001);
2811 nv_icmd(dev, 0x00000834, 0x00000001);
2812 nv_icmd(dev, 0x000007c7, 0x00000001);
2813 nv_icmd(dev, 0x0000c1b0, 0x0000000f);
2814 nv_icmd(dev, 0x0000c1b1, 0x0000000f);
2815 nv_icmd(dev, 0x0000c1b2, 0x0000000f);
2816 nv_icmd(dev, 0x0000c1b3, 0x0000000f);
2817 nv_icmd(dev, 0x0000c1b4, 0x0000000f);
2818 nv_icmd(dev, 0x0000c1b5, 0x0000000f);
2819 nv_icmd(dev, 0x0000c1b6, 0x0000000f);
2820 nv_icmd(dev, 0x0000c1b7, 0x0000000f);
2821 nv_icmd(dev, 0x0000c1b8, 0x0fac6881);
2822 nv_icmd(dev, 0x0000c1b9, 0x00fac688);
2823 nv_icmd(dev, 0x0001e100, 0x00000001);
2824 nv_icmd(dev, 0x00001000, 0x00000002);
2825 nv_icmd(dev, 0x000006aa, 0x00000001);
2826 nv_icmd(dev, 0x000006ad, 0x00000100);
2827 nv_icmd(dev, 0x000006ae, 0x00000100);
2828 nv_icmd(dev, 0x000006b1, 0x00000011);
2829 nv_icmd(dev, 0x0000078c, 0x00000008);
2830 nv_icmd(dev, 0x00000792, 0x00000001);
2831 nv_icmd(dev, 0x00000794, 0x00000001);
2832 nv_icmd(dev, 0x00000795, 0x00000001);
2833 nv_icmd(dev, 0x00000796, 0x00000001);
2834 nv_icmd(dev, 0x00000797, 0x000000cf);
2835 nv_icmd(dev, 0x0000079a, 0x00000002);
2836 nv_icmd(dev, 0x00000833, 0x04444480);
2837 nv_icmd(dev, 0x000007a1, 0x00000001);
2838 nv_icmd(dev, 0x000007a3, 0x00000001);
2839 nv_icmd(dev, 0x000007a4, 0x00000001);
2840 nv_icmd(dev, 0x000007a5, 0x00000001);
2841 nv_icmd(dev, 0x00000831, 0x00000004);
2842 nv_icmd(dev, 0x0001e100, 0x00000001);
2843 nv_icmd(dev, 0x00001000, 0x00000014);
2844 nv_icmd(dev, 0x00000351, 0x00000100);
2845 nv_icmd(dev, 0x00000957, 0x00000003);
2846 nv_icmd(dev, 0x0000095d, 0x00000001);
2847 nv_icmd(dev, 0x0000082b, 0x00000004);
2848 nv_icmd(dev, 0x00000942, 0x00010001);
2849 nv_icmd(dev, 0x00000943, 0x00000001);
2850 nv_icmd(dev, 0x000007c5, 0x00010001);
2851 nv_icmd(dev, 0x00000834, 0x00000001);
2852 nv_icmd(dev, 0x000007c7, 0x00000001);
2853 nv_icmd(dev, 0x0001e100, 0x00000001);
2854 nv_icmd(dev, 0x00001000, 0x00000001);
2855 nv_icmd(dev, 0x0000080c, 0x00000002);
2856 nv_icmd(dev, 0x0000080d, 0x00000100);
2857 nv_icmd(dev, 0x0000080e, 0x00000100);
2858 nv_icmd(dev, 0x0000080f, 0x00000001);
2859 nv_icmd(dev, 0x00000823, 0x00000002);
2860 nv_icmd(dev, 0x00000824, 0x00000100);
2861 nv_icmd(dev, 0x00000825, 0x00000100);
2862 nv_icmd(dev, 0x00000826, 0x00000001);
2863 nv_icmd(dev, 0x0001e100, 0x00000001);
2864 nv_wr32(dev, 0x400208, 0x00000000);
2865 nv_wr32(dev, 0x404154, 0x00000400);
2866
2867 nvc0_grctx_generate_9097(dev);
2868 nvc0_grctx_generate_902d(dev);
2869 nvc0_grctx_generate_9039(dev);
2870 nvc0_grctx_generate_90c0(dev);
2871
2872 nv_wr32(dev, 0x000260, r000260);
2873 return 0;
2874}
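The icmd stream above programs long runs of consecutive method addresses with identical data. Purely as an illustration of that pattern (the driver itself keeps the flat list), the runs could be expressed table-driven using the same nv_icmd(dev, addr, data) helper; the structure and loop below are a sketch, not part of the driver:

/* Illustrative compression of consecutive nv_icmd() runs; only the
 * nv_icmd(dev, addr, data) helper is taken from the code above, the
 * table and loop are hypothetical.
 */
struct icmd_run {
	u32 addr;	/* first method address in the run */
	u32 count;	/* number of consecutive addresses */
	u32 data;	/* value written to each address */
};

static const struct icmd_run icmd_runs[] = {
	{ 0x000008bc,  4, 0x00000001 },	/* 0x8bc..0x8bf above */
	{ 0x00000900,  8, 0x00000001 },	/* 0x900..0x907 */
	{ 0x00000908,  8, 0x00000002 },	/* 0x908..0x90f */
};

static void
icmd_write_runs(struct drm_device *dev)
{
	int i;
	u32 j;

	for (i = 0; i < ARRAY_SIZE(icmd_runs); i++)
		for (j = 0; j < icmd_runs[i].count; j++)
			nv_icmd(dev, icmd_runs[i].addr + j, icmd_runs[i].data);
}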
diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c
index 13a0f78a9088..c09091749054 100644
--- a/drivers/gpu/drm/nouveau/nvc0_instmem.c
+++ b/drivers/gpu/drm/nouveau/nvc0_instmem.c
@@ -25,206 +25,207 @@
25#include "drmP.h" 25#include "drmP.h"
26 26
27#include "nouveau_drv.h" 27#include "nouveau_drv.h"
28#include "nouveau_vm.h"
29
30struct nvc0_instmem_priv {
31 struct nouveau_gpuobj *bar1_pgd;
32 struct nouveau_channel *bar1;
33 struct nouveau_gpuobj *bar3_pgd;
34 struct nouveau_channel *bar3;
35 struct nouveau_gpuobj *chan_pgd;
36};
28 37
29int 38int
30nvc0_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, 39nvc0_instmem_suspend(struct drm_device *dev)
31 uint32_t *size)
32{ 40{
33 int ret; 41 struct drm_nouveau_private *dev_priv = dev->dev_private;
34
35 *size = ALIGN(*size, 4096);
36 if (*size == 0)
37 return -EINVAL;
38
39 ret = nouveau_bo_new(dev, NULL, *size, 0, TTM_PL_FLAG_VRAM, 0, 0x0000,
40 true, false, &gpuobj->im_backing);
41 if (ret) {
42 NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret);
43 return ret;
44 }
45
46 ret = nouveau_bo_pin(gpuobj->im_backing, TTM_PL_FLAG_VRAM);
47 if (ret) {
48 NV_ERROR(dev, "error pinning PRAMIN backing VRAM: %d\n", ret);
49 nouveau_bo_ref(NULL, &gpuobj->im_backing);
50 return ret;
51 }
52 42
53 gpuobj->vinst = gpuobj->im_backing->bo.mem.start << PAGE_SHIFT; 43 dev_priv->ramin_available = false;
54 return 0; 44 return 0;
55} 45}
56 46
57void 47void
58nvc0_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) 48nvc0_instmem_resume(struct drm_device *dev)
59{ 49{
60 struct drm_nouveau_private *dev_priv = dev->dev_private; 50 struct drm_nouveau_private *dev_priv = dev->dev_private;
51 struct nvc0_instmem_priv *priv = dev_priv->engine.instmem.priv;
61 52
62 if (gpuobj && gpuobj->im_backing) { 53 nv_mask(dev, 0x100c80, 0x00000001, 0x00000000);
63 if (gpuobj->im_bound) 54 nv_wr32(dev, 0x001704, 0x80000000 | priv->bar1->ramin->vinst >> 12);
64 dev_priv->engine.instmem.unbind(dev, gpuobj); 55 nv_wr32(dev, 0x001714, 0xc0000000 | priv->bar3->ramin->vinst >> 12);
65 nouveau_bo_unpin(gpuobj->im_backing); 56 dev_priv->ramin_available = true;
66 nouveau_bo_ref(NULL, &gpuobj->im_backing);
67 gpuobj->im_backing = NULL;
68 }
69} 57}
70 58
71int 59static void
72nvc0_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) 60nvc0_channel_del(struct nouveau_channel **pchan)
73{ 61{
74 struct drm_nouveau_private *dev_priv = dev->dev_private; 62 struct nouveau_channel *chan;
75 uint32_t pte, pte_end; 63
76 uint64_t vram; 64 chan = *pchan;
77 65 *pchan = NULL;
78 if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound) 66 if (!chan)
79 return -EINVAL; 67 return;
80 68
81 NV_DEBUG(dev, "st=0x%lx sz=0x%lx\n", 69 nouveau_vm_ref(NULL, &chan->vm, NULL);
82 gpuobj->im_pramin->start, gpuobj->im_pramin->size); 70 if (chan->ramin_heap.free_stack.next)
71 drm_mm_takedown(&chan->ramin_heap);
72 nouveau_gpuobj_ref(NULL, &chan->ramin);
73 kfree(chan);
74}
83 75
84 pte = gpuobj->im_pramin->start >> 12; 76static int
85 pte_end = (gpuobj->im_pramin->size >> 12) + pte; 77nvc0_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm,
86 vram = gpuobj->vinst; 78 struct nouveau_channel **pchan,
79 struct nouveau_gpuobj *pgd, u64 vm_size)
80{
81 struct nouveau_channel *chan;
82 int ret;
87 83
88 NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n", 84 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
89 gpuobj->im_pramin->start, pte, pte_end); 85 if (!chan)
90 NV_DEBUG(dev, "first vram page: 0x%010llx\n", gpuobj->vinst); 86 return -ENOMEM;
87 chan->dev = dev;
91 88
92 while (pte < pte_end) { 89 ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
93 nv_wr32(dev, 0x702000 + (pte * 8), (vram >> 8) | 1); 90 if (ret) {
94 nv_wr32(dev, 0x702004 + (pte * 8), 0); 91 nvc0_channel_del(&chan);
95 vram += 4096; 92 return ret;
96 pte++;
97 } 93 }
98 dev_priv->engine.instmem.flush(dev);
99 94
100 if (1) { 95 ret = drm_mm_init(&chan->ramin_heap, 0x1000, size - 0x1000);
101 u32 chan = nv_rd32(dev, 0x1700) << 16; 96 if (ret) {
102 nv_wr32(dev, 0x100cb8, (chan + 0x1000) >> 8); 97 nvc0_channel_del(&chan);
103 nv_wr32(dev, 0x100cbc, 0x80000005); 98 return ret;
104 } 99 }
105 100
106 gpuobj->im_bound = 1; 101 ret = nouveau_vm_ref(vm, &chan->vm, NULL);
107 return 0; 102 if (ret) {
108} 103 nvc0_channel_del(&chan);
109 104 return ret;
110int
111nvc0_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
112{
113 struct drm_nouveau_private *dev_priv = dev->dev_private;
114 uint32_t pte, pte_end;
115
116 if (gpuobj->im_bound == 0)
117 return -EINVAL;
118
119 pte = gpuobj->im_pramin->start >> 12;
120 pte_end = (gpuobj->im_pramin->size >> 12) + pte;
121 while (pte < pte_end) {
122 nv_wr32(dev, 0x702000 + (pte * 8), 0);
123 nv_wr32(dev, 0x702004 + (pte * 8), 0);
124 pte++;
125 } 105 }
126 dev_priv->engine.instmem.flush(dev);
127 106
128 gpuobj->im_bound = 0; 107 nv_wo32(chan->ramin, 0x0200, lower_32_bits(pgd->vinst));
129 return 0; 108 nv_wo32(chan->ramin, 0x0204, upper_32_bits(pgd->vinst));
130} 109 nv_wo32(chan->ramin, 0x0208, lower_32_bits(vm_size - 1));
110 nv_wo32(chan->ramin, 0x020c, upper_32_bits(vm_size - 1));
131 111
132void 112 *pchan = chan;
133nvc0_instmem_flush(struct drm_device *dev) 113 return 0;
134{
135 nv_wr32(dev, 0x070000, 1);
136 if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000))
137 NV_ERROR(dev, "PRAMIN flush timeout\n");
138} 114}
139 115
140int 116int
141nvc0_instmem_suspend(struct drm_device *dev) 117nvc0_instmem_init(struct drm_device *dev)
142{ 118{
143 struct drm_nouveau_private *dev_priv = dev->dev_private; 119 struct drm_nouveau_private *dev_priv = dev->dev_private;
144 u32 *buf; 120 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
145 int i; 121 struct pci_dev *pdev = dev->pdev;
122 struct nvc0_instmem_priv *priv;
123 struct nouveau_vm *vm = NULL;
124 int ret;
146 125
147 dev_priv->susres.ramin_copy = vmalloc(65536); 126 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
148 if (!dev_priv->susres.ramin_copy) 127 if (!priv)
149 return -ENOMEM; 128 return -ENOMEM;
150 buf = dev_priv->susres.ramin_copy; 129 pinstmem->priv = priv;
151 130
152 for (i = 0; i < 65536; i += 4) 131 /* BAR3 VM */
153 buf[i/4] = nv_rd32(dev, NV04_PRAMIN + i); 132 ret = nouveau_vm_new(dev, 0, pci_resource_len(pdev, 3), 0,
133 &dev_priv->bar3_vm);
134 if (ret)
135 goto error;
136
137 ret = nouveau_gpuobj_new(dev, NULL,
138 (pci_resource_len(pdev, 3) >> 12) * 8, 0,
139 NVOBJ_FLAG_DONT_MAP |
140 NVOBJ_FLAG_ZERO_ALLOC,
141 &dev_priv->bar3_vm->pgt[0].obj[0]);
142 if (ret)
143 goto error;
144 dev_priv->bar3_vm->pgt[0].refcount[0] = 1;
145
146 nv50_instmem_map(dev_priv->bar3_vm->pgt[0].obj[0]);
147
148 ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096,
149 NVOBJ_FLAG_ZERO_ALLOC, &priv->bar3_pgd);
150 if (ret)
151 goto error;
152
153 ret = nouveau_vm_ref(dev_priv->bar3_vm, &vm, priv->bar3_pgd);
154 if (ret)
155 goto error;
156 nouveau_vm_ref(NULL, &vm, NULL);
157
158 ret = nvc0_channel_new(dev, 8192, dev_priv->bar3_vm, &priv->bar3,
159 priv->bar3_pgd, pci_resource_len(dev->pdev, 3));
160 if (ret)
161 goto error;
162
163 /* BAR1 VM */
164 ret = nouveau_vm_new(dev, 0, pci_resource_len(pdev, 1), 0, &vm);
165 if (ret)
166 goto error;
167
168 ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096,
169 NVOBJ_FLAG_ZERO_ALLOC, &priv->bar1_pgd);
170 if (ret)
171 goto error;
172
173 ret = nouveau_vm_ref(vm, &dev_priv->bar1_vm, priv->bar1_pgd);
174 if (ret)
175 goto error;
176 nouveau_vm_ref(NULL, &vm, NULL);
177
178 ret = nvc0_channel_new(dev, 8192, dev_priv->bar1_vm, &priv->bar1,
179 priv->bar1_pgd, pci_resource_len(dev->pdev, 1));
180 if (ret)
181 goto error;
182
183 /* channel vm */
184 ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0008000000ULL, &vm);
185 if (ret)
186 goto error;
187
188 ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096, 0, &priv->chan_pgd);
189 if (ret)
190 goto error;
191
192 nouveau_vm_ref(vm, &dev_priv->chan_vm, priv->chan_pgd);
193 nouveau_vm_ref(NULL, &vm, NULL);
194
195 nvc0_instmem_resume(dev);
154 return 0; 196 return 0;
197error:
198 nvc0_instmem_takedown(dev);
199 return ret;
155} 200}
156 201
157void 202void
158nvc0_instmem_resume(struct drm_device *dev) 203nvc0_instmem_takedown(struct drm_device *dev)
159{ 204{
160 struct drm_nouveau_private *dev_priv = dev->dev_private; 205 struct drm_nouveau_private *dev_priv = dev->dev_private;
161 u32 *buf = dev_priv->susres.ramin_copy; 206 struct nvc0_instmem_priv *priv = dev_priv->engine.instmem.priv;
162 u64 chan; 207 struct nouveau_vm *vm = NULL;
163 int i;
164 208
165 chan = dev_priv->vram_size - dev_priv->ramin_rsvd_vram; 209 nvc0_instmem_suspend(dev);
166 nv_wr32(dev, 0x001700, chan >> 16);
167 210
168 for (i = 0; i < 65536; i += 4) 211 nv_wr32(dev, 0x1704, 0x00000000);
169 nv_wr32(dev, NV04_PRAMIN + i, buf[i/4]); 212 nv_wr32(dev, 0x1714, 0x00000000);
170 vfree(dev_priv->susres.ramin_copy);
171 dev_priv->susres.ramin_copy = NULL;
172 213
173 nv_wr32(dev, 0x001714, 0xc0000000 | (chan >> 12)); 214 nouveau_vm_ref(NULL, &dev_priv->chan_vm, priv->chan_pgd);
174} 215 nouveau_gpuobj_ref(NULL, &priv->chan_pgd);
175 216
176int 217 nvc0_channel_del(&priv->bar1);
177nvc0_instmem_init(struct drm_device *dev) 218 nouveau_vm_ref(NULL, &dev_priv->bar1_vm, priv->bar1_pgd);
178{ 219 nouveau_gpuobj_ref(NULL, &priv->bar1_pgd);
179 struct drm_nouveau_private *dev_priv = dev->dev_private;
180 u64 chan, pgt3, imem, lim3 = dev_priv->ramin_size - 1;
181 int ret, i;
182
183 dev_priv->ramin_rsvd_vram = 1 * 1024 * 1024;
184 chan = dev_priv->vram_size - dev_priv->ramin_rsvd_vram;
185 imem = 4096 + 4096 + 32768;
186
187 nv_wr32(dev, 0x001700, chan >> 16);
188
189 /* channel setup */
190 nv_wr32(dev, 0x700200, lower_32_bits(chan + 0x1000));
191 nv_wr32(dev, 0x700204, upper_32_bits(chan + 0x1000));
192 nv_wr32(dev, 0x700208, lower_32_bits(lim3));
193 nv_wr32(dev, 0x70020c, upper_32_bits(lim3));
194
195 /* point pgd -> pgt */
196 nv_wr32(dev, 0x701000, 0);
197 nv_wr32(dev, 0x701004, ((chan + 0x2000) >> 8) | 1);
198
199 /* point pgt -> physical vram for channel */
200 pgt3 = 0x2000;
201 for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4096, pgt3 += 8) {
202 nv_wr32(dev, 0x700000 + pgt3, ((chan + i) >> 8) | 1);
203 nv_wr32(dev, 0x700004 + pgt3, 0);
204 }
205
206 /* clear rest of pgt */
207 for (; i < dev_priv->ramin_size; i += 4096, pgt3 += 8) {
208 nv_wr32(dev, 0x700000 + pgt3, 0);
209 nv_wr32(dev, 0x700004 + pgt3, 0);
210 }
211
212 /* point bar3 at the channel */
213 nv_wr32(dev, 0x001714, 0xc0000000 | (chan >> 12));
214
215 /* Global PRAMIN heap */
216 ret = drm_mm_init(&dev_priv->ramin_heap, imem,
217 dev_priv->ramin_size - imem);
218 if (ret) {
219 NV_ERROR(dev, "Failed to init RAMIN heap\n");
220 return -ENOMEM;
221 }
222 220
223 return 0; 221 nvc0_channel_del(&priv->bar3);
224} 222 nouveau_vm_ref(dev_priv->bar3_vm, &vm, NULL);
223 nouveau_vm_ref(NULL, &vm, priv->bar3_pgd);
224 nouveau_gpuobj_ref(NULL, &priv->bar3_pgd);
225 nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]);
226 nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL);
225 227
226void 228 dev_priv->engine.instmem.priv = NULL;
227nvc0_instmem_takedown(struct drm_device *dev) 229 kfree(priv);
228{
229} 230}
230 231
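The rewritten instmem code drops the old populate/bind/unbind PRAMIN window management in favour of per-BAR channels, each carrying an instance block that points at its own page directory. The four nv_wo32() writes in nvc0_channel_new() define that block; the helper below merely restates them with the fields labelled (offsets and values are taken from the diff, the function name is hypothetical):

/* Hypothetical restatement of the instance-block writes performed in
 * nvc0_channel_new() above.
 */
static void
nvc0_chan_write_inst(struct nouveau_channel *chan,
		     struct nouveau_gpuobj *pgd, u64 vm_size)
{
	nv_wo32(chan->ramin, 0x0200, lower_32_bits(pgd->vinst));  /* PGD address, low  */
	nv_wo32(chan->ramin, 0x0204, upper_32_bits(pgd->vinst));  /* PGD address, high */
	nv_wo32(chan->ramin, 0x0208, lower_32_bits(vm_size - 1)); /* VM limit, low     */
	nv_wo32(chan->ramin, 0x020c, upper_32_bits(vm_size - 1)); /* VM limit, high    */
}

nvc0_instmem_resume() then points BAR1 and BAR3 at these instance blocks through registers 0x001704 and 0x001714, which is why suspend only needs to clear ramin_available rather than copy PRAMIN out as the old code did.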
diff --git a/drivers/gpu/drm/nouveau/nvc0_vm.c b/drivers/gpu/drm/nouveau/nvc0_vm.c
new file mode 100644
index 000000000000..4b9251bb0ff4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_vm.c
@@ -0,0 +1,123 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26
27#include "nouveau_drv.h"
28#include "nouveau_vm.h"
29
30void
31nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 index,
32 struct nouveau_gpuobj *pgt[2])
33{
34 u32 pde[2] = { 0, 0 };
35
36 if (pgt[0])
37 pde[1] = 0x00000001 | (pgt[0]->vinst >> 8);
38 if (pgt[1])
39 pde[0] = 0x00000001 | (pgt[1]->vinst >> 8);
40
41 nv_wo32(pgd, (index * 8) + 0, pde[0]);
42 nv_wo32(pgd, (index * 8) + 4, pde[1]);
43}
44
45static inline u64
46nvc0_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
47{
48 phys >>= 8;
49
50 phys |= 0x00000001; /* present */
51// if (vma->access & NV_MEM_ACCESS_SYS)
52// phys |= 0x00000002;
53
54 phys |= ((u64)target << 32);
55 phys |= ((u64)memtype << 36);
56
57 return phys;
58}
59
60void
61nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
62 struct nouveau_vram *mem, u32 pte, u32 cnt, u64 phys)
63{
64 u32 next = 1 << (vma->node->type - 8);
65
66 phys = nvc0_vm_addr(vma, phys, mem->memtype, 0);
67 pte <<= 3;
68 while (cnt--) {
69 nv_wo32(pgt, pte + 0, lower_32_bits(phys));
70 nv_wo32(pgt, pte + 4, upper_32_bits(phys));
71 phys += next;
72 pte += 8;
73 }
74}
75
76void
77nvc0_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
78 u32 pte, dma_addr_t *list, u32 cnt)
79{
80 pte <<= 3;
81 while (cnt--) {
82 u64 phys = nvc0_vm_addr(vma, *list++, 0, 5);
83 nv_wo32(pgt, pte + 0, lower_32_bits(phys));
84 nv_wo32(pgt, pte + 4, upper_32_bits(phys));
85 pte += 8;
86 }
87}
88
89void
90nvc0_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
91{
92 pte <<= 3;
93 while (cnt--) {
94 nv_wo32(pgt, pte + 0, 0x00000000);
95 nv_wo32(pgt, pte + 4, 0x00000000);
96 pte += 8;
97 }
98}
99
100void
101nvc0_vm_flush(struct nouveau_vm *vm)
102{
103 struct drm_nouveau_private *dev_priv = vm->dev->dev_private;
104 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
105 struct drm_device *dev = vm->dev;
106 struct nouveau_vm_pgd *vpgd;
107 u32 r100c80, engine;
108
109 pinstmem->flush(vm->dev);
110
111 if (vm == dev_priv->chan_vm)
112 engine = 1;
113 else
114 engine = 5;
115
116 list_for_each_entry(vpgd, &vm->pgd_list, head) {
117 r100c80 = nv_rd32(dev, 0x100c80);
118 nv_wr32(dev, 0x100cb8, vpgd->obj->vinst >> 8);
119 nv_wr32(dev, 0x100cbc, 0x80000000 | engine);
120 if (!nv_wait(dev, 0x100c80, 0xffffffff, r100c80))
121 NV_ERROR(dev, "vm flush timeout eng %d\n", engine);
122 }
123}
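nvc0_vm_addr() packs the physical address and its attributes into the 64-bit PTE written by nvc0_vm_map() and nvc0_vm_map_sg(). A worked example, using values consistent with the code above (the memtype 0xfe is illustrative):

/* PTE encoding from nvc0_vm_addr() for phys = 0x20000000,
 * memtype = 0xfe, target = 0 (VRAM):
 *
 *   phys >> 8                 = 0x0000000000200000
 *   | 0x00000001 (present)    = 0x0000000000200001
 *   | (u64)0    << 32         = 0x0000000000200001   (target)
 *   | (u64)0xfe << 36         = 0x00000fe000200001   (memtype)
 *
 * nvc0_vm_map() stores the low word at pte * 8 and the high word at
 * pte * 8 + 4, then advances the encoded address by
 * 1 << (vma->node->type - 8) per entry -- the page size divided by
 * 256, matching the >> 8 in the encoding.
 */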
diff --git a/drivers/gpu/drm/nouveau/nvc0_vram.c b/drivers/gpu/drm/nouveau/nvc0_vram.c
new file mode 100644
index 000000000000..858eda5dedd1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_vram.c
@@ -0,0 +1,99 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_mm.h"
28
29bool
30nvc0_vram_flags_valid(struct drm_device *dev, u32 tile_flags)
31{
32 switch (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) {
33 case 0x0000:
34 case 0xfe00:
35 case 0xdb00:
36 case 0x1100:
37 return true;
38 default:
39 break;
40 }
41
42 return false;
43}
44
45int
46nvc0_vram_new(struct drm_device *dev, u64 size, u32 align, u32 ncmin,
47 u32 type, struct nouveau_vram **pvram)
48{
49 struct drm_nouveau_private *dev_priv = dev->dev_private;
50 struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
51 struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
52 struct nouveau_mm *mm = man->priv;
53 struct nouveau_mm_node *r;
54 struct nouveau_vram *vram;
55 int ret;
56
57 size >>= 12;
58 align >>= 12;
59 ncmin >>= 12;
60
61 vram = kzalloc(sizeof(*vram), GFP_KERNEL);
62 if (!vram)
63 return -ENOMEM;
64
65 INIT_LIST_HEAD(&vram->regions);
66 vram->dev = dev_priv->dev;
67 vram->memtype = type;
68 vram->size = size;
69
70 mutex_lock(&mm->mutex);
71 do {
72 ret = nouveau_mm_get(mm, 1, size, ncmin, align, &r);
73 if (ret) {
74 mutex_unlock(&mm->mutex);
75 nv50_vram_del(dev, &vram);
76 return ret;
77 }
78
79 list_add_tail(&r->rl_entry, &vram->regions);
80 size -= r->length;
81 } while (size);
82 mutex_unlock(&mm->mutex);
83
84 r = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry);
85 vram->offset = (u64)r->offset << 12;
86 *pvram = vram;
87 return 0;
88}
89
90int
91nvc0_vram_init(struct drm_device *dev)
92{
93 struct drm_nouveau_private *dev_priv = dev->dev_private;
94
95 dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20;
96 dev_priv->vram_size *= nv_rd32(dev, 0x121c74);
97 dev_priv->vram_rblock_size = 4096;
98 return 0;
99}
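nvc0_vram_new() takes size, align and ncmin in bytes and shifts them down to 4 KiB pages before carving regions out of the TTM VRAM manager's nouveau_mm. A hypothetical caller, to make the units explicit:

/* Hypothetical caller of nvc0_vram_new(); the argument units follow
 * directly from the >> 12 conversions in the function above.
 */
static int
example_vram_alloc(struct drm_device *dev, struct nouveau_vram **pvram)
{
	return nvc0_vram_new(dev,
			     1 << 20,	/* size: 1 MiB, in bytes */
			     1 << 12,	/* align: one 4 KiB page */
			     1 << 12,	/* ncmin: smallest acceptable region */
			     0xfe,	/* memtype, stored in vram->memtype */
			     pvram);
}

Note that the allocation may come back as several discontiguous regions chained on vram->regions; vram->offset reflects only the first region.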
diff --git a/drivers/gpu/drm/nouveau/nvreg.h b/drivers/gpu/drm/nouveau/nvreg.h
index 881f8a585613..fe0f253089ac 100644
--- a/drivers/gpu/drm/nouveau/nvreg.h
+++ b/drivers/gpu/drm/nouveau/nvreg.h
@@ -153,7 +153,8 @@
153#define NV_PCRTC_START 0x00600800 153#define NV_PCRTC_START 0x00600800
154#define NV_PCRTC_CONFIG 0x00600804 154#define NV_PCRTC_CONFIG 0x00600804
155# define NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA (1 << 0) 155# define NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA (1 << 0)
156# define NV_PCRTC_CONFIG_START_ADDRESS_HSYNC (2 << 0) 156# define NV04_PCRTC_CONFIG_START_ADDRESS_HSYNC (4 << 0)
157# define NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC (2 << 0)
157#define NV_PCRTC_CURSOR_CONFIG 0x00600810 158#define NV_PCRTC_CURSOR_CONFIG 0x00600810
158# define NV_PCRTC_CURSOR_CONFIG_ENABLE_ENABLE (1 << 0) 159# define NV_PCRTC_CURSOR_CONFIG_ENABLE_ENABLE (1 << 0)
159# define NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE (1 << 4) 160# define NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE (1 << 4)
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 6cae4f2028d2..e47eecfc2df4 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -65,10 +65,13 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
65 rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \ 65 rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
66 r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \ 66 r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
67 r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \ 67 r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
68 evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o 68 evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \
69 radeon_trace_points.o ni.o
69 70
70radeon-$(CONFIG_COMPAT) += radeon_ioc32.o 71radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
71radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o 72radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
72radeon-$(CONFIG_ACPI) += radeon_acpi.o 73radeon-$(CONFIG_ACPI) += radeon_acpi.o
73 74
74obj-$(CONFIG_DRM_RADEON)+= radeon.o 75obj-$(CONFIG_DRM_RADEON)+= radeon.o
76
77CFLAGS_radeon_trace_points.o := -I$(src)
\ No newline at end of file
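The new CFLAGS line follows from how kernel tracepoints are built: exactly one translation unit re-includes the trace header with CREATE_TRACE_POINTS defined so the tracepoint bodies are emitted, and since that header lives in the driver directory rather than include/trace/, the directory must be on the include path. A sketch of the corresponding source file, whose contents are assumed here (only the Makefile rule is part of this diff):

/* radeon_trace_points.c -- assumed contents, following the standard
 * kernel tracepoint pattern; -I$(src) lets this include resolve.
 */
#define CREATE_TRACE_POINTS
#include "radeon_trace.h"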
diff --git a/drivers/gpu/drm/radeon/ObjectID.h b/drivers/gpu/drm/radeon/ObjectID.h
index c714179d1bfa..c61c3fe9fb98 100644
--- a/drivers/gpu/drm/radeon/ObjectID.h
+++ b/drivers/gpu/drm/radeon/ObjectID.h
@@ -37,6 +37,8 @@
37#define GRAPH_OBJECT_TYPE_CONNECTOR 0x3 37#define GRAPH_OBJECT_TYPE_CONNECTOR 0x3
38#define GRAPH_OBJECT_TYPE_ROUTER 0x4 38#define GRAPH_OBJECT_TYPE_ROUTER 0x4
39/* deleted */ 39/* deleted */
40#define GRAPH_OBJECT_TYPE_DISPLAY_PATH 0x6
41#define GRAPH_OBJECT_TYPE_GENERIC 0x7
40 42
41/****************************************************/ 43/****************************************************/
42/* Encoder Object ID Definition */ 44/* Encoder Object ID Definition */
@@ -64,6 +66,9 @@
64#define ENCODER_OBJECT_ID_VT1623 0x10 66#define ENCODER_OBJECT_ID_VT1623 0x10
65#define ENCODER_OBJECT_ID_HDMI_SI1930 0x11 67#define ENCODER_OBJECT_ID_HDMI_SI1930 0x11
66#define ENCODER_OBJECT_ID_HDMI_INTERNAL 0x12 68#define ENCODER_OBJECT_ID_HDMI_INTERNAL 0x12
69#define ENCODER_OBJECT_ID_ALMOND 0x22
70#define ENCODER_OBJECT_ID_TRAVIS 0x23
71#define ENCODER_OBJECT_ID_NUTMEG 0x22
67/* Kaleidoscope (KLDSCP) Class Display Hardware (internal) */ 72/* Kaleidoscope (KLDSCP) Class Display Hardware (internal) */
68#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 0x13 73#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 0x13
69#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 0x14 74#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 0x14
@@ -108,6 +113,7 @@
108#define CONNECTOR_OBJECT_ID_DISPLAYPORT 0x13 113#define CONNECTOR_OBJECT_ID_DISPLAYPORT 0x13
109#define CONNECTOR_OBJECT_ID_eDP 0x14 114#define CONNECTOR_OBJECT_ID_eDP 0x14
110#define CONNECTOR_OBJECT_ID_MXM 0x15 115#define CONNECTOR_OBJECT_ID_MXM 0x15
116#define CONNECTOR_OBJECT_ID_LVDS_eDP 0x16
111 117
112/* deleted */ 118/* deleted */
113 119
@@ -124,6 +130,7 @@
124#define GENERIC_OBJECT_ID_GLSYNC 0x01 130#define GENERIC_OBJECT_ID_GLSYNC 0x01
125#define GENERIC_OBJECT_ID_PX2_NON_DRIVABLE 0x02 131#define GENERIC_OBJECT_ID_PX2_NON_DRIVABLE 0x02
126#define GENERIC_OBJECT_ID_MXM_OPM 0x03 132#define GENERIC_OBJECT_ID_MXM_OPM 0x03
133#define GENERIC_OBJECT_ID_STEREO_PIN 0x04 //This object could show up from Misc Object table, it follows ATOM_OBJECT format, and contains one ATOM_OBJECT_GPIO_CNTL_RECORD for the stereo pin
127 134
128/****************************************************/ 135/****************************************************/
129/* Graphics Object ENUM ID Definition */ 136/* Graphics Object ENUM ID Definition */
@@ -360,6 +367,26 @@
360 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 367 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
361 ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT) 368 ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT)
362 369
370#define ENCODER_ALMOND_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
371 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
372 ENCODER_OBJECT_ID_ALMOND << OBJECT_ID_SHIFT)
373
374#define ENCODER_ALMOND_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
375 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
376 ENCODER_OBJECT_ID_ALMOND << OBJECT_ID_SHIFT)
377
378#define ENCODER_TRAVIS_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
379 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
380 ENCODER_OBJECT_ID_TRAVIS << OBJECT_ID_SHIFT)
381
382#define ENCODER_TRAVIS_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
383 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
384 ENCODER_OBJECT_ID_TRAVIS << OBJECT_ID_SHIFT)
385
386#define ENCODER_NUTMEG_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
387 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
388 ENCODER_OBJECT_ID_NUTMEG << OBJECT_ID_SHIFT)
389
363/****************************************************/ 390/****************************************************/
364/* Connector Object ID definition - Shared with BIOS */ 391/* Connector Object ID definition - Shared with BIOS */
365/****************************************************/ 392/****************************************************/
@@ -421,6 +448,14 @@
421 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ 448 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
422 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT) 449 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
423 450
451#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID3 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
452 GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
453 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
454
455#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID4 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
456 GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
457 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
458
424#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 459#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
425 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 460 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
426 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT) 461 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
@@ -512,6 +547,7 @@
512#define CONNECTOR_7PIN_DIN_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 547#define CONNECTOR_7PIN_DIN_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
513 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 548 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
514 CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT) 549 CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)
550
515#define CONNECTOR_7PIN_DIN_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ 551#define CONNECTOR_7PIN_DIN_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
516 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ 552 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
517 CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT) 553 CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)
@@ -593,6 +629,14 @@
593 GRAPH_OBJECT_ENUM_ID7 << ENUM_ID_SHIFT |\ 629 GRAPH_OBJECT_ENUM_ID7 << ENUM_ID_SHIFT |\
594 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DAC 630 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DAC
595 631
632#define CONNECTOR_LVDS_eDP_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
633 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
634 CONNECTOR_OBJECT_ID_LVDS_eDP << OBJECT_ID_SHIFT)
635
636#define CONNECTOR_LVDS_eDP_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
637 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
638 CONNECTOR_OBJECT_ID_LVDS_eDP << OBJECT_ID_SHIFT)
639
596/****************************************************/ 640/****************************************************/
597/* Router Object ID definition - Shared with BIOS */ 641/* Router Object ID definition - Shared with BIOS */
598/****************************************************/ 642/****************************************************/
@@ -621,6 +665,10 @@
621 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ 665 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
622 GENERIC_OBJECT_ID_MXM_OPM << OBJECT_ID_SHIFT) 666 GENERIC_OBJECT_ID_MXM_OPM << OBJECT_ID_SHIFT)
623 667
668#define GENERICOBJECT_STEREO_PIN_ENUM_ID1 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
669 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
670 GENERIC_OBJECT_ID_STEREO_PIN << OBJECT_ID_SHIFT)
671
624/****************************************************/ 672/****************************************************/
625/* Object Cap definition - Shared with BIOS */ 673/* Object Cap definition - Shared with BIOS */
626/****************************************************/ 674/****************************************************/
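Every ID in this header is composed the same way: object type, enum id and object id packed into one value by shifting. A worked example for one of the new encoders, assuming the shift and type values defined earlier in the file (OBJECT_TYPE_SHIFT 12, ENUM_ID_SHIFT 8, OBJECT_ID_SHIFT 0, GRAPH_OBJECT_TYPE_ENCODER 0x2, GRAPH_OBJECT_ENUM_ID1 0x1 -- none of these definitions appear in this hunk, so treat them as assumptions):

/* ENCODER_TRAVIS_ENUM_ID1, expanded:
 *   GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT = 0x2  << 12 = 0x2000
 * | GRAPH_OBJECT_ENUM_ID1     << ENUM_ID_SHIFT     = 0x1  << 8  = 0x0100
 * | ENCODER_OBJECT_ID_TRAVIS  << OBJECT_ID_SHIFT   = 0x23 << 0  = 0x0023
 *                                                               --------
 *                                                                 0x2123
 */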
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 05efb5b9f13e..258fa5e7a2d9 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -734,16 +734,16 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
734static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg) 734static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
735{ 735{
736 uint8_t attr = U8((*ptr)++); 736 uint8_t attr = U8((*ptr)++);
737 uint32_t dst, src1, src2, saved; 737 uint32_t dst, mask, src, saved;
738 int dptr = *ptr; 738 int dptr = *ptr;
739 SDEBUG(" dst: "); 739 SDEBUG(" dst: ");
740 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 740 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
741 SDEBUG(" src1: "); 741 mask = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
742 src1 = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr); 742 SDEBUG(" mask: 0x%08x", mask);
743 SDEBUG(" src2: "); 743 SDEBUG(" src: ");
744 src2 = atom_get_src(ctx, attr, ptr); 744 src = atom_get_src(ctx, attr, ptr);
745 dst &= src1; 745 dst &= mask;
746 dst |= src2; 746 dst |= src;
747 SDEBUG(" dst: "); 747 SDEBUG(" dst: ");
748 atom_put_dst(ctx, arg, attr, &dptr, dst, saved); 748 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
749} 749}
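The atom.c change is a pure rename, but it makes the semantics of the mask opcode obvious: a read-modify-write, dst = (dst & mask) | src. A small worked example:

/* Mask op semantics from atom_op_mask() above:
 *   dst = (dst & mask) | src;
 * e.g. dst = 0xdeadbeef, mask = 0xffff0000, src = 0x0000cafe:
 *   (0xdeadbeef & 0xffff0000) | 0x0000cafe == 0xdeadcafe
 */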
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index fe359a239df3..58a0cd02c0a2 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -73,8 +73,18 @@
73#define ATOM_PPLL1 0 73#define ATOM_PPLL1 0
74#define ATOM_PPLL2 1 74#define ATOM_PPLL2 1
75#define ATOM_DCPLL 2 75#define ATOM_DCPLL 2
76#define ATOM_PPLL0 2
77#define ATOM_EXT_PLL1 8
78#define ATOM_EXT_PLL2 9
79#define ATOM_EXT_CLOCK 10
76#define ATOM_PPLL_INVALID 0xFF 80#define ATOM_PPLL_INVALID 0xFF
77 81
82#define ENCODER_REFCLK_SRC_P1PLL 0
83#define ENCODER_REFCLK_SRC_P2PLL 1
84#define ENCODER_REFCLK_SRC_DCPLL 2
85#define ENCODER_REFCLK_SRC_EXTCLK 3
86#define ENCODER_REFCLK_SRC_INVALID 0xFF
87
78#define ATOM_SCALER1 0 88#define ATOM_SCALER1 0
79#define ATOM_SCALER2 1 89#define ATOM_SCALER2 1
80 90
@@ -192,6 +202,9 @@ typedef struct _ATOM_COMMON_TABLE_HEADER
192 /*Image can't be updated, while Driver needs to carry the new table! */ 202 /*Image can't be updated, while Driver needs to carry the new table! */
193}ATOM_COMMON_TABLE_HEADER; 203}ATOM_COMMON_TABLE_HEADER;
194 204
205/****************************************************************************/
206// Structure stores the ROM header.
207/****************************************************************************/
195typedef struct _ATOM_ROM_HEADER 208typedef struct _ATOM_ROM_HEADER
196{ 209{
197 ATOM_COMMON_TABLE_HEADER sHeader; 210 ATOM_COMMON_TABLE_HEADER sHeader;
@@ -221,6 +234,9 @@ typedef struct _ATOM_ROM_HEADER
221 #define USHORT void* 234 #define USHORT void*
222#endif 235#endif
223 236
237/****************************************************************************/
238// Structures used in Command.mtb
239/****************************************************************************/
224typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{ 240typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
225 USHORT ASIC_Init; //Function Table, used by various SW components,latest version 1.1 241 USHORT ASIC_Init; //Function Table, used by various SW components,latest version 1.1
226 USHORT GetDisplaySurfaceSize; //Atomic Table, Used by Bios when enabling HW ICON 242 USHORT GetDisplaySurfaceSize; //Atomic Table, Used by Bios when enabling HW ICON
@@ -312,6 +328,7 @@ typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
312#define SetUniphyInstance ASIC_StaticPwrMgtStatusChange 328#define SetUniphyInstance ASIC_StaticPwrMgtStatusChange
313#define HPDInterruptService ReadHWAssistedI2CStatus 329#define HPDInterruptService ReadHWAssistedI2CStatus
314#define EnableVGA_Access GetSCLKOverMCLKRatio 330#define EnableVGA_Access GetSCLKOverMCLKRatio
331#define GetDispObjectInfo EnableYUV
315 332
316typedef struct _ATOM_MASTER_COMMAND_TABLE 333typedef struct _ATOM_MASTER_COMMAND_TABLE
317{ 334{
@@ -357,6 +374,24 @@ typedef struct _ATOM_COMMON_ROM_COMMAND_TABLE_HEADER
357/****************************************************************************/ 374/****************************************************************************/
358#define COMPUTE_MEMORY_PLL_PARAM 1 375#define COMPUTE_MEMORY_PLL_PARAM 1
359#define COMPUTE_ENGINE_PLL_PARAM 2 376#define COMPUTE_ENGINE_PLL_PARAM 2
377#define ADJUST_MC_SETTING_PARAM 3
378
379/****************************************************************************/
380// Structures used by AdjustMemoryControllerTable
381/****************************************************************************/
382typedef struct _ATOM_ADJUST_MEMORY_CLOCK_FREQ
383{
384#if ATOM_BIG_ENDIAN
385 ULONG ulPointerReturnFlag:1; // BYTE_3[7]=1 - Return the pointer to the right Data Block; BYTE_3[7]=0 - Program the right Data Block
386 ULONG ulMemoryModuleNumber:7; // BYTE_3[6:0]
387 ULONG ulClockFreq:24;
388#else
389 ULONG ulClockFreq:24;
390 ULONG ulMemoryModuleNumber:7; // BYTE_3[6:0]
391 ULONG ulPointerReturnFlag:1; // BYTE_3[7]=1 - Return the pointer to the right Data Block; BYTE_3[7]=0 - Program the right Data Block
392#endif
393}ATOM_ADJUST_MEMORY_CLOCK_FREQ;
394#define POINTER_RETURN_FLAG 0x80
360 395
361typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS 396typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS
362{ 397{
@@ -440,6 +475,26 @@ typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4
440#endif 475#endif
441}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4; 476}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4;
442 477
478typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5
479{
480 union
481 {
482 ATOM_COMPUTE_CLOCK_FREQ ulClock; //Input Parameter
483 ATOM_S_MPLL_FB_DIVIDER ulFbDiv; //Output Parameter
484 };
485 UCHAR ucRefDiv; //Output Parameter
486 UCHAR ucPostDiv; //Output Parameter
487 union
488 {
489 UCHAR ucCntlFlag; //Output Flags
490 UCHAR ucInputFlag; //Input Flags. ucInputFlag[0] - Strobe(1)/Performance(0) mode
491 };
492 UCHAR ucReserved;
493}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5;
494
495// ucInputFlag
496#define ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN 1 // 1-StrobeMode, 0-PerformanceMode
497
443typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER 498typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER
444{ 499{
445 ATOM_COMPUTE_CLOCK_FREQ ulClock; 500 ATOM_COMPUTE_CLOCK_FREQ ulClock;
@@ -583,6 +638,7 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS
583#define ATOM_ENCODER_CONFIG_DPLINKRATE_MASK 0x01 638#define ATOM_ENCODER_CONFIG_DPLINKRATE_MASK 0x01
584#define ATOM_ENCODER_CONFIG_DPLINKRATE_1_62GHZ 0x00 639#define ATOM_ENCODER_CONFIG_DPLINKRATE_1_62GHZ 0x00
585#define ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ 0x01 640#define ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ 0x01
641#define ATOM_ENCODER_CONFIG_DPLINKRATE_5_40GHZ 0x02
586#define ATOM_ENCODER_CONFIG_LINK_SEL_MASK 0x04 642#define ATOM_ENCODER_CONFIG_LINK_SEL_MASK 0x04
587#define ATOM_ENCODER_CONFIG_LINKA 0x00 643#define ATOM_ENCODER_CONFIG_LINKA 0x00
588#define ATOM_ENCODER_CONFIG_LINKB 0x04 644#define ATOM_ENCODER_CONFIG_LINKB 0x04
@@ -608,6 +664,9 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS
608#define ATOM_ENCODER_MODE_TV 13 664#define ATOM_ENCODER_MODE_TV 13
609#define ATOM_ENCODER_MODE_CV 14 665#define ATOM_ENCODER_MODE_CV 14
610#define ATOM_ENCODER_MODE_CRT 15 666#define ATOM_ENCODER_MODE_CRT 15
667#define ATOM_ENCODER_MODE_DVO 16
668#define ATOM_ENCODER_MODE_DP_SST ATOM_ENCODER_MODE_DP // For DP1.2
669#define ATOM_ENCODER_MODE_DP_MST 5 // For DP1.2
611 670
612typedef struct _ATOM_DIG_ENCODER_CONFIG_V2 671typedef struct _ATOM_DIG_ENCODER_CONFIG_V2
613{ 672{
@@ -661,6 +720,7 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2
661#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_START 0x08 720#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_START 0x08
662#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1 0x09 721#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1 0x09
663#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2 0x0a 722#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2 0x0a
723#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN3 0x13
664#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE 0x0b 724#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE 0x0b
665#define ATOM_ENCODER_CMD_DP_VIDEO_OFF 0x0c 725#define ATOM_ENCODER_CMD_DP_VIDEO_OFF 0x0c
666#define ATOM_ENCODER_CMD_DP_VIDEO_ON 0x0d 726#define ATOM_ENCODER_CMD_DP_VIDEO_ON 0x0d
@@ -671,24 +731,34 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2
671#define ATOM_ENCODER_STATUS_LINK_TRAINING_COMPLETE 0x10 731#define ATOM_ENCODER_STATUS_LINK_TRAINING_COMPLETE 0x10
672#define ATOM_ENCODER_STATUS_LINK_TRAINING_INCOMPLETE 0x00 732#define ATOM_ENCODER_STATUS_LINK_TRAINING_INCOMPLETE 0x00
673 733
734//ucTableFormatRevision=1
735//ucTableContentRevision=3
674// Following function ENABLE sub-function will be used by driver when TMDS/HDMI/LVDS is used, disable function will be used by driver 736// Following function ENABLE sub-function will be used by driver when TMDS/HDMI/LVDS is used, disable function will be used by driver
675typedef struct _ATOM_DIG_ENCODER_CONFIG_V3 737typedef struct _ATOM_DIG_ENCODER_CONFIG_V3
676{ 738{
677#if ATOM_BIG_ENDIAN 739#if ATOM_BIG_ENDIAN
678 UCHAR ucReserved1:1; 740 UCHAR ucReserved1:1;
679 UCHAR ucDigSel:3; // =0: DIGA/B/C/D/E/F 741 UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred to as DIGA/B/C/D/E/F)
680 UCHAR ucReserved:3; 742 UCHAR ucReserved:3;
681 UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz 743 UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz
682#else 744#else
683 UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz 745 UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz
684 UCHAR ucReserved:3; 746 UCHAR ucReserved:3;
685 UCHAR ucDigSel:3; // =0: DIGA/B/C/D/E/F 747 UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred to as DIGA/B/C/D/E/F)
686 UCHAR ucReserved1:1; 748 UCHAR ucReserved1:1;
687#endif 749#endif
688}ATOM_DIG_ENCODER_CONFIG_V3; 750}ATOM_DIG_ENCODER_CONFIG_V3;
689 751
752#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_MASK 0x03
753#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_1_62GHZ 0x00
754#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ 0x01
690#define ATOM_ENCODER_CONFIG_V3_ENCODER_SEL 0x70 755#define ATOM_ENCODER_CONFIG_V3_ENCODER_SEL 0x70
691 756#define ATOM_ENCODER_CONFIG_V3_DIG0_ENCODER 0x00
757#define ATOM_ENCODER_CONFIG_V3_DIG1_ENCODER 0x10
758#define ATOM_ENCODER_CONFIG_V3_DIG2_ENCODER 0x20
759#define ATOM_ENCODER_CONFIG_V3_DIG3_ENCODER 0x30
760#define ATOM_ENCODER_CONFIG_V3_DIG4_ENCODER 0x40
761#define ATOM_ENCODER_CONFIG_V3_DIG5_ENCODER 0x50
692 762
693typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V3 763typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V3
694{ 764{
@@ -707,6 +777,56 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V3
707 UCHAR ucReserved; 777 UCHAR ucReserved;
708}DIG_ENCODER_CONTROL_PARAMETERS_V3; 778}DIG_ENCODER_CONTROL_PARAMETERS_V3;
709 779
780//ucTableFormatRevision=1
781//ucTableContentRevision=4
782// start from NI
783// Following function ENABLE sub-function will be used by driver when TMDS/HDMI/LVDS is used, disable function will be used by driver
784typedef struct _ATOM_DIG_ENCODER_CONFIG_V4
785{
786#if ATOM_BIG_ENDIAN
787 UCHAR ucReserved1:1;
788 UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred to as DIGA/B/C/D/E/F)
789 UCHAR ucReserved:2;
790 UCHAR ucDPLinkRate:2; // =0: 1.62Ghz, =1: 2.7Ghz, 2=5.4Ghz <= Changed comparing to previous version
791#else
792 UCHAR ucDPLinkRate:2; // =0: 1.62Ghz, =1: 2.7Ghz, 2=5.4Ghz <= Changed comparing to previous version
793 UCHAR ucReserved:2;
794 UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred to as DIGA/B/C/D/E/F)
795 UCHAR ucReserved1:1;
796#endif
797}ATOM_DIG_ENCODER_CONFIG_V4;
798
799#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_MASK 0x03
800#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_1_62GHZ 0x00
801#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ 0x01
802#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ 0x02
803#define ATOM_ENCODER_CONFIG_V4_ENCODER_SEL 0x70
804#define ATOM_ENCODER_CONFIG_V4_DIG0_ENCODER 0x00
805#define ATOM_ENCODER_CONFIG_V4_DIG1_ENCODER 0x10
806#define ATOM_ENCODER_CONFIG_V4_DIG2_ENCODER 0x20
807#define ATOM_ENCODER_CONFIG_V4_DIG3_ENCODER 0x30
808#define ATOM_ENCODER_CONFIG_V4_DIG4_ENCODER 0x40
809#define ATOM_ENCODER_CONFIG_V4_DIG5_ENCODER 0x50
810
811typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V4
812{
813 USHORT usPixelClock; // in 10KHz; for bios convenient
814 union{
815 ATOM_DIG_ENCODER_CONFIG_V4 acConfig;
816 UCHAR ucConfig;
817 };
818 UCHAR ucAction;
819 UCHAR ucEncoderMode;
820 // =0: DP encoder
821 // =1: LVDS encoder
822 // =2: DVI encoder
823 // =3: HDMI encoder
824 // =4: SDVO encoder
825 // =5: DP audio
826 UCHAR ucLaneNum; // how many lanes to enable
827 UCHAR ucBitPerColor; // only valid for DP mode when ucAction = ATOM_ENCODER_CMD_SETUP
828 UCHAR ucHPD_ID; // HPD ID (1-6). =0 means to skip HPD programming. New compared to previous version
829}DIG_ENCODER_CONTROL_PARAMETERS_V4;
710 830
711// define ucBitPerColor: 831// define ucBitPerColor:
712#define PANEL_BPC_UNDEFINE 0x00 832#define PANEL_BPC_UNDEFINE 0x00
@@ -893,6 +1013,7 @@ typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V3
893#endif 1013#endif
894}ATOM_DIG_TRANSMITTER_CONFIG_V3; 1014}ATOM_DIG_TRANSMITTER_CONFIG_V3;
895 1015
1016
896typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 1017typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V3
897{ 1018{
898 union 1019 union
@@ -936,6 +1057,149 @@ typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V3
936#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER2 0x40 //CD 1057#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER2 0x40 //CD
937#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER3 0x80 //EF 1058#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER3 0x80 //EF
938 1059
1060
1061/****************************************************************************/
1062// Structures used by UNIPHYTransmitterControlTable V1.4
1063// ASIC Families: NI
1064// ucTableFormatRevision=1
1065// ucTableContentRevision=4
1066/****************************************************************************/
1067typedef struct _ATOM_DP_VS_MODE_V4
1068{
1069 UCHAR ucLaneSel;
1070 union
1071 {
1072 UCHAR ucLaneSet;
1073 struct {
1074#if ATOM_BIG_ENDIAN
1075 UCHAR ucPOST_CURSOR2:2; //Bit[7:6] Post Cursor2 Level <= New in V4
1076 UCHAR ucPRE_EMPHASIS:3; //Bit[5:3] Pre-emphasis Level
1077 UCHAR ucVOLTAGE_SWING:3; //Bit[2:0] Voltage Swing Level
1078#else
1079 UCHAR ucVOLTAGE_SWING:3; //Bit[2:0] Voltage Swing Level
1080 UCHAR ucPRE_EMPHASIS:3; //Bit[5:3] Pre-emphasis Level
1081 UCHAR ucPOST_CURSOR2:2; //Bit[7:6] Post Cursor2 Level <= New in V4
1082#endif
1083 };
1084 };
1085}ATOM_DP_VS_MODE_V4;
1086
1087typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V4
1088{
1089#if ATOM_BIG_ENDIAN
1090 UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
1091 // =1 Dig Transmitter 2 ( Uniphy CD )
1092 // =2 Dig Transmitter 3 ( Uniphy EF )
1093 UCHAR ucRefClkSource:2; //bit5:4: PPLL1 =0, PPLL2=1, DCPLL=2, EXT_CLK=3 <= New
1094 UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F
1095 UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
1096 // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
1097 UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode )
1098 UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector
1099#else
1100 UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector
1101 UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode )
1102 UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
1103 // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
1104 UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F
1105 UCHAR ucRefClkSource:2; //bit5:4: PPLL1 =0, PPLL2=1, DCPLL=2, EXT_CLK=3 <= New
1106 UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
1107 // =1 Dig Transmitter 2 ( Uniphy CD )
1108 // =2 Dig Transmitter 3 ( Uniphy EF )
1109#endif
1110}ATOM_DIG_TRANSMITTER_CONFIG_V4;
1111
1112typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V4
1113{
1114 union
1115 {
1116 USHORT usPixelClock; // in 10KHz; for bios convenient
1117 USHORT usInitInfo; // when init uniphy,lower 8bit is used for connector type defined in objectid.h
1118 ATOM_DP_VS_MODE_V4 asMode; // DP Voltage swing mode Redefined comparing to previous version
1119 };
1120 union
1121 {
1122 ATOM_DIG_TRANSMITTER_CONFIG_V4 acConfig;
1123 UCHAR ucConfig;
1124 };
1125 UCHAR ucAction; // define as ATOM_TRANSMITER_ACTION_XXX
1126 UCHAR ucLaneNum;
1127 UCHAR ucReserved[3];
1128}DIG_TRANSMITTER_CONTROL_PARAMETERS_V4;
1129
1130//ucConfig
1131//Bit0
1132#define ATOM_TRANSMITTER_CONFIG_V4_DUAL_LINK_CONNECTOR 0x01
1133//Bit1
1134#define ATOM_TRANSMITTER_CONFIG_V4_COHERENT 0x02
1135//Bit2
1136#define ATOM_TRANSMITTER_CONFIG_V4_LINK_SEL_MASK 0x04
1137#define ATOM_TRANSMITTER_CONFIG_V4_LINKA 0x00
1138#define ATOM_TRANSMITTER_CONFIG_V4_LINKB 0x04
1139// Bit3
1140#define ATOM_TRANSMITTER_CONFIG_V4_ENCODER_SEL_MASK 0x08
1141#define ATOM_TRANSMITTER_CONFIG_V4_DIG1_ENCODER 0x00
1142#define ATOM_TRANSMITTER_CONFIG_V4_DIG2_ENCODER 0x08
1143// Bit5:4
1144#define ATOM_TRANSMITTER_CONFIG_V4_REFCLK_SEL_MASK 0x30
1145#define ATOM_TRANSMITTER_CONFIG_V4_P1PLL 0x00
1146#define ATOM_TRANSMITTER_CONFIG_V4_P2PLL 0x10
1147#define ATOM_TRANSMITTER_CONFIG_V4_DCPLL 0x20 // New in _V4
1148#define ATOM_TRANSMITTER_CONFIG_V4_REFCLK_SRC_EXT 0x30 // Changed comparing to V3
1149// Bit7:6
1150#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER_SEL_MASK 0xC0
1151#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER1 0x00 //AB
1152#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER2 0x40 //CD
1153#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER3 0x80 //EF
1154
1155
1156/****************************************************************************/
1157// Structures used by ExternalEncoderControlTable V1.3
1158// ASIC Families: Evergreen, Llano, NI
1159// ucTableFormatRevision=1
1160// ucTableContentRevision=3
1161/****************************************************************************/
1162
1163typedef struct _EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3
1164{
1165 union{
1166 USHORT usPixelClock; // pixel clock in 10Khz, valid when ucAction=SETUP/ENABLE_OUTPUT
1167 USHORT usConnectorId; // connector id, valid when ucAction = INIT
1168 };
1169 UCHAR ucConfig; // indicate which encoder, and DP link rate when ucAction = SETUP/ENABLE_OUTPUT
1170 UCHAR ucAction; //
1171 UCHAR ucEncoderMode; // encoder mode, only used when ucAction = SETUP/ENABLE_OUTPUT
1172 UCHAR ucLaneNum; // lane number, only used when ucAction = SETUP/ENABLE_OUTPUT
1173 UCHAR ucBitPerColor; // output bit per color, only valid when ucAction = SETUP/ENABLE_OUTPUT and ucEncodeMode= DP
1174 UCHAR ucReserved;
1175}EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3;
1176
1177// ucAction
1178#define EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT 0x00
1179#define EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT 0x01
1180#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT 0x07
1181#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP 0x0f
1182#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF 0x10
1183#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING 0x11
1184#define EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION 0x12
1185
1186// ucConfig
1187#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_MASK 0x03
1188#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_1_62GHZ 0x00
1189#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ 0x01
1190#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ 0x02
1191#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER_SEL_MASK 0x70
1192#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER1 0x00
1193#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER2 0x10
1194#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER3 0x20
1195
1196typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3
1197{
1198 EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3 sExtEncoder;
1199 ULONG ulReserved[2];
1200}EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3;
1201
1202
939/****************************************************************************/ 1203/****************************************************************************/
940// Structures used by DAC1OuputControlTable 1204// Structures used by DAC1OuputControlTable
941// DAC2OuputControlTable 1205// DAC2OuputControlTable
@@ -1142,6 +1406,7 @@ typedef struct _PIXEL_CLOCK_PARAMETERS_V2
1142#define PIXEL_CLOCK_V4_MISC_SS_ENABLE 0x10 1406#define PIXEL_CLOCK_V4_MISC_SS_ENABLE 0x10
1143#define PIXEL_CLOCK_V4_MISC_COHERENT_MODE 0x20 1407#define PIXEL_CLOCK_V4_MISC_COHERENT_MODE 0x20
1144 1408
1409
1145typedef struct _PIXEL_CLOCK_PARAMETERS_V3 1410typedef struct _PIXEL_CLOCK_PARAMETERS_V3
1146{ 1411{
1147 USHORT usPixelClock; // in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div) 1412 USHORT usPixelClock; // in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div)
@@ -1202,6 +1467,55 @@ typedef struct _PIXEL_CLOCK_PARAMETERS_V5
1202#define PIXEL_CLOCK_V5_MISC_HDMI_32BPP 0x08 1467#define PIXEL_CLOCK_V5_MISC_HDMI_32BPP 0x08
1203#define PIXEL_CLOCK_V5_MISC_REF_DIV_SRC 0x10 1468#define PIXEL_CLOCK_V5_MISC_REF_DIV_SRC 0x10
1204 1469
1470typedef struct _CRTC_PIXEL_CLOCK_FREQ
1471{
1472#if ATOM_BIG_ENDIAN
1473 ULONG ucCRTC:8; // ATOM_CRTC1~6, indicate the CRTC controller to
1474 // drive the pixel clock. not used for DCPLL case.
1475 ULONG ulPixelClock:24; // target the pixel clock to drive the CRTC timing.
1476 // 0 means disable PPLL/DCPLL. Expanded to 24 bits comparing to previous version.
1477#else
1478 ULONG ulPixelClock:24; // target the pixel clock to drive the CRTC timing.
1479 // 0 means disable PPLL/DCPLL. Expanded to 24 bits comparing to previous version.
1480 ULONG ucCRTC:8; // ATOM_CRTC1~6, indicate the CRTC controller to
1481 // drive the pixel clock. not used for DCPLL case.
1482#endif
1483}CRTC_PIXEL_CLOCK_FREQ;
1484
1485typedef struct _PIXEL_CLOCK_PARAMETERS_V6
1486{
1487 union{
1488 CRTC_PIXEL_CLOCK_FREQ ulCrtcPclkFreq; // pixel clock and CRTC id frequency
1489 ULONG ulDispEngClkFreq; // dispclk frequency
1490 };
1491 USHORT usFbDiv; // feedback divider integer part.
1492 UCHAR ucPostDiv; // post divider.
1493 UCHAR ucRefDiv; // Reference divider
1494 UCHAR ucPpll; // ATOM_PPLL1/ATOM_PPLL2/ATOM_DCPLL
1495 UCHAR ucTransmitterID; // ASIC encoder id defined in objectId.h,
1496 // indicates which graphics encoder will be used.
1497 UCHAR ucEncoderMode; // Encoder mode:
1498 UCHAR ucMiscInfo; // bit[0]= Force program PPLL
1499 // bit[1]= set when VGA timing is used.
1500 // bit[3:2]= HDMI panel bit depth: =0:24bpp, =1:36bpp, =2:30bpp, =3:48bpp (matches the PIXEL_CLOCK_V6_MISC_HDMI defines below)
1501 // bit[4]= RefClock source for PPLL.
1502 // =0: XTLAIN( default mode )
1503 // =1: other external clock source, which is pre-defined
1504 // by VBIOS depend on the feature required.
1505 // bit[7:5]: reserved.
1506 ULONG ulFbDivDecFrac; // 20 bit feedback divider decimal fraction part, range from 1~999999 ( 0.000001 to 0.999999 )
1507
1508}PIXEL_CLOCK_PARAMETERS_V6;
1509
1510#define PIXEL_CLOCK_V6_MISC_FORCE_PROG_PPLL 0x01
1511#define PIXEL_CLOCK_V6_MISC_VGA_MODE 0x02
1512#define PIXEL_CLOCK_V6_MISC_HDMI_BPP_MASK 0x0c
1513#define PIXEL_CLOCK_V6_MISC_HDMI_24BPP 0x00
1514#define PIXEL_CLOCK_V6_MISC_HDMI_36BPP 0x04
1515#define PIXEL_CLOCK_V6_MISC_HDMI_30BPP 0x08
1516#define PIXEL_CLOCK_V6_MISC_HDMI_48BPP 0x0c
1517#define PIXEL_CLOCK_V6_MISC_REF_DIV_SRC 0x10
1518
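A short sketch (not from the patch) of filling the v6 parameters for a 154 MHz HDMI pixel clock on CRTC1; ATOM_CRTC1, ATOM_PPLL1 and ATOM_ENCODER_MODE_HDMI are defined elsewhere in this header, and the 10kHz unit follows the field comments above.

static void pixel_clock_v6_sketch(void)
{
	PIXEL_CLOCK_PARAMETERS_V6 args;

	memset(&args, 0, sizeof(args));
	args.ulCrtcPclkFreq.ucCRTC = ATOM_CRTC1;
	args.ulCrtcPclkFreq.ulPixelClock = 15400;	/* 154 MHz in 10kHz units */
	args.ucPpll = ATOM_PPLL1;
	args.ucEncoderMode = ATOM_ENCODER_MODE_HDMI;
	args.ucMiscInfo = PIXEL_CLOCK_V6_MISC_HDMI_30BPP;	/* bit[3:2] deep color */
}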
1205typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V2 1519typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V2
1206{ 1520{
1207 PIXEL_CLOCK_PARAMETERS_V3 sDispClkInput; 1521 PIXEL_CLOCK_PARAMETERS_V3 sDispClkInput;
@@ -1241,10 +1555,11 @@ typedef struct _ADJUST_DISPLAY_PLL_PARAMETERS
1241typedef struct _ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3 1555typedef struct _ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3
1242{ 1556{
1243 USHORT usPixelClock; // target pixel clock 1557 USHORT usPixelClock; // target pixel clock
1244 UCHAR ucTransmitterID; // transmitter id defined in objectid.h 1558 UCHAR ucTransmitterID; // GPU transmitter id defined in objectid.h
1245 UCHAR ucEncodeMode; // encoder mode: CRT, LVDS, DP, TMDS or HDMI 1559 UCHAR ucEncodeMode; // encoder mode: CRT, LVDS, DP, TMDS or HDMI
1246 UCHAR ucDispPllConfig; // display pll configure parameter defined as following DISPPLL_CONFIG_XXXX 1560 UCHAR ucDispPllConfig; // display pll configure parameter defined as following DISPPLL_CONFIG_XXXX
1247 UCHAR ucReserved[3]; 1561 UCHAR ucExtTransmitterID; // external encoder id.
1562 UCHAR ucReserved[2];
1248}ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3; 1563}ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3;
1249 1564
1250// usDispPllConfig v1.2 for RoadRunner 1565// usDispPllConfig v1.2 for RoadRunner
@@ -1358,6 +1673,7 @@ typedef struct _SET_UP_HW_I2C_DATA_PARAMETERS
1358/**************************************************************************/ 1673/**************************************************************************/
1359#define SPEED_FAN_CONTROL_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS 1674#define SPEED_FAN_CONTROL_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
1360 1675
1676
1361/****************************************************************************/ 1677/****************************************************************************/
1362// Structures used by PowerConnectorDetectionTable 1678// Structures used by PowerConnectorDetectionTable
1363/****************************************************************************/ 1679/****************************************************************************/
@@ -1438,6 +1754,31 @@ typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2
1438#define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK 0x0F00 1754#define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK 0x0F00
1439#define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT 8 1755#define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT 8
1440 1756
1757// Used by DCE5.0
1758 typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3
1759{
1760 USHORT usSpreadSpectrumAmountFrac; // SS_AMOUNT_DSFRAC New in DCE5.0
1761 UCHAR ucSpreadSpectrumType; // Bit[0]: 0-Down Spread,1-Center Spread.
1762 // Bit[1]: 1-Ext. 0-Int.
1763 // Bit[3:2]: =0 P1PLL =1 P2PLL =2 DCPLL
1764 // Bits[7:4] reserved
1765 UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
1766 USHORT usSpreadSpectrumAmount; // Includes SS_AMOUNT_FBDIV[7:0] and SS_AMOUNT_NFRAC_SLIP[11:8]
1767 USHORT usSpreadSpectrumStep; // SS_STEP_SIZE_DSFRAC
1768}ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3;
1769
1770#define ATOM_PPLL_SS_TYPE_V3_DOWN_SPREAD 0x00
1771#define ATOM_PPLL_SS_TYPE_V3_CENTRE_SPREAD 0x01
1772#define ATOM_PPLL_SS_TYPE_V3_EXT_SPREAD 0x02
1773#define ATOM_PPLL_SS_TYPE_V3_PPLL_SEL_MASK 0x0c
1774#define ATOM_PPLL_SS_TYPE_V3_P1PLL 0x00
1775#define ATOM_PPLL_SS_TYPE_V3_P2PLL 0x04
1776#define ATOM_PPLL_SS_TYPE_V3_DCPLL 0x08
1777#define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_MASK 0x00FF
1778#define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_SHIFT 0
1779#define ATOM_PPLL_SS_AMOUNT_V3_NFRAC_MASK 0x0F00
1780#define ATOM_PPLL_SS_AMOUNT_V3_NFRAC_SHIFT 8
1781
1441#define ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION ENABLE_SPREAD_SPECTRUM_ON_PPLL 1782#define ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION ENABLE_SPREAD_SPECTRUM_ON_PPLL
1442 1783
1443/**************************************************************************/ 1784/**************************************************************************/
@@ -1706,7 +2047,7 @@ typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES
1706 USHORT StandardVESA_Timing; // Only used by Bios 2047 USHORT StandardVESA_Timing; // Only used by Bios
1707 USHORT FirmwareInfo; // Shared by various SW components,latest version 1.4 2048 USHORT FirmwareInfo; // Shared by various SW components,latest version 1.4
1708 USHORT DAC_Info; // Will be obsolete from R600 2049 USHORT DAC_Info; // Will be obsolete from R600
1709 USHORT LVDS_Info; // Shared by various SW components,latest version 1.1 2050 USHORT LCD_Info; // Shared by various SW components,latest version 1.3, was called LVDS_Info
1710 USHORT TMDS_Info; // Will be obsolete from R600 2051 USHORT TMDS_Info; // Will be obsolete from R600
1711 USHORT AnalogTV_Info; // Shared by various SW components,latest version 1.1 2052 USHORT AnalogTV_Info; // Shared by various SW components,latest version 1.1
1712 USHORT SupportedDevicesInfo; // Will be obsolete from R600 2053 USHORT SupportedDevicesInfo; // Will be obsolete from R600
@@ -1736,12 +2077,16 @@ typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES
1736 USHORT PowerSourceInfo; // Shared by various SW components, latest versoin 1.1 2077 USHORT PowerSourceInfo; // Shared by various SW components, latest versoin 1.1
1737}ATOM_MASTER_LIST_OF_DATA_TABLES; 2078}ATOM_MASTER_LIST_OF_DATA_TABLES;
1738 2079
2080// For backward compatible
2081#define LVDS_Info LCD_Info
2082
1739typedef struct _ATOM_MASTER_DATA_TABLE 2083typedef struct _ATOM_MASTER_DATA_TABLE
1740{ 2084{
1741 ATOM_COMMON_TABLE_HEADER sHeader; 2085 ATOM_COMMON_TABLE_HEADER sHeader;
1742 ATOM_MASTER_LIST_OF_DATA_TABLES ListOfDataTables; 2086 ATOM_MASTER_LIST_OF_DATA_TABLES ListOfDataTables;
1743}ATOM_MASTER_DATA_TABLE; 2087}ATOM_MASTER_DATA_TABLE;
1744 2088
2089
1745/****************************************************************************/ 2090/****************************************************************************/
1746// Structure used in MultimediaCapabilityInfoTable 2091// Structure used in MultimediaCapabilityInfoTable
1747/****************************************************************************/ 2092/****************************************************************************/
@@ -1776,6 +2121,7 @@ typedef struct _ATOM_MULTIMEDIA_CONFIG_INFO
1776 UCHAR ucVideoInput4Info;// Video Input 4 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) 2121 UCHAR ucVideoInput4Info;// Video Input 4 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
1777}ATOM_MULTIMEDIA_CONFIG_INFO; 2122}ATOM_MULTIMEDIA_CONFIG_INFO;
1778 2123
2124
1779/****************************************************************************/ 2125/****************************************************************************/
1780// Structures used in FirmwareInfoTable 2126// Structures used in FirmwareInfoTable
1781/****************************************************************************/ 2127/****************************************************************************/
@@ -2031,8 +2377,47 @@ typedef struct _ATOM_FIRMWARE_INFO_V2_1
2031 UCHAR ucReserved4[3]; 2377 UCHAR ucReserved4[3];
2032}ATOM_FIRMWARE_INFO_V2_1; 2378}ATOM_FIRMWARE_INFO_V2_1;
2033 2379
2380//the structure below is to be used from NI onward
2381//ucTableFormatRevision=2
2382//ucTableContentRevision=2
2383typedef struct _ATOM_FIRMWARE_INFO_V2_2
2384{
2385 ATOM_COMMON_TABLE_HEADER sHeader;
2386 ULONG ulFirmwareRevision;
2387 ULONG ulDefaultEngineClock; //In 10Khz unit
2388 ULONG ulDefaultMemoryClock; //In 10Khz unit
2389 ULONG ulReserved[2];
2390 ULONG ulReserved1; //Was ulMaxEngineClockPLL_Output; //In 10Khz unit*
2391 ULONG ulReserved2; //Was ulMaxMemoryClockPLL_Output; //In 10Khz unit*
2392 ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit
2393 ULONG ulBinaryAlteredInfo; //Was ulASICMaxEngineClock ?
2394 ULONG ulDefaultDispEngineClkFreq; //In 10Khz unit. This is the frequency before DCDTO, corresponding to usBootUpVDDCVoltage.
2395 UCHAR ucReserved3; //Was ucASICMaxTemperature;
2396 UCHAR ucMinAllowedBL_Level;
2397 USHORT usBootUpVDDCVoltage; //In MV unit
2398 USHORT usLcdMinPixelClockPLL_Output; // In MHz unit
2399 USHORT usLcdMaxPixelClockPLL_Output; // In MHz unit
2400 ULONG ulReserved4; //Was ulAsicMaximumVoltage
2401 ULONG ulMinPixelClockPLL_Output; //In 10Khz unit
2402 ULONG ulReserved5; //Was usMinEngineClockPLL_Input and usMaxEngineClockPLL_Input
2403 ULONG ulReserved6; //Was usMinEngineClockPLL_Output and usMinMemoryClockPLL_Input
2404 ULONG ulReserved7; //Was usMaxMemoryClockPLL_Input and usMinMemoryClockPLL_Output
2405 USHORT usReserved11; //Was usMaxPixelClock; //In 10Khz unit, Max. Pclk used only for DAC
2406 USHORT usMinPixelClockPLL_Input; //In 10Khz unit
2407 USHORT usMaxPixelClockPLL_Input; //In 10Khz unit
2408 USHORT usBootUpVDDCIVoltage; //In unit of mv; Was usMinPixelClockPLL_Output;
2409 ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
2410 USHORT usCoreReferenceClock; //In 10Khz unit
2411 USHORT usMemoryReferenceClock; //In 10Khz unit
2412 USHORT usUniphyDPModeExtClkFreq; //In 10Khz unit; if it is 0, in DP mode the Uniphy input clock comes from the internal PPLL, otherwise from the external spread clock
2413 UCHAR ucMemoryModule_ID; //Indicate what is the board design
2414 UCHAR ucReserved9[3];
2415 USHORT usBootUpMVDDCVoltage; //In unit of mv; Was usMinPixelClockPLL_Output;
2416 USHORT usReserved12;
2417 ULONG ulReserved10[3]; // Newly added compared to the previous version
2418}ATOM_FIRMWARE_INFO_V2_2;
2034 2419
2035#define ATOM_FIRMWARE_INFO_LAST ATOM_FIRMWARE_INFO_V2_1 2420#define ATOM_FIRMWARE_INFO_LAST ATOM_FIRMWARE_INFO_V2_2
2036 2421
2037/****************************************************************************/ 2422/****************************************************************************/
2038// Structures used in IntegratedSystemInfoTable 2423// Structures used in IntegratedSystemInfoTable
@@ -2212,7 +2597,7 @@ ulDockingPinCFGInfo: [15:0]-Bus/Device/Function # to CFG to read this Docking Pi
2212ucDockingPinBit: which bit in this register to read the pin status; 2597ucDockingPinBit: which bit in this register to read the pin status;
2213ucDockingPinPolarity:Polarity of the pin when docked; 2598ucDockingPinPolarity:Polarity of the pin when docked;
2214 2599
2215ulCPUCapInfo: [7:0]=1:Griffin;[7:0]=2:Greyhound;[7:0]=3:K8, other bits reserved for now and must be 0x0 2600ulCPUCapInfo: [7:0]=1:Griffin;[7:0]=2:Greyhound;[7:0]=3:K8, [7:0]=4:Pharaoh, other bits reserved for now and must be 0x0
2216 2601
2217usNumberOfCyclesInPeriod:Indicate how many cycles when PWM duty is 100%. 2602usNumberOfCyclesInPeriod:Indicate how many cycles when PWM duty is 100%.
2218 2603
@@ -2250,6 +2635,14 @@ usMinUpStreamHTLinkWidth: Asymmetric link width support in the future, to rep
2250usMinDownStreamHTLinkWidth: same as above. 2635usMinDownStreamHTLinkWidth: same as above.
2251*/ 2636*/
2252 2637
2638// ATOM_INTEGRATED_SYSTEM_INFO::ulCPUCapInfo - CPU type definition
2639#define INTEGRATED_SYSTEM_INFO__UNKNOWN_CPU 0
2640#define INTEGRATED_SYSTEM_INFO__AMD_CPU__GRIFFIN 1
2641#define INTEGRATED_SYSTEM_INFO__AMD_CPU__GREYHOUND 2
2642#define INTEGRATED_SYSTEM_INFO__AMD_CPU__K8 3
2643#define INTEGRATED_SYSTEM_INFO__AMD_CPU__PHARAOH 4
2644
2645#define INTEGRATED_SYSTEM_INFO__AMD_CPU__MAX_CODE INTEGRATED_SYSTEM_INFO__AMD_CPU__PHARAOH // this define reflects the max defined CPU code
2253 2646
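A small decode sketch (not in the patch); per the description above the CPU code occupies bits [7:0] of ulCPUCapInfo, so extraction is a simple mask:

static int cpu_code_from_capinfo(ULONG ulCPUCapInfo)
{
	int code = ulCPUCapInfo & 0xFF;	/* [7:0] carry the CPU code */

	if (code > INTEGRATED_SYSTEM_INFO__AMD_CPU__MAX_CODE)
		return INTEGRATED_SYSTEM_INFO__UNKNOWN_CPU;
	return code;
}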
2254#define SYSTEM_CONFIG_POWEREXPRESS_ENABLE 0x00000001 2647#define SYSTEM_CONFIG_POWEREXPRESS_ENABLE 0x00000001
2255#define SYSTEM_CONFIG_RUN_AT_OVERDRIVE_ENGINE 0x00000002 2648#define SYSTEM_CONFIG_RUN_AT_OVERDRIVE_ENGINE 0x00000002
@@ -2778,8 +3171,88 @@ typedef struct _ATOM_LVDS_INFO_V12
2778#define PANEL_RANDOM_DITHER 0x80 3171#define PANEL_RANDOM_DITHER 0x80
2779#define PANEL_RANDOM_DITHER_MASK 0x80 3172#define PANEL_RANDOM_DITHER_MASK 0x80
2780 3173
3174#define ATOM_LVDS_INFO_LAST ATOM_LVDS_INFO_V12 // no need to change this
3175
3176/****************************************************************************/
3177// Structures used by LCD_InfoTable V1.3 Note: previous version was called ATOM_LVDS_INFO_V12
3178// ASIC Families: NI
3179// ucTableFormatRevision=1
3180// ucTableContentRevision=3
3181/****************************************************************************/
3182typedef struct _ATOM_LCD_INFO_V13
3183{
3184 ATOM_COMMON_TABLE_HEADER sHeader;
3185 ATOM_DTD_FORMAT sLCDTiming;
3186 USHORT usExtInfoTableOffset;
3187 USHORT usSupportedRefreshRate; //Refer to panel info table in ATOMBIOS extension Spec.
3188 ULONG ulReserved0;
3189 UCHAR ucLCD_Misc; // Reorganized in V13
3190 // Bit0: {=0:single, =1:dual},
3191 // Bit1: {=0:LDI format for RGB888, =1 FPDI format for RGB888} // was {=0:666RGB, =1:888RGB},
3192 // Bit3:2: {Grey level}
3193 // Bit6:4 Color Bit Depth definition (see below definition in EDID V1.4 @BYTE 14h)
3194 // Bit7 Reserved. was for ATOM_PANEL_MISC_API_ENABLED, still need it?
3195 UCHAR ucPanelDefaultRefreshRate;
3196 UCHAR ucPanelIdentification;
3197 UCHAR ucSS_Id;
3198 USHORT usLCDVenderID;
3199 USHORT usLCDProductID;
3200 UCHAR ucLCDPanel_SpecialHandlingCap; // Reorganized in V13
3201 // Bit0: Once DAL sees this CAP is set, it will read EDID from LCD on its own
3202 // Bit1: See LCDPANEL_CAP_DRR_SUPPORTED
3203 // Bit2: a quick reference whether an embedded panel (LCD1) is LVDS (0) or eDP (1)
3204 // Bit7-3: Reserved
3205 UCHAR ucPanelInfoSize; // starts from ATOM_DTD_FORMAT to the end of panel info, including ExtInfoTable
3206 USHORT usBacklightPWM; // Backlight PWM in Hz. New in _V13
3207
3208 UCHAR ucPowerSequenceDIGONtoDE_in4Ms;
3209 UCHAR ucPowerSequenceDEtoVARY_BL_in4Ms;
3210 UCHAR ucPowerSequenceDEtoDIGON_in4Ms;
3211 UCHAR ucPowerSequenceVARY_BLtoDE_in4Ms;
3212
3213 UCHAR ucOffDelay_in4Ms;
3214 UCHAR ucPowerSequenceVARY_BLtoBLON_in4Ms;
3215 UCHAR ucPowerSequenceBLONtoVARY_BL_in4Ms;
3216 UCHAR ucReserved1;
3217
3218 ULONG ulReserved[4];
3219}ATOM_LCD_INFO_V13;
3220
3221#define ATOM_LCD_INFO_LAST ATOM_LCD_INFO_V13
3222
3223//Definitions for ucLCD_Misc
3224#define ATOM_PANEL_MISC_V13_DUAL 0x00000001
3225#define ATOM_PANEL_MISC_V13_FPDI 0x00000002
3226#define ATOM_PANEL_MISC_V13_GREY_LEVEL 0x0000000C
3227#define ATOM_PANEL_MISC_V13_GREY_LEVEL_SHIFT 2
3228#define ATOM_PANEL_MISC_V13_COLOR_BIT_DEPTH_MASK 0x70
3229#define ATOM_PANEL_MISC_V13_6BIT_PER_COLOR 0x10
3230#define ATOM_PANEL_MISC_V13_8BIT_PER_COLOR 0x20
3231
3232//Color Bit Depth definition in EDID V1.4 @BYTE 14h
3233//Bit 6 5 4
3234 // 0 0 0 - Color bit depth is undefined
3235 // 0 0 1 - 6 Bits per Primary Color
3236 // 0 1 0 - 8 Bits per Primary Color
3237 // 0 1 1 - 10 Bits per Primary Color
3238 // 1 0 0 - 12 Bits per Primary Color
3239 // 1 0 1 - 14 Bits per Primary Color
3240 // 1 1 0 - 16 Bits per Primary Color
3241 // 1 1 1 - Reserved
3242
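A decode sketch (not part of the patch): bits [6:4] of ucLCD_Misc follow the EDID 1.4 encoding tabulated above, so codes 1..6 map linearly to 6..16 bits per primary color.

static int lcd_v13_bits_per_color(UCHAR ucLCD_Misc)
{
	UCHAR code = (ucLCD_Misc & ATOM_PANEL_MISC_V13_COLOR_BIT_DEPTH_MASK) >> 4;

	if (code == 0 || code == 7)
		return 0;		/* undefined or reserved */
	return 4 + 2 * code;		/* 1 -> 6bpc ... 6 -> 16bpc */
}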
3243//Definitions for ucLCDPanel_SpecialHandlingCap:
3244
3245//Once DAL sees this CAP is set, it will read EDID from LCD on its own instead of using sLCDTiming in ATOM_LVDS_INFO_V12.
3246//Other entries in ATOM_LVDS_INFO_V12 are still valid/useful to DAL
3247#define LCDPANEL_CAP_V13_READ_EDID 0x1 // = LCDPANEL_CAP_READ_EDID, no change compared to the previous version
3248
3249//If a design supports DRR (dynamic refresh rate) on internal panels (LVDS or EDP), this cap is set in ucLCDPanel_SpecialHandlingCap together
3250//with multiple supported refresh rates@usSupportedRefreshRate. This cap should not be set when only slow refresh rate is supported (static
3251//refresh rate switch by SW). This is only valid from ATOM_LVDS_INFO_V12
3252#define LCDPANEL_CAP_V13_DRR_SUPPORTED 0x2 // = LCDPANEL_CAP_DRR_SUPPORTED, no change compared to the previous version
2781 3253
2782#define ATOM_LVDS_INFO_LAST ATOM_LVDS_INFO_V12 3254//Use this cap bit for a quick reference whether an embedded panel (LCD1) is LVDS or eDP.
3255#define LCDPANEL_CAP_V13_eDP 0x4 // = LCDPANEL_CAP_eDP, no change compared to the previous version
2783 3256
2784typedef struct _ATOM_PATCH_RECORD_MODE 3257typedef struct _ATOM_PATCH_RECORD_MODE
2785{ 3258{
@@ -2944,9 +3417,9 @@ typedef struct _ATOM_DPCD_INFO
2944#define MAX_DTD_MODE_IN_VRAM 6 3417#define MAX_DTD_MODE_IN_VRAM 6
2945#define ATOM_DTD_MODE_SUPPORT_TBL_SIZE (MAX_DTD_MODE_IN_VRAM*28) //28= (SIZEOF ATOM_DTD_FORMAT) 3418#define ATOM_DTD_MODE_SUPPORT_TBL_SIZE (MAX_DTD_MODE_IN_VRAM*28) //28= (SIZEOF ATOM_DTD_FORMAT)
2946#define ATOM_STD_MODE_SUPPORT_TBL_SIZE 32*8 //32 is a predefined number,8= (SIZEOF ATOM_STD_FORMAT) 3419#define ATOM_STD_MODE_SUPPORT_TBL_SIZE 32*8 //32 is a predefined number,8= (SIZEOF ATOM_STD_FORMAT)
2947#define DFP_ENCODER_TYPE_OFFSET 0x80 3420//20 bytes for Encoder Type and DPCD in STD EDID area
2948#define DP_ENCODER_LANE_NUM_OFFSET 0x84 3421#define DFP_ENCODER_TYPE_OFFSET (ATOM_EDID_RAW_DATASIZE + ATOM_DTD_MODE_SUPPORT_TBL_SIZE + ATOM_STD_MODE_SUPPORT_TBL_SIZE - 20)
2949#define DP_ENCODER_LINK_RATE_OFFSET 0x88 3422#define ATOM_DP_DPCD_OFFSET (DFP_ENCODER_TYPE_OFFSET + 4 )
2950 3423
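Worked numbers for the relocated offsets (assuming the ATOM_EDID_RAW_DATASIZE of 256 defined earlier in this header):

/*
 * DFP_ENCODER_TYPE_OFFSET = 256 + 6*28 + 32*8 - 20 = 660
 * ATOM_DP_DPCD_OFFSET     = 660 + 4                = 664
 * i.e. the 20-byte encoder-type/DPCD block now sits at the tail of each
 * per-display EDID area rather than at the old fixed 0x80/0x84/0x88 offsets.
 */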
2951#define ATOM_HWICON1_SURFACE_ADDR 0 3424#define ATOM_HWICON1_SURFACE_ADDR 0
2952#define ATOM_HWICON2_SURFACE_ADDR (ATOM_HWICON1_SURFACE_ADDR + ATOM_HWICON_SURFACE_SIZE) 3425#define ATOM_HWICON2_SURFACE_ADDR (ATOM_HWICON1_SURFACE_ADDR + ATOM_HWICON_SURFACE_SIZE)
@@ -2997,14 +3470,16 @@ typedef struct _ATOM_DPCD_INFO
2997#define ATOM_DFP5_DTD_MODE_TBL_ADDR (ATOM_DFP5_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) 3470#define ATOM_DFP5_DTD_MODE_TBL_ADDR (ATOM_DFP5_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
2998#define ATOM_DFP5_STD_MODE_TBL_ADDR (ATOM_DFP5_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) 3471#define ATOM_DFP5_STD_MODE_TBL_ADDR (ATOM_DFP5_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
2999 3472
3000#define ATOM_DP_TRAINING_TBL_ADDR (ATOM_DFP5_STD_MODE_TBL_ADDR+ATOM_STD_MODE_SUPPORT_TBL_SIZE) 3473#define ATOM_DP_TRAINING_TBL_ADDR (ATOM_DFP5_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
3001 3474
3002#define ATOM_STACK_STORAGE_START (ATOM_DP_TRAINING_TBL_ADDR+256) 3475#define ATOM_STACK_STORAGE_START (ATOM_DP_TRAINING_TBL_ADDR + 1024)
3003#define ATOM_STACK_STORAGE_END ATOM_STACK_STORAGE_START+512 3476#define ATOM_STACK_STORAGE_END ATOM_STACK_STORAGE_START + 512
3004 3477
3005//The size below is in Kb! 3478//The size below is in Kb!
3006#define ATOM_VRAM_RESERVE_SIZE ((((ATOM_STACK_STORAGE_END - ATOM_HWICON1_SURFACE_ADDR)>>10)+4)&0xFFFC) 3479#define ATOM_VRAM_RESERVE_SIZE ((((ATOM_STACK_STORAGE_END - ATOM_HWICON1_SURFACE_ADDR)>>10)+4)&0xFFFC)
3007 3480
3481#define ATOM_VRAM_RESERVE_V2_SIZE 32
3482
3008#define ATOM_VRAM_OPERATION_FLAGS_MASK 0xC0000000L 3483#define ATOM_VRAM_OPERATION_FLAGS_MASK 0xC0000000L
3009#define ATOM_VRAM_OPERATION_FLAGS_SHIFT 30 3484#define ATOM_VRAM_OPERATION_FLAGS_SHIFT 30
3010#define ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION 0x1 3485#define ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION 0x1
@@ -3206,6 +3681,15 @@ typedef struct _ATOM_DISPLAY_OBJECT_PATH
3206 USHORT usGraphicObjIds[1]; //1st Encoder Obj source from GPU to last Graphic Obj destinate to connector. 3681 USHORT usGraphicObjIds[1]; //1st Encoder Obj source from GPU to last Graphic Obj destinate to connector.
3207}ATOM_DISPLAY_OBJECT_PATH; 3682}ATOM_DISPLAY_OBJECT_PATH;
3208 3683
3684typedef struct _ATOM_DISPLAY_EXTERNAL_OBJECT_PATH
3685{
3686 USHORT usDeviceTag; //supported device
3687 USHORT usSize; //the size of ATOM_DISPLAY_OBJECT_PATH
3688 USHORT usConnObjectId; //Connector Object ID
3689 USHORT usGPUObjectId; //GPU ID
3690 USHORT usGraphicObjIds[2]; //usGraphicObjIds[0]= GPU internal encoder, usGraphicObjIds[1]= external encoder
3691}ATOM_DISPLAY_EXTERNAL_OBJECT_PATH;
3692
3209typedef struct _ATOM_DISPLAY_OBJECT_PATH_TABLE 3693typedef struct _ATOM_DISPLAY_OBJECT_PATH_TABLE
3210{ 3694{
3211 UCHAR ucNumOfDispPath; 3695 UCHAR ucNumOfDispPath;
@@ -3261,6 +3745,47 @@ typedef struct _ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT //usSrcDstTableOffset
3261#define EXT_AUXDDC_LUTINDEX_7 7 3745#define EXT_AUXDDC_LUTINDEX_7 7
3262#define MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES (EXT_AUXDDC_LUTINDEX_7+1) 3746#define MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES (EXT_AUXDDC_LUTINDEX_7+1)
3263 3747
3748//ucChannelMapping are defined as following
3749//for DP connector, eDP, DP to VGA/LVDS
3750//Bit[1:0]: Define which pin connect to DP connector DP_Lane0, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3
3751//Bit[3:2]: Define which pin connect to DP connector DP_Lane1, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3
3752//Bit[5:4]: Define which pin connect to DP connector DP_Lane2, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3
3753//Bit[7:6]: Define which pin connect to DP connector DP_Lane3, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3
3754typedef struct _ATOM_DP_CONN_CHANNEL_MAPPING
3755{
3756#if ATOM_BIG_ENDIAN
3757 UCHAR ucDP_Lane3_Source:2;
3758 UCHAR ucDP_Lane2_Source:2;
3759 UCHAR ucDP_Lane1_Source:2;
3760 UCHAR ucDP_Lane0_Source:2;
3761#else
3762 UCHAR ucDP_Lane0_Source:2;
3763 UCHAR ucDP_Lane1_Source:2;
3764 UCHAR ucDP_Lane2_Source:2;
3765 UCHAR ucDP_Lane3_Source:2;
3766#endif
3767}ATOM_DP_CONN_CHANNEL_MAPPING;
3768
3769//for DVI/HDMI, in dual link case, both links have to have same mapping.
3770//Bit[1:0]: Define which pin connect to DVI connector data Lane2, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3
3771//Bit[3:2]: Define which pin connect to DVI connector data Lane1, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3
3772//Bit[5:4]: Define which pin connect to DVI connector data Lane0, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3
3773//Bit[7:6]: Define which pin connect to DVI connector clock lane, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3
3774typedef struct _ATOM_DVI_CONN_CHANNEL_MAPPING
3775{
3776#if ATOM_BIG_ENDIAN
3777 UCHAR ucDVI_CLK_Source:2;
3778 UCHAR ucDVI_DATA0_Source:2;
3779 UCHAR ucDVI_DATA1_Source:2;
3780 UCHAR ucDVI_DATA2_Source:2;
3781#else
3782 UCHAR ucDVI_DATA2_Source:2;
3783 UCHAR ucDVI_DATA1_Source:2;
3784 UCHAR ucDVI_DATA0_Source:2;
3785 UCHAR ucDVI_CLK_Source:2;
3786#endif
3787}ATOM_DVI_CONN_CHANNEL_MAPPING;
3788
3264typedef struct _EXT_DISPLAY_PATH 3789typedef struct _EXT_DISPLAY_PATH
3265{ 3790{
3266 USHORT usDeviceTag; //A bit vector to show what devices are supported 3791 USHORT usDeviceTag; //A bit vector to show what devices are supported
@@ -3269,7 +3794,13 @@ typedef struct _EXT_DISPLAY_PATH
3269 UCHAR ucExtAUXDDCLutIndex; //An index into external AUX/DDC channel LUT 3794 UCHAR ucExtAUXDDCLutIndex; //An index into external AUX/DDC channel LUT
3270 UCHAR ucExtHPDPINLutIndex; //An index into external HPD pin LUT 3795 UCHAR ucExtHPDPINLutIndex; //An index into external HPD pin LUT
3271 USHORT usExtEncoderObjId; //external encoder object id 3796 USHORT usExtEncoderObjId; //external encoder object id
3272 USHORT usReserved[3]; 3797 union{
3798 UCHAR ucChannelMapping; // if ucChannelMapping=0, using default one to one mapping
3799 ATOM_DP_CONN_CHANNEL_MAPPING asDPMapping;
3800 ATOM_DVI_CONN_CHANNEL_MAPPING asDVIMapping;
3801 };
3802 UCHAR ucReserved;
3803 USHORT usReserved[2];
3273}EXT_DISPLAY_PATH; 3804}EXT_DISPLAY_PATH;
3274 3805
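A sketch (not in the patch) of describing a DP connector whose lanes 0 and 1 are crossed onto GPU pins TX1/TX0 while lanes 2 and 3 stay one-to-one, using the new bitfield view; leaving ucChannelMapping at 0 keeps the default one-to-one mapping.

static void ext_path_mapping_sketch(EXT_DISPLAY_PATH *path)
{
	path->asDPMapping.ucDP_Lane0_Source = 1;	/* from GPU pin TX1 */
	path->asDPMapping.ucDP_Lane1_Source = 0;	/* from GPU pin TX0 */
	path->asDPMapping.ucDP_Lane2_Source = 2;	/* from GPU pin TX2 */
	path->asDPMapping.ucDP_Lane3_Source = 3;	/* from GPU pin TX3 */
}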
3275#define NUMBER_OF_UCHAR_FOR_GUID 16 3806#define NUMBER_OF_UCHAR_FOR_GUID 16
@@ -3281,7 +3812,8 @@ typedef struct _ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO
3281 UCHAR ucGuid [NUMBER_OF_UCHAR_FOR_GUID]; // a GUID is a 16 byte long string 3812 UCHAR ucGuid [NUMBER_OF_UCHAR_FOR_GUID]; // a GUID is a 16 byte long string
3282 EXT_DISPLAY_PATH sPath[MAX_NUMBER_OF_EXT_DISPLAY_PATH]; // total of fixed 7 entries. 3813 EXT_DISPLAY_PATH sPath[MAX_NUMBER_OF_EXT_DISPLAY_PATH]; // total of fixed 7 entries.
3283 UCHAR ucChecksum; // a simple Checksum of the sum of whole structure equal to 0x0. 3814 UCHAR ucChecksum; // a simple Checksum of the sum of whole structure equal to 0x0.
3284 UCHAR Reserved [7]; // for potential expansion 3815 UCHAR uc3DStereoPinId; // used for eDP panel
3816 UCHAR Reserved [6]; // for potential expansion
3285}ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO; 3817}ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO;
3286 3818
3287//Related definitions, all records are different but they have a common header 3819//Related definitions, all records are different but they have a common header
@@ -3311,10 +3843,11 @@ typedef struct _ATOM_COMMON_RECORD_HEADER
3311#define ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE 17 //This is for the case when connectors are not known to object table 3843#define ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE 17 //This is for the case when connectors are not known to object table
3312#define ATOM_OBJECT_LINK_RECORD_TYPE 18 //Once this record is present under one object, it indicates the object is linked to another obj described by the record 3844#define ATOM_OBJECT_LINK_RECORD_TYPE 18 //Once this record is present under one object, it indicates the object is linked to another obj described by the record
3313#define ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE 19 3845#define ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE 19
3846#define ATOM_ENCODER_CAP_RECORD_TYPE 20
3314 3847
3315 3848
3316//Must be updated when new record type is added,equal to that record definition! 3849//Must be updated when new record type is added,equal to that record definition!
3317#define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE 3850#define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_ENCODER_CAP_RECORD_TYPE
3318 3851
3319typedef struct _ATOM_I2C_RECORD 3852typedef struct _ATOM_I2C_RECORD
3320{ 3853{
@@ -3441,6 +3974,26 @@ typedef struct _ATOM_ENCODER_DVO_CF_RECORD
3441 UCHAR ucPadding[2]; 3974 UCHAR ucPadding[2];
3442}ATOM_ENCODER_DVO_CF_RECORD; 3975}ATOM_ENCODER_DVO_CF_RECORD;
3443 3976
3977// Bit maps for ATOM_ENCODER_CAP_RECORD.ucEncoderCap
3978#define ATOM_ENCODER_CAP_RECORD_HBR2 0x01 // DP1.2 HBR2 is supported by this path
3979
3980typedef struct _ATOM_ENCODER_CAP_RECORD
3981{
3982 ATOM_COMMON_RECORD_HEADER sheader;
3983 union {
3984 USHORT usEncoderCap;
3985 struct {
3986#if ATOM_BIG_ENDIAN
3987 USHORT usReserved:15; // Bit1-15 may be defined for other capability in future
3988 USHORT usHBR2Cap:1; // Bit0 is for DP1.2 HBR2 capability.
3989#else
3990 USHORT usHBR2Cap:1; // Bit0 is for DP1.2 HBR2 capability.
3991 USHORT usReserved:15; // Bit1-15 may be defined for other capability in future
3992#endif
3993 };
3994 };
3995}ATOM_ENCODER_CAP_RECORD;
3996
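A capability-check sketch (not part of the patch); either the usEncoderCap mask or the usHBR2Cap bitfield view of the union works:

static int encoder_supports_hbr2(const ATOM_ENCODER_CAP_RECORD *rec)
{
	return (rec->usEncoderCap & ATOM_ENCODER_CAP_RECORD_HBR2) != 0;
}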
3444// value for ATOM_CONNECTOR_CF_RECORD.ucConnectedDvoBundle 3997// value for ATOM_CONNECTOR_CF_RECORD.ucConnectedDvoBundle
3445#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_UPPER12BITBUNDLEA 1 3998#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_UPPER12BITBUNDLEA 1
3446#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_LOWER12BITBUNDLEB 2 3999#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_LOWER12BITBUNDLEB 2
@@ -3580,6 +4133,11 @@ typedef struct _ATOM_VOLTAGE_CONTROL
3580#define VOLTAGE_CONTROL_ID_DAC 0x02 //I2C control, used for R5xx/R6xx MVDDC,MVDDQ or VDDCI 4133#define VOLTAGE_CONTROL_ID_DAC 0x02 //I2C control, used for R5xx/R6xx MVDDC,MVDDQ or VDDCI
3581#define VOLTAGE_CONTROL_ID_VT116xM 0x03 //I2C control, used for R6xx Core Voltage 4134#define VOLTAGE_CONTROL_ID_VT116xM 0x03 //I2C control, used for R6xx Core Voltage
3582#define VOLTAGE_CONTROL_ID_DS4402 0x04 4135#define VOLTAGE_CONTROL_ID_DS4402 0x04
4136#define VOLTAGE_CONTROL_ID_UP6266 0x05
4137#define VOLTAGE_CONTROL_ID_SCORPIO 0x06
4138#define VOLTAGE_CONTROL_ID_VT1556M 0x07
4139#define VOLTAGE_CONTROL_ID_CHL822x 0x08
4140#define VOLTAGE_CONTROL_ID_VT1586M 0x09
3583 4141
3584typedef struct _ATOM_VOLTAGE_OBJECT 4142typedef struct _ATOM_VOLTAGE_OBJECT
3585{ 4143{
@@ -3670,66 +4228,157 @@ typedef struct _ATOM_POWER_SOURCE_INFO
3670#define POWER_SENSOR_GPIO 0x01 4228#define POWER_SENSOR_GPIO 0x01
3671#define POWER_SENSOR_I2C 0x02 4229#define POWER_SENSOR_I2C 0x02
3672 4230
4231typedef struct _ATOM_CLK_VOLT_CAPABILITY
4232{
4233 ULONG ulVoltageIndex; // The Voltage Index indicated by FUSE, same voltage index shared with SCLK DPM fuse table
4234 ULONG ulMaximumSupportedCLK; // Maximum clock supported with specified voltage index, unit in 10kHz
4235}ATOM_CLK_VOLT_CAPABILITY;
4236
4237typedef struct _ATOM_AVAILABLE_SCLK_LIST
4238{
4239 ULONG ulSupportedSCLK; // Maximum clock supported with specified voltage index, unit in 10kHz
4240 USHORT usVoltageIndex; // The Voltage Index indicated by FUSE for specified SCLK
4241 USHORT usVoltageID; // The Voltage ID indicated by FUSE for specified SCLK
4242}ATOM_AVAILABLE_SCLK_LIST;
4243
4244// ATOM_INTEGRATED_SYSTEM_INFO_V6 ulSystemConfig cap definition
4245#define ATOM_IGP_INFO_V6_SYSTEM_CONFIG__PCIE_POWER_GATING_ENABLE 1 // refer to ulSystemConfig bit[0]
4246
4247// this IntegratedSystemInfoTable is used for Llano/Ontario APUs
3673typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 4248typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V6
3674{ 4249{
3675 ATOM_COMMON_TABLE_HEADER sHeader; 4250 ATOM_COMMON_TABLE_HEADER sHeader;
3676 ULONG ulBootUpEngineClock; 4251 ULONG ulBootUpEngineClock;
3677 ULONG ulDentistVCOFreq; 4252 ULONG ulDentistVCOFreq;
3678 ULONG ulBootUpUMAClock; 4253 ULONG ulBootUpUMAClock;
3679 ULONG ulReserved1[8]; 4254 ATOM_CLK_VOLT_CAPABILITY sDISPCLK_Voltage[4];
3680 ULONG ulBootUpReqDisplayVector; 4255 ULONG ulBootUpReqDisplayVector;
3681 ULONG ulOtherDisplayMisc; 4256 ULONG ulOtherDisplayMisc;
3682 ULONG ulGPUCapInfo; 4257 ULONG ulGPUCapInfo;
3683 ULONG ulReserved2[3]; 4258 ULONG ulSB_MMIO_Base_Addr;
4259 USHORT usRequestedPWMFreqInHz;
4260 UCHAR ucHtcTmpLmt;
4261 UCHAR ucHtcHystLmt;
4262 ULONG ulMinEngineClock;
3684 ULONG ulSystemConfig; 4263 ULONG ulSystemConfig;
3685 ULONG ulCPUCapInfo; 4264 ULONG ulCPUCapInfo;
3686 USHORT usMaxNBVoltage; 4265 USHORT usNBP0Voltage;
3687 USHORT usMinNBVoltage; 4266 USHORT usNBP1Voltage;
3688 USHORT usBootUpNBVoltage; 4267 USHORT usBootUpNBVoltage;
3689 USHORT usExtDispConnInfoOffset; 4268 USHORT usExtDispConnInfoOffset;
3690 UCHAR ucHtcTmpLmt; 4269 USHORT usPanelRefreshRateRange;
3691 UCHAR ucTjOffset;
3692 UCHAR ucMemoryType; 4270 UCHAR ucMemoryType;
3693 UCHAR ucUMAChannelNumber; 4271 UCHAR ucUMAChannelNumber;
3694 ULONG ulCSR_M3_ARB_CNTL_DEFAULT[10]; 4272 ULONG ulCSR_M3_ARB_CNTL_DEFAULT[10];
3695 ULONG ulCSR_M3_ARB_CNTL_UVD[10]; 4273 ULONG ulCSR_M3_ARB_CNTL_UVD[10];
3696 ULONG ulCSR_M3_ARB_CNTL_FS3D[10]; 4274 ULONG ulCSR_M3_ARB_CNTL_FS3D[10];
3697 ULONG ulReserved3[42]; 4275 ATOM_AVAILABLE_SCLK_LIST sAvail_SCLK[5];
4276 ULONG ulGMCRestoreResetTime;
4277 ULONG ulMinimumNClk;
4278 ULONG ulIdleNClk;
4279 ULONG ulDDR_DLL_PowerUpTime;
4280 ULONG ulDDR_PLL_PowerUpTime;
4281 USHORT usPCIEClkSSPercentage;
4282 USHORT usPCIEClkSSType;
4283 USHORT usLvdsSSPercentage;
4284 USHORT usLvdsSSpreadRateIn10Hz;
4285 USHORT usHDMISSPercentage;
4286 USHORT usHDMISSpreadRateIn10Hz;
4287 USHORT usDVISSPercentage;
4288 USHORT usDVISSpreadRateIn10Hz;
4289 ULONG ulReserved3[21];
3698 ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo; 4290 ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;
3699}ATOM_INTEGRATED_SYSTEM_INFO_V6; 4291}ATOM_INTEGRATED_SYSTEM_INFO_V6;
3700 4292
4293// ulGPUCapInfo
4294#define INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__TMDSHDMI_COHERENT_SINGLEPLL_MODE 0x01
4295#define INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__DISABLE_AUX_HW_MODE_DETECTION 0x08
4296
4297// ulOtherDisplayMisc
4298#define INTEGRATED_SYSTEM_INFO__GET_EDID_CALLBACK_FUNC_SUPPORT 0x01
4299
4300
3701/********************************************************************************************************************** 4301/**********************************************************************************************************************
3702// ATOM_INTEGRATED_SYSTEM_INFO_V6 Description 4302 ATOM_INTEGRATED_SYSTEM_INFO_V6 Description
3703//ulBootUpEngineClock: VBIOS bootup Engine clock frequency, in 10kHz unit. 4303ulBootUpEngineClock: VBIOS bootup Engine clock frequency, in 10kHz unit. If it equals 0, VBIOS uses a pre-defined bootup engine clock
3704//ulDentistVCOFreq: Dentist VCO clock in 10kHz unit. 4304ulDentistVCOFreq: Dentist VCO clock in 10kHz unit.
3705//ulBootUpUMAClock: System memory boot up clock frequency in 10Khz unit. 4305ulBootUpUMAClock: System memory boot up clock frequency in 10Khz unit.
3706//ulReserved1[8] Reserved by now, must be 0x0. 4306sDISPCLK_Voltage: Report Display clock voltage requirement.
3707//ulBootUpReqDisplayVector VBIOS boot up display IDs 4307
3708// ATOM_DEVICE_CRT1_SUPPORT 0x0001 4308ulBootUpReqDisplayVector: VBIOS boot up display IDs; the following are supported devices in Llano/Ontario projects:
3709// ATOM_DEVICE_CRT2_SUPPORT 0x0010 4309 ATOM_DEVICE_CRT1_SUPPORT 0x0001
3710// ATOM_DEVICE_DFP1_SUPPORT 0x0008 4310 ATOM_DEVICE_CRT2_SUPPORT 0x0010
3711// ATOM_DEVICE_DFP6_SUPPORT 0x0040 4311 ATOM_DEVICE_DFP1_SUPPORT 0x0008
3712// ATOM_DEVICE_DFP2_SUPPORT 0x0080 4312 ATOM_DEVICE_DFP6_SUPPORT 0x0040
3713// ATOM_DEVICE_DFP3_SUPPORT 0x0200 4313 ATOM_DEVICE_DFP2_SUPPORT 0x0080
3714// ATOM_DEVICE_DFP4_SUPPORT 0x0400 4314 ATOM_DEVICE_DFP3_SUPPORT 0x0200
3715// ATOM_DEVICE_DFP5_SUPPORT 0x0800 4315 ATOM_DEVICE_DFP4_SUPPORT 0x0400
3716// ATOM_DEVICE_LCD1_SUPPORT 0x0002 4316 ATOM_DEVICE_DFP5_SUPPORT 0x0800
3717//ulOtherDisplayMisc Other display related flags, not defined yet. 4317 ATOM_DEVICE_LCD1_SUPPORT 0x0002
3718//ulGPUCapInfo TBD 4318ulOtherDisplayMisc: Other display related flags, not defined yet.
3719//ulReserved2[3] must be 0x0 for the reserved. 4319ulGPUCapInfo: bit[0]=0: TMDS/HDMI Coherent Mode use cascade PLL mode.
3720//ulSystemConfig TBD 4320 =1: TMDS/HDMI Coherent Mode use single PLL mode.
3721//ulCPUCapInfo TBD 4321 bit[3]=0: Enable HW AUX mode detection logic
3722//usMaxNBVoltage High NB voltage in unit of mv, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse. 4322 =1: Disable HW AUX mode detection logic
3723//usMinNBVoltage Low NB voltage in unit of mv, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse. 4323ulSB_MMIO_Base_Addr: Physical Base address to SB MMIO space. Driver needs to initialize it for SMU usage.
3724//usBootUpNBVoltage Boot up NB voltage in unit of mv. 4324
3725//ucHtcTmpLmt Bit [22:16] of D24F3x64 Thermal Control (HTC) Register. 4325usRequestedPWMFreqInHz: When it's set to 0x0 by SBIOS: the LCD BackLight is not controlled by GPU(SW).
3726//ucTjOffset Bit [28:22] of D24F3xE4 Thermtrip Status Register,may not be needed. 4326 Any attempt to change BL using VBIOS function or enable VariBri from PP table is not effective since ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==0;
3727//ucMemoryType [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved. 4327
3728//ucUMAChannelNumber System memory channel numbers. 4328 When it's set to a non-zero frequency, the BackLight is controlled by GPU (SW) in one of two ways below:
3729//usExtDispConnectionInfoOffset ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO offset relative to beginning of this table. 4329 1. SW uses the GPU BL PWM output to control the BL, in this case, this non-zero frequency determines what freq GPU should use;
3730//ulCSR_M3_ARB_CNTL_DEFAULT[10] Arrays with values for CSR M3 arbiter for default 4330 VBIOS will set up proper PWM frequency and ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1; as a result,
3731//ulCSR_M3_ARB_CNTL_UVD[10] Arrays with values for CSR M3 arbiter for UVD playback. 4331 Changing BL using VBIOS function is functional in both driver and non-driver present environment;
3732//ulCSR_M3_ARB_CNTL_FS3D[10] Arrays with values for CSR M3 arbiter for Full Screen 3D applications. 4332 and enabling VariBri under the driver environment from PP table is optional.
4333
4334 2. SW uses other means to control BL (like DPCD),this non-zero frequency serves as a flag only indicating
4335 that BL control from GPU is expected.
4336 VBIOS will NOT set up PWM frequency but make ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1
4337 Changing BL using VBIOS function could be functional in both driver and non-driver present environment,but
4338 it's per platform
4339 and enabling VariBri under the driver environment from PP table is optional.
4340
4341ucHtcTmpLmt: Refer to D18F3x64 bit[22:16], HtcTmpLmt.
4342 Threshold on value to enter HTC_active state.
4343ucHtcHystLmt: Refer to D18F3x64 bit[27:24], HtcHystLmt.
4344 To calculate threshold off value to exit HTC_active state, which is Threshold on value minus ucHtcHystLmt.
4345ulMinEngineClock: Minimum SCLK allowed in 10kHz unit. This is calculated based on WRCK Fuse settings.
4346ulSystemConfig: Bit[0]=0: PCIE Power Gating Disabled
4347 =1: PCIE Power Gating Enabled
4348 Bit[1]=0: DDR-DLL shut-down feature disabled.
4349 1: DDR-DLL shut-down feature enabled.
4350 Bit[2]=0: DDR-PLL Power down feature disabled.
4351 1: DDR-PLL Power down feature enabled.
4352ulCPUCapInfo: TBD
4353usNBP0Voltage: VID for voltage on NB P0 State
4354usNBP1Voltage: VID for voltage on NB P1 State
4355usBootUpNBVoltage: Voltage Index of GNB voltage configured by SBIOS, which is sufficient to support VBIOS DISPCLK requirement.
4356usExtDispConnInfoOffset: Offset to sExtDispConnInfo inside the structure
4357usPanelRefreshRateRange: Bit vector for LCD supported refresh rate range. If DRR is requested by the platform, at least two bits need to be set
4358 to indicate a range.
4359 SUPPORTED_LCD_REFRESHRATE_30Hz 0x0004
4360 SUPPORTED_LCD_REFRESHRATE_40Hz 0x0008
4361 SUPPORTED_LCD_REFRESHRATE_50Hz 0x0010
4362 SUPPORTED_LCD_REFRESHRATE_60Hz 0x0020
4363ucMemoryType: [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved.
4364ucUMAChannelNumber: System memory channel numbers.
4365ulCSR_M3_ARB_CNTL_DEFAULT[10]: Arrays with values for CSR M3 arbiter for default
4366ulCSR_M3_ARB_CNTL_UVD[10]: Arrays with values for CSR M3 arbiter for UVD playback.
4367ulCSR_M3_ARB_CNTL_FS3D[10]: Arrays with values for CSR M3 arbiter for Full Screen 3D applications.
4368sAvail_SCLK[5]: Arrays to provide available list of SCLK and corresponding voltage, ordered from low to high
4369ulGMCRestoreResetTime: GMC power restore and GMC reset time to calculate data reconnection latency. Unit in ns.
4370ulMinimumNClk: Minimum NCLK speed among all NB-Pstates to calculate data reconnection latency. Unit in 10kHz.
4371ulIdleNClk: NCLK speed while memory runs in self-refresh state. Unit in 10kHz.
4372ulDDR_DLL_PowerUpTime: DDR PHY DLL power up time. Unit in ns.
4373ulDDR_PLL_PowerUpTime: DDR PHY PLL power up time. Unit in ns.
4374usPCIEClkSSPercentage: PCIE Clock Spread Spectrum Percentage in unit 0.01%; 100 means 1%.
4375usPCIEClkSSType: PCIE Clock Spread Spectrum Type. 0 for Down spread (default); 1 for Center spread.
4376usLvdsSSPercentage: LVDS panel ( not including eDP ) Spread Spectrum Percentage in unit of 0.01%, =0, use VBIOS default setting.
4377usLvdsSSpreadRateIn10Hz: LVDS panel ( not including eDP ) Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
4378usHDMISSPercentage: HDMI Spread Spectrum Percentage in unit 0.01%; 100 means 1%, =0, use VBIOS default setting.
4379usHDMISSpreadRateIn10Hz: HDMI Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
4380usDVISSPercentage: DVI Spread Spectrum Percentage in unit 0.01%; 100 means 1%, =0, use VBIOS default setting.
4381usDVISSpreadRateIn10Hz: DVI Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
3733**********************************************************************************************************************/ 4382**********************************************************************************************************************/
3734 4383
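A scan sketch (not in the patch) over the new availability list; entries are documented as ordered from low to high, so a simple max-scan is a safe way to find the top supported SCLK.

static ULONG max_supported_sclk(const ATOM_INTEGRATED_SYSTEM_INFO_V6 *info)
{
	ULONG best = 0;
	int i;

	for (i = 0; i < 5; i++)		/* sAvail_SCLK has 5 fixed slots */
		if (info->sAvail_SCLK[i].ulSupportedSCLK > best)
			best = info->sAvail_SCLK[i].ulSupportedSCLK;
	return best;			/* in 10kHz units */
}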
3735/**************************************************************************/ 4384/**************************************************************************/
@@ -3790,6 +4439,7 @@ typedef struct _ATOM_ASIC_SS_ASSIGNMENT
3790#define ASIC_INTERNAL_SS_ON_LVDS 6 4439#define ASIC_INTERNAL_SS_ON_LVDS 6
3791#define ASIC_INTERNAL_SS_ON_DP 7 4440#define ASIC_INTERNAL_SS_ON_DP 7
3792#define ASIC_INTERNAL_SS_ON_DCPLL 8 4441#define ASIC_INTERNAL_SS_ON_DCPLL 8
4442#define ASIC_EXTERNAL_SS_ON_DP_CLOCK 9
3793 4443
3794typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V2 4444typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V2
3795{ 4445{
@@ -3903,6 +4553,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
3903#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_AC 1 4553#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_AC 1
3904#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_DC 2 4554#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_DC 2
3905#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LITEAC 3 4555#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LITEAC 3
4556#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LIT2AC 4
3906 4557
3907//Byte aligned definition for BIOS usage 4558//Byte aligned definition for BIOS usage
3908#define ATOM_S0_CRT1_MONOb0 0x01 4559#define ATOM_S0_CRT1_MONOb0 0x01
@@ -4529,7 +5180,8 @@ typedef struct _ATOM_INIT_REG_BLOCK{
4529#define INDEX_ACCESS_RANGE_BEGIN (VALUE_DWORD + 1) 5180#define INDEX_ACCESS_RANGE_BEGIN (VALUE_DWORD + 1)
4530#define INDEX_ACCESS_RANGE_END (INDEX_ACCESS_RANGE_BEGIN + 1) 5181#define INDEX_ACCESS_RANGE_END (INDEX_ACCESS_RANGE_BEGIN + 1)
4531#define VALUE_INDEX_ACCESS_SINGLE (INDEX_ACCESS_RANGE_END + 1) 5182#define VALUE_INDEX_ACCESS_SINGLE (INDEX_ACCESS_RANGE_END + 1)
4532 5183//#define ACCESS_MCIODEBUGIND 0x40 //defined in BIOS code
5184#define ACCESS_PLACEHOLDER 0x80
4533 5185
4534typedef struct _ATOM_MC_INIT_PARAM_TABLE 5186typedef struct _ATOM_MC_INIT_PARAM_TABLE
4535{ 5187{
@@ -4554,6 +5206,10 @@ typedef struct _ATOM_MC_INIT_PARAM_TABLE
4554#define _32Mx32 0x33 5206#define _32Mx32 0x33
4555#define _64Mx8 0x41 5207#define _64Mx8 0x41
4556#define _64Mx16 0x42 5208#define _64Mx16 0x42
5209#define _64Mx32 0x43
5210#define _128Mx8 0x51
5211#define _128Mx16 0x52
5212#define _256Mx8 0x61
4557 5213
4558#define SAMSUNG 0x1 5214#define SAMSUNG 0x1
4559#define INFINEON 0x2 5215#define INFINEON 0x2
@@ -4569,10 +5225,11 @@ typedef struct _ATOM_MC_INIT_PARAM_TABLE
4569#define QIMONDA INFINEON 5225#define QIMONDA INFINEON
4570#define PROMOS MOSEL 5226#define PROMOS MOSEL
4571#define KRETON INFINEON 5227#define KRETON INFINEON
5228#define ELIXIR NANYA
4572 5229
4573/////////////Support for GDDR5 MC uCode to reside in upper 64K of ROM///////////// 5230/////////////Support for GDDR5 MC uCode to reside in upper 64K of ROM/////////////
4574 5231
4575#define UCODE_ROM_START_ADDRESS 0x1c000 5232#define UCODE_ROM_START_ADDRESS 0x1b800
4576#define UCODE_SIGNATURE 0x4375434d // 'MCuC' - MC uCode 5233#define UCODE_SIGNATURE 0x4375434d // 'MCuC' - MC uCode
4577 5234
4578//uCode block header for reference 5235//uCode block header for reference
@@ -4903,7 +5560,34 @@ typedef struct _ATOM_VRAM_MODULE_V6
4903 ATOM_MEMORY_TIMING_FORMAT_V2 asMemTiming[5];//Memory Timing block sort from lower clock to higher clock 5560 ATOM_MEMORY_TIMING_FORMAT_V2 asMemTiming[5];//Memory Timing block sort from lower clock to higher clock
4904}ATOM_VRAM_MODULE_V6; 5561}ATOM_VRAM_MODULE_V6;
4905 5562
4906 5563typedef struct _ATOM_VRAM_MODULE_V7
5564{
5565// Design Specific Values
5566 ULONG ulChannelMapCfg; // mmMC_SHARED_CHREMAP
5567 USHORT usModuleSize; // Size of ATOM_VRAM_MODULE_V7
5568 USHORT usPrivateReserved; // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
5569 USHORT usReserved;
5570 UCHAR ucExtMemoryID; // Current memory module ID
5571 UCHAR ucMemoryType; // MEM_TYPE_DDR2/DDR3/GDDR3/GDDR5
5572 UCHAR ucChannelNum; // Number of mem. channels supported in this module
5573 UCHAR ucChannelWidth; // CHANNEL_16BIT/CHANNEL_32BIT/CHANNEL_64BIT
5574 UCHAR ucDensity; // _8Mx32, _16Mx32, _16Mx16, _32Mx16
5575 UCHAR ucReserve; // Former container for Mx_FLAGS like DBI_AC_MODE_ENABLE_ASIC for GDDR4. Not used now.
5576 UCHAR ucMisc; // RANK_OF_THISMEMORY etc.
5577 UCHAR ucVREFI; // Not used.
5578 UCHAR ucNPL_RT; // Round trip delay (MC_SEQ_CAS_TIMING [28:24]:TCL=CL+NPL_RT-2). Always 2.
5579 UCHAR ucPreamble; // [7:4] Write Preamble, [3:0] Read Preamble
5580 UCHAR ucMemorySize; // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
5581 UCHAR ucReserved[3];
5582// Memory Module specific values
5583 USHORT usEMRS2Value; // EMRS2/MR2 Value.
5584 USHORT usEMRS3Value; // EMRS3/MR3 Value.
5585 UCHAR ucMemoryVenderID; // [7:4] Revision, [3:0] Vendor code
5586 UCHAR ucRefreshRateFactor; // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms)
5587 UCHAR ucFIFODepth; // FIFO depth can be detected during vendor detection, here is hardcoded per memory
5588 UCHAR ucCDR_Bandwidth; // [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth
5589 char strMemPNString[20]; // part number, ends with '\0'.
5590}ATOM_VRAM_MODULE_V7;
4907 5591
4908typedef struct _ATOM_VRAM_INFO_V2 5592typedef struct _ATOM_VRAM_INFO_V2
4909{ 5593{
@@ -4942,6 +5626,20 @@ typedef struct _ATOM_VRAM_INFO_V4
4942 // ATOM_INIT_REG_BLOCK aMemAdjust; 5626 // ATOM_INIT_REG_BLOCK aMemAdjust;
4943}ATOM_VRAM_INFO_V4; 5627}ATOM_VRAM_INFO_V4;
4944 5628
5629typedef struct _ATOM_VRAM_INFO_HEADER_V2_1
5630{
5631 ATOM_COMMON_TABLE_HEADER sHeader;
5632 USHORT usMemAdjustTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
5633 USHORT usMemClkPatchTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
5634 USHORT usReserved[4];
5635 UCHAR ucNumOfVRAMModule; // indicate number of VRAM module
5636 UCHAR ucMemoryClkPatchTblVer; // version of memory AC timing register list
5637 UCHAR ucVramModuleVer; // indicates ATOM_VRAM_MODULE version
5638 UCHAR ucReserved;
5639 ATOM_VRAM_MODULE_V7 aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; // just for allocation, real number of blocks is in ucNumOfVRAMModule;
5640}ATOM_VRAM_INFO_HEADER_V2_1;
5641
5642
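A lookup sketch (not part of the patch): aVramInfo[] is sized for the maximum module count, but ucNumOfVRAMModule holds the real count and each v7 module carries its own usModuleSize, so walking by stride is the safe approach.

static const ATOM_VRAM_MODULE_V7 *
find_vram_module(const ATOM_VRAM_INFO_HEADER_V2_1 *hdr, UCHAR ext_id)
{
	const UCHAR *p = (const UCHAR *)hdr->aVramInfo;
	int i;

	for (i = 0; i < hdr->ucNumOfVRAMModule; i++) {
		const ATOM_VRAM_MODULE_V7 *mod = (const ATOM_VRAM_MODULE_V7 *)p;

		if (mod->ucExtMemoryID == ext_id)
			return mod;
		p += mod->usModuleSize;		/* variable-size stride */
	}
	return NULL;	/* not found */
}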
4945typedef struct _ATOM_VRAM_GPIO_DETECTION_INFO 5643typedef struct _ATOM_VRAM_GPIO_DETECTION_INFO
4946{ 5644{
4947 ATOM_COMMON_TABLE_HEADER sHeader; 5645 ATOM_COMMON_TABLE_HEADER sHeader;
@@ -5182,6 +5880,16 @@ typedef struct _ASIC_TRANSMITTER_INFO
5182 UCHAR ucReserved; 5880 UCHAR ucReserved;
5183}ASIC_TRANSMITTER_INFO; 5881}ASIC_TRANSMITTER_INFO;
5184 5882
5883#define ASIC_TRANSMITTER_INFO_CONFIG__DVO_SDR_MODE 0x01
5884#define ASIC_TRANSMITTER_INFO_CONFIG__COHERENT_MODE 0x02
5885#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODEROBJ_ID_MASK 0xc4
5886#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_A 0x00
5887#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_B 0x04
5888#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_C 0x40
5889#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_D 0x44
5890#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_E 0x80
5891#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_F 0x84
5892
5185typedef struct _ASIC_ENCODER_INFO 5893typedef struct _ASIC_ENCODER_INFO
5186{ 5894{
5187 UCHAR ucEncoderID; 5895 UCHAR ucEncoderID;
@@ -5284,6 +5992,28 @@ typedef struct _DP_ENCODER_SERVICE_PARAMETERS
5284/* /obsolete */ 5992/* /obsolete */
5285#define DP_ENCODER_SERVICE_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS 5993#define DP_ENCODER_SERVICE_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
5286 5994
5995
5996typedef struct _DP_ENCODER_SERVICE_PARAMETERS_V2
5997{
5998 USHORT usExtEncoderObjId; // External Encoder Object Id, output parameter only, use when ucAction = DP_SERVICE_V2_ACTION_DET_EXT_CONNECTION
5999 UCHAR ucAuxId;
6000 UCHAR ucAction;
6001 UCHAR ucSinkType; // Input and output parameters.
6002 UCHAR ucHPDId; // Input parameter, used when ucAction = DP_SERVICE_V2_ACTION_DET_EXT_CONNECTION
6003 UCHAR ucReserved[2];
6004}DP_ENCODER_SERVICE_PARAMETERS_V2;
6005
6006typedef struct _DP_ENCODER_SERVICE_PS_ALLOCATION_V2
6007{
6008 DP_ENCODER_SERVICE_PARAMETERS_V2 asDPServiceParam;
6009 PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 asAuxParam;
6010}DP_ENCODER_SERVICE_PS_ALLOCATION_V2;
6011
6012// ucAction
6013#define DP_SERVICE_V2_ACTION_GET_SINK_TYPE 0x01
6014#define DP_SERVICE_V2_ACTION_DET_LCD_CONNECTION 0x02
6015
6016
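A query sketch (not in the patch) for the new v2 service: fill ucAuxId and ucAction, run the command table (the driver's atom_execute_table() dispatch, elided here), then read back the ucSinkType output parameter.

static UCHAR dp_get_sink_type_sketch(UCHAR aux_id)
{
	DP_ENCODER_SERVICE_PS_ALLOCATION_V2 args;

	memset(&args, 0, sizeof(args));
	args.asDPServiceParam.ucAuxId = aux_id;
	args.asDPServiceParam.ucAction = DP_SERVICE_V2_ACTION_GET_SINK_TYPE;
	/* atom_execute_table(... &args) would run here */
	return args.asDPServiceParam.ucSinkType;	/* output parameter */
}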
5287// DP_TRAINING_TABLE 6017// DP_TRAINING_TABLE
5288#define DPCD_SET_LINKRATE_LANENUM_PATTERN1_TBL_ADDR ATOM_DP_TRAINING_TBL_ADDR 6018#define DPCD_SET_LINKRATE_LANENUM_PATTERN1_TBL_ADDR ATOM_DP_TRAINING_TBL_ADDR
5289#define DPCD_SET_SS_CNTL_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 8 ) 6019#define DPCD_SET_SS_CNTL_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 8 )
@@ -5339,6 +6069,7 @@ typedef struct _SET_HWBLOCK_INSTANCE_PARAMETER_V2
5339#define SELECT_DCIO_IMPCAL 4 6069#define SELECT_DCIO_IMPCAL 4
5340#define SELECT_DCIO_DIG 6 6070#define SELECT_DCIO_DIG 6
5341#define SELECT_CRTC_PIXEL_RATE 7 6071#define SELECT_CRTC_PIXEL_RATE 7
6072#define SELECT_VGA_BLK 8
5342 6073
5343/****************************************************************************/ 6074/****************************************************************************/
5344//Portion VI: Definitions for vbios MC scratch registers that the driver uses 6075//Portion VI: Definitions for vbios MC scratch registers that the driver uses
@@ -5744,7 +6475,17 @@ typedef struct _ATOM_PPLIB_THERMALCONTROLLER
5744#define ATOM_PP_THERMALCONTROLLER_ADT7473 9 6475#define ATOM_PP_THERMALCONTROLLER_ADT7473 9
5745#define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO 11 6476#define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO 11
5746#define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12 6477#define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12
6478#define ATOM_PP_THERMALCONTROLLER_EMC2103 13 /* 0x0D */ // Only fan control will be implemented, do NOT show this in PPGen.
6479#define ATOM_PP_THERMALCONTROLLER_SUMO 14 /* 0x0E */ // Sumo type, used internally
6480#define ATOM_PP_THERMALCONTROLLER_NISLANDS 15
6481
6482// Thermal controller 'combo type' to use an external controller for Fan control and an internal controller for thermal.
6483// We probably should reserve the bit 0x80 for this use.
6484// To keep the number of these types low we should also use the same code for all ASICs (i.e. do not distinguish RV6xx and RV7xx Internal here).
6485// The driver can pick the correct internal controller based on the ASIC.
6486
5747#define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL 0x89 // ADT7473 Fan Control + Internal Thermal Controller 6487#define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL 0x89 // ADT7473 Fan Control + Internal Thermal Controller
6488#define ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL 0x8D // EMC2103 Fan Control + Internal Thermal Controller
5748 6489
5749typedef struct _ATOM_PPLIB_STATE 6490typedef struct _ATOM_PPLIB_STATE
5750{ 6491{
@@ -5841,6 +6582,29 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE3
5841 USHORT usExtendendedHeaderOffset; 6582 USHORT usExtendendedHeaderOffset;
5842} ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3; 6583} ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3;
5843 6584
6585typedef struct _ATOM_PPLIB_POWERPLAYTABLE4
6586{
6587 ATOM_PPLIB_POWERPLAYTABLE3 basicTable3;
6588 ULONG ulGoldenPPID; // PPGen use only
6589 ULONG ulGoldenRevision; // PPGen use only
6590 USHORT usVddcDependencyOnSCLKOffset;
6591 USHORT usVddciDependencyOnMCLKOffset;
6592 USHORT usVddcDependencyOnMCLKOffset;
6593 USHORT usMaxClockVoltageOnDCOffset;
6594 USHORT usReserved[2];
6595} ATOM_PPLIB_POWERPLAYTABLE4, *LPATOM_PPLIB_POWERPLAYTABLE4;
6596
6597typedef struct _ATOM_PPLIB_POWERPLAYTABLE5
6598{
6599 ATOM_PPLIB_POWERPLAYTABLE4 basicTable4;
6600 ULONG ulTDPLimit;
6601 ULONG ulNearTDPLimit;
6602 ULONG ulSQRampingThreshold;
6603 USHORT usCACLeakageTableOffset; // Points to ATOM_PPLIB_CAC_Leakage_Table
6604 ULONG ulCACLeakage; // TBD, this parameter is still under discussion. Change to ulReserved if not needed.
6605 ULONG ulReserved;
6606} ATOM_PPLIB_POWERPLAYTABLE5, *LPATOM_PPLIB_POWERPLAYTABLE5;
6607
5844//// ATOM_PPLIB_NONCLOCK_INFO::usClassification 6608//// ATOM_PPLIB_NONCLOCK_INFO::usClassification
5845#define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007 6609#define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007
5846#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0 6610#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0
@@ -5864,6 +6628,10 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE3
5864#define ATOM_PPLIB_CLASSIFICATION_HDSTATE 0x4000 6628#define ATOM_PPLIB_CLASSIFICATION_HDSTATE 0x4000
5865#define ATOM_PPLIB_CLASSIFICATION_SDSTATE 0x8000 6629#define ATOM_PPLIB_CLASSIFICATION_SDSTATE 0x8000
5866 6630
6631//// ATOM_PPLIB_NONCLOCK_INFO::usClassification2
6632#define ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2 0x0001
6633#define ATOM_PPLIB_CLASSIFICATION2_ULV 0x0002
6634
5867//// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings 6635//// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings
5868#define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001 6636#define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001
5869#define ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK 0x00000002 6637#define ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK 0x00000002
@@ -5896,9 +6664,21 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE3
5896#define ATOM_PPLIB_M3ARB_MASK 0x00060000 6664#define ATOM_PPLIB_M3ARB_MASK 0x00060000
5897#define ATOM_PPLIB_M3ARB_SHIFT 17 6665#define ATOM_PPLIB_M3ARB_SHIFT 17
5898 6666
6667#define ATOM_PPLIB_ENABLE_DRR 0x00080000
6668
6669// remaining 16 bits are reserved
6670typedef struct _ATOM_PPLIB_THERMAL_STATE
6671{
6672 UCHAR ucMinTemperature;
6673 UCHAR ucMaxTemperature;
6674 UCHAR ucThermalAction;
6675}ATOM_PPLIB_THERMAL_STATE, *LPATOM_PPLIB_THERMAL_STATE;
6676
5899// Contained in an array starting at the offset 6677// Contained in an array starting at the offset
5900// in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset. 6678// in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset.
5901// referenced from ATOM_PPLIB_STATE_INFO::ucNonClockStateIndex 6679// referenced from ATOM_PPLIB_STATE_INFO::ucNonClockStateIndex
6680#define ATOM_PPLIB_NONCLOCKINFO_VER1 12
6681#define ATOM_PPLIB_NONCLOCKINFO_VER2 24
5902typedef struct _ATOM_PPLIB_NONCLOCK_INFO 6682typedef struct _ATOM_PPLIB_NONCLOCK_INFO
5903{ 6683{
5904 USHORT usClassification; 6684 USHORT usClassification;
@@ -5906,15 +6686,15 @@ typedef struct _ATOM_PPLIB_NONCLOCK_INFO
5906 UCHAR ucMaxTemperature; 6686 UCHAR ucMaxTemperature;
5907 ULONG ulCapsAndSettings; 6687 ULONG ulCapsAndSettings;
5908 UCHAR ucRequiredPower; 6688 UCHAR ucRequiredPower;
5909 UCHAR ucUnused1[3]; 6689 USHORT usClassification2;
6690 ULONG ulVCLK;
6691 ULONG ulDCLK;
6692 UCHAR ucUnused[5];
5910} ATOM_PPLIB_NONCLOCK_INFO; 6693} ATOM_PPLIB_NONCLOCK_INFO;
5911 6694
5912// Contained in an array starting at the offset 6695// Contained in an array starting at the offset
5913// in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset. 6696// in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset.
5914// referenced from ATOM_PPLIB_STATE::ucClockStateIndices 6697// referenced from ATOM_PPLIB_STATE::ucClockStateIndices
5915#define ATOM_PPLIB_NONCLOCKINFO_VER1 12
5916#define ATOM_PPLIB_NONCLOCKINFO_VER2 24
5917
5918typedef struct _ATOM_PPLIB_R600_CLOCK_INFO 6698typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
5919{ 6699{
5920 USHORT usEngineClockLow; 6700 USHORT usEngineClockLow;
@@ -5985,6 +6765,93 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
5985#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1 6765#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1
5986#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2 6766#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2
5987 6767
6768typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{
6769 USHORT usEngineClockLow; //clock frequency & 0xFFFF. The unit is in 10kHz
6770 UCHAR ucEngineClockHigh; //clock frequency >> 16.
6771 UCHAR vddcIndex; //2-bit vddc index;
6772 UCHAR leakage; //please use 8-bit absolute value, not the 6-bit % value
6773 //please initialize to 0
6774 UCHAR rsv;
6775 //please initialize to 0
6776 USHORT rsv1;
6777 //please initialize to 0s
6778 ULONG rsv2[2];
6779}ATOM_PPLIB_SUMO_CLOCK_INFO;
6780
6781
6782
6783typedef struct _ATOM_PPLIB_STATE_V2
6784{
6785 //number of valid dpm levels in this state; Driver uses it to calculate the whole
6786 //size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR)
6787 UCHAR ucNumDPMLevels;
6788
6789 //an index into the array of nonClockInfos
6790 UCHAR nonClockInfoIndex;
6791 /**
6792 * Driver will read the first ucNumDPMLevels in this array
6793 */
6794 UCHAR clockInfoIndex[1];
6795} ATOM_PPLIB_STATE_V2;
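Since clockInfoIndex is a one-element tail, each state record is variable-sized. A minimal sketch of the sizing rule spelled out in the comment above; the helper itself is not part of the patch:

/* Hypothetical helper: byte size of one state record. The struct
 * already contains one clockInfoIndex element, hence the (n - 1);
 * callers must pass ucNumDPMLevels >= 1. */
static unsigned int pplib_state_v2_size(unsigned char num_dpm_levels)
{
        return sizeof(ATOM_PPLIB_STATE_V2) +
               (num_dpm_levels - 1) * sizeof(UCHAR);
}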
6796
6797typedef struct StateArray{
6798 //how many states we have
6799 UCHAR ucNumEntries;
6800
6801 ATOM_PPLIB_STATE_V2 states[1];
6802}StateArray;
6803
6804
6805typedef struct ClockInfoArray{
6806 //how many clock levels we have
6807 UCHAR ucNumEntries;
6808
6809 //sizeof(ATOM_PPLIB_SUMO_CLOCK_INFO)
6810 UCHAR ucEntrySize;
6811
6812 //this is for Sumo
6813 ATOM_PPLIB_SUMO_CLOCK_INFO clockInfo[1];
6814}ClockInfoArray;
6815
6816typedef struct NonClockInfoArray{
6817
6818 //how many non-clock levels we have; normally the same as the number of states
6819 UCHAR ucNumEntries;
6820 //sizeof(ATOM_PPLIB_NONCLOCK_INFO)
6821 UCHAR ucEntrySize;
6822
6823 ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1];
6824}NonClockInfoArray;
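Because the entries are variable-sized, these arrays are meant to be walked by the byte stride in ucEntrySize rather than by C array indexing. A minimal sketch, assuming only the structs above and not part of the patch:

/* Hypothetical helper: step through the clock info array by the
 * table's own stride, so a table whose record is larger than the
 * struct this code was built against still parses correctly. */
static ATOM_PPLIB_SUMO_CLOCK_INFO *
pplib_clock_info(ClockInfoArray *arr, unsigned int idx)
{
        UCHAR *base = (UCHAR *)&arr->clockInfo[0];

        return (ATOM_PPLIB_SUMO_CLOCK_INFO *)(base + idx * arr->ucEntrySize);
}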
6825
6826typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
6827{
6828 USHORT usClockLow;
6829 UCHAR ucClockHigh;
6830 USHORT usVoltage;
6831}ATOM_PPLIB_Clock_Voltage_Dependency_Record;
6832
6833typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Table
6834{
6835 UCHAR ucNumEntries; // Number of entries.
6836 ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[1]; // Dynamically allocated entries.
6837}ATOM_PPLIB_Clock_Voltage_Dependency_Table;
6838
6839typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Record
6840{
6841 USHORT usSclkLow;
6842 UCHAR ucSclkHigh;
6843 USHORT usMclkLow;
6844 UCHAR ucMclkHigh;
6845 USHORT usVddc;
6846 USHORT usVddci;
6847}ATOM_PPLIB_Clock_Voltage_Limit_Record;
6848
6849typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table
6850{
6851 UCHAR ucNumEntries; // Number of entries.
6852 ATOM_PPLIB_Clock_Voltage_Limit_Record entries[1]; // Dynamically allocated entries.
6853}ATOM_PPLIB_Clock_Voltage_Limit_Table;
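Both record types split their clocks the same low-word/high-byte way as the clock-info structs. A minimal decode sketch; treating usVoltage as millivolts is an assumption here, not something this header states, and the helper is not part of the patch:

/* Hypothetical helper. */
static void pplib_decode_dep(const ATOM_PPLIB_Clock_Voltage_Dependency_Record *rec,
                             unsigned int *clock_10khz, unsigned short *voltage_mv)
{
        *clock_10khz = ((unsigned int)rec->ucClockHigh << 16) | rec->usClockLow;
        *voltage_mv = rec->usVoltage;  /* assumed mV */
}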
6854
5988/**************************************************************************/ 6855/**************************************************************************/
5989 6856
5990 6857
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 9fbabaa6ee44..b0ab185b86f6 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -403,6 +403,7 @@ union atom_enable_ss {
403 ENABLE_LVDS_SS_PARAMETERS_V2 lvds_ss_2; 403 ENABLE_LVDS_SS_PARAMETERS_V2 lvds_ss_2;
404 ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1; 404 ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1;
405 ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 v2; 405 ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 v2;
406 ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3 v3;
406}; 407};
407 408
408static void atombios_crtc_program_ss(struct drm_crtc *crtc, 409static void atombios_crtc_program_ss(struct drm_crtc *crtc,
@@ -417,7 +418,30 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc,
417 418
418 memset(&args, 0, sizeof(args)); 419 memset(&args, 0, sizeof(args));
419 420
420 if (ASIC_IS_DCE4(rdev)) { 421 if (ASIC_IS_DCE5(rdev)) {
422 args.v3.usSpreadSpectrumAmountFrac = 0;
423 args.v3.ucSpreadSpectrumType = ss->type;
424 switch (pll_id) {
425 case ATOM_PPLL1:
426 args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P1PLL;
427 args.v3.usSpreadSpectrumAmount = ss->amount;
428 args.v3.usSpreadSpectrumStep = ss->step;
429 break;
430 case ATOM_PPLL2:
431 args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P2PLL;
432 args.v3.usSpreadSpectrumAmount = ss->amount;
433 args.v3.usSpreadSpectrumStep = ss->step;
434 break;
435 case ATOM_DCPLL:
436 args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_DCPLL;
437 args.v3.usSpreadSpectrumAmount = 0;
438 args.v3.usSpreadSpectrumStep = 0;
439 break;
440 case ATOM_PPLL_INVALID:
441 return;
442 }
443 args.v2.ucEnable = enable;
444 } else if (ASIC_IS_DCE4(rdev)) {
421 args.v2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); 445 args.v2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
422 args.v2.ucSpreadSpectrumType = ss->type; 446 args.v2.ucSpreadSpectrumType = ss->type;
423 switch (pll_id) { 447 switch (pll_id) {
@@ -673,9 +697,14 @@ union set_pixel_clock {
673 PIXEL_CLOCK_PARAMETERS_V2 v2; 697 PIXEL_CLOCK_PARAMETERS_V2 v2;
674 PIXEL_CLOCK_PARAMETERS_V3 v3; 698 PIXEL_CLOCK_PARAMETERS_V3 v3;
675 PIXEL_CLOCK_PARAMETERS_V5 v5; 699 PIXEL_CLOCK_PARAMETERS_V5 v5;
700 PIXEL_CLOCK_PARAMETERS_V6 v6;
676}; 701};
677 702
678static void atombios_crtc_set_dcpll(struct drm_crtc *crtc) 703/* on DCE5, make sure the voltage is high enough to support the
704 * required disp clk.
705 */
706static void atombios_crtc_set_dcpll(struct drm_crtc *crtc,
707 u32 dispclk)
679{ 708{
680 struct drm_device *dev = crtc->dev; 709 struct drm_device *dev = crtc->dev;
681 struct radeon_device *rdev = dev->dev_private; 710 struct radeon_device *rdev = dev->dev_private;
@@ -698,9 +727,16 @@ static void atombios_crtc_set_dcpll(struct drm_crtc *crtc)
698 * SetPixelClock provides the dividers 727 * SetPixelClock provides the dividers
699 */ 728 */
700 args.v5.ucCRTC = ATOM_CRTC_INVALID; 729 args.v5.ucCRTC = ATOM_CRTC_INVALID;
701 args.v5.usPixelClock = rdev->clock.default_dispclk; 730 args.v5.usPixelClock = dispclk;
702 args.v5.ucPpll = ATOM_DCPLL; 731 args.v5.ucPpll = ATOM_DCPLL;
703 break; 732 break;
733 case 6:
734 /* if the default dcpll clock is specified,
735 * SetPixelClock provides the dividers
736 */
737 args.v6.ulDispEngClkFreq = dispclk;
738 args.v6.ucPpll = ATOM_DCPLL;
739 break;
704 default: 740 default:
705 DRM_ERROR("Unknown table version %d %d\n", frev, crev); 741 DRM_ERROR("Unknown table version %d %d\n", frev, crev);
706 return; 742 return;
@@ -784,6 +820,18 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc,
784 args.v5.ucEncoderMode = encoder_mode; 820 args.v5.ucEncoderMode = encoder_mode;
785 args.v5.ucPpll = pll_id; 821 args.v5.ucPpll = pll_id;
786 break; 822 break;
823 case 6:
824 args.v6.ulCrtcPclkFreq.ucCRTC = crtc_id;
825 args.v6.ulCrtcPclkFreq.ulPixelClock = cpu_to_le32(clock / 10);
826 args.v6.ucRefDiv = ref_div;
827 args.v6.usFbDiv = cpu_to_le16(fb_div);
828 args.v6.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000);
829 args.v6.ucPostDiv = post_div;
830 args.v6.ucMiscInfo = 0; /* HDMI depth, etc. */
831 args.v6.ucTransmitterID = encoder_id;
832 args.v6.ucEncoderMode = encoder_mode;
833 args.v6.ucPpll = pll_id;
834 break;
787 default: 835 default:
788 DRM_ERROR("Unknown table version %d %d\n", frev, crev); 836 DRM_ERROR("Unknown table version %d %d\n", frev, crev);
789 return; 837 return;
@@ -1377,7 +1425,8 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
1377 rdev->clock.default_dispclk); 1425 rdev->clock.default_dispclk);
1378 if (ss_enabled) 1426 if (ss_enabled)
1379 atombios_crtc_program_ss(crtc, ATOM_DISABLE, ATOM_DCPLL, &ss); 1427 atombios_crtc_program_ss(crtc, ATOM_DISABLE, ATOM_DCPLL, &ss);
1380 atombios_crtc_set_dcpll(crtc); 1428 /* XXX: DCE5, make sure voltage, dispclk is high enough */
1429 atombios_crtc_set_dcpll(crtc, rdev->clock.default_dispclk);
1381 if (ss_enabled) 1430 if (ss_enabled)
1382 atombios_crtc_program_ss(crtc, ATOM_ENABLE, ATOM_DCPLL, &ss); 1431 atombios_crtc_program_ss(crtc, ATOM_ENABLE, ATOM_DCPLL, &ss);
1383 } 1432 }
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 7b337c361a12..7fe8ebdcdc0e 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -39,6 +39,62 @@
39 39
40static void evergreen_gpu_init(struct radeon_device *rdev); 40static void evergreen_gpu_init(struct radeon_device *rdev);
41void evergreen_fini(struct radeon_device *rdev); 41void evergreen_fini(struct radeon_device *rdev);
42static void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
43
44void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
45{
46 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
47 u32 tmp;
48
49 /* make sure the flip happens in vblank rather than hblank */
50 tmp = RREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset);
51 tmp &= ~EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN;
52 WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
53
54 /* set pageflip to happen anywhere in vblank interval */
55 WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
56
57 /* enable the pflip int */
58 radeon_irq_kms_pflip_irq_get(rdev, crtc);
59}
60
61void evergreen_post_page_flip(struct radeon_device *rdev, int crtc)
62{
63 /* disable the pflip int */
64 radeon_irq_kms_pflip_irq_put(rdev, crtc);
65}
66
67u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
68{
69 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
70 u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
71
72 /* Lock the graphics update lock */
73 tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
74 WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
75
76 /* update the scanout addresses */
77 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
78 upper_32_bits(crtc_base));
79 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
80 (u32)crtc_base);
81
82 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
83 upper_32_bits(crtc_base));
84 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
85 (u32)crtc_base);
86
87 /* Wait for update_pending to go high. */
88 while (!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING));
89 DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
90
91 /* Unlock the lock, so double-buffering can take place inside vblank */
92 tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
93 WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
94
95 /* Return current update_pending status: */
96 return RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING;
97}
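The lock/program/unlock sequence above makes the primary and secondary addresses latch together at the next vblank, and the return value reports whether that latch is still pending. A minimal caller sketch under that reading; the helper name is an assumption, not part of the patch:

/* Hypothetical caller: a non-zero return means the new scanout base
 * has not latched yet, so flip completion should be deferred to the
 * pflip interrupt handler. */
static bool evergreen_flip_still_pending(struct radeon_device *rdev,
                                         int crtc_id, u64 base)
{
        return evergreen_page_flip(rdev, crtc_id, base) != 0;
}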
42 98
43/* get temperature in millidegrees */ 99/* get temperature in millidegrees */
44u32 evergreen_get_temp(struct radeon_device *rdev) 100u32 evergreen_get_temp(struct radeon_device *rdev)
@@ -57,6 +113,14 @@ u32 evergreen_get_temp(struct radeon_device *rdev)
57 return actual_temp * 1000; 113 return actual_temp * 1000;
58} 114}
59 115
116u32 sumo_get_temp(struct radeon_device *rdev)
117{
118 u32 temp = RREG32(CG_THERMAL_STATUS) & 0xff;
119 u32 actual_temp = (temp >> 1) & 0xff;
120
121 return actual_temp * 1000;
122}
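The decode shifts out the lowest status bit before scaling to millidegrees; for example:

/* Worked example: a CG_THERMAL_STATUS low byte of 0x7A gives
 * actual_temp = (0x7A >> 1) & 0xff = 61, so the function returns
 * 61000 millidegrees, i.e. 61 C. */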
123
60void evergreen_pm_misc(struct radeon_device *rdev) 124void evergreen_pm_misc(struct radeon_device *rdev)
61{ 125{
62 int req_ps_idx = rdev->pm.requested_power_state_index; 126 int req_ps_idx = rdev->pm.requested_power_state_index;
@@ -337,16 +401,28 @@ static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
337 case 0: 401 case 0:
338 case 4: 402 case 4:
339 default: 403 default:
340 return 3840 * 2; 404 if (ASIC_IS_DCE5(rdev))
405 return 4096 * 2;
406 else
407 return 3840 * 2;
341 case 1: 408 case 1:
342 case 5: 409 case 5:
343 return 5760 * 2; 410 if (ASIC_IS_DCE5(rdev))
411 return 6144 * 2;
412 else
413 return 5760 * 2;
344 case 2: 414 case 2:
345 case 6: 415 case 6:
346 return 7680 * 2; 416 if (ASIC_IS_DCE5(rdev))
417 return 8192 * 2;
418 else
419 return 7680 * 2;
347 case 3: 420 case 3:
348 case 7: 421 case 7:
349 return 1920 * 2; 422 if (ASIC_IS_DCE5(rdev))
423 return 2048 * 2;
424 else
425 return 1920 * 2;
350 } 426 }
351} 427}
352 428
@@ -890,31 +966,39 @@ static void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_sa
890 save->vga_hdp_control = RREG32(VGA_HDP_CONTROL); 966 save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
891 save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET); 967 save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
892 save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET); 968 save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
893 save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET); 969 if (!(rdev->flags & RADEON_IS_IGP)) {
894 save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET); 970 save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
895 save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET); 971 save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
896 save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET); 972 save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
973 save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
974 }
897 975
898 /* Stop all video */ 976 /* Stop all video */
899 WREG32(VGA_RENDER_CONTROL, 0); 977 WREG32(VGA_RENDER_CONTROL, 0);
900 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1); 978 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
901 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1); 979 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
902 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1); 980 if (!(rdev->flags & RADEON_IS_IGP)) {
903 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1); 981 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
904 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1); 982 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
905 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1); 983 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
984 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
985 }
906 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); 986 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
907 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); 987 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
908 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); 988 if (!(rdev->flags & RADEON_IS_IGP)) {
909 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); 989 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
910 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); 990 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
911 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); 991 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
992 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
993 }
912 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); 994 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
913 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); 995 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
914 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); 996 if (!(rdev->flags & RADEON_IS_IGP)) {
915 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); 997 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
916 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); 998 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
917 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); 999 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
1000 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
1001 }
918 1002
919 WREG32(D1VGA_CONTROL, 0); 1003 WREG32(D1VGA_CONTROL, 0);
920 WREG32(D2VGA_CONTROL, 0); 1004 WREG32(D2VGA_CONTROL, 0);
@@ -944,41 +1028,43 @@ static void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_
944 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET, 1028 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
945 (u32)rdev->mc.vram_start); 1029 (u32)rdev->mc.vram_start);
946 1030
947 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET, 1031 if (!(rdev->flags & RADEON_IS_IGP)) {
948 upper_32_bits(rdev->mc.vram_start)); 1032 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
949 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET, 1033 upper_32_bits(rdev->mc.vram_start));
950 upper_32_bits(rdev->mc.vram_start)); 1034 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
951 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET, 1035 upper_32_bits(rdev->mc.vram_start));
952 (u32)rdev->mc.vram_start); 1036 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
953 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET, 1037 (u32)rdev->mc.vram_start);
954 (u32)rdev->mc.vram_start); 1038 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
955 1039 (u32)rdev->mc.vram_start);
956 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET, 1040
957 upper_32_bits(rdev->mc.vram_start)); 1041 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
958 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET, 1042 upper_32_bits(rdev->mc.vram_start));
959 upper_32_bits(rdev->mc.vram_start)); 1043 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
960 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET, 1044 upper_32_bits(rdev->mc.vram_start));
961 (u32)rdev->mc.vram_start); 1045 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
962 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET, 1046 (u32)rdev->mc.vram_start);
963 (u32)rdev->mc.vram_start); 1047 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
964 1048 (u32)rdev->mc.vram_start);
965 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET, 1049
966 upper_32_bits(rdev->mc.vram_start)); 1050 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
967 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET, 1051 upper_32_bits(rdev->mc.vram_start));
968 upper_32_bits(rdev->mc.vram_start)); 1052 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
969 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET, 1053 upper_32_bits(rdev->mc.vram_start));
970 (u32)rdev->mc.vram_start); 1054 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
971 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET, 1055 (u32)rdev->mc.vram_start);
972 (u32)rdev->mc.vram_start); 1056 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
973 1057 (u32)rdev->mc.vram_start);
974 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET, 1058
975 upper_32_bits(rdev->mc.vram_start)); 1059 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
976 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET, 1060 upper_32_bits(rdev->mc.vram_start));
977 upper_32_bits(rdev->mc.vram_start)); 1061 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
978 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET, 1062 upper_32_bits(rdev->mc.vram_start));
979 (u32)rdev->mc.vram_start); 1063 WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
980 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET, 1064 (u32)rdev->mc.vram_start);
981 (u32)rdev->mc.vram_start); 1065 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
1066 (u32)rdev->mc.vram_start);
1067 }
982 1068
983 WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start)); 1069 WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
984 WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start); 1070 WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
@@ -994,22 +1080,28 @@ static void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_
994 WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]); 1080 WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]);
995 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1); 1081 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
996 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1); 1082 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
997 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1); 1083 if (!(rdev->flags & RADEON_IS_IGP)) {
998 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1); 1084 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
999 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1); 1085 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
1000 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1); 1086 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
1087 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
1088 }
1001 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]); 1089 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]);
1002 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]); 1090 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]);
1003 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]); 1091 if (!(rdev->flags & RADEON_IS_IGP)) {
1004 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]); 1092 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]);
1005 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]); 1093 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]);
1006 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]); 1094 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]);
1095 WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]);
1096 }
1007 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); 1097 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
1008 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); 1098 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
1009 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); 1099 if (!(rdev->flags & RADEON_IS_IGP)) {
1010 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); 1100 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
1011 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); 1101 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
1012 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); 1102 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
1103 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
1104 }
1013 WREG32(VGA_RENDER_CONTROL, save->vga_render_control); 1105 WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
1014} 1106}
1015 1107
@@ -1057,11 +1149,17 @@ static void evergreen_mc_program(struct radeon_device *rdev)
1057 rdev->mc.vram_end >> 12); 1149 rdev->mc.vram_end >> 12);
1058 } 1150 }
1059 WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0); 1151 WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
1152 if (rdev->flags & RADEON_IS_IGP) {
1153 tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF;
1154 tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24;
1155 tmp |= ((rdev->mc.vram_start >> 20) & 0xF) << 20;
1156 WREG32(MC_FUS_VM_FB_OFFSET, tmp);
1157 }
1060 tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16; 1158 tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
1061 tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF); 1159 tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
1062 WREG32(MC_VM_FB_LOCATION, tmp); 1160 WREG32(MC_VM_FB_LOCATION, tmp);
1063 WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); 1161 WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
1064 WREG32(HDP_NONSURFACE_INFO, (2 << 7)); 1162 WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
1065 WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF); 1163 WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
1066 if (rdev->flags & RADEON_IS_AGP) { 1164 if (rdev->flags & RADEON_IS_AGP) {
1067 WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16); 1165 WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
@@ -1285,11 +1383,15 @@ static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
1285 switch (rdev->family) { 1383 switch (rdev->family) {
1286 case CHIP_CEDAR: 1384 case CHIP_CEDAR:
1287 case CHIP_REDWOOD: 1385 case CHIP_REDWOOD:
1386 case CHIP_PALM:
1387 case CHIP_TURKS:
1388 case CHIP_CAICOS:
1288 force_no_swizzle = false; 1389 force_no_swizzle = false;
1289 break; 1390 break;
1290 case CHIP_CYPRESS: 1391 case CHIP_CYPRESS:
1291 case CHIP_HEMLOCK: 1392 case CHIP_HEMLOCK:
1292 case CHIP_JUNIPER: 1393 case CHIP_JUNIPER:
1394 case CHIP_BARTS:
1293 default: 1395 default:
1294 force_no_swizzle = true; 1396 force_no_swizzle = true;
1295 break; 1397 break;
@@ -1384,6 +1486,46 @@ static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
1384 return backend_map; 1486 return backend_map;
1385} 1487}
1386 1488
1489static void evergreen_program_channel_remap(struct radeon_device *rdev)
1490{
1491 u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp;
1492
1493 tmp = RREG32(MC_SHARED_CHMAP);
1494 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
1495 case 0:
1496 case 1:
1497 case 2:
1498 case 3:
1499 default:
1500 /* default mapping */
1501 mc_shared_chremap = 0x00fac688;
1502 break;
1503 }
1504
1505 switch (rdev->family) {
1506 case CHIP_HEMLOCK:
1507 case CHIP_CYPRESS:
1508 case CHIP_BARTS:
1509 tcp_chan_steer_lo = 0x54763210;
1510 tcp_chan_steer_hi = 0x0000ba98;
1511 break;
1512 case CHIP_JUNIPER:
1513 case CHIP_REDWOOD:
1514 case CHIP_CEDAR:
1515 case CHIP_PALM:
1516 case CHIP_TURKS:
1517 case CHIP_CAICOS:
1518 default:
1519 tcp_chan_steer_lo = 0x76543210;
1520 tcp_chan_steer_hi = 0x0000ba98;
1521 break;
1522 }
1523
1524 WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo);
1525 WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi);
1526 WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
1527}
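The steering values look nibble-packed: 0x76543210 / 0x0000ba98 reads as the identity map of channel indices 0 through 11. Under that assumption (the patch itself does not document the encoding), a decode sketch, not part of the patch:

/* Hypothetical decode: one memory-channel index per nibble, positions
 * 0-7 in the low word, 8 and up in the high word. */
static unsigned int tcp_chan_at(u32 steer_lo, u32 steer_hi, unsigned int pos)
{
        u32 word = (pos < 8) ? steer_lo : steer_hi;

        return (word >> ((pos & 7) * 4)) & 0xf;
}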
1528
1387static void evergreen_gpu_init(struct radeon_device *rdev) 1529static void evergreen_gpu_init(struct radeon_device *rdev)
1388{ 1530{
1389 u32 cc_rb_backend_disable = 0; 1531 u32 cc_rb_backend_disable = 0;
@@ -1495,6 +1637,90 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1495 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; 1637 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1496 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; 1638 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1497 break; 1639 break;
1640 case CHIP_PALM:
1641 rdev->config.evergreen.num_ses = 1;
1642 rdev->config.evergreen.max_pipes = 2;
1643 rdev->config.evergreen.max_tile_pipes = 2;
1644 rdev->config.evergreen.max_simds = 2;
1645 rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
1646 rdev->config.evergreen.max_gprs = 256;
1647 rdev->config.evergreen.max_threads = 192;
1648 rdev->config.evergreen.max_gs_threads = 16;
1649 rdev->config.evergreen.max_stack_entries = 256;
1650 rdev->config.evergreen.sx_num_of_sets = 4;
1651 rdev->config.evergreen.sx_max_export_size = 128;
1652 rdev->config.evergreen.sx_max_export_pos_size = 32;
1653 rdev->config.evergreen.sx_max_export_smx_size = 96;
1654 rdev->config.evergreen.max_hw_contexts = 4;
1655 rdev->config.evergreen.sq_num_cf_insts = 1;
1656
1657 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1658 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1659 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1660 break;
1661 case CHIP_BARTS:
1662 rdev->config.evergreen.num_ses = 2;
1663 rdev->config.evergreen.max_pipes = 4;
1664 rdev->config.evergreen.max_tile_pipes = 8;
1665 rdev->config.evergreen.max_simds = 7;
1666 rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
1667 rdev->config.evergreen.max_gprs = 256;
1668 rdev->config.evergreen.max_threads = 248;
1669 rdev->config.evergreen.max_gs_threads = 32;
1670 rdev->config.evergreen.max_stack_entries = 512;
1671 rdev->config.evergreen.sx_num_of_sets = 4;
1672 rdev->config.evergreen.sx_max_export_size = 256;
1673 rdev->config.evergreen.sx_max_export_pos_size = 64;
1674 rdev->config.evergreen.sx_max_export_smx_size = 192;
1675 rdev->config.evergreen.max_hw_contexts = 8;
1676 rdev->config.evergreen.sq_num_cf_insts = 2;
1677
1678 rdev->config.evergreen.sc_prim_fifo_size = 0x100;
1679 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1680 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1681 break;
1682 case CHIP_TURKS:
1683 rdev->config.evergreen.num_ses = 1;
1684 rdev->config.evergreen.max_pipes = 4;
1685 rdev->config.evergreen.max_tile_pipes = 4;
1686 rdev->config.evergreen.max_simds = 6;
1687 rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
1688 rdev->config.evergreen.max_gprs = 256;
1689 rdev->config.evergreen.max_threads = 248;
1690 rdev->config.evergreen.max_gs_threads = 32;
1691 rdev->config.evergreen.max_stack_entries = 256;
1692 rdev->config.evergreen.sx_num_of_sets = 4;
1693 rdev->config.evergreen.sx_max_export_size = 256;
1694 rdev->config.evergreen.sx_max_export_pos_size = 64;
1695 rdev->config.evergreen.sx_max_export_smx_size = 192;
1696 rdev->config.evergreen.max_hw_contexts = 8;
1697 rdev->config.evergreen.sq_num_cf_insts = 2;
1698
1699 rdev->config.evergreen.sc_prim_fifo_size = 0x100;
1700 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1701 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1702 break;
1703 case CHIP_CAICOS:
1704 rdev->config.evergreen.num_ses = 1;
1705 rdev->config.evergreen.max_pipes = 4;
1706 rdev->config.evergreen.max_tile_pipes = 2;
1707 rdev->config.evergreen.max_simds = 2;
1708 rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
1709 rdev->config.evergreen.max_gprs = 256;
1710 rdev->config.evergreen.max_threads = 192;
1711 rdev->config.evergreen.max_gs_threads = 16;
1712 rdev->config.evergreen.max_stack_entries = 256;
1713 rdev->config.evergreen.sx_num_of_sets = 4;
1714 rdev->config.evergreen.sx_max_export_size = 128;
1715 rdev->config.evergreen.sx_max_export_pos_size = 32;
1716 rdev->config.evergreen.sx_max_export_smx_size = 96;
1717 rdev->config.evergreen.max_hw_contexts = 4;
1718 rdev->config.evergreen.sq_num_cf_insts = 1;
1719
1720 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1721 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1722 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1723 break;
1498 } 1724 }
1499 1725
1500 /* Initialize HDP */ 1726 /* Initialize HDP */
@@ -1636,6 +1862,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1636 switch (rdev->family) { 1862 switch (rdev->family) {
1637 case CHIP_CYPRESS: 1863 case CHIP_CYPRESS:
1638 case CHIP_HEMLOCK: 1864 case CHIP_HEMLOCK:
1865 case CHIP_BARTS:
1639 gb_backend_map = 0x66442200; 1866 gb_backend_map = 0x66442200;
1640 break; 1867 break;
1641 case CHIP_JUNIPER: 1868 case CHIP_JUNIPER:
@@ -1687,6 +1914,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1687 WREG32(DMIF_ADDR_CONFIG, gb_addr_config); 1914 WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
1688 WREG32(HDP_ADDR_CONFIG, gb_addr_config); 1915 WREG32(HDP_ADDR_CONFIG, gb_addr_config);
1689 1916
1917 evergreen_program_channel_remap(rdev);
1918
1690 num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1; 1919 num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;
1691 grbm_gfx_index = INSTANCE_BROADCAST_WRITES; 1920 grbm_gfx_index = INSTANCE_BROADCAST_WRITES;
1692 1921
@@ -1769,9 +1998,16 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1769 GS_PRIO(2) | 1998 GS_PRIO(2) |
1770 ES_PRIO(3)); 1999 ES_PRIO(3));
1771 2000
1772 if (rdev->family == CHIP_CEDAR) 2001 switch (rdev->family) {
2002 case CHIP_CEDAR:
2003 case CHIP_PALM:
2004 case CHIP_CAICOS:
1773 /* no vertex cache */ 2005 /* no vertex cache */
1774 sq_config &= ~VC_ENABLE; 2006 sq_config &= ~VC_ENABLE;
2007 break;
2008 default:
2009 break;
2010 }
1775 2011
1776 sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT); 2012 sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT);
1777 2013
@@ -1783,10 +2019,15 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1783 sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32); 2019 sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
1784 sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32); 2020 sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
1785 2021
1786 if (rdev->family == CHIP_CEDAR) 2022 switch (rdev->family) {
2023 case CHIP_CEDAR:
2024 case CHIP_PALM:
1787 ps_thread_count = 96; 2025 ps_thread_count = 96;
1788 else 2026 break;
2027 default:
1789 ps_thread_count = 128; 2028 ps_thread_count = 128;
2029 break;
2030 }
1790 2031
1791 sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count); 2032 sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count);
1792 sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); 2033 sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
@@ -1817,10 +2058,16 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1817 WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) | 2058 WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
1818 FORCE_EOV_MAX_REZ_CNT(255))); 2059 FORCE_EOV_MAX_REZ_CNT(255)));
1819 2060
1820 if (rdev->family == CHIP_CEDAR) 2061 switch (rdev->family) {
2062 case CHIP_CEDAR:
2063 case CHIP_PALM:
2064 case CHIP_CAICOS:
1821 vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY); 2065 vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY);
1822 else 2066 break;
2067 default:
1823 vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC); 2068 vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC);
2069 break;
2070 }
1824 vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO); 2071 vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO);
1825 WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation); 2072 WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation);
1826 2073
@@ -1904,12 +2151,18 @@ int evergreen_mc_init(struct radeon_device *rdev)
1904 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); 2151 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
1905 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); 2152 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
1906 /* Setup GPU memory space */ 2153 /* Setup GPU memory space */
1907 /* size in MB on evergreen */ 2154 if (rdev->flags & RADEON_IS_IGP) {
1908 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; 2155 /* size in bytes on fusion */
1909 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; 2156 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
2157 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
2158 } else {
2159 /* size in MB on evergreen */
2160 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
2161 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
2162 }
1910 rdev->mc.visible_vram_size = rdev->mc.aper_size; 2163 rdev->mc.visible_vram_size = rdev->mc.aper_size;
1911 rdev->mc.active_vram_size = rdev->mc.visible_vram_size; 2164 rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
1912 r600_vram_gtt_location(rdev, &rdev->mc); 2165 r700_vram_gtt_location(rdev, &rdev->mc);
1913 radeon_update_bandwidth_info(rdev); 2166 radeon_update_bandwidth_info(rdev);
1914 2167
1915 return 0; 2168 return 0;
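The branch above exists because CONFIG_MEMSIZE changes units across the two families. A restatement sketch of that logic; this is not a new helper in the patch:

/* Bytes on Fusion IGPs, megabytes on discrete Evergreen; mixing the
 * two up would mis-size VRAM by a factor of 2^20. */
static u64 evergreen_vram_size_bytes(struct radeon_device *rdev)
{
        u64 size = RREG32(CONFIG_MEMSIZE);

        return (rdev->flags & RADEON_IS_IGP) ? size : size * 1024 * 1024;
}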
@@ -1917,8 +2170,30 @@ int evergreen_mc_init(struct radeon_device *rdev)
1917 2170
1918bool evergreen_gpu_is_lockup(struct radeon_device *rdev) 2171bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
1919{ 2172{
1920 /* FIXME: implement for evergreen */ 2173 u32 srbm_status;
1921 return false; 2174 u32 grbm_status;
2175 u32 grbm_status_se0, grbm_status_se1;
2176 struct r100_gpu_lockup *lockup = &rdev->config.evergreen.lockup;
2177 int r;
2178
2179 srbm_status = RREG32(SRBM_STATUS);
2180 grbm_status = RREG32(GRBM_STATUS);
2181 grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
2182 grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
2183 if (!(grbm_status & GUI_ACTIVE)) {
2184 r100_gpu_lockup_update(lockup, &rdev->cp);
2185 return false;
2186 }
2187 /* force CP activities */
2188 r = radeon_ring_lock(rdev, 2);
2189 if (!r) {
2190 /* PACKET2 NOP */
2191 radeon_ring_write(rdev, 0x80000000);
2192 radeon_ring_write(rdev, 0x80000000);
2193 radeon_ring_unlock_commit(rdev);
2194 }
2195 rdev->cp.rptr = RREG32(CP_RB_RPTR);
2196 return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
1922} 2197}
1923 2198
1924static int evergreen_gpu_soft_reset(struct radeon_device *rdev) 2199static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
@@ -2011,17 +2286,21 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
2011 WREG32(GRBM_INT_CNTL, 0); 2286 WREG32(GRBM_INT_CNTL, 0);
2012 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); 2287 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
2013 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); 2288 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
2014 WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); 2289 if (!(rdev->flags & RADEON_IS_IGP)) {
2015 WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); 2290 WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
2016 WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); 2291 WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
2017 WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); 2292 WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
2293 WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
2294 }
2018 2295
2019 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); 2296 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
2020 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); 2297 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
2021 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); 2298 if (!(rdev->flags & RADEON_IS_IGP)) {
2022 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); 2299 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
2023 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); 2300 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
2024 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); 2301 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
2302 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
2303 }
2025 2304
2026 WREG32(DACA_AUTODETECT_INT_CONTROL, 0); 2305 WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
2027 WREG32(DACB_AUTODETECT_INT_CONTROL, 0); 2306 WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
@@ -2047,6 +2326,7 @@ int evergreen_irq_set(struct radeon_device *rdev)
2047 u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; 2326 u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
2048 u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6; 2327 u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
2049 u32 grbm_int_cntl = 0; 2328 u32 grbm_int_cntl = 0;
2329 u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
2050 2330
2051 if (!rdev->irq.installed) { 2331 if (!rdev->irq.installed) {
2052 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); 2332 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -2072,27 +2352,33 @@ int evergreen_irq_set(struct radeon_device *rdev)
2072 cp_int_cntl |= RB_INT_ENABLE; 2352 cp_int_cntl |= RB_INT_ENABLE;
2073 cp_int_cntl |= TIME_STAMP_INT_ENABLE; 2353 cp_int_cntl |= TIME_STAMP_INT_ENABLE;
2074 } 2354 }
2075 if (rdev->irq.crtc_vblank_int[0]) { 2355 if (rdev->irq.crtc_vblank_int[0] ||
2356 rdev->irq.pflip[0]) {
2076 DRM_DEBUG("evergreen_irq_set: vblank 0\n"); 2357 DRM_DEBUG("evergreen_irq_set: vblank 0\n");
2077 crtc1 |= VBLANK_INT_MASK; 2358 crtc1 |= VBLANK_INT_MASK;
2078 } 2359 }
2079 if (rdev->irq.crtc_vblank_int[1]) { 2360 if (rdev->irq.crtc_vblank_int[1] ||
2361 rdev->irq.pflip[1]) {
2080 DRM_DEBUG("evergreen_irq_set: vblank 1\n"); 2362 DRM_DEBUG("evergreen_irq_set: vblank 1\n");
2081 crtc2 |= VBLANK_INT_MASK; 2363 crtc2 |= VBLANK_INT_MASK;
2082 } 2364 }
2083 if (rdev->irq.crtc_vblank_int[2]) { 2365 if (rdev->irq.crtc_vblank_int[2] ||
2366 rdev->irq.pflip[2]) {
2084 DRM_DEBUG("evergreen_irq_set: vblank 2\n"); 2367 DRM_DEBUG("evergreen_irq_set: vblank 2\n");
2085 crtc3 |= VBLANK_INT_MASK; 2368 crtc3 |= VBLANK_INT_MASK;
2086 } 2369 }
2087 if (rdev->irq.crtc_vblank_int[3]) { 2370 if (rdev->irq.crtc_vblank_int[3] ||
2371 rdev->irq.pflip[3]) {
2088 DRM_DEBUG("evergreen_irq_set: vblank 3\n"); 2372 DRM_DEBUG("evergreen_irq_set: vblank 3\n");
2089 crtc4 |= VBLANK_INT_MASK; 2373 crtc4 |= VBLANK_INT_MASK;
2090 } 2374 }
2091 if (rdev->irq.crtc_vblank_int[4]) { 2375 if (rdev->irq.crtc_vblank_int[4] ||
2376 rdev->irq.pflip[4]) {
2092 DRM_DEBUG("evergreen_irq_set: vblank 4\n"); 2377 DRM_DEBUG("evergreen_irq_set: vblank 4\n");
2093 crtc5 |= VBLANK_INT_MASK; 2378 crtc5 |= VBLANK_INT_MASK;
2094 } 2379 }
2095 if (rdev->irq.crtc_vblank_int[5]) { 2380 if (rdev->irq.crtc_vblank_int[5] ||
2381 rdev->irq.pflip[5]) {
2096 DRM_DEBUG("evergreen_irq_set: vblank 5\n"); 2382 DRM_DEBUG("evergreen_irq_set: vblank 5\n");
2097 crtc6 |= VBLANK_INT_MASK; 2383 crtc6 |= VBLANK_INT_MASK;
2098 } 2384 }
@@ -2130,10 +2416,19 @@ int evergreen_irq_set(struct radeon_device *rdev)
2130 2416
2131 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1); 2417 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
2132 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2); 2418 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
2133 WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3); 2419 if (!(rdev->flags & RADEON_IS_IGP)) {
2134 WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4); 2420 WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
2135 WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5); 2421 WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
2136 WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6); 2422 WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
2423 WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
2424 }
2425
2426 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
2427 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
2428 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
2429 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
2430 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
2431 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
2137 2432
2138 WREG32(DC_HPD1_INT_CONTROL, hpd1); 2433 WREG32(DC_HPD1_INT_CONTROL, hpd1);
2139 WREG32(DC_HPD2_INT_CONTROL, hpd2); 2434 WREG32(DC_HPD2_INT_CONTROL, hpd2);
@@ -2145,79 +2440,92 @@ int evergreen_irq_set(struct radeon_device *rdev)
2145 return 0; 2440 return 0;
2146} 2441}
2147 2442
2148static inline void evergreen_irq_ack(struct radeon_device *rdev, 2443static inline void evergreen_irq_ack(struct radeon_device *rdev)
2149 u32 *disp_int,
2150 u32 *disp_int_cont,
2151 u32 *disp_int_cont2,
2152 u32 *disp_int_cont3,
2153 u32 *disp_int_cont4,
2154 u32 *disp_int_cont5)
2155{ 2444{
2156 u32 tmp; 2445 u32 tmp;
2157 2446
2158 *disp_int = RREG32(DISP_INTERRUPT_STATUS); 2447 rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS);
2159 *disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE); 2448 rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
2160 *disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2); 2449 rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
2161 *disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3); 2450 rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
2162 *disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4); 2451 rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
2163 *disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5); 2452 rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
2164 2453 rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
2165 if (*disp_int & LB_D1_VBLANK_INTERRUPT) 2454 rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
2455 rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
2456 rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
2457 rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
2458 rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
2459
2460 if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
2461 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
2462 if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
2463 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
2464 if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
2465 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
2466 if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
2467 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
2468 if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
2469 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
2470 if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
2471 WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
2472
2473 if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)
2166 WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK); 2474 WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
2167 if (*disp_int & LB_D1_VLINE_INTERRUPT) 2475 if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)
2168 WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK); 2476 WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
2169 2477
2170 if (*disp_int_cont & LB_D2_VBLANK_INTERRUPT) 2478 if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
2171 WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK); 2479 WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
2172 if (*disp_int_cont & LB_D2_VLINE_INTERRUPT) 2480 if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)
2173 WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK); 2481 WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
2174 2482
2175 if (*disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) 2483 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
2176 WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK); 2484 WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
2177 if (*disp_int_cont2 & LB_D3_VLINE_INTERRUPT) 2485 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
2178 WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK); 2486 WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
2179 2487
2180 if (*disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) 2488 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
2181 WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK); 2489 WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
2182 if (*disp_int_cont3 & LB_D4_VLINE_INTERRUPT) 2490 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
2183 WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK); 2491 WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
2184 2492
2185 if (*disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) 2493 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
2186 WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK); 2494 WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
2187 if (*disp_int_cont4 & LB_D5_VLINE_INTERRUPT) 2495 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
2188 WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK); 2496 WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
2189 2497
2190 if (*disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) 2498 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
2191 WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK); 2499 WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
2192 if (*disp_int_cont5 & LB_D6_VLINE_INTERRUPT) 2500 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
2193 WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK); 2501 WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
2194 2502
2195 if (*disp_int & DC_HPD1_INTERRUPT) { 2503 if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
2196 tmp = RREG32(DC_HPD1_INT_CONTROL); 2504 tmp = RREG32(DC_HPD1_INT_CONTROL);
2197 tmp |= DC_HPDx_INT_ACK; 2505 tmp |= DC_HPDx_INT_ACK;
2198 WREG32(DC_HPD1_INT_CONTROL, tmp); 2506 WREG32(DC_HPD1_INT_CONTROL, tmp);
2199 } 2507 }
2200 if (*disp_int_cont & DC_HPD2_INTERRUPT) { 2508 if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
2201 tmp = RREG32(DC_HPD2_INT_CONTROL); 2509 tmp = RREG32(DC_HPD2_INT_CONTROL);
2202 tmp |= DC_HPDx_INT_ACK; 2510 tmp |= DC_HPDx_INT_ACK;
2203 WREG32(DC_HPD2_INT_CONTROL, tmp); 2511 WREG32(DC_HPD2_INT_CONTROL, tmp);
2204 } 2512 }
2205 if (*disp_int_cont2 & DC_HPD3_INTERRUPT) { 2513 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
2206 tmp = RREG32(DC_HPD3_INT_CONTROL); 2514 tmp = RREG32(DC_HPD3_INT_CONTROL);
2207 tmp |= DC_HPDx_INT_ACK; 2515 tmp |= DC_HPDx_INT_ACK;
2208 WREG32(DC_HPD3_INT_CONTROL, tmp); 2516 WREG32(DC_HPD3_INT_CONTROL, tmp);
2209 } 2517 }
2210 if (*disp_int_cont3 & DC_HPD4_INTERRUPT) { 2518 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
2211 tmp = RREG32(DC_HPD4_INT_CONTROL); 2519 tmp = RREG32(DC_HPD4_INT_CONTROL);
2212 tmp |= DC_HPDx_INT_ACK; 2520 tmp |= DC_HPDx_INT_ACK;
2213 WREG32(DC_HPD4_INT_CONTROL, tmp); 2521 WREG32(DC_HPD4_INT_CONTROL, tmp);
2214 } 2522 }
2215 if (*disp_int_cont4 & DC_HPD5_INTERRUPT) { 2523 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
2216 tmp = RREG32(DC_HPD5_INT_CONTROL); 2524 tmp = RREG32(DC_HPD5_INT_CONTROL);
2217 tmp |= DC_HPDx_INT_ACK; 2525 tmp |= DC_HPDx_INT_ACK;
2218 WREG32(DC_HPD5_INT_CONTROL, tmp); 2526 WREG32(DC_HPD5_INT_CONTROL, tmp);
2219 } 2527 }
2220 if (*disp_int_cont5 & DC_HPD6_INTERRUPT) { 2528 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
2221 tmp = RREG32(DC_HPD5_INT_CONTROL); 2529 tmp = RREG32(DC_HPD5_INT_CONTROL);
2222 tmp |= DC_HPDx_INT_ACK; 2530 tmp |= DC_HPDx_INT_ACK;
2223 WREG32(DC_HPD6_INT_CONTROL, tmp); 2531 WREG32(DC_HPD6_INT_CONTROL, tmp);
@@ -2226,14 +2534,10 @@ static inline void evergreen_irq_ack(struct radeon_device *rdev,
2226 2534
2227void evergreen_irq_disable(struct radeon_device *rdev) 2535void evergreen_irq_disable(struct radeon_device *rdev)
2228{ 2536{
2229 u32 disp_int, disp_int_cont, disp_int_cont2;
2230 u32 disp_int_cont3, disp_int_cont4, disp_int_cont5;
2231
2232 r600_disable_interrupts(rdev); 2537 r600_disable_interrupts(rdev);
2233 /* Wait and acknowledge irq */ 2538 /* Wait and acknowledge irq */
2234 mdelay(1); 2539 mdelay(1);
2235 evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2, 2540 evergreen_irq_ack(rdev);
2236 &disp_int_cont3, &disp_int_cont4, &disp_int_cont5);
2237 evergreen_disable_interrupt_state(rdev); 2541 evergreen_disable_interrupt_state(rdev);
2238} 2542}
2239 2543
@@ -2273,8 +2577,6 @@ int evergreen_irq_process(struct radeon_device *rdev)
2273 u32 rptr = rdev->ih.rptr; 2577 u32 rptr = rdev->ih.rptr;
2274 u32 src_id, src_data; 2578 u32 src_id, src_data;
2275 u32 ring_index; 2579 u32 ring_index;
2276 u32 disp_int, disp_int_cont, disp_int_cont2;
2277 u32 disp_int_cont3, disp_int_cont4, disp_int_cont5;
2278 unsigned long flags; 2580 unsigned long flags;
2279 bool queue_hotplug = false; 2581 bool queue_hotplug = false;
2280 2582
@@ -2295,8 +2597,7 @@ int evergreen_irq_process(struct radeon_device *rdev)
2295 2597
2296restart_ih: 2598restart_ih:
2297 /* display interrupts */ 2599 /* display interrupts */
2298 evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2, 2600 evergreen_irq_ack(rdev);
2299 &disp_int_cont3, &disp_int_cont4, &disp_int_cont5);
2300 2601
2301 rdev->ih.wptr = wptr; 2602 rdev->ih.wptr = wptr;
2302 while (rptr != wptr) { 2603 while (rptr != wptr) {
@@ -2309,17 +2610,21 @@ restart_ih:
2309 case 1: /* D1 vblank/vline */ 2610 case 1: /* D1 vblank/vline */
2310 switch (src_data) { 2611 switch (src_data) {
2311 case 0: /* D1 vblank */ 2612 case 0: /* D1 vblank */
2312 if (disp_int & LB_D1_VBLANK_INTERRUPT) { 2613 if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
2313 drm_handle_vblank(rdev->ddev, 0); 2614 if (rdev->irq.crtc_vblank_int[0]) {
2314 rdev->pm.vblank_sync = true; 2615 drm_handle_vblank(rdev->ddev, 0);
2315 wake_up(&rdev->irq.vblank_queue); 2616 rdev->pm.vblank_sync = true;
2316 disp_int &= ~LB_D1_VBLANK_INTERRUPT; 2617 wake_up(&rdev->irq.vblank_queue);
2618 }
2619 if (rdev->irq.pflip[0])
2620 radeon_crtc_handle_flip(rdev, 0);
2621 rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
2317 DRM_DEBUG("IH: D1 vblank\n"); 2622 DRM_DEBUG("IH: D1 vblank\n");
2318 } 2623 }
2319 break; 2624 break;
2320 case 1: /* D1 vline */ 2625 case 1: /* D1 vline */
2321 if (disp_int & LB_D1_VLINE_INTERRUPT) { 2626 if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
2322 disp_int &= ~LB_D1_VLINE_INTERRUPT; 2627 rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
2323 DRM_DEBUG("IH: D1 vline\n"); 2628 DRM_DEBUG("IH: D1 vline\n");
2324 } 2629 }
2325 break; 2630 break;
@@ -2331,17 +2636,21 @@ restart_ih:
2331 case 2: /* D2 vblank/vline */ 2636 case 2: /* D2 vblank/vline */
2332 switch (src_data) { 2637 switch (src_data) {
2333 case 0: /* D2 vblank */ 2638 case 0: /* D2 vblank */
2334 if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) { 2639 if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
2335 drm_handle_vblank(rdev->ddev, 1); 2640 if (rdev->irq.crtc_vblank_int[1]) {
2336 rdev->pm.vblank_sync = true; 2641 drm_handle_vblank(rdev->ddev, 1);
2337 wake_up(&rdev->irq.vblank_queue); 2642 rdev->pm.vblank_sync = true;
2338 disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; 2643 wake_up(&rdev->irq.vblank_queue);
2644 }
2645 if (rdev->irq.pflip[1])
2646 radeon_crtc_handle_flip(rdev, 1);
2647 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
2339 DRM_DEBUG("IH: D2 vblank\n"); 2648 DRM_DEBUG("IH: D2 vblank\n");
2340 } 2649 }
2341 break; 2650 break;
2342 case 1: /* D2 vline */ 2651 case 1: /* D2 vline */
2343 if (disp_int_cont & LB_D2_VLINE_INTERRUPT) { 2652 if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
2344 disp_int_cont &= ~LB_D2_VLINE_INTERRUPT; 2653 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
2345 DRM_DEBUG("IH: D2 vline\n"); 2654 DRM_DEBUG("IH: D2 vline\n");
2346 } 2655 }
2347 break; 2656 break;
@@ -2353,17 +2662,21 @@ restart_ih:
2353 case 3: /* D3 vblank/vline */ 2662 case 3: /* D3 vblank/vline */
2354 switch (src_data) { 2663 switch (src_data) {
2355 case 0: /* D3 vblank */ 2664 case 0: /* D3 vblank */
2356 if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) { 2665 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
2357 drm_handle_vblank(rdev->ddev, 2); 2666 if (rdev->irq.crtc_vblank_int[2]) {
2358 rdev->pm.vblank_sync = true; 2667 drm_handle_vblank(rdev->ddev, 2);
2359 wake_up(&rdev->irq.vblank_queue); 2668 rdev->pm.vblank_sync = true;
2360 disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; 2669 wake_up(&rdev->irq.vblank_queue);
2670 }
2671 if (rdev->irq.pflip[2])
2672 radeon_crtc_handle_flip(rdev, 2);
2673 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
2361 DRM_DEBUG("IH: D3 vblank\n"); 2674 DRM_DEBUG("IH: D3 vblank\n");
2362 } 2675 }
2363 break; 2676 break;
2364 case 1: /* D3 vline */ 2677 case 1: /* D3 vline */
2365 if (disp_int_cont2 & LB_D3_VLINE_INTERRUPT) { 2678 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
2366 disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT; 2679 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
2367 DRM_DEBUG("IH: D3 vline\n"); 2680 DRM_DEBUG("IH: D3 vline\n");
2368 } 2681 }
2369 break; 2682 break;
@@ -2375,17 +2688,21 @@ restart_ih:
2375 case 4: /* D4 vblank/vline */ 2688 case 4: /* D4 vblank/vline */
2376 switch (src_data) { 2689 switch (src_data) {
2377 case 0: /* D4 vblank */ 2690 case 0: /* D4 vblank */
2378 if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) { 2691 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
2379 drm_handle_vblank(rdev->ddev, 3); 2692 if (rdev->irq.crtc_vblank_int[3]) {
2380 rdev->pm.vblank_sync = true; 2693 drm_handle_vblank(rdev->ddev, 3);
2381 wake_up(&rdev->irq.vblank_queue); 2694 rdev->pm.vblank_sync = true;
2382 disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; 2695 wake_up(&rdev->irq.vblank_queue);
2696 }
2697 if (rdev->irq.pflip[3])
2698 radeon_crtc_handle_flip(rdev, 3);
2699 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
2383 DRM_DEBUG("IH: D4 vblank\n"); 2700 DRM_DEBUG("IH: D4 vblank\n");
2384 } 2701 }
2385 break; 2702 break;
2386 case 1: /* D4 vline */ 2703 case 1: /* D4 vline */
2387 if (disp_int_cont3 & LB_D4_VLINE_INTERRUPT) { 2704 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
2388 disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT; 2705 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
2389 DRM_DEBUG("IH: D4 vline\n"); 2706 DRM_DEBUG("IH: D4 vline\n");
2390 } 2707 }
2391 break; 2708 break;
@@ -2397,17 +2714,21 @@ restart_ih:
2397 case 5: /* D5 vblank/vline */ 2714 case 5: /* D5 vblank/vline */
2398 switch (src_data) { 2715 switch (src_data) {
2399 case 0: /* D5 vblank */ 2716 case 0: /* D5 vblank */
2400 if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) { 2717 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
2401 drm_handle_vblank(rdev->ddev, 4); 2718 if (rdev->irq.crtc_vblank_int[4]) {
2402 rdev->pm.vblank_sync = true; 2719 drm_handle_vblank(rdev->ddev, 4);
2403 wake_up(&rdev->irq.vblank_queue); 2720 rdev->pm.vblank_sync = true;
2404 disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; 2721 wake_up(&rdev->irq.vblank_queue);
2722 }
2723 if (rdev->irq.pflip[4])
2724 radeon_crtc_handle_flip(rdev, 4);
2725 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
2405 DRM_DEBUG("IH: D5 vblank\n"); 2726 DRM_DEBUG("IH: D5 vblank\n");
2406 } 2727 }
2407 break; 2728 break;
2408 case 1: /* D5 vline */ 2729 case 1: /* D5 vline */
2409 if (disp_int_cont4 & LB_D5_VLINE_INTERRUPT) { 2730 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
2410 disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT; 2731 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
2411 DRM_DEBUG("IH: D5 vline\n"); 2732 DRM_DEBUG("IH: D5 vline\n");
2412 } 2733 }
2413 break; 2734 break;
@@ -2419,17 +2740,21 @@ restart_ih:
2419 case 6: /* D6 vblank/vline */ 2740 case 6: /* D6 vblank/vline */
2420 switch (src_data) { 2741 switch (src_data) {
2421 case 0: /* D6 vblank */ 2742 case 0: /* D6 vblank */
2422 if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) { 2743 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
2423 drm_handle_vblank(rdev->ddev, 5); 2744 if (rdev->irq.crtc_vblank_int[5]) {
2424 rdev->pm.vblank_sync = true; 2745 drm_handle_vblank(rdev->ddev, 5);
2425 wake_up(&rdev->irq.vblank_queue); 2746 rdev->pm.vblank_sync = true;
2426 disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; 2747 wake_up(&rdev->irq.vblank_queue);
2748 }
2749 if (rdev->irq.pflip[5])
2750 radeon_crtc_handle_flip(rdev, 5);
2751 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
2427 DRM_DEBUG("IH: D6 vblank\n"); 2752 DRM_DEBUG("IH: D6 vblank\n");
2428 } 2753 }
2429 break; 2754 break;
2430 case 1: /* D6 vline */ 2755 case 1: /* D6 vline */
2431 if (disp_int_cont5 & LB_D6_VLINE_INTERRUPT) { 2756 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
2432 disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT; 2757 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
2433 DRM_DEBUG("IH: D6 vline\n"); 2758 DRM_DEBUG("IH: D6 vline\n");
2434 } 2759 }
2435 break; 2760 break;
@@ -2441,43 +2766,43 @@ restart_ih:
2441 case 42: /* HPD hotplug */ 2766 case 42: /* HPD hotplug */
2442 switch (src_data) { 2767 switch (src_data) {
2443 case 0: 2768 case 0:
2444 if (disp_int & DC_HPD1_INTERRUPT) { 2769 if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
2445 disp_int &= ~DC_HPD1_INTERRUPT; 2770 rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
2446 queue_hotplug = true; 2771 queue_hotplug = true;
2447 DRM_DEBUG("IH: HPD1\n"); 2772 DRM_DEBUG("IH: HPD1\n");
2448 } 2773 }
2449 break; 2774 break;
2450 case 1: 2775 case 1:
2451 if (disp_int_cont & DC_HPD2_INTERRUPT) { 2776 if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
2452 disp_int_cont &= ~DC_HPD2_INTERRUPT; 2777 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
2453 queue_hotplug = true; 2778 queue_hotplug = true;
2454 DRM_DEBUG("IH: HPD2\n"); 2779 DRM_DEBUG("IH: HPD2\n");
2455 } 2780 }
2456 break; 2781 break;
2457 case 2: 2782 case 2:
2458 if (disp_int_cont2 & DC_HPD3_INTERRUPT) { 2783 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
2459 disp_int_cont2 &= ~DC_HPD3_INTERRUPT; 2784 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
2460 queue_hotplug = true; 2785 queue_hotplug = true;
2461 DRM_DEBUG("IH: HPD3\n"); 2786 DRM_DEBUG("IH: HPD3\n");
2462 } 2787 }
2463 break; 2788 break;
2464 case 3: 2789 case 3:
2465 if (disp_int_cont3 & DC_HPD4_INTERRUPT) { 2790 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
2466 disp_int_cont3 &= ~DC_HPD4_INTERRUPT; 2791 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
2467 queue_hotplug = true; 2792 queue_hotplug = true;
2468 DRM_DEBUG("IH: HPD4\n"); 2793 DRM_DEBUG("IH: HPD4\n");
2469 } 2794 }
2470 break; 2795 break;
2471 case 4: 2796 case 4:
2472 if (disp_int_cont4 & DC_HPD5_INTERRUPT) { 2797 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
2473 disp_int_cont4 &= ~DC_HPD5_INTERRUPT; 2798 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
2474 queue_hotplug = true; 2799 queue_hotplug = true;
2475 DRM_DEBUG("IH: HPD5\n"); 2800 DRM_DEBUG("IH: HPD5\n");
2476 } 2801 }
2477 break; 2802 break;
2478 case 5: 2803 case 5:
2479 if (disp_int_cont5 & DC_HPD6_INTERRUPT) { 2804 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
2480 disp_int_cont5 &= ~DC_HPD6_INTERRUPT; 2805 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
2481 queue_hotplug = true; 2806 queue_hotplug = true;
2482 DRM_DEBUG("IH: HPD6\n"); 2807 DRM_DEBUG("IH: HPD6\n");
2483 } 2808 }
@@ -2516,7 +2841,7 @@ restart_ih:
2516 if (wptr != rdev->ih.wptr) 2841 if (wptr != rdev->ih.wptr)
2517 goto restart_ih; 2842 goto restart_ih;
2518 if (queue_hotplug) 2843 if (queue_hotplug)
2519 queue_work(rdev->wq, &rdev->hotplug_work); 2844 schedule_work(&rdev->hotplug_work);
2520 rdev->ih.rptr = rptr; 2845 rdev->ih.rptr = rptr;
2521 WREG32(IH_RB_RPTR, rdev->ih.rptr); 2846 WREG32(IH_RB_RPTR, rdev->ih.rptr);
2522 spin_unlock_irqrestore(&rdev->ih.lock, flags); 2847 spin_unlock_irqrestore(&rdev->ih.lock, flags);
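
The hunks above replace the local disp_int* variables with status words cached in rdev->irq.stat_regs.evergreen, so evergreen_irq_ack() and the process loop share one copy, and they split each vblank case so a pending page flip is serviced even when no task is waiting on the vblank queue. The six per-CRTC cases share one shape; a condensed sketch, assuming a hypothetical helper that is not part of the patch:

/* Hedged sketch of the per-CRTC vblank/pflip dispatch repeated above.
 * "stat" points at the cached status word (disp_int, disp_int_cont, ...)
 * and "mask" is the matching LB_Dx_VBLANK_INTERRUPT bit. */
static void handle_crtc_vblank(struct radeon_device *rdev, int crtc,
			       u32 *stat, u32 mask)
{
	if (!(*stat & mask))
		return;
	if (rdev->irq.crtc_vblank_int[crtc]) {
		drm_handle_vblank(rdev->ddev, crtc);
		rdev->pm.vblank_sync = true;
		wake_up(&rdev->irq.vblank_queue);
	}
	if (rdev->irq.pflip[crtc])
		radeon_crtc_handle_flip(rdev, crtc);
	*stat &= ~mask;		/* consume the cached status bit */
}
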
@@ -2527,12 +2852,31 @@ static int evergreen_startup(struct radeon_device *rdev)
2527{ 2852{
2528 int r; 2853 int r;
2529 2854
2530 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { 2855 /* enable pcie gen2 link */
2531 r = r600_init_microcode(rdev); 2856 if (!ASIC_IS_DCE5(rdev))
2857 evergreen_pcie_gen2_enable(rdev);
2858
2859 if (ASIC_IS_DCE5(rdev)) {
2860 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
2861 r = ni_init_microcode(rdev);
2862 if (r) {
2863 DRM_ERROR("Failed to load firmware!\n");
2864 return r;
2865 }
2866 }
2867 r = btc_mc_load_microcode(rdev);
2532 if (r) { 2868 if (r) {
2533 DRM_ERROR("Failed to load firmware!\n"); 2869 DRM_ERROR("Failed to load MC firmware!\n");
2534 return r; 2870 return r;
2535 } 2871 }
2872 } else {
2873 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
2874 r = r600_init_microcode(rdev);
2875 if (r) {
2876 DRM_ERROR("Failed to load firmware!\n");
2877 return r;
2878 }
2879 }
2536 } 2880 }
2537 2881
2538 evergreen_mc_program(rdev); 2882 evergreen_mc_program(rdev);
@@ -2551,6 +2895,11 @@ static int evergreen_startup(struct radeon_device *rdev)
2551 rdev->asic->copy = NULL; 2895 rdev->asic->copy = NULL;
2552 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); 2896 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
2553 } 2897 }
2898 /* XXX: ontario has problems blitting to gart at the moment */
2899 if (rdev->family == CHIP_PALM) {
2900 rdev->asic->copy = NULL;
2901 rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
2902 }
2554 2903
2555 /* allocate wb buffer */ 2904 /* allocate wb buffer */
2556 r = radeon_wb_init(rdev); 2905 r = radeon_wb_init(rdev);
@@ -2658,12 +3007,16 @@ static bool evergreen_card_posted(struct radeon_device *rdev)
2658 u32 reg; 3007 u32 reg;
2659 3008
2660 /* first check CRTCs */ 3009 /* first check CRTCs */
2661 reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | 3010 if (rdev->flags & RADEON_IS_IGP)
2662 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) | 3011 reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
2663 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) | 3012 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
2664 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) | 3013 else
2665 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) | 3014 reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
2666 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET); 3015 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
3016 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
3017 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
3018 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
3019 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
2667 if (reg & EVERGREEN_CRTC_MASTER_EN) 3020 if (reg & EVERGREEN_CRTC_MASTER_EN)
2668 return true; 3021 return true;
2669 3022
@@ -2800,3 +3153,52 @@ void evergreen_fini(struct radeon_device *rdev)
2800 rdev->bios = NULL; 3153 rdev->bios = NULL;
2801 radeon_dummy_page_fini(rdev); 3154 radeon_dummy_page_fini(rdev);
2802} 3155}
3156
3157static void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
3158{
3159 u32 link_width_cntl, speed_cntl;
3160
3161 if (rdev->flags & RADEON_IS_IGP)
3162 return;
3163
3164 if (!(rdev->flags & RADEON_IS_PCIE))
3165 return;
3166
3167 /* x2 cards have a special sequence */
3168 if (ASIC_IS_X2(rdev))
3169 return;
3170
3171 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3172 if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
3173 (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
3174
3175 link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
3176 link_width_cntl &= ~LC_UPCONFIGURE_DIS;
3177 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3178
3179 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3180 speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
3181 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
3182
3183 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3184 speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
3185 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
3186
3187 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3188 speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
3189 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
3190
3191 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3192 speed_cntl |= LC_GEN2_EN_STRAP;
3193 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
3194
3195 } else {
3196 link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
3197 /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
3198 if (1)
3199 link_width_cntl |= LC_UPCONFIGURE_DIS;
3200 else
3201 link_width_cntl &= ~LC_UPCONFIGURE_DIS;
3202 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3203 }
3204}
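
evergreen_pcie_gen2_enable() bails out on IGP, non-PCIE and X2 boards, then applies each PCIE_LC_SPEED_CNTL field with a fresh read-modify-write so one write has latched before the next field changes. A hedged pair of helpers capturing that idiom (hypothetical, not in the patch; like the WREG32_PCIE_P macro itself they assume "rdev" is in scope):

/* Hypothetical read-modify-write helpers for the sequence above. */
#define PCIE_P_SET_BITS(reg, bits) \
	WREG32_PCIE_P((reg), RREG32_PCIE_P(reg) | (bits))
#define PCIE_P_CLEAR_BITS(reg, bits) \
	WREG32_PCIE_P((reg), RREG32_PCIE_P(reg) & ~(bits))

With these, the gen2 path reads as: clear LC_TARGET_LINK_SPEED_OVERRIDE_EN, pulse LC_CLR_FAILED_SPD_CHANGE_CNT, then set LC_GEN2_EN_STRAP.
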
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index e0e590110dd4..b758dc7f2f2c 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -147,7 +147,9 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
147 radeon_ring_write(rdev, 0); 147 radeon_ring_write(rdev, 0);
148 radeon_ring_write(rdev, SQ_TEX_VTX_VALID_BUFFER << 30); 148 radeon_ring_write(rdev, SQ_TEX_VTX_VALID_BUFFER << 30);
149 149
150 if (rdev->family == CHIP_CEDAR) 150 if ((rdev->family == CHIP_CEDAR) ||
151 (rdev->family == CHIP_PALM) ||
152 (rdev->family == CHIP_CAICOS))
151 cp_set_surface_sync(rdev, 153 cp_set_surface_sync(rdev,
152 PACKET3_TC_ACTION_ENA, 48, gpu_addr); 154 PACKET3_TC_ACTION_ENA, 48, gpu_addr);
153 else 155 else
@@ -331,9 +333,95 @@ set_default_state(struct radeon_device *rdev)
331 num_hs_stack_entries = 85; 333 num_hs_stack_entries = 85;
332 num_ls_stack_entries = 85; 334 num_ls_stack_entries = 85;
333 break; 335 break;
336 case CHIP_PALM:
337 num_ps_gprs = 93;
338 num_vs_gprs = 46;
339 num_temp_gprs = 4;
340 num_gs_gprs = 31;
341 num_es_gprs = 31;
342 num_hs_gprs = 23;
343 num_ls_gprs = 23;
344 num_ps_threads = 96;
345 num_vs_threads = 16;
346 num_gs_threads = 16;
347 num_es_threads = 16;
348 num_hs_threads = 16;
349 num_ls_threads = 16;
350 num_ps_stack_entries = 42;
351 num_vs_stack_entries = 42;
352 num_gs_stack_entries = 42;
353 num_es_stack_entries = 42;
354 num_hs_stack_entries = 42;
355 num_ls_stack_entries = 42;
356 break;
357 case CHIP_BARTS:
358 num_ps_gprs = 93;
359 num_vs_gprs = 46;
360 num_temp_gprs = 4;
361 num_gs_gprs = 31;
362 num_es_gprs = 31;
363 num_hs_gprs = 23;
364 num_ls_gprs = 23;
365 num_ps_threads = 128;
366 num_vs_threads = 20;
367 num_gs_threads = 20;
368 num_es_threads = 20;
369 num_hs_threads = 20;
370 num_ls_threads = 20;
371 num_ps_stack_entries = 85;
372 num_vs_stack_entries = 85;
373 num_gs_stack_entries = 85;
374 num_es_stack_entries = 85;
375 num_hs_stack_entries = 85;
376 num_ls_stack_entries = 85;
377 break;
378 case CHIP_TURKS:
379 num_ps_gprs = 93;
380 num_vs_gprs = 46;
381 num_temp_gprs = 4;
382 num_gs_gprs = 31;
383 num_es_gprs = 31;
384 num_hs_gprs = 23;
385 num_ls_gprs = 23;
386 num_ps_threads = 128;
387 num_vs_threads = 20;
388 num_gs_threads = 20;
389 num_es_threads = 20;
390 num_hs_threads = 20;
391 num_ls_threads = 20;
392 num_ps_stack_entries = 42;
393 num_vs_stack_entries = 42;
394 num_gs_stack_entries = 42;
395 num_es_stack_entries = 42;
396 num_hs_stack_entries = 42;
397 num_ls_stack_entries = 42;
398 break;
399 case CHIP_CAICOS:
400 num_ps_gprs = 93;
401 num_vs_gprs = 46;
402 num_temp_gprs = 4;
403 num_gs_gprs = 31;
404 num_es_gprs = 31;
405 num_hs_gprs = 23;
406 num_ls_gprs = 23;
407 num_ps_threads = 128;
408 num_vs_threads = 10;
409 num_gs_threads = 10;
410 num_es_threads = 10;
411 num_hs_threads = 10;
412 num_ls_threads = 10;
413 num_ps_stack_entries = 42;
414 num_vs_stack_entries = 42;
415 num_gs_stack_entries = 42;
416 num_es_stack_entries = 42;
417 num_hs_stack_entries = 42;
418 num_ls_stack_entries = 42;
419 break;
334 } 420 }
335 421
336 if (rdev->family == CHIP_CEDAR) 422 if ((rdev->family == CHIP_CEDAR) ||
423 (rdev->family == CHIP_PALM) ||
424 (rdev->family == CHIP_CAICOS))
337 sq_config = 0; 425 sq_config = 0;
338 else 426 else
339 sq_config = VC_ENABLE; 427 sq_config = VC_ENABLE;
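
The same three-family test now appears twice in this file: the smaller parts (Cedar, Palm, Caicos) take the 48-byte surface-sync path and run with the vertex cache disabled (sq_config = 0). A predicate would keep the two sites in step; a minimal sketch, not part of the patch:

/* Hypothetical helper: evergreen-class parts without a vertex cache. */
static bool evergreen_family_has_no_vc(struct radeon_device *rdev)
{
	return (rdev->family == CHIP_CEDAR) ||
	       (rdev->family == CHIP_PALM) ||
	       (rdev->family == CHIP_CAICOS);
}
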
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index 2330f3a36fd5..c781c92c3451 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -105,6 +105,11 @@
105#define EVERGREEN_GRPH_Y_START 0x6830 105#define EVERGREEN_GRPH_Y_START 0x6830
106#define EVERGREEN_GRPH_X_END 0x6834 106#define EVERGREEN_GRPH_X_END 0x6834
107#define EVERGREEN_GRPH_Y_END 0x6838 107#define EVERGREEN_GRPH_Y_END 0x6838
108#define EVERGREEN_GRPH_UPDATE 0x6844
109# define EVERGREEN_GRPH_SURFACE_UPDATE_PENDING (1 << 2)
110# define EVERGREEN_GRPH_UPDATE_LOCK (1 << 16)
111#define EVERGREEN_GRPH_FLIP_CONTROL 0x6848
112# define EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN (1 << 0)
108 113
109/* CUR blocks at 0x6998, 0x7598, 0x10198, 0x10d98, 0x11998, 0x12598 */ 114/* CUR blocks at 0x6998, 0x7598, 0x10198, 0x10d98, 0x11998, 0x12598 */
110#define EVERGREEN_CUR_CONTROL 0x6998 115#define EVERGREEN_CUR_CONTROL 0x6998
@@ -178,6 +183,7 @@
178# define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24) 183# define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
179#define EVERGREEN_CRTC_STATUS 0x6e8c 184#define EVERGREEN_CRTC_STATUS 0x6e8c
180#define EVERGREEN_CRTC_STATUS_POSITION 0x6e90 185#define EVERGREEN_CRTC_STATUS_POSITION 0x6e90
186#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
181#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4 187#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
182 188
183#define EVERGREEN_DC_GPIO_HPD_MASK 0x64b0 189#define EVERGREEN_DC_GPIO_HPD_MASK 0x64b0
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index a73b53c44359..36d32d83d866 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -164,11 +164,13 @@
164#define SE_SC_BUSY (1 << 29) 164#define SE_SC_BUSY (1 << 29)
165#define SE_DB_BUSY (1 << 30) 165#define SE_DB_BUSY (1 << 30)
166#define SE_CB_BUSY (1 << 31) 166#define SE_CB_BUSY (1 << 31)
167 167/* evergreen */
168#define CG_MULT_THERMAL_STATUS 0x740 168#define CG_MULT_THERMAL_STATUS 0x740
169#define ASIC_T(x) ((x) << 16) 169#define ASIC_T(x) ((x) << 16)
170#define ASIC_T_MASK 0x7FF0000 170#define ASIC_T_MASK 0x7FF0000
171#define ASIC_T_SHIFT 16 171#define ASIC_T_SHIFT 16
172/* APU */
173#define CG_THERMAL_STATUS 0x678
172 174
173#define HDP_HOST_PATH_CNTL 0x2C00 175#define HDP_HOST_PATH_CNTL 0x2C00
174#define HDP_NONSURFACE_BASE 0x2C04 176#define HDP_NONSURFACE_BASE 0x2C04
@@ -181,6 +183,7 @@
181#define MC_SHARED_CHMAP 0x2004 183#define MC_SHARED_CHMAP 0x2004
182#define NOOFCHAN_SHIFT 12 184#define NOOFCHAN_SHIFT 12
183#define NOOFCHAN_MASK 0x00003000 185#define NOOFCHAN_MASK 0x00003000
186#define MC_SHARED_CHREMAP 0x2008
184 187
185#define MC_ARB_RAMCFG 0x2760 188#define MC_ARB_RAMCFG 0x2760
186#define NOOFBANK_SHIFT 0 189#define NOOFBANK_SHIFT 0
@@ -200,6 +203,7 @@
200#define MC_VM_AGP_BOT 0x202C 203#define MC_VM_AGP_BOT 0x202C
201#define MC_VM_AGP_BASE 0x2030 204#define MC_VM_AGP_BASE 0x2030
202#define MC_VM_FB_LOCATION 0x2024 205#define MC_VM_FB_LOCATION 0x2024
206#define MC_FUS_VM_FB_OFFSET 0x2898
203#define MC_VM_MB_L1_TLB0_CNTL 0x2234 207#define MC_VM_MB_L1_TLB0_CNTL 0x2234
204#define MC_VM_MB_L1_TLB1_CNTL 0x2238 208#define MC_VM_MB_L1_TLB1_CNTL 0x2238
205#define MC_VM_MB_L1_TLB2_CNTL 0x223C 209#define MC_VM_MB_L1_TLB2_CNTL 0x223C
@@ -349,6 +353,9 @@
349#define SYNC_WALKER (1 << 25) 353#define SYNC_WALKER (1 << 25)
350#define SYNC_ALIGNER (1 << 26) 354#define SYNC_ALIGNER (1 << 26)
351 355
356#define TCP_CHAN_STEER_LO 0x960c
357#define TCP_CHAN_STEER_HI 0x9610
358
352#define VGT_CACHE_INVALIDATION 0x88C4 359#define VGT_CACHE_INVALIDATION 0x88C4
353#define CACHE_INVALIDATION(x) ((x) << 0) 360#define CACHE_INVALIDATION(x) ((x) << 0)
354#define VC_ONLY 0 361#define VC_ONLY 0
@@ -574,6 +581,44 @@
574# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16) 581# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
575# define DC_HPDx_EN (1 << 28) 582# define DC_HPDx_EN (1 << 28)
576 583
584/* PCIE link stuff */
585#define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */
586#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
587# define LC_LINK_WIDTH_SHIFT 0
588# define LC_LINK_WIDTH_MASK 0x7
589# define LC_LINK_WIDTH_X0 0
590# define LC_LINK_WIDTH_X1 1
591# define LC_LINK_WIDTH_X2 2
592# define LC_LINK_WIDTH_X4 3
593# define LC_LINK_WIDTH_X8 4
594# define LC_LINK_WIDTH_X16 6
595# define LC_LINK_WIDTH_RD_SHIFT 4
596# define LC_LINK_WIDTH_RD_MASK 0x70
597# define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7)
598# define LC_RECONFIG_NOW (1 << 8)
599# define LC_RENEGOTIATION_SUPPORT (1 << 9)
600# define LC_RENEGOTIATE_EN (1 << 10)
601# define LC_SHORT_RECONFIG_EN (1 << 11)
602# define LC_UPCONFIGURE_SUPPORT (1 << 12)
603# define LC_UPCONFIGURE_DIS (1 << 13)
604#define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */
605# define LC_GEN2_EN_STRAP (1 << 0)
606# define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 1)
607# define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 5)
608# define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 6)
609# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 8)
610# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 3
611# define LC_CURRENT_DATA_RATE (1 << 11)
612# define LC_VOLTAGE_TIMER_SEL_MASK (0xf << 14)
613# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 21)
614# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 23)
615# define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 24)
616#define MM_CFGREGS_CNTL 0x544c
617# define MM_WR_TO_CFG_EN (1 << 3)
618#define LINK_CNTL2 0x88 /* F0 */
619# define TARGET_LINK_SPEED_MASK (0xf << 0)
620# define SELECTABLE_DEEMPHASIS (1 << 6)
621
577/* 622/*
578 * PM4 623 * PM4
579 */ 624 */
@@ -603,7 +648,7 @@
603#define PACKET3_NOP 0x10 648#define PACKET3_NOP 0x10
604#define PACKET3_SET_BASE 0x11 649#define PACKET3_SET_BASE 0x11
605#define PACKET3_CLEAR_STATE 0x12 650#define PACKET3_CLEAR_STATE 0x12
606#define PACKET3_INDIRECT_BUFFER_SIZE 0x13 651#define PACKET3_INDEX_BUFFER_SIZE 0x13
607#define PACKET3_DISPATCH_DIRECT 0x15 652#define PACKET3_DISPATCH_DIRECT 0x15
608#define PACKET3_DISPATCH_INDIRECT 0x16 653#define PACKET3_DISPATCH_INDIRECT 0x16
609#define PACKET3_INDIRECT_BUFFER_END 0x17 654#define PACKET3_INDIRECT_BUFFER_END 0x17
@@ -644,14 +689,14 @@
644# define PACKET3_CB8_DEST_BASE_ENA (1 << 15) 689# define PACKET3_CB8_DEST_BASE_ENA (1 << 15)
645# define PACKET3_CB9_DEST_BASE_ENA (1 << 16) 690# define PACKET3_CB9_DEST_BASE_ENA (1 << 16)
646# define PACKET3_CB10_DEST_BASE_ENA (1 << 17) 691# define PACKET3_CB10_DEST_BASE_ENA (1 << 17)
647# define PACKET3_CB11_DEST_BASE_ENA (1 << 17) 692# define PACKET3_CB11_DEST_BASE_ENA (1 << 18)
648# define PACKET3_FULL_CACHE_ENA (1 << 20) 693# define PACKET3_FULL_CACHE_ENA (1 << 20)
649# define PACKET3_TC_ACTION_ENA (1 << 23) 694# define PACKET3_TC_ACTION_ENA (1 << 23)
650# define PACKET3_VC_ACTION_ENA (1 << 24) 695# define PACKET3_VC_ACTION_ENA (1 << 24)
651# define PACKET3_CB_ACTION_ENA (1 << 25) 696# define PACKET3_CB_ACTION_ENA (1 << 25)
652# define PACKET3_DB_ACTION_ENA (1 << 26) 697# define PACKET3_DB_ACTION_ENA (1 << 26)
653# define PACKET3_SH_ACTION_ENA (1 << 27) 698# define PACKET3_SH_ACTION_ENA (1 << 27)
654# define PACKET3_SMX_ACTION_ENA (1 << 28) 699# define PACKET3_SX_ACTION_ENA (1 << 28)
655#define PACKET3_ME_INITIALIZE 0x44 700#define PACKET3_ME_INITIALIZE 0x44
656#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16) 701#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
657#define PACKET3_COND_WRITE 0x45 702#define PACKET3_COND_WRITE 0x45
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
new file mode 100644
index 000000000000..5e0bef80ad7f
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -0,0 +1,316 @@
1/*
2 * Copyright 2010 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24#include <linux/firmware.h>
25#include <linux/platform_device.h>
26#include <linux/slab.h>
27#include "drmP.h"
28#include "radeon.h"
29#include "radeon_asic.h"
30#include "radeon_drm.h"
31#include "nid.h"
32#include "atom.h"
33#include "ni_reg.h"
34
35#define EVERGREEN_PFP_UCODE_SIZE 1120
36#define EVERGREEN_PM4_UCODE_SIZE 1376
37#define EVERGREEN_RLC_UCODE_SIZE 768
38#define BTC_MC_UCODE_SIZE 6024
39
40/* Firmware Names */
41MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
42MODULE_FIRMWARE("radeon/BARTS_me.bin");
43MODULE_FIRMWARE("radeon/BARTS_mc.bin");
44MODULE_FIRMWARE("radeon/BTC_rlc.bin");
45MODULE_FIRMWARE("radeon/TURKS_pfp.bin");
46MODULE_FIRMWARE("radeon/TURKS_me.bin");
47MODULE_FIRMWARE("radeon/TURKS_mc.bin");
48MODULE_FIRMWARE("radeon/CAICOS_pfp.bin");
49MODULE_FIRMWARE("radeon/CAICOS_me.bin");
50MODULE_FIRMWARE("radeon/CAICOS_mc.bin");
51
52#define BTC_IO_MC_REGS_SIZE 29
53
54static const u32 barts_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
55 {0x00000077, 0xff010100},
56 {0x00000078, 0x00000000},
57 {0x00000079, 0x00001434},
58 {0x0000007a, 0xcc08ec08},
59 {0x0000007b, 0x00040000},
60 {0x0000007c, 0x000080c0},
61 {0x0000007d, 0x09000000},
62 {0x0000007e, 0x00210404},
63 {0x00000081, 0x08a8e800},
64 {0x00000082, 0x00030444},
65 {0x00000083, 0x00000000},
66 {0x00000085, 0x00000001},
67 {0x00000086, 0x00000002},
68 {0x00000087, 0x48490000},
69 {0x00000088, 0x20244647},
70 {0x00000089, 0x00000005},
71 {0x0000008b, 0x66030000},
72 {0x0000008c, 0x00006603},
73 {0x0000008d, 0x00000100},
74 {0x0000008f, 0x00001c0a},
75 {0x00000090, 0xff000001},
76 {0x00000094, 0x00101101},
77 {0x00000095, 0x00000fff},
78 {0x00000096, 0x00116fff},
79 {0x00000097, 0x60010000},
80 {0x00000098, 0x10010000},
81 {0x00000099, 0x00006000},
82 {0x0000009a, 0x00001000},
83 {0x0000009f, 0x00946a00}
84};
85
86static const u32 turks_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
87 {0x00000077, 0xff010100},
88 {0x00000078, 0x00000000},
89 {0x00000079, 0x00001434},
90 {0x0000007a, 0xcc08ec08},
91 {0x0000007b, 0x00040000},
92 {0x0000007c, 0x000080c0},
93 {0x0000007d, 0x09000000},
94 {0x0000007e, 0x00210404},
95 {0x00000081, 0x08a8e800},
96 {0x00000082, 0x00030444},
97 {0x00000083, 0x00000000},
98 {0x00000085, 0x00000001},
99 {0x00000086, 0x00000002},
100 {0x00000087, 0x48490000},
101 {0x00000088, 0x20244647},
102 {0x00000089, 0x00000005},
103 {0x0000008b, 0x66030000},
104 {0x0000008c, 0x00006603},
105 {0x0000008d, 0x00000100},
106 {0x0000008f, 0x00001c0a},
107 {0x00000090, 0xff000001},
108 {0x00000094, 0x00101101},
109 {0x00000095, 0x00000fff},
110 {0x00000096, 0x00116fff},
111 {0x00000097, 0x60010000},
112 {0x00000098, 0x10010000},
113 {0x00000099, 0x00006000},
114 {0x0000009a, 0x00001000},
115 {0x0000009f, 0x00936a00}
116};
117
118static const u32 caicos_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
119 {0x00000077, 0xff010100},
120 {0x00000078, 0x00000000},
121 {0x00000079, 0x00001434},
122 {0x0000007a, 0xcc08ec08},
123 {0x0000007b, 0x00040000},
124 {0x0000007c, 0x000080c0},
125 {0x0000007d, 0x09000000},
126 {0x0000007e, 0x00210404},
127 {0x00000081, 0x08a8e800},
128 {0x00000082, 0x00030444},
129 {0x00000083, 0x00000000},
130 {0x00000085, 0x00000001},
131 {0x00000086, 0x00000002},
132 {0x00000087, 0x48490000},
133 {0x00000088, 0x20244647},
134 {0x00000089, 0x00000005},
135 {0x0000008b, 0x66030000},
136 {0x0000008c, 0x00006603},
137 {0x0000008d, 0x00000100},
138 {0x0000008f, 0x00001c0a},
139 {0x00000090, 0xff000001},
140 {0x00000094, 0x00101101},
141 {0x00000095, 0x00000fff},
142 {0x00000096, 0x00116fff},
143 {0x00000097, 0x60010000},
144 {0x00000098, 0x10010000},
145 {0x00000099, 0x00006000},
146 {0x0000009a, 0x00001000},
147 {0x0000009f, 0x00916a00}
148};
149
150int btc_mc_load_microcode(struct radeon_device *rdev)
151{
152 const __be32 *fw_data;
153 u32 mem_type, running, blackout = 0;
154 u32 *io_mc_regs;
155 int i;
156
157 if (!rdev->mc_fw)
158 return -EINVAL;
159
160 switch (rdev->family) {
161 case CHIP_BARTS:
162 io_mc_regs = (u32 *)&barts_io_mc_regs;
163 break;
164 case CHIP_TURKS:
165 io_mc_regs = (u32 *)&turks_io_mc_regs;
166 break;
167 case CHIP_CAICOS:
168 default:
169 io_mc_regs = (u32 *)&caicos_io_mc_regs;
170 break;
171 }
172
173 mem_type = (RREG32(MC_SEQ_MISC0) & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT;
174 running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
175
176 if ((mem_type == MC_SEQ_MISC0_GDDR5_VALUE) && (running == 0)) {
177 if (running) {
178 blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
179 WREG32(MC_SHARED_BLACKOUT_CNTL, 1);
180 }
181
182 /* reset the engine and set to writable */
183 WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
184 WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
185
186 /* load mc io regs */
187 for (i = 0; i < BTC_IO_MC_REGS_SIZE; i++) {
188 WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
189 WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
190 }
191 /* load the MC ucode */
192 fw_data = (const __be32 *)rdev->mc_fw->data;
193 for (i = 0; i < BTC_MC_UCODE_SIZE; i++)
194 WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
195
196 /* put the engine back into the active state */
197 WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
198 WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
199 WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
200
201 /* wait for training to complete */
202 while (!(RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD))
203 udelay(10);
204
205 if (running)
206 WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
207 }
208
209 return 0;
210}
211
212int ni_init_microcode(struct radeon_device *rdev)
213{
214 struct platform_device *pdev;
215 const char *chip_name;
216 const char *rlc_chip_name;
217 size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size;
218 char fw_name[30];
219 int err;
220
221 DRM_DEBUG("\n");
222
223 pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
224 err = IS_ERR(pdev);
225 if (err) {
226 printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
227 return -EINVAL;
228 }
229
230 switch (rdev->family) {
231 case CHIP_BARTS:
232 chip_name = "BARTS";
233 rlc_chip_name = "BTC";
234 break;
235 case CHIP_TURKS:
236 chip_name = "TURKS";
237 rlc_chip_name = "BTC";
238 break;
239 case CHIP_CAICOS:
240 chip_name = "CAICOS";
241 rlc_chip_name = "BTC";
242 break;
243 default: BUG();
244 }
245
246 pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
247 me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
248 rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
249 mc_req_size = BTC_MC_UCODE_SIZE * 4;
250
251 DRM_INFO("Loading %s Microcode\n", chip_name);
252
253 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
254 err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
255 if (err)
256 goto out;
257 if (rdev->pfp_fw->size != pfp_req_size) {
258 printk(KERN_ERR
259 "ni_cp: Bogus length %zu in firmware \"%s\"\n",
260 rdev->pfp_fw->size, fw_name);
261 err = -EINVAL;
262 goto out;
263 }
264
265 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
266 err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
267 if (err)
268 goto out;
269 if (rdev->me_fw->size != me_req_size) {
270 printk(KERN_ERR
271 "ni_cp: Bogus length %zu in firmware \"%s\"\n",
272 rdev->me_fw->size, fw_name);
273 err = -EINVAL;
274 }
275
276 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
277 err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
278 if (err)
279 goto out;
280 if (rdev->rlc_fw->size != rlc_req_size) {
281 printk(KERN_ERR
282 "ni_rlc: Bogus length %zu in firmware \"%s\"\n",
283 rdev->rlc_fw->size, fw_name);
284 err = -EINVAL;
285 }
286
287 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
288 err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev);
289 if (err)
290 goto out;
291 if (rdev->mc_fw->size != mc_req_size) {
292 printk(KERN_ERR
293 "ni_mc: Bogus length %zu in firmware \"%s\"\n",
294 rdev->mc_fw->size, fw_name);
295 err = -EINVAL;
296 }
297out:
298 platform_device_unregister(pdev);
299
300 if (err) {
301 if (err != -EINVAL)
302 printk(KERN_ERR
303 "ni_cp: Failed to load firmware \"%s\"\n",
304 fw_name);
305 release_firmware(rdev->pfp_fw);
306 rdev->pfp_fw = NULL;
307 release_firmware(rdev->me_fw);
308 rdev->me_fw = NULL;
309 release_firmware(rdev->rlc_fw);
310 rdev->rlc_fw = NULL;
311 release_firmware(rdev->mc_fw);
312 rdev->mc_fw = NULL;
313 }
314 return err;
315}
316
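
ni_init_microcode() repeats the same request-then-length-check step for the pfp, me, rlc and mc images, and the out: path releases all four blobs on any failure. A hedged refactoring sketch of that repeated step (hypothetical helper, not in the patch):

/* Request one firmware image and verify its expected byte length. */
static int ni_request_fw(const struct firmware **fw, const char *name,
			 size_t req_size, struct device *dev)
{
	int err = request_firmware(fw, name, dev);

	if (err)
		return err;
	if ((*fw)->size != req_size) {
		printk(KERN_ERR "ni: Bogus length %zu in firmware \"%s\"\n",
		       (*fw)->size, name);
		return -EINVAL;
	}
	return 0;
}
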
diff --git a/drivers/gpu/drm/radeon/ni_reg.h b/drivers/gpu/drm/radeon/ni_reg.h
new file mode 100644
index 000000000000..5db7b7d6feb0
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ni_reg.h
@@ -0,0 +1,86 @@
1/*
2 * Copyright 2010 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24#ifndef __NI_REG_H__
25#define __NI_REG_H__
26
27/* northern islands - DCE5 */
28
29#define NI_INPUT_GAMMA_CONTROL 0x6840
30# define NI_GRPH_INPUT_GAMMA_MODE(x) (((x) & 0x3) << 0)
31# define NI_INPUT_GAMMA_USE_LUT 0
32# define NI_INPUT_GAMMA_BYPASS 1
33# define NI_INPUT_GAMMA_SRGB_24 2
34# define NI_INPUT_GAMMA_XVYCC_222 3
35# define NI_OVL_INPUT_GAMMA_MODE(x) (((x) & 0x3) << 4)
36
37#define NI_PRESCALE_GRPH_CONTROL 0x68b4
38# define NI_GRPH_PRESCALE_BYPASS (1 << 4)
39
40#define NI_PRESCALE_OVL_CONTROL 0x68c4
41# define NI_OVL_PRESCALE_BYPASS (1 << 4)
42
43#define NI_INPUT_CSC_CONTROL 0x68d4
44# define NI_INPUT_CSC_GRPH_MODE(x) (((x) & 0x3) << 0)
45# define NI_INPUT_CSC_BYPASS 0
46# define NI_INPUT_CSC_PROG_COEFF 1
47# define NI_INPUT_CSC_PROG_SHARED_MATRIXA 2
48# define NI_INPUT_CSC_OVL_MODE(x) (((x) & 0x3) << 4)
49
50#define NI_OUTPUT_CSC_CONTROL 0x68f0
51# define NI_OUTPUT_CSC_GRPH_MODE(x) (((x) & 0x7) << 0)
52# define NI_OUTPUT_CSC_BYPASS 0
53# define NI_OUTPUT_CSC_TV_RGB 1
54# define NI_OUTPUT_CSC_YCBCR_601 2
55# define NI_OUTPUT_CSC_YCBCR_709 3
56# define NI_OUTPUT_CSC_PROG_COEFF 4
57# define NI_OUTPUT_CSC_PROG_SHARED_MATRIXB 5
58# define NI_OUTPUT_CSC_OVL_MODE(x) (((x) & 0x7) << 4)
59
60#define NI_DEGAMMA_CONTROL 0x6960
61# define NI_GRPH_DEGAMMA_MODE(x) (((x) & 0x3) << 0)
62# define NI_DEGAMMA_BYPASS 0
63# define NI_DEGAMMA_SRGB_24 1
64# define NI_DEGAMMA_XVYCC_222 2
65# define NI_OVL_DEGAMMA_MODE(x) (((x) & 0x3) << 4)
66# define NI_ICON_DEGAMMA_MODE(x) (((x) & 0x3) << 8)
67# define NI_CURSOR_DEGAMMA_MODE(x) (((x) & 0x3) << 12)
68
69#define NI_GAMUT_REMAP_CONTROL 0x6964
70# define NI_GRPH_GAMUT_REMAP_MODE(x) (((x) & 0x3) << 0)
71# define NI_GAMUT_REMAP_BYPASS 0
72# define NI_GAMUT_REMAP_PROG_COEFF 1
73# define NI_GAMUT_REMAP_PROG_SHARED_MATRIXA 2
74# define NI_GAMUT_REMAP_PROG_SHARED_MATRIXB 3
75# define NI_OVL_GAMUT_REMAP_MODE(x) (((x) & 0x3) << 4)
76
77#define NI_REGAMMA_CONTROL 0x6a80
78# define NI_GRPH_REGAMMA_MODE(x) (((x) & 0x7) << 0)
79# define NI_REGAMMA_BYPASS 0
80# define NI_REGAMMA_SRGB_24 1
81# define NI_REGAMMA_XVYCC_222 2
82# define NI_REGAMMA_PROG_A 3
83# define NI_REGAMMA_PROG_B 4
84# define NI_OVL_REGAMMA_MODE(x) (((x) & 0x7) << 4)
85
86#endif
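
These DCE5 color-pipeline registers are per display controller, so a driver would add the CRTC register offset when programming them, as with the other display blocks. A hedged usage sketch (the bypass choices and the crtc_offset addressing are illustrative only, not taken from the patch):

/* Hypothetical sketch: program the DCE5 input gamma for one CRTC,
 * routing the graphics surface through the LUT and bypassing overlay. */
static void ni_program_input_gamma(struct radeon_device *rdev,
				   struct radeon_crtc *radeon_crtc)
{
	u32 tmp = NI_GRPH_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT) |
		  NI_OVL_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_BYPASS);

	WREG32(NI_INPUT_GAMMA_CONTROL + radeon_crtc->crtc_offset, tmp);
}
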
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
new file mode 100644
index 000000000000..f7b445390e02
--- /dev/null
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -0,0 +1,41 @@
1/*
2 * Copyright 2010 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Alex Deucher
23 */
24#ifndef NI_H
25#define NI_H
26
27#define MC_SHARED_BLACKOUT_CNTL 0x20ac
28#define MC_SEQ_SUP_CNTL 0x28c8
29#define RUN_MASK (1 << 0)
30#define MC_SEQ_SUP_PGM 0x28cc
31#define MC_IO_PAD_CNTL_D0 0x29d0
32#define MEM_FALL_OUT_CMD (1 << 8)
33#define MC_SEQ_MISC0 0x2a00
34#define MC_SEQ_MISC0_GDDR5_SHIFT 28
35#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
36#define MC_SEQ_MISC0_GDDR5_VALUE 5
37#define MC_SEQ_IO_DEBUG_INDEX 0x2a44
38#define MC_SEQ_IO_DEBUG_DATA 0x2a48
39
40#endif
41
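
The MC_SEQ_MISC0 fields above are what btc_mc_load_microcode() uses to gate the MC microcode upload on GDDR5 boards; pulled out as a predicate, the check reads (hypothetical helper, not in the patch):

/* True when the memory controller reports GDDR5 memory. */
static bool btc_mem_is_gddr5(struct radeon_device *rdev)
{
	u32 mem_type = (RREG32(MC_SEQ_MISC0) & MC_SEQ_MISC0_GDDR5_MASK) >>
		       MC_SEQ_MISC0_GDDR5_SHIFT;

	return mem_type == MC_SEQ_MISC0_GDDR5_VALUE;
}
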
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 8e10aa9f74b0..f637595b14e1 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -68,6 +68,56 @@ MODULE_FIRMWARE(FIRMWARE_R520);
68 * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 68 * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
69 */ 69 */
70 70
71void r100_pre_page_flip(struct radeon_device *rdev, int crtc)
72{
73 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
74 u32 tmp;
75
76 /* make sure flip is at vb rather than hb */
77 tmp = RREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset);
78 tmp &= ~RADEON_CRTC_OFFSET_FLIP_CNTL;
79 /* make sure pending bit is asserted */
80 tmp |= RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN;
81 WREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset, tmp);
82
83 /* set pageflip to happen as late as possible in the vblank interval.
84 * same field for crtc1/2
85 */
86 tmp = RREG32(RADEON_CRTC_GEN_CNTL);
87 tmp &= ~RADEON_CRTC_VSTAT_MODE_MASK;
88 WREG32(RADEON_CRTC_GEN_CNTL, tmp);
89
90 /* enable the pflip int */
91 radeon_irq_kms_pflip_irq_get(rdev, crtc);
92}
93
94void r100_post_page_flip(struct radeon_device *rdev, int crtc)
95{
96 /* disable the pflip int */
97 radeon_irq_kms_pflip_irq_put(rdev, crtc);
98}
99
100u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
101{
102 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
103 u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
104
105 /* Lock the graphics update lock */
106 /* update the scanout addresses */
107 WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
108
109 /* Wait for update_pending to go high. */
110 while (!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET));
111 DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
112
113 /* Unlock the lock, so double-buffering can take place inside vblank */
114 tmp &= ~RADEON_CRTC_OFFSET__OFFSET_LOCK;
115 WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
116
117 /* Return current update_pending status: */
118 return RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET;
119}
120
71void r100_pm_get_dynpm_state(struct radeon_device *rdev) 121void r100_pm_get_dynpm_state(struct radeon_device *rdev)
72{ 122{
73 int i; 123 int i;
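
The three new hooks split a flip across the vblank: r100_pre_page_flip() forces the flip into the vblank interval and enables the pflip interrupt, r100_page_flip() programs the new scanout base under the offset lock and reports whether the update is still pending, and r100_post_page_flip() drops the interrupt reference once the flip has been handled. A hedged outline of the call order (the surrounding plumbing lives in the DRM page-flip path and is not shown in this patch):

/* Sketch: how a page-flip path would drive the new hooks. */
r100_pre_page_flip(rdev, crtc_id);	/* arm vblank-synced flip + pflip irq */
r100_page_flip(rdev, crtc_id, new_crtc_base);
/* ... pflip interrupt fires; radeon_crtc_handle_flip() completes the flip ... */
r100_post_page_flip(rdev, crtc_id);	/* drop the pflip irq reference */
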
@@ -526,10 +576,12 @@ int r100_irq_set(struct radeon_device *rdev)
526 if (rdev->irq.gui_idle) { 576 if (rdev->irq.gui_idle) {
527 tmp |= RADEON_GUI_IDLE_MASK; 577 tmp |= RADEON_GUI_IDLE_MASK;
528 } 578 }
529 if (rdev->irq.crtc_vblank_int[0]) { 579 if (rdev->irq.crtc_vblank_int[0] ||
580 rdev->irq.pflip[0]) {
530 tmp |= RADEON_CRTC_VBLANK_MASK; 581 tmp |= RADEON_CRTC_VBLANK_MASK;
531 } 582 }
532 if (rdev->irq.crtc_vblank_int[1]) { 583 if (rdev->irq.crtc_vblank_int[1] ||
584 rdev->irq.pflip[1]) {
533 tmp |= RADEON_CRTC2_VBLANK_MASK; 585 tmp |= RADEON_CRTC2_VBLANK_MASK;
534 } 586 }
535 if (rdev->irq.hpd[0]) { 587 if (rdev->irq.hpd[0]) {
@@ -600,14 +652,22 @@ int r100_irq_process(struct radeon_device *rdev)
600 } 652 }
601 /* Vertical blank interrupts */ 653 /* Vertical blank interrupts */
602 if (status & RADEON_CRTC_VBLANK_STAT) { 654 if (status & RADEON_CRTC_VBLANK_STAT) {
603 drm_handle_vblank(rdev->ddev, 0); 655 if (rdev->irq.crtc_vblank_int[0]) {
604 rdev->pm.vblank_sync = true; 656 drm_handle_vblank(rdev->ddev, 0);
605 wake_up(&rdev->irq.vblank_queue); 657 rdev->pm.vblank_sync = true;
658 wake_up(&rdev->irq.vblank_queue);
659 }
660 if (rdev->irq.pflip[0])
661 radeon_crtc_handle_flip(rdev, 0);
606 } 662 }
607 if (status & RADEON_CRTC2_VBLANK_STAT) { 663 if (status & RADEON_CRTC2_VBLANK_STAT) {
608 drm_handle_vblank(rdev->ddev, 1); 664 if (rdev->irq.crtc_vblank_int[1]) {
609 rdev->pm.vblank_sync = true; 665 drm_handle_vblank(rdev->ddev, 1);
610 wake_up(&rdev->irq.vblank_queue); 666 rdev->pm.vblank_sync = true;
667 wake_up(&rdev->irq.vblank_queue);
668 }
669 if (rdev->irq.pflip[1])
670 radeon_crtc_handle_flip(rdev, 1);
611 } 671 }
612 if (status & RADEON_FP_DETECT_STAT) { 672 if (status & RADEON_FP_DETECT_STAT) {
613 queue_hotplug = true; 673 queue_hotplug = true;
@@ -622,7 +682,7 @@ int r100_irq_process(struct radeon_device *rdev)
622 /* reset gui idle ack. the status bit is broken */ 682 /* reset gui idle ack. the status bit is broken */
623 rdev->irq.gui_idle_acked = false; 683 rdev->irq.gui_idle_acked = false;
624 if (queue_hotplug) 684 if (queue_hotplug)
625 queue_work(rdev->wq, &rdev->hotplug_work); 685 schedule_work(&rdev->hotplug_work);
626 if (rdev->msi_enabled) { 686 if (rdev->msi_enabled) {
627 switch (rdev->family) { 687 switch (rdev->family) {
628 case CHIP_RS400: 688 case CHIP_RS400:
diff --git a/drivers/gpu/drm/radeon/r100d.h b/drivers/gpu/drm/radeon/r100d.h
index b121b6c678d4..eab91760fae0 100644
--- a/drivers/gpu/drm/radeon/r100d.h
+++ b/drivers/gpu/drm/radeon/r100d.h
@@ -551,7 +551,7 @@
551#define S_000360_CUR2_LOCK(x) (((x) & 0x1) << 31) 551#define S_000360_CUR2_LOCK(x) (((x) & 0x1) << 31)
552#define G_000360_CUR2_LOCK(x) (((x) >> 31) & 0x1) 552#define G_000360_CUR2_LOCK(x) (((x) >> 31) & 0x1)
553#define C_000360_CUR2_LOCK 0x7FFFFFFF 553#define C_000360_CUR2_LOCK 0x7FFFFFFF
554#define R_0003C2_GENMO_WT 0x0003C0 554#define R_0003C2_GENMO_WT 0x0003C2
555#define S_0003C2_GENMO_MONO_ADDRESS_B(x) (((x) & 0x1) << 0) 555#define S_0003C2_GENMO_MONO_ADDRESS_B(x) (((x) & 0x1) << 0)
556#define G_0003C2_GENMO_MONO_ADDRESS_B(x) (((x) >> 0) & 0x1) 556#define G_0003C2_GENMO_MONO_ADDRESS_B(x) (((x) >> 0) & 0x1)
557#define C_0003C2_GENMO_MONO_ADDRESS_B 0xFE 557#define C_0003C2_GENMO_MONO_ADDRESS_B 0xFE
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index cde1d3480d93..fae5e709f270 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -558,10 +558,7 @@ int rv370_get_pcie_lanes(struct radeon_device *rdev)
558 558
559 /* FIXME wait for idle */ 559 /* FIXME wait for idle */
560 560
561 if (rdev->family < CHIP_R600) 561 link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
562 link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
563 else
564 link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
565 562
566 switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) { 563 switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
567 case RADEON_PCIE_LC_LINK_WIDTH_X0: 564 case RADEON_PCIE_LC_LINK_WIDTH_X0:
@@ -745,6 +742,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
745 break; 742 break;
746 case 0x4E00: 743 case 0x4E00:
747 /* RB3D_CCTL */ 744 /* RB3D_CCTL */
745 if ((idx_value & (1 << 10)) && /* CMASK_ENABLE */
746 p->rdev->cmask_filp != p->filp) {
747 DRM_ERROR("Invalid RB3D_CCTL: Cannot enable CMASK.\n");
748 return -EINVAL;
749 }
748 track->num_cb = ((idx_value >> 5) & 0x3) + 1; 750 track->num_cb = ((idx_value >> 5) & 0x3) + 1;
749 break; 751 break;
750 case 0x4E38: 752 case 0x4E38:
@@ -787,6 +789,13 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
787 case 15: 789 case 15:
788 track->cb[i].cpp = 2; 790 track->cb[i].cpp = 2;
789 break; 791 break;
792 case 5:
793 if (p->rdev->family < CHIP_RV515) {
794 DRM_ERROR("Invalid color buffer format (%d)!\n",
795 ((idx_value >> 21) & 0xF));
796 return -EINVAL;
797 }
798 /* Pass through. */
790 case 6: 799 case 6:
791 track->cb[i].cpp = 4; 800 track->cb[i].cpp = 4;
792 break; 801 break;
@@ -1199,6 +1208,10 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
1199 if (p->rdev->hyperz_filp != p->filp) 1208 if (p->rdev->hyperz_filp != p->filp)
1200 return -EINVAL; 1209 return -EINVAL;
1201 break; 1210 break;
1211 case PACKET3_3D_CLEAR_CMASK:
1212 if (p->rdev->cmask_filp != p->filp)
1213 return -EINVAL;
1214 break;
1202 case PACKET3_NOP: 1215 case PACKET3_NOP:
1203 break; 1216 break;
1204 default: 1217 default:
diff --git a/drivers/gpu/drm/radeon/r300d.h b/drivers/gpu/drm/radeon/r300d.h
index 0c036c60d9df..1f519a5ffb8c 100644
--- a/drivers/gpu/drm/radeon/r300d.h
+++ b/drivers/gpu/drm/radeon/r300d.h
@@ -54,6 +54,7 @@
54#define PACKET3_3D_DRAW_IMMD_2 0x35 54#define PACKET3_3D_DRAW_IMMD_2 0x35
55#define PACKET3_3D_DRAW_INDX_2 0x36 55#define PACKET3_3D_DRAW_INDX_2 0x36
56#define PACKET3_3D_CLEAR_HIZ 0x37 56#define PACKET3_3D_CLEAR_HIZ 0x37
57#define PACKET3_3D_CLEAR_CMASK 0x38
57#define PACKET3_BITBLT_MULTI 0x9B 58#define PACKET3_BITBLT_MULTI 0x9B
58 59
59#define PACKET0(reg, n) (CP_PACKET0 | \ 60#define PACKET0(reg, n) (CP_PACKET0 | \
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index 6ac1f604e29b..fc437059918f 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -355,6 +355,8 @@
355#define AVIVO_D1CRTC_FRAME_COUNT 0x60a4 355#define AVIVO_D1CRTC_FRAME_COUNT 0x60a4
356#define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4 356#define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4
357 357
358#define AVIVO_D1MODE_MASTER_UPDATE_MODE 0x60e4
359
358/* master controls */ 360/* master controls */
359#define AVIVO_DC_CRTC_MASTER_EN 0x60f8 361#define AVIVO_DC_CRTC_MASTER_EN 0x60f8
360#define AVIVO_DC_CRTC_TV_CONTROL 0x60fc 362#define AVIVO_DC_CRTC_TV_CONTROL 0x60fc
@@ -409,8 +411,10 @@
409#define AVIVO_D1GRPH_X_END 0x6134 411#define AVIVO_D1GRPH_X_END 0x6134
410#define AVIVO_D1GRPH_Y_END 0x6138 412#define AVIVO_D1GRPH_Y_END 0x6138
411#define AVIVO_D1GRPH_UPDATE 0x6144 413#define AVIVO_D1GRPH_UPDATE 0x6144
414# define AVIVO_D1GRPH_SURFACE_UPDATE_PENDING (1 << 2)
412# define AVIVO_D1GRPH_UPDATE_LOCK (1 << 16) 415# define AVIVO_D1GRPH_UPDATE_LOCK (1 << 16)
413#define AVIVO_D1GRPH_FLIP_CONTROL 0x6148 416#define AVIVO_D1GRPH_FLIP_CONTROL 0x6148
417# define AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN (1 << 0)
414 418
415#define AVIVO_D1CUR_CONTROL 0x6400 419#define AVIVO_D1CUR_CONTROL 0x6400
416# define AVIVO_D1CURSOR_EN (1 << 0) 420# define AVIVO_D1CURSOR_EN (1 << 0)
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 9c92db7c896b..6b50716267c0 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -83,6 +83,9 @@ MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
83MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin"); 83MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
84MODULE_FIRMWARE("radeon/CYPRESS_me.bin"); 84MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
85MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin"); 85MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
86MODULE_FIRMWARE("radeon/PALM_pfp.bin");
87MODULE_FIRMWARE("radeon/PALM_me.bin");
88MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
86 89
87int r600_debugfs_mc_info_init(struct radeon_device *rdev); 90int r600_debugfs_mc_info_init(struct radeon_device *rdev);
88 91
@@ -91,6 +94,7 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev);
91void r600_gpu_init(struct radeon_device *rdev); 94void r600_gpu_init(struct radeon_device *rdev);
92void r600_fini(struct radeon_device *rdev); 95void r600_fini(struct radeon_device *rdev);
93void r600_irq_disable(struct radeon_device *rdev); 96void r600_irq_disable(struct radeon_device *rdev);
97static void r600_pcie_gen2_enable(struct radeon_device *rdev);
94 98
95/* get temperature in millidegrees */ 99/* get temperature in millidegrees */
96u32 rv6xx_get_temp(struct radeon_device *rdev) 100u32 rv6xx_get_temp(struct radeon_device *rdev)
@@ -1164,7 +1168,7 @@ static void r600_mc_program(struct radeon_device *rdev)
1164 * Note: GTT start, end, size should be initialized before calling this 1168 * Note: GTT start, end, size should be initialized before calling this
1165 * function on AGP platform. 1169 * function on AGP platform.
1166 */ 1170 */
1167void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) 1171static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
1168{ 1172{
1169 u64 size_bf, size_af; 1173 u64 size_bf, size_af;
1170 1174
@@ -2009,6 +2013,10 @@ int r600_init_microcode(struct radeon_device *rdev)
2009 chip_name = "CYPRESS"; 2013 chip_name = "CYPRESS";
2010 rlc_chip_name = "CYPRESS"; 2014 rlc_chip_name = "CYPRESS";
2011 break; 2015 break;
2016 case CHIP_PALM:
2017 chip_name = "PALM";
2018 rlc_chip_name = "SUMO";
2019 break;
2012 default: BUG(); 2020 default: BUG();
2013 } 2021 }
2014 2022
@@ -2372,6 +2380,9 @@ int r600_startup(struct radeon_device *rdev)
2372{ 2380{
2373 int r; 2381 int r;
2374 2382
2383 /* enable pcie gen2 link */
2384 r600_pcie_gen2_enable(rdev);
2385
2375 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { 2386 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
2376 r = r600_init_microcode(rdev); 2387 r = r600_init_microcode(rdev);
2377 if (r) { 2388 if (r) {
@@ -2874,6 +2885,8 @@ static void r600_disable_interrupt_state(struct radeon_device *rdev)
2874 WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); 2885 WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
2875 WREG32(GRBM_INT_CNTL, 0); 2886 WREG32(GRBM_INT_CNTL, 0);
2876 WREG32(DxMODE_INT_MASK, 0); 2887 WREG32(DxMODE_INT_MASK, 0);
2888 WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
2889 WREG32(D2GRPH_INTERRUPT_CONTROL, 0);
2877 if (ASIC_IS_DCE3(rdev)) { 2890 if (ASIC_IS_DCE3(rdev)) {
2878 WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0); 2891 WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
2879 WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0); 2892 WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
@@ -2998,6 +3011,7 @@ int r600_irq_set(struct radeon_device *rdev)
2998 u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0; 3011 u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
2999 u32 grbm_int_cntl = 0; 3012 u32 grbm_int_cntl = 0;
3000 u32 hdmi1, hdmi2; 3013 u32 hdmi1, hdmi2;
3014 u32 d1grph = 0, d2grph = 0;
3001 3015
3002 if (!rdev->irq.installed) { 3016 if (!rdev->irq.installed) {
3003 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); 3017 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -3034,11 +3048,13 @@ int r600_irq_set(struct radeon_device *rdev)
3034 cp_int_cntl |= RB_INT_ENABLE; 3048 cp_int_cntl |= RB_INT_ENABLE;
3035 cp_int_cntl |= TIME_STAMP_INT_ENABLE; 3049 cp_int_cntl |= TIME_STAMP_INT_ENABLE;
3036 } 3050 }
3037 if (rdev->irq.crtc_vblank_int[0]) { 3051 if (rdev->irq.crtc_vblank_int[0] ||
3052 rdev->irq.pflip[0]) {
3038 DRM_DEBUG("r600_irq_set: vblank 0\n"); 3053 DRM_DEBUG("r600_irq_set: vblank 0\n");
3039 mode_int |= D1MODE_VBLANK_INT_MASK; 3054 mode_int |= D1MODE_VBLANK_INT_MASK;
3040 } 3055 }
3041 if (rdev->irq.crtc_vblank_int[1]) { 3056 if (rdev->irq.crtc_vblank_int[1] ||
3057 rdev->irq.pflip[1]) {
3042 DRM_DEBUG("r600_irq_set: vblank 1\n"); 3058 DRM_DEBUG("r600_irq_set: vblank 1\n");
3043 mode_int |= D2MODE_VBLANK_INT_MASK; 3059 mode_int |= D2MODE_VBLANK_INT_MASK;
3044 } 3060 }
@@ -3081,6 +3097,8 @@ int r600_irq_set(struct radeon_device *rdev)
3081 3097
3082 WREG32(CP_INT_CNTL, cp_int_cntl); 3098 WREG32(CP_INT_CNTL, cp_int_cntl);
3083 WREG32(DxMODE_INT_MASK, mode_int); 3099 WREG32(DxMODE_INT_MASK, mode_int);
3100 WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
3101 WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
3084 WREG32(GRBM_INT_CNTL, grbm_int_cntl); 3102 WREG32(GRBM_INT_CNTL, grbm_int_cntl);
3085 WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1); 3103 WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1);
3086 if (ASIC_IS_DCE3(rdev)) { 3104 if (ASIC_IS_DCE3(rdev)) {
@@ -3103,32 +3121,35 @@ int r600_irq_set(struct radeon_device *rdev)
3103 return 0; 3121 return 0;
3104} 3122}
3105 3123
3106static inline void r600_irq_ack(struct radeon_device *rdev, 3124static inline void r600_irq_ack(struct radeon_device *rdev)
3107 u32 *disp_int,
3108 u32 *disp_int_cont,
3109 u32 *disp_int_cont2)
3110{ 3125{
3111 u32 tmp; 3126 u32 tmp;
3112 3127
3113 if (ASIC_IS_DCE3(rdev)) { 3128 if (ASIC_IS_DCE3(rdev)) {
3114 *disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS); 3129 rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
3115 *disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE); 3130 rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
3116 *disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2); 3131 rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
3117 } else { 3132 } else {
3118 *disp_int = RREG32(DISP_INTERRUPT_STATUS); 3133 rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
3119 *disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE); 3134 rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
3120 *disp_int_cont2 = 0; 3135 rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
3121 } 3136 }
3122 3137 rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
3123 if (*disp_int & LB_D1_VBLANK_INTERRUPT) 3138 rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);
3139
3140 if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED)
3141 WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
3142 if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED)
3143 WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
3144 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT)
3124 WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK); 3145 WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
3125 if (*disp_int & LB_D1_VLINE_INTERRUPT) 3146 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT)
3126 WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK); 3147 WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
3127 if (*disp_int & LB_D2_VBLANK_INTERRUPT) 3148 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT)
3128 WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK); 3149 WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
3129 if (*disp_int & LB_D2_VLINE_INTERRUPT) 3150 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT)
3130 WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK); 3151 WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
3131 if (*disp_int & DC_HPD1_INTERRUPT) { 3152 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
3132 if (ASIC_IS_DCE3(rdev)) { 3153 if (ASIC_IS_DCE3(rdev)) {
3133 tmp = RREG32(DC_HPD1_INT_CONTROL); 3154 tmp = RREG32(DC_HPD1_INT_CONTROL);
3134 tmp |= DC_HPDx_INT_ACK; 3155 tmp |= DC_HPDx_INT_ACK;
@@ -3139,7 +3160,7 @@ static inline void r600_irq_ack(struct radeon_device *rdev,
3139 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp); 3160 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
3140 } 3161 }
3141 } 3162 }
3142 if (*disp_int & DC_HPD2_INTERRUPT) { 3163 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
3143 if (ASIC_IS_DCE3(rdev)) { 3164 if (ASIC_IS_DCE3(rdev)) {
3144 tmp = RREG32(DC_HPD2_INT_CONTROL); 3165 tmp = RREG32(DC_HPD2_INT_CONTROL);
3145 tmp |= DC_HPDx_INT_ACK; 3166 tmp |= DC_HPDx_INT_ACK;
@@ -3150,7 +3171,7 @@ static inline void r600_irq_ack(struct radeon_device *rdev,
3150 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp); 3171 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
3151 } 3172 }
3152 } 3173 }
3153 if (*disp_int_cont & DC_HPD3_INTERRUPT) { 3174 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
3154 if (ASIC_IS_DCE3(rdev)) { 3175 if (ASIC_IS_DCE3(rdev)) {
3155 tmp = RREG32(DC_HPD3_INT_CONTROL); 3176 tmp = RREG32(DC_HPD3_INT_CONTROL);
3156 tmp |= DC_HPDx_INT_ACK; 3177 tmp |= DC_HPDx_INT_ACK;
@@ -3161,18 +3182,18 @@ static inline void r600_irq_ack(struct radeon_device *rdev,
3161 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp); 3182 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
3162 } 3183 }
3163 } 3184 }
3164 if (*disp_int_cont & DC_HPD4_INTERRUPT) { 3185 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
3165 tmp = RREG32(DC_HPD4_INT_CONTROL); 3186 tmp = RREG32(DC_HPD4_INT_CONTROL);
3166 tmp |= DC_HPDx_INT_ACK; 3187 tmp |= DC_HPDx_INT_ACK;
3167 WREG32(DC_HPD4_INT_CONTROL, tmp); 3188 WREG32(DC_HPD4_INT_CONTROL, tmp);
3168 } 3189 }
3169 if (ASIC_IS_DCE32(rdev)) { 3190 if (ASIC_IS_DCE32(rdev)) {
3170 if (*disp_int_cont2 & DC_HPD5_INTERRUPT) { 3191 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
3171 tmp = RREG32(DC_HPD5_INT_CONTROL); 3192 tmp = RREG32(DC_HPD5_INT_CONTROL);
3172 tmp |= DC_HPDx_INT_ACK; 3193 tmp |= DC_HPDx_INT_ACK;
3173 WREG32(DC_HPD5_INT_CONTROL, tmp); 3194 WREG32(DC_HPD5_INT_CONTROL, tmp);
3174 } 3195 }
3175 if (*disp_int_cont2 & DC_HPD6_INTERRUPT) { 3196 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
3176 tmp = RREG32(DC_HPD6_INT_CONTROL); 3197 tmp = RREG32(DC_HPD6_INT_CONTROL);
3177 tmp |= DC_HPDx_INT_ACK; 3198 tmp |= DC_HPDx_INT_ACK;
3178 WREG32(DC_HPD6_INT_CONTROL, tmp); 3199 WREG32(DC_HPD6_INT_CONTROL, tmp);
@@ -3194,12 +3215,10 @@ static inline void r600_irq_ack(struct radeon_device *rdev,
3194 3215
3195void r600_irq_disable(struct radeon_device *rdev) 3216void r600_irq_disable(struct radeon_device *rdev)
3196{ 3217{
3197 u32 disp_int, disp_int_cont, disp_int_cont2;
3198
3199 r600_disable_interrupts(rdev); 3218 r600_disable_interrupts(rdev);
3200 /* Wait and acknowledge irq */ 3219 /* Wait and acknowledge irq */
3201 mdelay(1); 3220 mdelay(1);
3202 r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2); 3221 r600_irq_ack(rdev);
3203 r600_disable_interrupt_state(rdev); 3222 r600_disable_interrupt_state(rdev);
3204} 3223}
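With this hunk r600_irq_ack() drops its three out-parameters and latches the status words into rdev->irq.stat_regs.r600 instead, so r600_irq_disable() above and r600_irq_process() below share one snapshot rather than threading locals around. A standalone sketch of the shape of that refactor, with stubbed register reads and simplified names (only the 0x6158 offset comes from this patch; the other offset is a placeholder):

#include <stdint.h>

struct stat_regs {
	uint32_t disp_int, disp_int_cont, disp_int_cont2;
	uint32_t d1grph_int, d2grph_int;
};

struct dev_irq { struct stat_regs r600; };

static uint32_t reg_read(uint32_t off) { (void)off; return 0; } /* RREG32 stub */

static void irq_ack(struct dev_irq *irq)
{
	/* latch once; every later test-and-clear works on this snapshot */
	irq->r600.disp_int   = reg_read(0x1234 /* placeholder offset */);
	irq->r600.d1grph_int = reg_read(0x6158 /* D1GRPH_INTERRUPT_STATUS */);
}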
3205 3224
@@ -3262,7 +3281,7 @@ int r600_irq_process(struct radeon_device *rdev)
3262 u32 wptr = r600_get_ih_wptr(rdev); 3281 u32 wptr = r600_get_ih_wptr(rdev);
3263 u32 rptr = rdev->ih.rptr; 3282 u32 rptr = rdev->ih.rptr;
3264 u32 src_id, src_data; 3283 u32 src_id, src_data;
3265 u32 ring_index, disp_int, disp_int_cont, disp_int_cont2; 3284 u32 ring_index;
3266 unsigned long flags; 3285 unsigned long flags;
3267 bool queue_hotplug = false; 3286 bool queue_hotplug = false;
3268 3287
@@ -3283,7 +3302,7 @@ int r600_irq_process(struct radeon_device *rdev)
3283 3302
3284restart_ih: 3303restart_ih:
3285 /* display interrupts */ 3304 /* display interrupts */
3286 r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2); 3305 r600_irq_ack(rdev);
3287 3306
3288 rdev->ih.wptr = wptr; 3307 rdev->ih.wptr = wptr;
3289 while (rptr != wptr) { 3308 while (rptr != wptr) {
@@ -3296,17 +3315,21 @@ restart_ih:
3296 case 1: /* D1 vblank/vline */ 3315 case 1: /* D1 vblank/vline */
3297 switch (src_data) { 3316 switch (src_data) {
3298 case 0: /* D1 vblank */ 3317 case 0: /* D1 vblank */
3299 if (disp_int & LB_D1_VBLANK_INTERRUPT) { 3318 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
3300 drm_handle_vblank(rdev->ddev, 0); 3319 if (rdev->irq.crtc_vblank_int[0]) {
3301 rdev->pm.vblank_sync = true; 3320 drm_handle_vblank(rdev->ddev, 0);
3302 wake_up(&rdev->irq.vblank_queue); 3321 rdev->pm.vblank_sync = true;
3303 disp_int &= ~LB_D1_VBLANK_INTERRUPT; 3322 wake_up(&rdev->irq.vblank_queue);
3323 }
3324 if (rdev->irq.pflip[0])
3325 radeon_crtc_handle_flip(rdev, 0);
3326 rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
3304 DRM_DEBUG("IH: D1 vblank\n"); 3327 DRM_DEBUG("IH: D1 vblank\n");
3305 } 3328 }
3306 break; 3329 break;
3307 case 1: /* D1 vline */ 3330 case 1: /* D1 vline */
3308 if (disp_int & LB_D1_VLINE_INTERRUPT) { 3331 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
3309 disp_int &= ~LB_D1_VLINE_INTERRUPT; 3332 rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
3310 DRM_DEBUG("IH: D1 vline\n"); 3333 DRM_DEBUG("IH: D1 vline\n");
3311 } 3334 }
3312 break; 3335 break;
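The D1 vblank case now splits two independent consumers of one status bit: the DRM vblank event is delivered only if a client requested it, and a pending flip is completed through radeon_crtc_handle_flip(); the latched bit is cleared either way. A sketch of that dispatch split with stubbed handlers:

#include <stdbool.h>
#include <stdio.h>

static void deliver_vblank(int crtc) { printf("vblank event, crtc %d\n", crtc); }
static void complete_flip(int crtc)  { printf("flip done, crtc %d\n", crtc); }

static void on_vblank_irq(int crtc, bool vblank_wanted, bool flip_pending)
{
	if (vblank_wanted)	/* someone is waiting on vblank events */
		deliver_vblank(crtc);
	if (flip_pending)	/* a page flip was armed for this crtc */
		complete_flip(crtc);
	/* the driver then clears the latched LB_Dx_VBLANK_INTERRUPT bit */
}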
@@ -3318,17 +3341,21 @@ restart_ih:
3318 case 5: /* D2 vblank/vline */ 3341 case 5: /* D2 vblank/vline */
3319 switch (src_data) { 3342 switch (src_data) {
3320 case 0: /* D2 vblank */ 3343 case 0: /* D2 vblank */
3321 if (disp_int & LB_D2_VBLANK_INTERRUPT) { 3344 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
3322 drm_handle_vblank(rdev->ddev, 1); 3345 if (rdev->irq.crtc_vblank_int[1]) {
3323 rdev->pm.vblank_sync = true; 3346 drm_handle_vblank(rdev->ddev, 1);
3324 wake_up(&rdev->irq.vblank_queue); 3347 rdev->pm.vblank_sync = true;
3325 disp_int &= ~LB_D2_VBLANK_INTERRUPT; 3348 wake_up(&rdev->irq.vblank_queue);
3349 }
3350 if (rdev->irq.pflip[1])
3351 radeon_crtc_handle_flip(rdev, 1);
3352 rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
3326 DRM_DEBUG("IH: D2 vblank\n"); 3353 DRM_DEBUG("IH: D2 vblank\n");
3327 } 3354 }
3328 break; 3355 break;
3329 case 1: /* D2 vline */ 3356 case 1: /* D2 vline */
3330 if (disp_int & LB_D2_VLINE_INTERRUPT) { 3357 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
3331 disp_int &= ~LB_D2_VLINE_INTERRUPT; 3358 rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
3332 DRM_DEBUG("IH: D2 vline\n"); 3359 DRM_DEBUG("IH: D2 vline\n");
3333 } 3360 }
3334 break; 3361 break;
@@ -3340,43 +3367,43 @@ restart_ih:
3340 case 19: /* HPD/DAC hotplug */ 3367 case 19: /* HPD/DAC hotplug */
3341 switch (src_data) { 3368 switch (src_data) {
3342 case 0: 3369 case 0:
3343 if (disp_int & DC_HPD1_INTERRUPT) { 3370 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
3344 disp_int &= ~DC_HPD1_INTERRUPT; 3371 rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
3345 queue_hotplug = true; 3372 queue_hotplug = true;
3346 DRM_DEBUG("IH: HPD1\n"); 3373 DRM_DEBUG("IH: HPD1\n");
3347 } 3374 }
3348 break; 3375 break;
3349 case 1: 3376 case 1:
3350 if (disp_int & DC_HPD2_INTERRUPT) { 3377 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
3351 disp_int &= ~DC_HPD2_INTERRUPT; 3378 rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
3352 queue_hotplug = true; 3379 queue_hotplug = true;
3353 DRM_DEBUG("IH: HPD2\n"); 3380 DRM_DEBUG("IH: HPD2\n");
3354 } 3381 }
3355 break; 3382 break;
3356 case 4: 3383 case 4:
3357 if (disp_int_cont & DC_HPD3_INTERRUPT) { 3384 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
3358 disp_int_cont &= ~DC_HPD3_INTERRUPT; 3385 rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
3359 queue_hotplug = true; 3386 queue_hotplug = true;
3360 DRM_DEBUG("IH: HPD3\n"); 3387 DRM_DEBUG("IH: HPD3\n");
3361 } 3388 }
3362 break; 3389 break;
3363 case 5: 3390 case 5:
3364 if (disp_int_cont & DC_HPD4_INTERRUPT) { 3391 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
3365 disp_int_cont &= ~DC_HPD4_INTERRUPT; 3392 rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
3366 queue_hotplug = true; 3393 queue_hotplug = true;
3367 DRM_DEBUG("IH: HPD4\n"); 3394 DRM_DEBUG("IH: HPD4\n");
3368 } 3395 }
3369 break; 3396 break;
3370 case 10: 3397 case 10:
3371 if (disp_int_cont2 & DC_HPD5_INTERRUPT) { 3398 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
3372 disp_int_cont2 &= ~DC_HPD5_INTERRUPT; 3399 rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
3373 queue_hotplug = true; 3400 queue_hotplug = true;
3374 DRM_DEBUG("IH: HPD5\n"); 3401 DRM_DEBUG("IH: HPD5\n");
3375 } 3402 }
3376 break; 3403 break;
3377 case 12: 3404 case 12:
3378 if (disp_int_cont2 & DC_HPD6_INTERRUPT) { 3405 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
3379 disp_int_cont2 &= ~DC_HPD6_INTERRUPT; 3406 rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
3380 queue_hotplug = true; 3407 queue_hotplug = true;
3381 DRM_DEBUG("IH: HPD6\n"); 3408 DRM_DEBUG("IH: HPD6\n");
3382 } 3409 }
@@ -3419,7 +3446,7 @@ restart_ih:
3419 if (wptr != rdev->ih.wptr) 3446 if (wptr != rdev->ih.wptr)
3420 goto restart_ih; 3447 goto restart_ih;
3421 if (queue_hotplug) 3448 if (queue_hotplug)
3422 queue_work(rdev->wq, &rdev->hotplug_work); 3449 schedule_work(&rdev->hotplug_work);
3423 rdev->ih.rptr = rptr; 3450 rdev->ih.rptr = rptr;
3424 WREG32(IH_RB_RPTR, rdev->ih.rptr); 3451 WREG32(IH_RB_RPTR, rdev->ih.rptr);
3425 spin_unlock_irqrestore(&rdev->ih.lock, flags); 3452 spin_unlock_irqrestore(&rdev->ih.lock, flags);
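Two details in the tail above: the write pointer is re-sampled after the drain and processing restarts if new entries landed meanwhile, and the hotplug work is handed to the shared system workqueue via schedule_work() now that the driver-private rdev->wq goes away (see the radeon.h hunk below). A sketch of the re-check loop; the 16-byte entry size and the ring size are assumptions for illustration:

#include <stdint.h>

#define IH_RING_BYTES 1024u	/* illustrative power-of-two ring size */

static uint32_t get_wptr(void) { return 0; }	  /* r600_get_ih_wptr() stand-in */
static void handle(uint32_t rptr) { (void)rptr; } /* src_id switch stand-in */

static uint32_t drain_ih(uint32_t rptr)
{
	uint32_t wptr = get_wptr();

restart:
	while (rptr != wptr) {
		handle(rptr);
		rptr = (rptr + 16) & (IH_RING_BYTES - 1); /* assumed 16-byte entries */
	}
	wptr = get_wptr();
	if (rptr != wptr)	/* entries arrived while we were draining */
		goto restart;
	return rptr;		/* caller publishes it, as WREG32(IH_RB_RPTR, ...) does */
}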
@@ -3508,3 +3535,219 @@ void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
3508 } else 3535 } else
3509 WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); 3536 WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
3510} 3537}
3538
3539void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
3540{
3541 u32 link_width_cntl, mask, target_reg;
3542
3543 if (rdev->flags & RADEON_IS_IGP)
3544 return;
3545
3546 if (!(rdev->flags & RADEON_IS_PCIE))
3547 return;
3548
3549 /* x2 cards have a special sequence */
3550 if (ASIC_IS_X2(rdev))
3551 return;
3552
3553 /* FIXME wait for idle */
3554
3555 switch (lanes) {
3556 case 0:
3557 mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
3558 break;
3559 case 1:
3560 mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
3561 break;
3562 case 2:
3563 mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
3564 break;
3565 case 4:
3566 mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
3567 break;
3568 case 8:
3569 mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
3570 break;
3571 case 12:
3572 mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
3573 break;
3574 case 16:
3575 default:
3576 mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
3577 break;
3578 }
3579
3580 link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
3581
3582 if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
3583 (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
3584 return;
3585
3586 if (link_width_cntl & R600_PCIE_LC_UPCONFIGURE_DIS)
3587 return;
3588
3589 link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
3590 RADEON_PCIE_LC_RECONFIG_NOW |
3591 R600_PCIE_LC_RENEGOTIATE_EN |
3592 R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);
3593 link_width_cntl |= mask;
3594
3595 WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3596
3597 /* some northbridges can renegotiate the link rather than requiring
3598 * a complete re-config.
3599 * e.g., AMD 780/790 northbridges (pci ids: 0x5956, 0x5957, 0x5958, etc.)
3600 */
3601 if (link_width_cntl & R600_PCIE_LC_RENEGOTIATION_SUPPORT)
3602 link_width_cntl |= R600_PCIE_LC_RENEGOTIATE_EN | R600_PCIE_LC_UPCONFIGURE_SUPPORT;
3603 else
3604 link_width_cntl |= R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE;
3605
3606 WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
3607 RADEON_PCIE_LC_RECONFIG_NOW));
3608
3609 if (rdev->family >= CHIP_RV770)
3610 target_reg = R700_TARGET_AND_CURRENT_PROFILE_INDEX;
3611 else
3612 target_reg = R600_TARGET_AND_CURRENT_PROFILE_INDEX;
3613
3614 /* wait for lane set to complete */
3615 link_width_cntl = RREG32(target_reg);
3616 while (link_width_cntl == 0xffffffff)
3617 link_width_cntl = RREG32(target_reg);
3618
3619}
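r600_set_pcie_lanes() maps the requested lane count to the hardware width encoding, returns early if the link is already that wide or upconfigure is disabled, then triggers a renegotiation and polls until the change takes. A sketch of just the count-to-encoding step, using the LC_LINK_WIDTH_X* values from the r600d.h hunk further down; that block defines no x12 encoding, so this sketch folds 12 into the default, unlike the function above which uses the legacy RADEON_PCIE_LC_LINK_WIDTH_X12:

#include <stdint.h>

/* encodings per the PCIE_LC_LINK_WIDTH_CNTL defines in this patch */
enum { LC_X0 = 0, LC_X1 = 1, LC_X2 = 2, LC_X4 = 3, LC_X8 = 4, LC_X16 = 6 };

static uint32_t lanes_to_encoding(int lanes)
{
	switch (lanes) {
	case 0:  return LC_X0;
	case 1:  return LC_X1;
	case 2:  return LC_X2;
	case 4:  return LC_X4;
	case 8:  return LC_X8;
	case 16:
	default: return LC_X16;	/* widest wins for anything else */
	}
}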
3620
3621int r600_get_pcie_lanes(struct radeon_device *rdev)
3622{
3623 u32 link_width_cntl;
3624
3625 if (rdev->flags & RADEON_IS_IGP)
3626 return 0;
3627
3628 if (!(rdev->flags & RADEON_IS_PCIE))
3629 return 0;
3630
3631 /* x2 cards have a special sequence */
3632 if (ASIC_IS_X2(rdev))
3633 return 0;
3634
3635 /* FIXME wait for idle */
3636
3637 link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
3638
3639 switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
3640 case RADEON_PCIE_LC_LINK_WIDTH_X0:
3641 return 0;
3642 case RADEON_PCIE_LC_LINK_WIDTH_X1:
3643 return 1;
3644 case RADEON_PCIE_LC_LINK_WIDTH_X2:
3645 return 2;
3646 case RADEON_PCIE_LC_LINK_WIDTH_X4:
3647 return 4;
3648 case RADEON_PCIE_LC_LINK_WIDTH_X8:
3649 return 8;
3650 case RADEON_PCIE_LC_LINK_WIDTH_X16:
3651 default:
3652 return 16;
3653 }
3654}
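r600_get_pcie_lanes() is the inverse: it masks the read-back width field out of LINK_WIDTH_CNTL and decodes it to a lane count. A sketch that round-trips with the encoder above, using the RD mask/shift values from the r600d.h hunk below:

#include <stdint.h>

#define LC_LINK_WIDTH_RD_SHIFT 4	/* from this patch */
#define LC_LINK_WIDTH_RD_MASK  0x70

static int encoding_to_lanes(uint32_t link_width_cntl)
{
	switch ((link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT) {
	case 0: return 0;
	case 1: return 1;
	case 2: return 2;
	case 3: return 4;
	case 4: return 8;
	case 6:
	default: return 16;
	}
}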
3655
3656static void r600_pcie_gen2_enable(struct radeon_device *rdev)
3657{
3658 u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
3659 u16 link_cntl2;
3660
3661 if (rdev->flags & RADEON_IS_IGP)
3662 return;
3663
3664 if (!(rdev->flags & RADEON_IS_PCIE))
3665 return;
3666
3667 /* x2 cards have a special sequence */
3668 if (ASIC_IS_X2(rdev))
3669 return;
3670
3671 /* only RV6xx+ chips are supported */
3672 if (rdev->family <= CHIP_R600)
3673 return;
3674
3675 /* 55 nm r6xx asics */
3676 if ((rdev->family == CHIP_RV670) ||
3677 (rdev->family == CHIP_RV620) ||
3678 (rdev->family == CHIP_RV635)) {
3679 /* advertise upconfig capability */
3680 link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
3681 link_width_cntl &= ~LC_UPCONFIGURE_DIS;
3682 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3683 link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
3684 if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
3685 lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
3686 link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
3687 LC_RECONFIG_ARC_MISSING_ESCAPE);
3688 link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
3689 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3690 } else {
3691 link_width_cntl |= LC_UPCONFIGURE_DIS;
3692 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3693 }
3694 }
3695
3696 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3697 if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
3698 (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
3699
3700 /* 55 nm r6xx asics */
3701 if ((rdev->family == CHIP_RV670) ||
3702 (rdev->family == CHIP_RV620) ||
3703 (rdev->family == CHIP_RV635)) {
3704 WREG32(MM_CFGREGS_CNTL, 0x8);
3705 link_cntl2 = RREG32(0x4088);
3706 WREG32(MM_CFGREGS_CNTL, 0);
3707 /* not supported yet */
3708 if (link_cntl2 & SELECTABLE_DEEMPHASIS)
3709 return;
3710 }
3711
3712 speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK;
3713 speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT);
3714 speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
3715 speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
3716 speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
3717 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
3718
3719 tmp = RREG32(0x541c);
3720 WREG32(0x541c, tmp | 0x8);
3721 WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
3722 link_cntl2 = RREG16(0x4088);
3723 link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
3724 link_cntl2 |= 0x2;
3725 WREG16(0x4088, link_cntl2);
3726 WREG32(MM_CFGREGS_CNTL, 0);
3727
3728 if ((rdev->family == CHIP_RV670) ||
3729 (rdev->family == CHIP_RV620) ||
3730 (rdev->family == CHIP_RV635)) {
3731 training_cntl = RREG32_PCIE_P(PCIE_LC_TRAINING_CNTL);
3732 training_cntl &= ~LC_POINT_7_PLUS_EN;
3733 WREG32_PCIE_P(PCIE_LC_TRAINING_CNTL, training_cntl);
3734 } else {
3735 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3736 speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
3737 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
3738 }
3739
3740 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
3741 speed_cntl |= LC_GEN2_EN_STRAP;
3742 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
3743
3744 } else {
3745 link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
3746 /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
3747 if (1)
3748 link_width_cntl |= LC_UPCONFIGURE_DIS;
3749 else
3750 link_width_cntl &= ~LC_UPCONFIGURE_DIS;
3751 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
3752 }
3753}
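Nearly every statement in r600_pcie_gen2_enable() is a read-modify-write on a PCIE_P register: read, clear some bits, set others, write back. A small helper expressing the idiom, applied to the speed_cntl update above; the register accessors are stubs and the two flag values come from the r600d.h hunk below:

#include <stdint.h>

#define LC_FORCE_EN_HW_SPEED_CHANGE  (1u << 5)	/* from this patch */
#define LC_FORCE_DIS_HW_SPEED_CHANGE (1u << 6)	/* from this patch */

static uint32_t reg_read(void)        { return 0; }	/* RREG32_PCIE_P stub */
static void     reg_write(uint32_t v) { (void)v; }	/* WREG32_PCIE_P stub */

static uint32_t rmw(uint32_t val, uint32_t clear, uint32_t set)
{
	return (val & ~clear) | set;	/* classic read-modify-write */
}

static void force_hw_speed_change(void)
{
	reg_write(rmw(reg_read(),
		      LC_FORCE_DIS_HW_SPEED_CHANGE,	/* clear the disable bit */
		      LC_FORCE_EN_HW_SPEED_CHANGE));	/* set the enable bit */
}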
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index bff4dc4f410f..a5d898b4bad2 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -728,6 +728,54 @@
728/* DCE 3.2 */ 728/* DCE 3.2 */
729# define DC_HPDx_EN (1 << 28) 729# define DC_HPDx_EN (1 << 28)
730 730
731#define D1GRPH_INTERRUPT_STATUS 0x6158
732#define D2GRPH_INTERRUPT_STATUS 0x6958
733# define DxGRPH_PFLIP_INT_OCCURRED (1 << 0)
734# define DxGRPH_PFLIP_INT_CLEAR (1 << 8)
735#define D1GRPH_INTERRUPT_CONTROL 0x615c
736#define D2GRPH_INTERRUPT_CONTROL 0x695c
737# define DxGRPH_PFLIP_INT_MASK (1 << 0)
738# define DxGRPH_PFLIP_INT_TYPE (1 << 8)
739
740/* PCIE link stuff */
741#define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */
742# define LC_POINT_7_PLUS_EN (1 << 6)
743#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
744# define LC_LINK_WIDTH_SHIFT 0
745# define LC_LINK_WIDTH_MASK 0x7
746# define LC_LINK_WIDTH_X0 0
747# define LC_LINK_WIDTH_X1 1
748# define LC_LINK_WIDTH_X2 2
749# define LC_LINK_WIDTH_X4 3
750# define LC_LINK_WIDTH_X8 4
751# define LC_LINK_WIDTH_X16 6
752# define LC_LINK_WIDTH_RD_SHIFT 4
753# define LC_LINK_WIDTH_RD_MASK 0x70
754# define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7)
755# define LC_RECONFIG_NOW (1 << 8)
756# define LC_RENEGOTIATION_SUPPORT (1 << 9)
757# define LC_RENEGOTIATE_EN (1 << 10)
758# define LC_SHORT_RECONFIG_EN (1 << 11)
759# define LC_UPCONFIGURE_SUPPORT (1 << 12)
760# define LC_UPCONFIGURE_DIS (1 << 13)
761#define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */
762# define LC_GEN2_EN_STRAP (1 << 0)
763# define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 1)
764# define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 5)
765# define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 6)
766# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 8)
767# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 3
768# define LC_CURRENT_DATA_RATE (1 << 11)
769# define LC_VOLTAGE_TIMER_SEL_MASK (0xf << 14)
770# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 21)
771# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 23)
772# define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 24)
773#define MM_CFGREGS_CNTL 0x544c
774# define MM_WR_TO_CFG_EN (1 << 3)
775#define LINK_CNTL2 0x88 /* F0 */
776# define TARGET_LINK_SPEED_MASK (0xf << 0)
777# define SELECTABLE_DEEMPHASIS (1 << 6)
778
731/* 779/*
732 * PM4 780 * PM4
733 */ 781 */
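The new definitions keep the file's convention: register offsets flush left, bit and field values indented under them with '# define'; single bits are (1 << n), multi-bit fields carry a _MASK and a matching _SHIFT. Generic helpers for that mask/shift convention, as a sketch:

#include <stdint.h>

static inline uint32_t field_get(uint32_t reg, uint32_t mask, unsigned shift)
{
	return (reg & mask) >> shift;
}

static inline uint32_t field_set(uint32_t reg, uint32_t mask, unsigned shift,
				 uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}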
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 3a7095743d44..e9486630a467 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -69,6 +69,7 @@
69#include <ttm/ttm_bo_driver.h> 69#include <ttm/ttm_bo_driver.h>
70#include <ttm/ttm_placement.h> 70#include <ttm/ttm_placement.h>
71#include <ttm/ttm_module.h> 71#include <ttm/ttm_module.h>
72#include <ttm/ttm_execbuf_util.h>
72 73
73#include "radeon_family.h" 74#include "radeon_family.h"
74#include "radeon_mode.h" 75#include "radeon_mode.h"
@@ -180,6 +181,7 @@ void rs690_pm_info(struct radeon_device *rdev);
180extern u32 rv6xx_get_temp(struct radeon_device *rdev); 181extern u32 rv6xx_get_temp(struct radeon_device *rdev);
181extern u32 rv770_get_temp(struct radeon_device *rdev); 182extern u32 rv770_get_temp(struct radeon_device *rdev);
182extern u32 evergreen_get_temp(struct radeon_device *rdev); 183extern u32 evergreen_get_temp(struct radeon_device *rdev);
184extern u32 sumo_get_temp(struct radeon_device *rdev);
183 185
184/* 186/*
185 * Fences. 187 * Fences.
@@ -259,13 +261,12 @@ struct radeon_bo {
259}; 261};
260 262
261struct radeon_bo_list { 263struct radeon_bo_list {
262 struct list_head list; 264 struct ttm_validate_buffer tv;
263 struct radeon_bo *bo; 265 struct radeon_bo *bo;
264 uint64_t gpu_offset; 266 uint64_t gpu_offset;
265 unsigned rdomain; 267 unsigned rdomain;
266 unsigned wdomain; 268 unsigned wdomain;
267 u32 tiling_flags; 269 u32 tiling_flags;
268 bool reserved;
269}; 270};
270 271
271/* 272/*
@@ -377,11 +378,56 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg);
377/* 378/*
378 * IRQS. 379 * IRQS.
379 */ 380 */
381
382struct radeon_unpin_work {
383 struct work_struct work;
384 struct radeon_device *rdev;
385 int crtc_id;
386 struct radeon_fence *fence;
387 struct drm_pending_vblank_event *event;
388 struct radeon_bo *old_rbo;
389 u64 new_crtc_base;
390};
391
392struct r500_irq_stat_regs {
393 u32 disp_int;
394};
395
396struct r600_irq_stat_regs {
397 u32 disp_int;
398 u32 disp_int_cont;
399 u32 disp_int_cont2;
400 u32 d1grph_int;
401 u32 d2grph_int;
402};
403
404struct evergreen_irq_stat_regs {
405 u32 disp_int;
406 u32 disp_int_cont;
407 u32 disp_int_cont2;
408 u32 disp_int_cont3;
409 u32 disp_int_cont4;
410 u32 disp_int_cont5;
411 u32 d1grph_int;
412 u32 d2grph_int;
413 u32 d3grph_int;
414 u32 d4grph_int;
415 u32 d5grph_int;
416 u32 d6grph_int;
417};
418
419union radeon_irq_stat_regs {
420 struct r500_irq_stat_regs r500;
421 struct r600_irq_stat_regs r600;
422 struct evergreen_irq_stat_regs evergreen;
423};
424
380struct radeon_irq { 425struct radeon_irq {
381 bool installed; 426 bool installed;
382 bool sw_int; 427 bool sw_int;
383 /* FIXME: use a define max crtc rather than hardcode it */ 428 /* FIXME: use a define max crtc rather than hardcode it */
384 bool crtc_vblank_int[6]; 429 bool crtc_vblank_int[6];
430 bool pflip[6];
385 wait_queue_head_t vblank_queue; 431 wait_queue_head_t vblank_queue;
386 /* FIXME: use defines for max hpd/dacs */ 432 /* FIXME: use defines for max hpd/dacs */
387 bool hpd[6]; 433 bool hpd[6];
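union radeon_irq_stat_regs above gives struct radeon_irq one field that can hold the status snapshot for whichever display block the ASIC has (r500, r600 or evergreen layout); the union is sized by its largest member and each IRQ path touches only its own variant. A self-contained sketch of the idea with simplified names:

#include <stdint.h>
#include <stdio.h>

struct r500_stat { uint32_t disp_int; };
struct r600_stat { uint32_t disp_int, disp_int_cont, disp_int_cont2; };

union irq_stat {
	struct r500_stat r500;
	struct r600_stat r600;	/* largest member sets the union size */
};

int main(void)
{
	union irq_stat s = { .r600 = { .disp_int = 0x1 } };
	/* each handler reads/clears only the variant for its ASIC */
	printf("sizeof=%zu disp_int=%u\n", sizeof(s), (unsigned)s.r600.disp_int);
	return 0;
}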
@@ -392,12 +438,17 @@ struct radeon_irq {
392 bool hdmi[2]; 438 bool hdmi[2];
393 spinlock_t sw_lock; 439 spinlock_t sw_lock;
394 int sw_refcount; 440 int sw_refcount;
441 union radeon_irq_stat_regs stat_regs;
442 spinlock_t pflip_lock[6];
443 int pflip_refcount[6];
395}; 444};
396 445
397int radeon_irq_kms_init(struct radeon_device *rdev); 446int radeon_irq_kms_init(struct radeon_device *rdev);
398void radeon_irq_kms_fini(struct radeon_device *rdev); 447void radeon_irq_kms_fini(struct radeon_device *rdev);
399void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev); 448void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev);
400void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev); 449void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
450void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc);
451void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
401 452
402/* 453/*
403 * CP & ring. 454 * CP & ring.
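radeon_irq_kms_pflip_irq_get()/put() declared here pair with the new pflip_lock[6]/pflip_refcount[6] fields: the first getter on a crtc unmasks its flip interrupt, the last putter masks it again, mirroring the existing sw_irq get/put. A hedged sketch of that refcount pattern, with a pthread mutex standing in for the kernel spinlock:

#include <pthread.h>
#include <stdio.h>

struct crtc_irq {
	pthread_mutex_t lock;	/* init with PTHREAD_MUTEX_INITIALIZER */
	int refcount;
};

static void flip_irq_set(int crtc, int on)	/* irq_set() stand-in */
{
	printf("crtc %d flip irq %s\n", crtc, on ? "on" : "off");
}

static void flip_irq_get(struct crtc_irq *c, int crtc)
{
	pthread_mutex_lock(&c->lock);
	if (c->refcount++ == 0)		/* first user arms the interrupt */
		flip_irq_set(crtc, 1);
	pthread_mutex_unlock(&c->lock);
}

static void flip_irq_put(struct crtc_irq *c, int crtc)
{
	pthread_mutex_lock(&c->lock);
	if (--c->refcount == 0)		/* last user disarms it */
		flip_irq_set(crtc, 0);
	pthread_mutex_unlock(&c->lock);
}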
@@ -687,6 +738,8 @@ enum radeon_int_thermal_type {
687 THERMAL_TYPE_RV6XX, 738 THERMAL_TYPE_RV6XX,
688 THERMAL_TYPE_RV770, 739 THERMAL_TYPE_RV770,
689 THERMAL_TYPE_EVERGREEN, 740 THERMAL_TYPE_EVERGREEN,
741 THERMAL_TYPE_SUMO,
742 THERMAL_TYPE_NI,
690}; 743};
691 744
692struct radeon_voltage { 745struct radeon_voltage {
@@ -770,6 +823,9 @@ struct radeon_pm {
770 u32 current_sclk; 823 u32 current_sclk;
771 u32 current_mclk; 824 u32 current_mclk;
772 u32 current_vddc; 825 u32 current_vddc;
826 u32 default_sclk;
827 u32 default_mclk;
828 u32 default_vddc;
773 struct radeon_i2c_chan *i2c_bus; 829 struct radeon_i2c_chan *i2c_bus;
774 /* selected pm method */ 830 /* selected pm method */
775 enum radeon_pm_method pm_method; 831 enum radeon_pm_method pm_method;
@@ -881,6 +937,10 @@ struct radeon_asic {
881 void (*pm_finish)(struct radeon_device *rdev); 937 void (*pm_finish)(struct radeon_device *rdev);
882 void (*pm_init_profile)(struct radeon_device *rdev); 938 void (*pm_init_profile)(struct radeon_device *rdev);
883 void (*pm_get_dynpm_state)(struct radeon_device *rdev); 939 void (*pm_get_dynpm_state)(struct radeon_device *rdev);
940 /* pageflipping */
941 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
942 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
943 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
884}; 944};
885 945
886/* 946/*
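The three new members extend the per-ASIC vtable, so page flips dispatch through function pointers just like the pm and hpd hooks; the wrapper macros appear later in this patch. A minimal sketch of the dispatch pattern (names simplified, comments are assumptions about what the hooks do):

#include <stdint.h>

struct dev;

struct asic_ops {
	void     (*pre_page_flip)(struct dev *d, int crtc);
	uint32_t (*page_flip)(struct dev *d, int crtc, uint64_t base);
	void     (*post_page_flip)(struct dev *d, int crtc);
};

struct dev { const struct asic_ops *ops; };

static uint32_t do_flip(struct dev *d, int crtc, uint64_t base)
{
	uint32_t ret;

	d->ops->pre_page_flip(d, crtc);		/* e.g. unmask the flip irq */
	ret = d->ops->page_flip(d, crtc, base);	/* program the new scanout base */
	d->ops->post_page_flip(d, crtc);	/* undo the pre step */
	return ret;
}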
@@ -975,6 +1035,7 @@ struct evergreen_asic {
975 unsigned tiling_npipes; 1035 unsigned tiling_npipes;
976 unsigned tiling_group_size; 1036 unsigned tiling_group_size;
977 unsigned tile_config; 1037 unsigned tile_config;
1038 struct r100_gpu_lockup lockup;
978}; 1039};
979 1040
980union radeon_asic_config { 1041union radeon_asic_config {
@@ -1091,11 +1152,11 @@ struct radeon_device {
1091 const struct firmware *me_fw; /* all family ME firmware */ 1152 const struct firmware *me_fw; /* all family ME firmware */
1092 const struct firmware *pfp_fw; /* r6/700 PFP firmware */ 1153 const struct firmware *pfp_fw; /* r6/700 PFP firmware */
1093 const struct firmware *rlc_fw; /* r6/700 RLC firmware */ 1154 const struct firmware *rlc_fw; /* r6/700 RLC firmware */
1155 const struct firmware *mc_fw; /* NI MC firmware */
1094 struct r600_blit r600_blit; 1156 struct r600_blit r600_blit;
1095 struct r700_vram_scratch vram_scratch; 1157 struct r700_vram_scratch vram_scratch;
1096 int msi_enabled; /* msi enabled */ 1158 int msi_enabled; /* msi enabled */
1097 struct r600_ih ih; /* r6/700 interrupt ring */ 1159 struct r600_ih ih; /* r6/700 interrupt ring */
1098 struct workqueue_struct *wq;
1099 struct work_struct hotplug_work; 1160 struct work_struct hotplug_work;
1100 int num_crtc; /* number of crtcs */ 1161 int num_crtc; /* number of crtcs */
1101 struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */ 1162 struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
@@ -1110,10 +1171,10 @@ struct radeon_device {
1110 uint8_t audio_status_bits; 1171 uint8_t audio_status_bits;
1111 uint8_t audio_category_code; 1172 uint8_t audio_category_code;
1112 1173
1113 bool powered_down;
1114 struct notifier_block acpi_nb; 1174 struct notifier_block acpi_nb;
1115 /* only one userspace can use Hyperz features at a time */ 1175 /* only one userspace can use Hyperz features or CMASK at a time */
1116 struct drm_file *hyperz_filp; 1176 struct drm_file *hyperz_filp;
1177 struct drm_file *cmask_filp;
1117 /* i2c buses */ 1178 /* i2c buses */
1118 struct radeon_i2c_chan *i2c_bus[RADEON_MAX_I2C_BUS]; 1179 struct radeon_i2c_chan *i2c_bus[RADEON_MAX_I2C_BUS];
1119}; 1180};
@@ -1188,6 +1249,8 @@ static inline void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v)
1188 */ 1249 */
1189#define RREG8(reg) readb(((void __iomem *)rdev->rmmio) + (reg)) 1250#define RREG8(reg) readb(((void __iomem *)rdev->rmmio) + (reg))
1190#define WREG8(reg, v) writeb(v, ((void __iomem *)rdev->rmmio) + (reg)) 1251#define WREG8(reg, v) writeb(v, ((void __iomem *)rdev->rmmio) + (reg))
1252#define RREG16(reg) readw(((void __iomem *)rdev->rmmio) + (reg))
1253#define WREG16(reg, v) writew(v, ((void __iomem *)rdev->rmmio) + (reg))
1191#define RREG32(reg) r100_mm_rreg(rdev, (reg)) 1254#define RREG32(reg) r100_mm_rreg(rdev, (reg))
1192#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg))) 1255#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg)))
1193#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v)) 1256#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v))
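RREG16/WREG16 follow the existing RREG8/WREG8 shape, wrapping readw()/writew() on the mapped MMIO aperture; r600_pcie_gen2_enable() uses them for the 16-bit LINK_CNTL2 access. A userspace-flavoured sketch of the same idea (the kernel's readw/writew add byte-order and ordering guarantees this lacks):

#include <stdint.h>

static inline uint16_t mmio_read16(volatile void *base, uint32_t off)
{
	return *(volatile uint16_t *)((volatile uint8_t *)base + off);
}

static inline void mmio_write16(volatile void *base, uint32_t off, uint16_t v)
{
	*(volatile uint16_t *)((volatile uint8_t *)base + off) = v;
}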
@@ -1261,6 +1324,14 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
1261 (rdev->family == CHIP_RV410) || \ 1324 (rdev->family == CHIP_RV410) || \
1262 (rdev->family == CHIP_RS400) || \ 1325 (rdev->family == CHIP_RS400) || \
1263 (rdev->family == CHIP_RS480)) 1326 (rdev->family == CHIP_RS480))
1327#define ASIC_IS_X2(rdev) ((rdev->ddev->pdev->device == 0x9441) || \
1328 (rdev->ddev->pdev->device == 0x9443) || \
1329 (rdev->ddev->pdev->device == 0x944B) || \
1330 (rdev->ddev->pdev->device == 0x9506) || \
1331 (rdev->ddev->pdev->device == 0x9509) || \
1332 (rdev->ddev->pdev->device == 0x950F) || \
1333 (rdev->ddev->pdev->device == 0x689C) || \
1334 (rdev->ddev->pdev->device == 0x689D))
1264#define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600)) 1335#define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600))
1265#define ASIC_IS_DCE2(rdev) ((rdev->family == CHIP_RS600) || \ 1336#define ASIC_IS_DCE2(rdev) ((rdev->family == CHIP_RS600) || \
1266 (rdev->family == CHIP_RS690) || \ 1337 (rdev->family == CHIP_RS690) || \
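ASIC_IS_X2() above open-codes a list of dual-GPU PCI device ids in a macro, matching the surrounding ASIC_IS_* style. An equivalent table lookup, as a sketch, using the same ids; a table keeps the list greppable and lets new ids land as one-line additions:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool is_x2(uint16_t dev)
{
	static const uint16_t x2_ids[] = {
		0x9441, 0x9443, 0x944B, 0x9506,
		0x9509, 0x950F, 0x689C, 0x689D,
	};
	for (size_t i = 0; i < sizeof(x2_ids) / sizeof(x2_ids[0]); i++)
		if (x2_ids[i] == dev)
			return true;
	return false;
}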
@@ -1269,6 +1340,9 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
1269#define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620)) 1340#define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620))
1270#define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730)) 1341#define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730))
1271#define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR)) 1342#define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR))
1343#define ASIC_IS_DCE41(rdev) ((rdev->family >= CHIP_PALM) && \
1344 (rdev->flags & RADEON_IS_IGP))
1345#define ASIC_IS_DCE5(rdev) ((rdev->family >= CHIP_BARTS))
1272 1346
1273/* 1347/*
1274 * BIOS helpers. 1348 * BIOS helpers.
@@ -1344,6 +1418,9 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
1344#define radeon_pm_finish(rdev) (rdev)->asic->pm_finish((rdev)) 1418#define radeon_pm_finish(rdev) (rdev)->asic->pm_finish((rdev))
1345#define radeon_pm_init_profile(rdev) (rdev)->asic->pm_init_profile((rdev)) 1419#define radeon_pm_init_profile(rdev) (rdev)->asic->pm_init_profile((rdev))
1346#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm_get_dynpm_state((rdev)) 1420#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm_get_dynpm_state((rdev))
1421#define radeon_pre_page_flip(rdev, crtc) rdev->asic->pre_page_flip((rdev), (crtc))
1422#define radeon_page_flip(rdev, crtc, base) rdev->asic->page_flip((rdev), (crtc), (base))
1423#define radeon_post_page_flip(rdev, crtc) rdev->asic->post_page_flip((rdev), (crtc))
1347 1424
1348/* Common functions */ 1425/* Common functions */
1349/* AGP */ 1426/* AGP */
@@ -1372,67 +1449,7 @@ extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc
1372extern int radeon_resume_kms(struct drm_device *dev); 1449extern int radeon_resume_kms(struct drm_device *dev);
1373extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state); 1450extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
1374 1451
1375/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
1376extern void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp);
1377extern bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp);
1378
1379/* rv200,rv250,rv280 */
1380extern void r200_set_safe_registers(struct radeon_device *rdev);
1381
1382/* r300,r350,rv350,rv370,rv380 */
1383extern void r300_set_reg_safe(struct radeon_device *rdev);
1384extern void r300_mc_program(struct radeon_device *rdev);
1385extern void r300_mc_init(struct radeon_device *rdev);
1386extern void r300_clock_startup(struct radeon_device *rdev);
1387extern int r300_mc_wait_for_idle(struct radeon_device *rdev);
1388extern int rv370_pcie_gart_init(struct radeon_device *rdev);
1389extern void rv370_pcie_gart_fini(struct radeon_device *rdev);
1390extern int rv370_pcie_gart_enable(struct radeon_device *rdev);
1391extern void rv370_pcie_gart_disable(struct radeon_device *rdev);
1392
1393/* r420,r423,rv410 */
1394extern u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg);
1395extern void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v);
1396extern int r420_debugfs_pipes_info_init(struct radeon_device *rdev);
1397extern void r420_pipes_init(struct radeon_device *rdev);
1398
1399/* rv515 */
1400struct rv515_mc_save {
1401 u32 d1vga_control;
1402 u32 d2vga_control;
1403 u32 vga_render_control;
1404 u32 vga_hdp_control;
1405 u32 d1crtc_control;
1406 u32 d2crtc_control;
1407};
1408extern void rv515_bandwidth_avivo_update(struct radeon_device *rdev);
1409extern void rv515_vga_render_disable(struct radeon_device *rdev);
1410extern void rv515_set_safe_registers(struct radeon_device *rdev);
1411extern void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save);
1412extern void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save);
1413extern void rv515_clock_startup(struct radeon_device *rdev);
1414extern void rv515_debugfs(struct radeon_device *rdev);
1415extern int rv515_suspend(struct radeon_device *rdev);
1416
1417/* rs400 */
1418extern int rs400_gart_init(struct radeon_device *rdev);
1419extern int rs400_gart_enable(struct radeon_device *rdev);
1420extern void rs400_gart_adjust_size(struct radeon_device *rdev);
1421extern void rs400_gart_disable(struct radeon_device *rdev);
1422extern void rs400_gart_fini(struct radeon_device *rdev);
1423
1424/* rs600 */
1425extern void rs600_set_safe_registers(struct radeon_device *rdev);
1426extern int rs600_irq_set(struct radeon_device *rdev);
1427extern void rs600_irq_disable(struct radeon_device *rdev);
1428
1429/* rs690, rs740 */
1430extern void rs690_line_buffer_adjust(struct radeon_device *rdev,
1431 struct drm_display_mode *mode1,
1432 struct drm_display_mode *mode2);
1433
1434/* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */ 1452/* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */
1435extern void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
1436extern bool r600_card_posted(struct radeon_device *rdev); 1453extern bool r600_card_posted(struct radeon_device *rdev);
1437extern void r600_cp_stop(struct radeon_device *rdev); 1454extern void r600_cp_stop(struct radeon_device *rdev);
1438extern int r600_cp_start(struct radeon_device *rdev); 1455extern int r600_cp_start(struct radeon_device *rdev);
@@ -1478,6 +1495,7 @@ extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mo
1478extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder); 1495extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
1479extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder); 1496extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
1480 1497
1498extern void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
1481extern void r700_cp_stop(struct radeon_device *rdev); 1499extern void r700_cp_stop(struct radeon_device *rdev);
1482extern void r700_cp_fini(struct radeon_device *rdev); 1500extern void r700_cp_fini(struct radeon_device *rdev);
1483extern void evergreen_disable_interrupt_state(struct radeon_device *rdev); 1501extern void evergreen_disable_interrupt_state(struct radeon_device *rdev);
@@ -1485,6 +1503,9 @@ extern int evergreen_irq_set(struct radeon_device *rdev);
1485extern int evergreen_blit_init(struct radeon_device *rdev); 1503extern int evergreen_blit_init(struct radeon_device *rdev);
1486extern void evergreen_blit_fini(struct radeon_device *rdev); 1504extern void evergreen_blit_fini(struct radeon_device *rdev);
1487 1505
1506extern int ni_init_microcode(struct radeon_device *rdev);
1507extern int btc_mc_load_microcode(struct radeon_device *rdev);
1508
1488/* radeon_acpi.c */ 1509/* radeon_acpi.c */
1489#if defined(CONFIG_ACPI) 1510#if defined(CONFIG_ACPI)
1490extern int radeon_acpi_init(struct radeon_device *rdev); 1511extern int radeon_acpi_init(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 64fb89ecbf74..3a1b16186224 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -94,7 +94,7 @@ static void radeon_register_accessor_init(struct radeon_device *rdev)
94 rdev->mc_rreg = &rs600_mc_rreg; 94 rdev->mc_rreg = &rs600_mc_rreg;
95 rdev->mc_wreg = &rs600_mc_wreg; 95 rdev->mc_wreg = &rs600_mc_wreg;
96 } 96 }
97 if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_RV740)) { 97 if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_HEMLOCK)) {
98 rdev->pciep_rreg = &r600_pciep_rreg; 98 rdev->pciep_rreg = &r600_pciep_rreg;
99 rdev->pciep_wreg = &r600_pciep_wreg; 99 rdev->pciep_wreg = &r600_pciep_wreg;
100 } 100 }
@@ -171,6 +171,9 @@ static struct radeon_asic r100_asic = {
171 .pm_finish = &r100_pm_finish, 171 .pm_finish = &r100_pm_finish,
172 .pm_init_profile = &r100_pm_init_profile, 172 .pm_init_profile = &r100_pm_init_profile,
173 .pm_get_dynpm_state = &r100_pm_get_dynpm_state, 173 .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
174 .pre_page_flip = &r100_pre_page_flip,
175 .page_flip = &r100_page_flip,
176 .post_page_flip = &r100_post_page_flip,
174}; 177};
175 178
176static struct radeon_asic r200_asic = { 179static struct radeon_asic r200_asic = {
@@ -215,6 +218,9 @@ static struct radeon_asic r200_asic = {
215 .pm_finish = &r100_pm_finish, 218 .pm_finish = &r100_pm_finish,
216 .pm_init_profile = &r100_pm_init_profile, 219 .pm_init_profile = &r100_pm_init_profile,
217 .pm_get_dynpm_state = &r100_pm_get_dynpm_state, 220 .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
221 .pre_page_flip = &r100_pre_page_flip,
222 .page_flip = &r100_page_flip,
223 .post_page_flip = &r100_post_page_flip,
218}; 224};
219 225
220static struct radeon_asic r300_asic = { 226static struct radeon_asic r300_asic = {
@@ -260,6 +266,9 @@ static struct radeon_asic r300_asic = {
260 .pm_finish = &r100_pm_finish, 266 .pm_finish = &r100_pm_finish,
261 .pm_init_profile = &r100_pm_init_profile, 267 .pm_init_profile = &r100_pm_init_profile,
262 .pm_get_dynpm_state = &r100_pm_get_dynpm_state, 268 .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
269 .pre_page_flip = &r100_pre_page_flip,
270 .page_flip = &r100_page_flip,
271 .post_page_flip = &r100_post_page_flip,
263}; 272};
264 273
265static struct radeon_asic r300_asic_pcie = { 274static struct radeon_asic r300_asic_pcie = {
@@ -304,6 +313,9 @@ static struct radeon_asic r300_asic_pcie = {
304 .pm_finish = &r100_pm_finish, 313 .pm_finish = &r100_pm_finish,
305 .pm_init_profile = &r100_pm_init_profile, 314 .pm_init_profile = &r100_pm_init_profile,
306 .pm_get_dynpm_state = &r100_pm_get_dynpm_state, 315 .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
316 .pre_page_flip = &r100_pre_page_flip,
317 .page_flip = &r100_page_flip,
318 .post_page_flip = &r100_post_page_flip,
307}; 319};
308 320
309static struct radeon_asic r420_asic = { 321static struct radeon_asic r420_asic = {
@@ -349,6 +361,9 @@ static struct radeon_asic r420_asic = {
349 .pm_finish = &r100_pm_finish, 361 .pm_finish = &r100_pm_finish,
350 .pm_init_profile = &r420_pm_init_profile, 362 .pm_init_profile = &r420_pm_init_profile,
351 .pm_get_dynpm_state = &r100_pm_get_dynpm_state, 363 .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
364 .pre_page_flip = &r100_pre_page_flip,
365 .page_flip = &r100_page_flip,
366 .post_page_flip = &r100_post_page_flip,
352}; 367};
353 368
354static struct radeon_asic rs400_asic = { 369static struct radeon_asic rs400_asic = {
@@ -394,6 +409,9 @@ static struct radeon_asic rs400_asic = {
394 .pm_finish = &r100_pm_finish, 409 .pm_finish = &r100_pm_finish,
395 .pm_init_profile = &r100_pm_init_profile, 410 .pm_init_profile = &r100_pm_init_profile,
396 .pm_get_dynpm_state = &r100_pm_get_dynpm_state, 411 .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
412 .pre_page_flip = &r100_pre_page_flip,
413 .page_flip = &r100_page_flip,
414 .post_page_flip = &r100_post_page_flip,
397}; 415};
398 416
399static struct radeon_asic rs600_asic = { 417static struct radeon_asic rs600_asic = {
@@ -439,6 +457,9 @@ static struct radeon_asic rs600_asic = {
439 .pm_finish = &rs600_pm_finish, 457 .pm_finish = &rs600_pm_finish,
440 .pm_init_profile = &r420_pm_init_profile, 458 .pm_init_profile = &r420_pm_init_profile,
441 .pm_get_dynpm_state = &r100_pm_get_dynpm_state, 459 .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
460 .pre_page_flip = &rs600_pre_page_flip,
461 .page_flip = &rs600_page_flip,
462 .post_page_flip = &rs600_post_page_flip,
442}; 463};
443 464
444static struct radeon_asic rs690_asic = { 465static struct radeon_asic rs690_asic = {
@@ -484,6 +505,9 @@ static struct radeon_asic rs690_asic = {
484 .pm_finish = &rs600_pm_finish, 505 .pm_finish = &rs600_pm_finish,
485 .pm_init_profile = &r420_pm_init_profile, 506 .pm_init_profile = &r420_pm_init_profile,
486 .pm_get_dynpm_state = &r100_pm_get_dynpm_state, 507 .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
508 .pre_page_flip = &rs600_pre_page_flip,
509 .page_flip = &rs600_page_flip,
510 .post_page_flip = &rs600_post_page_flip,
487}; 511};
488 512
489static struct radeon_asic rv515_asic = { 513static struct radeon_asic rv515_asic = {
@@ -529,6 +553,9 @@ static struct radeon_asic rv515_asic = {
529 .pm_finish = &rs600_pm_finish, 553 .pm_finish = &rs600_pm_finish,
530 .pm_init_profile = &r420_pm_init_profile, 554 .pm_init_profile = &r420_pm_init_profile,
531 .pm_get_dynpm_state = &r100_pm_get_dynpm_state, 555 .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
556 .pre_page_flip = &rs600_pre_page_flip,
557 .page_flip = &rs600_page_flip,
558 .post_page_flip = &rs600_post_page_flip,
532}; 559};
533 560
534static struct radeon_asic r520_asic = { 561static struct radeon_asic r520_asic = {
@@ -574,6 +601,9 @@ static struct radeon_asic r520_asic = {
574 .pm_finish = &rs600_pm_finish, 601 .pm_finish = &rs600_pm_finish,
575 .pm_init_profile = &r420_pm_init_profile, 602 .pm_init_profile = &r420_pm_init_profile,
576 .pm_get_dynpm_state = &r100_pm_get_dynpm_state, 603 .pm_get_dynpm_state = &r100_pm_get_dynpm_state,
604 .pre_page_flip = &rs600_pre_page_flip,
605 .page_flip = &rs600_page_flip,
606 .post_page_flip = &rs600_post_page_flip,
577}; 607};
578 608
579static struct radeon_asic r600_asic = { 609static struct radeon_asic r600_asic = {
@@ -601,8 +631,8 @@ static struct radeon_asic r600_asic = {
601 .set_engine_clock = &radeon_atom_set_engine_clock, 631 .set_engine_clock = &radeon_atom_set_engine_clock,
602 .get_memory_clock = &radeon_atom_get_memory_clock, 632 .get_memory_clock = &radeon_atom_get_memory_clock,
603 .set_memory_clock = &radeon_atom_set_memory_clock, 633 .set_memory_clock = &radeon_atom_set_memory_clock,
604 .get_pcie_lanes = &rv370_get_pcie_lanes, 634 .get_pcie_lanes = &r600_get_pcie_lanes,
605 .set_pcie_lanes = NULL, 635 .set_pcie_lanes = &r600_set_pcie_lanes,
606 .set_clock_gating = NULL, 636 .set_clock_gating = NULL,
607 .set_surface_reg = r600_set_surface_reg, 637 .set_surface_reg = r600_set_surface_reg,
608 .clear_surface_reg = r600_clear_surface_reg, 638 .clear_surface_reg = r600_clear_surface_reg,
@@ -618,6 +648,9 @@ static struct radeon_asic r600_asic = {
618 .pm_finish = &rs600_pm_finish, 648 .pm_finish = &rs600_pm_finish,
619 .pm_init_profile = &r600_pm_init_profile, 649 .pm_init_profile = &r600_pm_init_profile,
620 .pm_get_dynpm_state = &r600_pm_get_dynpm_state, 650 .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
651 .pre_page_flip = &rs600_pre_page_flip,
652 .page_flip = &rs600_page_flip,
653 .post_page_flip = &rs600_post_page_flip,
621}; 654};
622 655
623static struct radeon_asic rs780_asic = { 656static struct radeon_asic rs780_asic = {
@@ -662,6 +695,9 @@ static struct radeon_asic rs780_asic = {
662 .pm_finish = &rs600_pm_finish, 695 .pm_finish = &rs600_pm_finish,
663 .pm_init_profile = &rs780_pm_init_profile, 696 .pm_init_profile = &rs780_pm_init_profile,
664 .pm_get_dynpm_state = &r600_pm_get_dynpm_state, 697 .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
698 .pre_page_flip = &rs600_pre_page_flip,
699 .page_flip = &rs600_page_flip,
700 .post_page_flip = &rs600_post_page_flip,
665}; 701};
666 702
667static struct radeon_asic rv770_asic = { 703static struct radeon_asic rv770_asic = {
@@ -689,8 +725,8 @@ static struct radeon_asic rv770_asic = {
689 .set_engine_clock = &radeon_atom_set_engine_clock, 725 .set_engine_clock = &radeon_atom_set_engine_clock,
690 .get_memory_clock = &radeon_atom_get_memory_clock, 726 .get_memory_clock = &radeon_atom_get_memory_clock,
691 .set_memory_clock = &radeon_atom_set_memory_clock, 727 .set_memory_clock = &radeon_atom_set_memory_clock,
692 .get_pcie_lanes = &rv370_get_pcie_lanes, 728 .get_pcie_lanes = &r600_get_pcie_lanes,
693 .set_pcie_lanes = NULL, 729 .set_pcie_lanes = &r600_set_pcie_lanes,
694 .set_clock_gating = &radeon_atom_set_clock_gating, 730 .set_clock_gating = &radeon_atom_set_clock_gating,
695 .set_surface_reg = r600_set_surface_reg, 731 .set_surface_reg = r600_set_surface_reg,
696 .clear_surface_reg = r600_clear_surface_reg, 732 .clear_surface_reg = r600_clear_surface_reg,
@@ -706,6 +742,9 @@ static struct radeon_asic rv770_asic = {
706 .pm_finish = &rs600_pm_finish, 742 .pm_finish = &rs600_pm_finish,
707 .pm_init_profile = &r600_pm_init_profile, 743 .pm_init_profile = &r600_pm_init_profile,
708 .pm_get_dynpm_state = &r600_pm_get_dynpm_state, 744 .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
745 .pre_page_flip = &rs600_pre_page_flip,
746 .page_flip = &rv770_page_flip,
747 .post_page_flip = &rs600_post_page_flip,
709}; 748};
710 749
711static struct radeon_asic evergreen_asic = { 750static struct radeon_asic evergreen_asic = {
@@ -733,6 +772,95 @@ static struct radeon_asic evergreen_asic = {
733 .set_engine_clock = &radeon_atom_set_engine_clock, 772 .set_engine_clock = &radeon_atom_set_engine_clock,
734 .get_memory_clock = &radeon_atom_get_memory_clock, 773 .get_memory_clock = &radeon_atom_get_memory_clock,
735 .set_memory_clock = &radeon_atom_set_memory_clock, 774 .set_memory_clock = &radeon_atom_set_memory_clock,
775 .get_pcie_lanes = &r600_get_pcie_lanes,
776 .set_pcie_lanes = &r600_set_pcie_lanes,
777 .set_clock_gating = NULL,
778 .set_surface_reg = r600_set_surface_reg,
779 .clear_surface_reg = r600_clear_surface_reg,
780 .bandwidth_update = &evergreen_bandwidth_update,
781 .hpd_init = &evergreen_hpd_init,
782 .hpd_fini = &evergreen_hpd_fini,
783 .hpd_sense = &evergreen_hpd_sense,
784 .hpd_set_polarity = &evergreen_hpd_set_polarity,
785 .gui_idle = &r600_gui_idle,
786 .pm_misc = &evergreen_pm_misc,
787 .pm_prepare = &evergreen_pm_prepare,
788 .pm_finish = &evergreen_pm_finish,
789 .pm_init_profile = &r600_pm_init_profile,
790 .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
791 .pre_page_flip = &evergreen_pre_page_flip,
792 .page_flip = &evergreen_page_flip,
793 .post_page_flip = &evergreen_post_page_flip,
794};
795
796static struct radeon_asic sumo_asic = {
797 .init = &evergreen_init,
798 .fini = &evergreen_fini,
799 .suspend = &evergreen_suspend,
800 .resume = &evergreen_resume,
801 .cp_commit = &r600_cp_commit,
802 .gpu_is_lockup = &evergreen_gpu_is_lockup,
803 .asic_reset = &evergreen_asic_reset,
804 .vga_set_state = &r600_vga_set_state,
805 .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
806 .gart_set_page = &rs600_gart_set_page,
807 .ring_test = &r600_ring_test,
808 .ring_ib_execute = &r600_ring_ib_execute,
809 .irq_set = &evergreen_irq_set,
810 .irq_process = &evergreen_irq_process,
811 .get_vblank_counter = &evergreen_get_vblank_counter,
812 .fence_ring_emit = &r600_fence_ring_emit,
813 .cs_parse = &evergreen_cs_parse,
814 .copy_blit = &evergreen_copy_blit,
815 .copy_dma = &evergreen_copy_blit,
816 .copy = &evergreen_copy_blit,
817 .get_engine_clock = &radeon_atom_get_engine_clock,
818 .set_engine_clock = &radeon_atom_set_engine_clock,
819 .get_memory_clock = NULL,
820 .set_memory_clock = NULL,
821 .get_pcie_lanes = NULL,
822 .set_pcie_lanes = NULL,
823 .set_clock_gating = NULL,
824 .set_surface_reg = r600_set_surface_reg,
825 .clear_surface_reg = r600_clear_surface_reg,
826 .bandwidth_update = &evergreen_bandwidth_update,
827 .hpd_init = &evergreen_hpd_init,
828 .hpd_fini = &evergreen_hpd_fini,
829 .hpd_sense = &evergreen_hpd_sense,
830 .hpd_set_polarity = &evergreen_hpd_set_polarity,
831 .gui_idle = &r600_gui_idle,
832 .pm_misc = &evergreen_pm_misc,
833 .pm_prepare = &evergreen_pm_prepare,
834 .pm_finish = &evergreen_pm_finish,
835 .pm_init_profile = &rs780_pm_init_profile,
836 .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
837};
838
839static struct radeon_asic btc_asic = {
840 .init = &evergreen_init,
841 .fini = &evergreen_fini,
842 .suspend = &evergreen_suspend,
843 .resume = &evergreen_resume,
844 .cp_commit = &r600_cp_commit,
845 .gpu_is_lockup = &evergreen_gpu_is_lockup,
846 .asic_reset = &evergreen_asic_reset,
847 .vga_set_state = &r600_vga_set_state,
848 .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
849 .gart_set_page = &rs600_gart_set_page,
850 .ring_test = &r600_ring_test,
851 .ring_ib_execute = &r600_ring_ib_execute,
852 .irq_set = &evergreen_irq_set,
853 .irq_process = &evergreen_irq_process,
854 .get_vblank_counter = &evergreen_get_vblank_counter,
855 .fence_ring_emit = &r600_fence_ring_emit,
856 .cs_parse = &evergreen_cs_parse,
857 .copy_blit = &evergreen_copy_blit,
858 .copy_dma = &evergreen_copy_blit,
859 .copy = &evergreen_copy_blit,
860 .get_engine_clock = &radeon_atom_get_engine_clock,
861 .set_engine_clock = &radeon_atom_set_engine_clock,
862 .get_memory_clock = &radeon_atom_get_memory_clock,
863 .set_memory_clock = &radeon_atom_set_memory_clock,
736 .get_pcie_lanes = NULL, 864 .get_pcie_lanes = NULL,
737 .set_pcie_lanes = NULL, 865 .set_pcie_lanes = NULL,
738 .set_clock_gating = NULL, 866 .set_clock_gating = NULL,
@@ -749,6 +877,9 @@ static struct radeon_asic evergreen_asic = {
749 .pm_finish = &evergreen_pm_finish, 877 .pm_finish = &evergreen_pm_finish,
750 .pm_init_profile = &r600_pm_init_profile, 878 .pm_init_profile = &r600_pm_init_profile,
751 .pm_get_dynpm_state = &r600_pm_get_dynpm_state, 879 .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
880 .pre_page_flip = &evergreen_pre_page_flip,
881 .page_flip = &evergreen_page_flip,
882 .post_page_flip = &evergreen_post_page_flip,
752}; 883};
753 884
754int radeon_asic_init(struct radeon_device *rdev) 885int radeon_asic_init(struct radeon_device *rdev)
@@ -835,6 +966,14 @@ int radeon_asic_init(struct radeon_device *rdev)
835 case CHIP_HEMLOCK: 966 case CHIP_HEMLOCK:
836 rdev->asic = &evergreen_asic; 967 rdev->asic = &evergreen_asic;
837 break; 968 break;
969 case CHIP_PALM:
970 rdev->asic = &sumo_asic;
971 break;
972 case CHIP_BARTS:
973 case CHIP_TURKS:
974 case CHIP_CAICOS:
975 rdev->asic = &btc_asic;
976 break;
838 default: 977 default:
839 /* FIXME: not supported yet */ 978 /* FIXME: not supported yet */
840 return -EINVAL; 979 return -EINVAL;
@@ -849,7 +988,9 @@ int radeon_asic_init(struct radeon_device *rdev)
849 if (rdev->flags & RADEON_SINGLE_CRTC) 988 if (rdev->flags & RADEON_SINGLE_CRTC)
850 rdev->num_crtc = 1; 989 rdev->num_crtc = 1;
851 else { 990 else {
852 if (ASIC_IS_DCE4(rdev)) 991 if (ASIC_IS_DCE41(rdev))
992 rdev->num_crtc = 2;
993 else if (ASIC_IS_DCE4(rdev))
853 rdev->num_crtc = 6; 994 rdev->num_crtc = 6;
854 else 995 else
855 rdev->num_crtc = 2; 996 rdev->num_crtc = 2;
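The crtc-count selection now special-cases DCE 4.1 parts (the Palm IGPs), which expose two display controllers, before the generic six-crtc DCE4 case. The decision tree as a standalone sketch:

static int pick_num_crtc(int single_crtc, int dce41, int dce4)
{
	if (single_crtc)
		return 1;	/* RADEON_SINGLE_CRTC boards */
	if (dce41)
		return 2;	/* DCE 4.1 IGPs: two crtcs */
	return dce4 ? 6 : 2;	/* other DCE4: six; everything older: two */
}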
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 740988244143..e01f07718539 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -102,6 +102,11 @@ int r100_pci_gart_enable(struct radeon_device *rdev);
102void r100_pci_gart_disable(struct radeon_device *rdev); 102void r100_pci_gart_disable(struct radeon_device *rdev);
103int r100_debugfs_mc_info_init(struct radeon_device *rdev); 103int r100_debugfs_mc_info_init(struct radeon_device *rdev);
104int r100_gui_wait_for_idle(struct radeon_device *rdev); 104int r100_gui_wait_for_idle(struct radeon_device *rdev);
105void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup,
106 struct radeon_cp *cp);
107bool r100_gpu_cp_is_lockup(struct radeon_device *rdev,
108 struct r100_gpu_lockup *lockup,
109 struct radeon_cp *cp);
105void r100_ib_fini(struct radeon_device *rdev); 110void r100_ib_fini(struct radeon_device *rdev);
106int r100_ib_init(struct radeon_device *rdev); 111int r100_ib_init(struct radeon_device *rdev);
107void r100_irq_disable(struct radeon_device *rdev); 112void r100_irq_disable(struct radeon_device *rdev);
@@ -130,15 +135,19 @@ extern void r100_pm_prepare(struct radeon_device *rdev);
130extern void r100_pm_finish(struct radeon_device *rdev); 135extern void r100_pm_finish(struct radeon_device *rdev);
131extern void r100_pm_init_profile(struct radeon_device *rdev); 136extern void r100_pm_init_profile(struct radeon_device *rdev);
132extern void r100_pm_get_dynpm_state(struct radeon_device *rdev); 137extern void r100_pm_get_dynpm_state(struct radeon_device *rdev);
138extern void r100_pre_page_flip(struct radeon_device *rdev, int crtc);
139extern u32 r100_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
140extern void r100_post_page_flip(struct radeon_device *rdev, int crtc);
133 141
134/* 142/*
135 * r200,rv250,rs300,rv280 143 * r200,rv250,rs300,rv280
136 */ 144 */
137extern int r200_copy_dma(struct radeon_device *rdev, 145extern int r200_copy_dma(struct radeon_device *rdev,
138 uint64_t src_offset, 146 uint64_t src_offset,
139 uint64_t dst_offset, 147 uint64_t dst_offset,
140 unsigned num_pages, 148 unsigned num_pages,
141 struct radeon_fence *fence); 149 struct radeon_fence *fence);
150void r200_set_safe_registers(struct radeon_device *rdev);
142 151
143/* 152/*
144 * r300,r350,rv350,rv380 153 * r300,r350,rv350,rv380
@@ -159,6 +168,15 @@ extern uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
159extern void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 168extern void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
160extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes); 169extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
161extern int rv370_get_pcie_lanes(struct radeon_device *rdev); 170extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
171extern void r300_set_reg_safe(struct radeon_device *rdev);
172extern void r300_mc_program(struct radeon_device *rdev);
173extern void r300_mc_init(struct radeon_device *rdev);
174extern void r300_clock_startup(struct radeon_device *rdev);
175extern int r300_mc_wait_for_idle(struct radeon_device *rdev);
176extern int rv370_pcie_gart_init(struct radeon_device *rdev);
177extern void rv370_pcie_gart_fini(struct radeon_device *rdev);
178extern int rv370_pcie_gart_enable(struct radeon_device *rdev);
179extern void rv370_pcie_gart_disable(struct radeon_device *rdev);
162 180
163/* 181/*
164 * r420,r423,rv410 182 * r420,r423,rv410
@@ -168,6 +186,10 @@ extern void r420_fini(struct radeon_device *rdev);
168extern int r420_suspend(struct radeon_device *rdev); 186extern int r420_suspend(struct radeon_device *rdev);
169extern int r420_resume(struct radeon_device *rdev); 187extern int r420_resume(struct radeon_device *rdev);
170extern void r420_pm_init_profile(struct radeon_device *rdev); 188extern void r420_pm_init_profile(struct radeon_device *rdev);
189extern u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg);
190extern void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v);
191extern int r420_debugfs_pipes_info_init(struct radeon_device *rdev);
192extern void r420_pipes_init(struct radeon_device *rdev);
171 193
172/* 194/*
173 * rs400,rs480 195 * rs400,rs480
@@ -180,6 +202,12 @@ void rs400_gart_tlb_flush(struct radeon_device *rdev);
180int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); 202int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
181uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg); 203uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
182void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 204void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
205int rs400_gart_init(struct radeon_device *rdev);
206int rs400_gart_enable(struct radeon_device *rdev);
207void rs400_gart_adjust_size(struct radeon_device *rdev);
208void rs400_gart_disable(struct radeon_device *rdev);
209void rs400_gart_fini(struct radeon_device *rdev);
210
183 211
184/* 212/*
185 * rs600. 213 * rs600.
@@ -191,6 +219,7 @@ extern int rs600_suspend(struct radeon_device *rdev);
191extern int rs600_resume(struct radeon_device *rdev); 219extern int rs600_resume(struct radeon_device *rdev);
192int rs600_irq_set(struct radeon_device *rdev); 220int rs600_irq_set(struct radeon_device *rdev);
193int rs600_irq_process(struct radeon_device *rdev); 221int rs600_irq_process(struct radeon_device *rdev);
222void rs600_irq_disable(struct radeon_device *rdev);
194u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc); 223u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
195void rs600_gart_tlb_flush(struct radeon_device *rdev); 224void rs600_gart_tlb_flush(struct radeon_device *rdev);
196int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); 225int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
@@ -205,6 +234,11 @@ void rs600_hpd_set_polarity(struct radeon_device *rdev,
205extern void rs600_pm_misc(struct radeon_device *rdev); 234extern void rs600_pm_misc(struct radeon_device *rdev);
206extern void rs600_pm_prepare(struct radeon_device *rdev); 235extern void rs600_pm_prepare(struct radeon_device *rdev);
207extern void rs600_pm_finish(struct radeon_device *rdev); 236extern void rs600_pm_finish(struct radeon_device *rdev);
237extern void rs600_pre_page_flip(struct radeon_device *rdev, int crtc);
238extern u32 rs600_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
239extern void rs600_post_page_flip(struct radeon_device *rdev, int crtc);
240void rs600_set_safe_registers(struct radeon_device *rdev);
241
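The three rs600 page-flip hooks added above bracket a flip: the pre hook prepares the CRTC, rs600_page_flip() latches the new scanout base, and the post hook cleans up afterwards. A hedged sketch of the sequencing an (assumed) common page-flip path would use:

	u32 status;

	rs600_pre_page_flip(rdev, crtc_id);	/* prepare the CRTC for the flip */
	/* program the new base; the u32 return is treated as an opaque status here */
	status = rs600_page_flip(rdev, crtc_id, new_crtc_base);
	rs600_post_page_flip(rdev, crtc_id);	/* undo the pre-flip setup */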
208 242
209/* 243/*
210 * rs690,rs740 244 * rs690,rs740
@@ -216,10 +250,21 @@ int rs690_suspend(struct radeon_device *rdev);
216uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg); 250uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg);
217void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 251void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
218void rs690_bandwidth_update(struct radeon_device *rdev); 252void rs690_bandwidth_update(struct radeon_device *rdev);
253void rs690_line_buffer_adjust(struct radeon_device *rdev,
254 struct drm_display_mode *mode1,
255 struct drm_display_mode *mode2);
219 256
220/* 257/*
221 * rv515 258 * rv515
222 */ 259 */
260struct rv515_mc_save {
261 u32 d1vga_control;
262 u32 d2vga_control;
263 u32 vga_render_control;
264 u32 vga_hdp_control;
265 u32 d1crtc_control;
266 u32 d2crtc_control;
267};
223int rv515_init(struct radeon_device *rdev); 268int rv515_init(struct radeon_device *rdev);
224void rv515_fini(struct radeon_device *rdev); 269void rv515_fini(struct radeon_device *rdev);
225uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg); 270uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
@@ -230,6 +275,14 @@ void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
230void rv515_bandwidth_update(struct radeon_device *rdev); 275void rv515_bandwidth_update(struct radeon_device *rdev);
231int rv515_resume(struct radeon_device *rdev); 276int rv515_resume(struct radeon_device *rdev);
232int rv515_suspend(struct radeon_device *rdev); 277int rv515_suspend(struct radeon_device *rdev);
278void rv515_bandwidth_avivo_update(struct radeon_device *rdev);
279void rv515_vga_render_disable(struct radeon_device *rdev);
280void rv515_set_safe_registers(struct radeon_device *rdev);
281void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save);
282void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save);
283void rv515_clock_startup(struct radeon_device *rdev);
284void rv515_debugfs(struct radeon_device *rdev);
285
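struct rv515_mc_save captures the VGA and CRTC control registers that rv515_mc_stop() tears down so the memory controller can be reprogrammed with the display side quiescent; rv515_mc_resume() puts them back. A minimal sketch of the pairing, with rv515_mc_wait_for_idle() as an assumed idle-wait helper not declared in this hunk:

	struct rv515_mc_save save;

	rv515_mc_stop(rdev, &save);		/* blank VGA/CRTCs, saving their state */
	if (rv515_mc_wait_for_idle(rdev))	/* assumed helper */
		dev_warn(rdev->dev, "MC not idle before reprogramming\n");
	/* ... program MC base registers here ... */
	rv515_mc_resume(rdev, &save);		/* restore the saved VGA/CRTC state */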
233 286
234/* 287/*
235 * r520,rv530,rv560,rv570,r580 288 * r520,rv530,rv560,rv570,r580
@@ -278,6 +331,8 @@ extern void r600_pm_misc(struct radeon_device *rdev);
278extern void r600_pm_init_profile(struct radeon_device *rdev); 331extern void r600_pm_init_profile(struct radeon_device *rdev);
279extern void rs780_pm_init_profile(struct radeon_device *rdev); 332extern void rs780_pm_init_profile(struct radeon_device *rdev);
280extern void r600_pm_get_dynpm_state(struct radeon_device *rdev); 333extern void r600_pm_get_dynpm_state(struct radeon_device *rdev);
334extern void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes);
335extern int r600_get_pcie_lanes(struct radeon_device *rdev);
281 336
282/* 337/*
283 * rv770,rv730,rv710,rv740 338 * rv770,rv730,rv710,rv740
@@ -287,6 +342,7 @@ void rv770_fini(struct radeon_device *rdev);
287int rv770_suspend(struct radeon_device *rdev); 342int rv770_suspend(struct radeon_device *rdev);
288int rv770_resume(struct radeon_device *rdev); 343int rv770_resume(struct radeon_device *rdev);
289extern void rv770_pm_misc(struct radeon_device *rdev); 344extern void rv770_pm_misc(struct radeon_device *rdev);
345extern u32 rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
290 346
291/* 347/*
292 * evergreen 348 * evergreen
@@ -314,5 +370,8 @@ extern int evergreen_cs_parse(struct radeon_cs_parser *p);
314extern void evergreen_pm_misc(struct radeon_device *rdev); 370extern void evergreen_pm_misc(struct radeon_device *rdev);
315extern void evergreen_pm_prepare(struct radeon_device *rdev); 371extern void evergreen_pm_prepare(struct radeon_device *rdev);
316extern void evergreen_pm_finish(struct radeon_device *rdev); 372extern void evergreen_pm_finish(struct radeon_device *rdev);
373extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc);
374extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
375extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
317 376
318#endif 377#endif
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index bc5a2c3382d9..1573202a6418 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -37,7 +37,7 @@ radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device,
37extern void radeon_link_encoder_connector(struct drm_device *dev); 37extern void radeon_link_encoder_connector(struct drm_device *dev);
38extern void 38extern void
39radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, 39radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum,
40 uint32_t supported_device); 40 uint32_t supported_device, u16 caps);
41 41
42/* from radeon_connector.c */ 42/* from radeon_connector.c */
43extern void 43extern void
@@ -313,7 +313,6 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
313 uint16_t *line_mux, 313 uint16_t *line_mux,
314 struct radeon_hpd *hpd) 314 struct radeon_hpd *hpd)
315{ 315{
316 struct radeon_device *rdev = dev->dev_private;
317 316
318 /* Asus M2A-VM HDMI board lists the DVI port as HDMI */ 317 /* Asus M2A-VM HDMI board lists the DVI port as HDMI */
319 if ((dev->pdev->device == 0x791e) && 318 if ((dev->pdev->device == 0x791e) &&
@@ -388,6 +387,17 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
388 *line_mux = 0x90; 387 *line_mux = 0x90;
389 } 388 }
390 389
390 /* mac rv630 */
391 if ((dev->pdev->device == 0x9588) &&
392 (dev->pdev->subsystem_vendor == 0x106b) &&
393 (dev->pdev->subsystem_device == 0x00a6)) {
394 if ((supported_device == ATOM_DEVICE_TV1_SUPPORT) &&
395 (*connector_type == DRM_MODE_CONNECTOR_DVII)) {
396 *connector_type = DRM_MODE_CONNECTOR_9PinDIN;
397 *line_mux = CONNECTOR_7PIN_DIN_ENUM_ID1;
398 }
399 }
400
391 /* ASUS HD 3600 XT board lists the DVI port as HDMI */ 401 /* ASUS HD 3600 XT board lists the DVI port as HDMI */
392 if ((dev->pdev->device == 0x9598) && 402 if ((dev->pdev->device == 0x9598) &&
393 (dev->pdev->subsystem_vendor == 0x1043) && 403 (dev->pdev->subsystem_vendor == 0x1043) &&
@@ -425,21 +435,23 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
425 } 435 }
426 } 436 }
427 437
428 /* Acer laptop reports DVI-D as DVI-I and hpd pins reversed */ 438 /* Acer laptop (Acer TravelMate 5730G) has an HDMI port
 439 * on the laptop and a DVI port on the docking station, and
 440 * both share the same encoder, hpd pin, and ddc line.
 441 * So while the bios table is technically correct,
 442 * we drop the DVI port here since xrandr has no concept of
 443 * encoders and will try to drive both connectors
 444 * with different crtcs, which isn't possible on the hardware
445 * side and leaves no crtcs for LVDS or VGA.
446 */
429 if ((dev->pdev->device == 0x95c4) && 447 if ((dev->pdev->device == 0x95c4) &&
430 (dev->pdev->subsystem_vendor == 0x1025) && 448 (dev->pdev->subsystem_vendor == 0x1025) &&
431 (dev->pdev->subsystem_device == 0x013c)) { 449 (dev->pdev->subsystem_device == 0x013c)) {
432 struct radeon_gpio_rec gpio;
433
434 if ((*connector_type == DRM_MODE_CONNECTOR_DVII) && 450 if ((*connector_type == DRM_MODE_CONNECTOR_DVII) &&
435 (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) { 451 (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) {
436 gpio = radeon_lookup_gpio(rdev, 6); 452 /* actually it's a DVI-D port not DVI-I */
437 *hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio);
438 *connector_type = DRM_MODE_CONNECTOR_DVID; 453 *connector_type = DRM_MODE_CONNECTOR_DVID;
439 } else if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) && 454 return false;
440 (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) {
441 gpio = radeon_lookup_gpio(rdev, 7);
442 *hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio);
443 } 455 }
444 } 456 }
445 457
@@ -525,6 +537,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
525 u16 size, data_offset; 537 u16 size, data_offset;
526 u8 frev, crev; 538 u8 frev, crev;
527 ATOM_CONNECTOR_OBJECT_TABLE *con_obj; 539 ATOM_CONNECTOR_OBJECT_TABLE *con_obj;
540 ATOM_ENCODER_OBJECT_TABLE *enc_obj;
528 ATOM_OBJECT_TABLE *router_obj; 541 ATOM_OBJECT_TABLE *router_obj;
529 ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj; 542 ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj;
530 ATOM_OBJECT_HEADER *obj_header; 543 ATOM_OBJECT_HEADER *obj_header;
@@ -549,6 +562,9 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
549 con_obj = (ATOM_CONNECTOR_OBJECT_TABLE *) 562 con_obj = (ATOM_CONNECTOR_OBJECT_TABLE *)
550 (ctx->bios + data_offset + 563 (ctx->bios + data_offset +
551 le16_to_cpu(obj_header->usConnectorObjectTableOffset)); 564 le16_to_cpu(obj_header->usConnectorObjectTableOffset));
565 enc_obj = (ATOM_ENCODER_OBJECT_TABLE *)
566 (ctx->bios + data_offset +
567 le16_to_cpu(obj_header->usEncoderObjectTableOffset));
552 router_obj = (ATOM_OBJECT_TABLE *) 568 router_obj = (ATOM_OBJECT_TABLE *)
553 (ctx->bios + data_offset + 569 (ctx->bios + data_offset +
554 le16_to_cpu(obj_header->usRouterObjectTableOffset)); 570 le16_to_cpu(obj_header->usRouterObjectTableOffset));
@@ -654,14 +670,35 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
654 OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT; 670 OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
655 671
656 if (grph_obj_type == GRAPH_OBJECT_TYPE_ENCODER) { 672 if (grph_obj_type == GRAPH_OBJECT_TYPE_ENCODER) {
657 u16 encoder_obj = le16_to_cpu(path->usGraphicObjIds[j]); 673 for (k = 0; k < enc_obj->ucNumberOfObjects; k++) {
658 674 u16 encoder_obj = le16_to_cpu(enc_obj->asObjects[k].usObjectID);
659 radeon_add_atom_encoder(dev, 675 if (le16_to_cpu(path->usGraphicObjIds[j]) == encoder_obj) {
660 encoder_obj, 676 ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *)
661 le16_to_cpu 677 (ctx->bios + data_offset +
662 (path-> 678 le16_to_cpu(enc_obj->asObjects[k].usRecordOffset));
663 usDeviceTag)); 679 ATOM_ENCODER_CAP_RECORD *cap_record;
680 u16 caps = 0;
664 681
682 while (record->ucRecordType > 0 &&
683 record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
684 switch (record->ucRecordType) {
685 case ATOM_ENCODER_CAP_RECORD_TYPE:
686 cap_record =(ATOM_ENCODER_CAP_RECORD *)
687 record;
688 caps = le16_to_cpu(cap_record->usEncoderCap);
689 break;
690 }
691 record = (ATOM_COMMON_RECORD_HEADER *)
692 ((char *)record + record->ucRecordSize);
693 }
694 radeon_add_atom_encoder(dev,
695 encoder_obj,
696 le16_to_cpu
697 (path->
698 usDeviceTag),
699 caps);
700 }
701 }
665 } else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) { 702 } else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) {
666 for (k = 0; k < router_obj->ucNumberOfObjects; k++) { 703 for (k = 0; k < router_obj->ucNumberOfObjects; k++) {
667 u16 router_obj_id = le16_to_cpu(router_obj->asObjects[k].usObjectID); 704 u16 router_obj_id = le16_to_cpu(router_obj->asObjects[k].usObjectID);
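The new encoder branch above walks the variable-length ATOM common-record list attached to each encoder object, picking out an ATOM_ENCODER_CAP_RECORD so the caps can be handed to radeon_add_atom_encoder(). Distilled into a standalone helper (hypothetical name, same logic as the hunk):

	static u16 radeon_atom_get_encoder_caps(ATOM_COMMON_RECORD_HEADER *record)
	{
		u16 caps = 0;

		/* the list ends at a zero or out-of-range record type */
		while (record->ucRecordType > 0 &&
		       record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
			if (record->ucRecordType == ATOM_ENCODER_CAP_RECORD_TYPE)
				caps = le16_to_cpu(((ATOM_ENCODER_CAP_RECORD *)
						    record)->usEncoderCap);
			/* records are variable length; advance by the embedded size */
			record = (ATOM_COMMON_RECORD_HEADER *)
				 ((char *)record + record->ucRecordSize);
		}
		return caps;
	}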
@@ -995,7 +1032,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
995 radeon_get_encoder_enum(dev, 1032 radeon_get_encoder_enum(dev,
996 (1 << i), 1033 (1 << i),
997 dac), 1034 dac),
998 (1 << i)); 1035 (1 << i),
1036 0);
999 else 1037 else
1000 radeon_add_legacy_encoder(dev, 1038 radeon_add_legacy_encoder(dev,
1001 radeon_get_encoder_enum(dev, 1039 radeon_get_encoder_enum(dev,
@@ -1074,6 +1112,7 @@ union firmware_info {
1074 ATOM_FIRMWARE_INFO_V1_3 info_13; 1112 ATOM_FIRMWARE_INFO_V1_3 info_13;
1075 ATOM_FIRMWARE_INFO_V1_4 info_14; 1113 ATOM_FIRMWARE_INFO_V1_4 info_14;
1076 ATOM_FIRMWARE_INFO_V2_1 info_21; 1114 ATOM_FIRMWARE_INFO_V2_1 info_21;
1115 ATOM_FIRMWARE_INFO_V2_2 info_22;
1077}; 1116};
1078 1117
1079bool radeon_atom_get_clock_info(struct drm_device *dev) 1118bool radeon_atom_get_clock_info(struct drm_device *dev)
@@ -1148,8 +1187,12 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
1148 *p2pll = *p1pll; 1187 *p2pll = *p1pll;
1149 1188
1150 /* system clock */ 1189 /* system clock */
1151 spll->reference_freq = 1190 if (ASIC_IS_DCE4(rdev))
1152 le16_to_cpu(firmware_info->info.usReferenceClock); 1191 spll->reference_freq =
1192 le16_to_cpu(firmware_info->info_21.usCoreReferenceClock);
1193 else
1194 spll->reference_freq =
1195 le16_to_cpu(firmware_info->info.usReferenceClock);
1153 spll->reference_div = 0; 1196 spll->reference_div = 0;
1154 1197
1155 spll->pll_out_min = 1198 spll->pll_out_min =
@@ -1171,8 +1214,12 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
1171 le16_to_cpu(firmware_info->info.usMaxEngineClockPLL_Input); 1214 le16_to_cpu(firmware_info->info.usMaxEngineClockPLL_Input);
1172 1215
1173 /* memory clock */ 1216 /* memory clock */
1174 mpll->reference_freq = 1217 if (ASIC_IS_DCE4(rdev))
1175 le16_to_cpu(firmware_info->info.usReferenceClock); 1218 mpll->reference_freq =
1219 le16_to_cpu(firmware_info->info_21.usMemoryReferenceClock);
1220 else
1221 mpll->reference_freq =
1222 le16_to_cpu(firmware_info->info.usReferenceClock);
1176 mpll->reference_div = 0; 1223 mpll->reference_div = 0;
1177 1224
1178 mpll->pll_out_min = 1225 mpll->pll_out_min =
@@ -1201,8 +1248,12 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
1201 if (ASIC_IS_DCE4(rdev)) { 1248 if (ASIC_IS_DCE4(rdev)) {
1202 rdev->clock.default_dispclk = 1249 rdev->clock.default_dispclk =
1203 le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq); 1250 le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq);
1204 if (rdev->clock.default_dispclk == 0) 1251 if (rdev->clock.default_dispclk == 0) {
 1205 rdev->clock.default_dispclk = 60000; /* 600 MHz */ 1252 if (ASIC_IS_DCE5(rdev))
 1253 rdev->clock.default_dispclk = 54000; /* 540 MHz */
 1254 else
 1255 rdev->clock.default_dispclk = 60000; /* 600 MHz */
1256 }
1206 rdev->clock.dp_extclk = 1257 rdev->clock.dp_extclk =
1207 le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq); 1258 le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
1208 } 1259 }
@@ -1337,6 +1388,43 @@ bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
1337 return false; 1388 return false;
1338} 1389}
1339 1390
1391static void radeon_atombios_get_igp_ss_overrides(struct radeon_device *rdev,
1392 struct radeon_atom_ss *ss,
1393 int id)
1394{
1395 struct radeon_mode_info *mode_info = &rdev->mode_info;
1396 int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
1397 u16 data_offset, size;
1398 struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *igp_info;
1399 u8 frev, crev;
1400 u16 percentage = 0, rate = 0;
1401
1402 /* get any igp specific overrides */
1403 if (atom_parse_data_header(mode_info->atom_context, index, &size,
1404 &frev, &crev, &data_offset)) {
1405 igp_info = (struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *)
1406 (mode_info->atom_context->bios + data_offset);
1407 switch (id) {
1408 case ASIC_INTERNAL_SS_ON_TMDS:
1409 percentage = le16_to_cpu(igp_info->usDVISSPercentage);
1410 rate = le16_to_cpu(igp_info->usDVISSpreadRateIn10Hz);
1411 break;
1412 case ASIC_INTERNAL_SS_ON_HDMI:
1413 percentage = le16_to_cpu(igp_info->usHDMISSPercentage);
1414 rate = le16_to_cpu(igp_info->usHDMISSpreadRateIn10Hz);
1415 break;
1416 case ASIC_INTERNAL_SS_ON_LVDS:
1417 percentage = le16_to_cpu(igp_info->usLvdsSSPercentage);
1418 rate = le16_to_cpu(igp_info->usLvdsSSpreadRateIn10Hz);
1419 break;
1420 }
1421 if (percentage)
1422 ss->percentage = percentage;
1423 if (rate)
1424 ss->rate = rate;
1425 }
1426}
1427
1340union asic_ss_info { 1428union asic_ss_info {
1341 struct _ATOM_ASIC_INTERNAL_SS_INFO info; 1429 struct _ATOM_ASIC_INTERNAL_SS_INFO info;
1342 struct _ATOM_ASIC_INTERNAL_SS_INFO_V2 info_2; 1430 struct _ATOM_ASIC_INTERNAL_SS_INFO_V2 info_2;
@@ -1401,6 +1489,8 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
1401 le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage); 1489 le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
1402 ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode; 1490 ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode;
1403 ss->rate = le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadRateIn10Hz); 1491 ss->rate = le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadRateIn10Hz);
1492 if (rdev->flags & RADEON_IS_IGP)
1493 radeon_atombios_get_igp_ss_overrides(rdev, ss, id);
1404 return true; 1494 return true;
1405 } 1495 }
1406 } 1496 }
@@ -1477,6 +1567,9 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
1477 if (misc & ATOM_DOUBLE_CLOCK_MODE) 1567 if (misc & ATOM_DOUBLE_CLOCK_MODE)
1478 lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN; 1568 lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN;
1479 1569
1570 lvds->native_mode.width_mm = lvds_info->info.sLCDTiming.usImageHSize;
1571 lvds->native_mode.height_mm = lvds_info->info.sLCDTiming.usImageVSize;
1572
1480 /* set crtc values */ 1573 /* set crtc values */
1481 drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V); 1574 drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V);
1482 1575
@@ -1489,6 +1582,59 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
1489 else 1582 else
1490 lvds->linkb = false; 1583 lvds->linkb = false;
1491 1584
1585 /* parse the lcd record table */
1586 if (lvds_info->info.usModePatchTableOffset) {
1587 ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record;
1588 ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record;
1589 bool bad_record = false;
1590 u8 *record = (u8 *)(mode_info->atom_context->bios +
1591 data_offset +
1592 lvds_info->info.usModePatchTableOffset);
1593 while (*record != ATOM_RECORD_END_TYPE) {
1594 switch (*record) {
1595 case LCD_MODE_PATCH_RECORD_MODE_TYPE:
1596 record += sizeof(ATOM_PATCH_RECORD_MODE);
1597 break;
1598 case LCD_RTS_RECORD_TYPE:
1599 record += sizeof(ATOM_LCD_RTS_RECORD);
1600 break;
1601 case LCD_CAP_RECORD_TYPE:
1602 record += sizeof(ATOM_LCD_MODE_CONTROL_CAP);
1603 break;
1604 case LCD_FAKE_EDID_PATCH_RECORD_TYPE:
1605 fake_edid_record = (ATOM_FAKE_EDID_PATCH_RECORD *)record;
1606 if (fake_edid_record->ucFakeEDIDLength) {
1607 struct edid *edid;
1608 int edid_size =
1609 max((int)EDID_LENGTH, (int)fake_edid_record->ucFakeEDIDLength);
1610 edid = kmalloc(edid_size, GFP_KERNEL);
1611 if (edid) {
1612 memcpy((u8 *)edid, (u8 *)&fake_edid_record->ucFakeEDIDString[0],
1613 fake_edid_record->ucFakeEDIDLength);
1614
1615 if (drm_edid_is_valid(edid))
1616 rdev->mode_info.bios_hardcoded_edid = edid;
1617 else
1618 kfree(edid);
1619 }
1620 }
1621 record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD);
1622 break;
1623 case LCD_PANEL_RESOLUTION_RECORD_TYPE:
1624 panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record;
1625 lvds->native_mode.width_mm = panel_res_record->usHSize;
1626 lvds->native_mode.height_mm = panel_res_record->usVSize;
1627 record += sizeof(ATOM_PANEL_RESOLUTION_PATCH_RECORD);
1628 break;
1629 default:
1630 DRM_ERROR("Bad LCD record %d\n", *record);
1631 bad_record = true;
1632 break;
1633 }
1634 if (bad_record)
1635 break;
1636 }
1637 }
1492 } 1638 }
1493 return lvds; 1639 return lvds;
1494} 1640}
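A note on the bad_record flag in the LCD patch-table walk above: each record type has a fixed size known only to the parser, and the walk advances by sizeof() of whatever type it just matched. An unrecognized type therefore leaves the next offset unknowable, so the loop must stop rather than guess; schematically:

	while (*record != ATOM_RECORD_END_TYPE) {
		switch (*record) {
		case LCD_RTS_RECORD_TYPE:
			record += sizeof(ATOM_LCD_RTS_RECORD);	/* size known */
			break;
		default:
			bad_record = true;	/* size unknown: cannot advance safely */
			break;
		}
		if (bad_record)
			break;
	}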
@@ -1740,496 +1886,614 @@ static const char *pp_lib_thermal_controller_names[] = {
1740 "RV6xx", 1886 "RV6xx",
1741 "RV770", 1887 "RV770",
1742 "adt7473", 1888 "adt7473",
1889 "NONE",
1743 "External GPIO", 1890 "External GPIO",
1744 "Evergreen", 1891 "Evergreen",
1745 "adt7473 with internal", 1892 "emc2103",
1746 1893 "Sumo",
1894 "Northern Islands",
1747}; 1895};
1748 1896
1749union power_info { 1897union power_info {
1750 struct _ATOM_POWERPLAY_INFO info; 1898 struct _ATOM_POWERPLAY_INFO info;
1751 struct _ATOM_POWERPLAY_INFO_V2 info_2; 1899 struct _ATOM_POWERPLAY_INFO_V2 info_2;
1752 struct _ATOM_POWERPLAY_INFO_V3 info_3; 1900 struct _ATOM_POWERPLAY_INFO_V3 info_3;
1753 struct _ATOM_PPLIB_POWERPLAYTABLE info_4; 1901 struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
1902 struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
1903 struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
1754}; 1904};
1755 1905
1756void radeon_atombios_get_power_modes(struct radeon_device *rdev) 1906union pplib_clock_info {
1907 struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
1908 struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
1909 struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
1910 struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
1911};
1912
1913union pplib_power_state {
1914 struct _ATOM_PPLIB_STATE v1;
1915 struct _ATOM_PPLIB_STATE_V2 v2;
1916};
1917
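The pplib unions above exist because a single PowerPlay blob carries per-family clock-info layouts, and the parser must pick the view that matches the ASIC. A condensed sketch of the dispatch (the full version appears later in this patch as radeon_atombios_parse_pplib_clock_info()):

	u32 sclk;

	if (rdev->flags & RADEON_IS_IGP) {
		if (rdev->family >= CHIP_PALM) {	/* sumo layout */
			sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
			sclk |= clock_info->sumo.ucEngineClockHigh << 16;
		} else {				/* rs780 layout */
			sclk = le16_to_cpu(clock_info->rs780.usLowEngineClockLow);
			sclk |= clock_info->rs780.ucLowEngineClockHigh << 16;
		}
	} else if (ASIC_IS_DCE4(rdev)) {
		sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow);
		sclk |= clock_info->evergreen.ucEngineClockHigh << 16;
	} else {
		sclk = le16_to_cpu(clock_info->r600.usEngineClockLow);
		sclk |= clock_info->r600.ucEngineClockHigh << 16;
	}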
1918static void radeon_atombios_parse_misc_flags_1_3(struct radeon_device *rdev,
1919 int state_index,
1920 u32 misc, u32 misc2)
1921{
1922 rdev->pm.power_state[state_index].misc = misc;
1923 rdev->pm.power_state[state_index].misc2 = misc2;
1924 /* order matters! */
1925 if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
1926 rdev->pm.power_state[state_index].type =
1927 POWER_STATE_TYPE_POWERSAVE;
1928 if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
1929 rdev->pm.power_state[state_index].type =
1930 POWER_STATE_TYPE_BATTERY;
1931 if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
1932 rdev->pm.power_state[state_index].type =
1933 POWER_STATE_TYPE_BATTERY;
1934 if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
1935 rdev->pm.power_state[state_index].type =
1936 POWER_STATE_TYPE_BALANCED;
1937 if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
1938 rdev->pm.power_state[state_index].type =
1939 POWER_STATE_TYPE_PERFORMANCE;
1940 rdev->pm.power_state[state_index].flags &=
1941 ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
1942 }
1943 if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
1944 rdev->pm.power_state[state_index].type =
1945 POWER_STATE_TYPE_BALANCED;
1946 if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
1947 rdev->pm.power_state[state_index].type =
1948 POWER_STATE_TYPE_DEFAULT;
1949 rdev->pm.default_power_state_index = state_index;
1950 rdev->pm.power_state[state_index].default_clock_mode =
1951 &rdev->pm.power_state[state_index].clock_info[0];
1952 } else if (state_index == 0) {
1953 rdev->pm.power_state[state_index].clock_info[0].flags |=
1954 RADEON_PM_MODE_NO_DISPLAY;
1955 }
1956}
1957
1958static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
1757{ 1959{
1758 struct radeon_mode_info *mode_info = &rdev->mode_info; 1960 struct radeon_mode_info *mode_info = &rdev->mode_info;
1961 u32 misc, misc2 = 0;
1962 int num_modes = 0, i;
1963 int state_index = 0;
1964 struct radeon_i2c_bus_rec i2c_bus;
1965 union power_info *power_info;
1759 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); 1966 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
1760 u16 data_offset; 1967 u16 data_offset;
1761 u8 frev, crev; 1968 u8 frev, crev;
1762 u32 misc, misc2 = 0, sclk, mclk;
1763 union power_info *power_info;
1764 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
1765 struct _ATOM_PPLIB_STATE *power_state;
1766 int num_modes = 0, i, j;
1767 int state_index = 0, mode_index = 0;
1768 struct radeon_i2c_bus_rec i2c_bus;
1769
1770 rdev->pm.default_power_state_index = -1;
1771 1969
1772 if (atom_parse_data_header(mode_info->atom_context, index, NULL, 1970 if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
1773 &frev, &crev, &data_offset)) { 1971 &frev, &crev, &data_offset))
1774 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); 1972 return state_index;
1775 if (frev < 4) { 1973 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
1776 /* add the i2c bus for thermal/fan chip */ 1974
1777 if (power_info->info.ucOverdriveThermalController > 0) { 1975 /* add the i2c bus for thermal/fan chip */
1778 DRM_INFO("Possible %s thermal controller at 0x%02x\n", 1976 if (power_info->info.ucOverdriveThermalController > 0) {
1779 thermal_controller_names[power_info->info.ucOverdriveThermalController], 1977 DRM_INFO("Possible %s thermal controller at 0x%02x\n",
1780 power_info->info.ucOverdriveControllerAddress >> 1); 1978 thermal_controller_names[power_info->info.ucOverdriveThermalController],
1781 i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info.ucOverdriveI2cLine); 1979 power_info->info.ucOverdriveControllerAddress >> 1);
1782 rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus); 1980 i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info.ucOverdriveI2cLine);
1783 if (rdev->pm.i2c_bus) { 1981 rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
1784 struct i2c_board_info info = { }; 1982 if (rdev->pm.i2c_bus) {
1785 const char *name = thermal_controller_names[power_info->info. 1983 struct i2c_board_info info = { };
1786 ucOverdriveThermalController]; 1984 const char *name = thermal_controller_names[power_info->info.
1787 info.addr = power_info->info.ucOverdriveControllerAddress >> 1; 1985 ucOverdriveThermalController];
1788 strlcpy(info.type, name, sizeof(info.type)); 1986 info.addr = power_info->info.ucOverdriveControllerAddress >> 1;
1789 i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); 1987 strlcpy(info.type, name, sizeof(info.type));
1790 } 1988 i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
1989 }
1990 }
1991 num_modes = power_info->info.ucNumOfPowerModeEntries;
1992 if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
1993 num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
1994 /* last mode is usually default, array is low to high */
1995 for (i = 0; i < num_modes; i++) {
1996 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
1997 switch (frev) {
1998 case 1:
1999 rdev->pm.power_state[state_index].num_clock_modes = 1;
2000 rdev->pm.power_state[state_index].clock_info[0].mclk =
2001 le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock);
2002 rdev->pm.power_state[state_index].clock_info[0].sclk =
2003 le16_to_cpu(power_info->info.asPowerPlayInfo[i].usEngineClock);
2004 /* skip invalid modes */
2005 if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
2006 (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
2007 continue;
2008 rdev->pm.power_state[state_index].pcie_lanes =
2009 power_info->info.asPowerPlayInfo[i].ucNumPciELanes;
2010 misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo);
2011 if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
2012 (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
2013 rdev->pm.power_state[state_index].clock_info[0].voltage.type =
2014 VOLTAGE_GPIO;
2015 rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
2016 radeon_lookup_gpio(rdev,
2017 power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex);
2018 if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
2019 rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
2020 true;
2021 else
2022 rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
2023 false;
2024 } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
2025 rdev->pm.power_state[state_index].clock_info[0].voltage.type =
2026 VOLTAGE_VDDC;
2027 rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
2028 power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex;
1791 } 2029 }
1792 num_modes = power_info->info.ucNumOfPowerModeEntries; 2030 rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
1793 if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK) 2031 radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, 0);
1794 num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK; 2032 state_index++;
1795 /* last mode is usually default, array is low to high */ 2033 break;
1796 for (i = 0; i < num_modes; i++) { 2034 case 2:
1797 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; 2035 rdev->pm.power_state[state_index].num_clock_modes = 1;
1798 switch (frev) { 2036 rdev->pm.power_state[state_index].clock_info[0].mclk =
1799 case 1: 2037 le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock);
1800 rdev->pm.power_state[state_index].num_clock_modes = 1; 2038 rdev->pm.power_state[state_index].clock_info[0].sclk =
1801 rdev->pm.power_state[state_index].clock_info[0].mclk = 2039 le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulEngineClock);
1802 le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock); 2040 /* skip invalid modes */
1803 rdev->pm.power_state[state_index].clock_info[0].sclk = 2041 if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
1804 le16_to_cpu(power_info->info.asPowerPlayInfo[i].usEngineClock); 2042 (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
1805 /* skip invalid modes */ 2043 continue;
1806 if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || 2044 rdev->pm.power_state[state_index].pcie_lanes =
1807 (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) 2045 power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes;
1808 continue; 2046 misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo);
1809 rdev->pm.power_state[state_index].pcie_lanes = 2047 misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2);
1810 power_info->info.asPowerPlayInfo[i].ucNumPciELanes; 2048 if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
1811 misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo); 2049 (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
1812 if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) || 2050 rdev->pm.power_state[state_index].clock_info[0].voltage.type =
1813 (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) { 2051 VOLTAGE_GPIO;
1814 rdev->pm.power_state[state_index].clock_info[0].voltage.type = 2052 rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
1815 VOLTAGE_GPIO; 2053 radeon_lookup_gpio(rdev,
1816 rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = 2054 power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex);
1817 radeon_lookup_gpio(rdev, 2055 if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
1818 power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex); 2056 rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
1819 if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH) 2057 true;
1820 rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = 2058 else
1821 true; 2059 rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
1822 else 2060 false;
1823 rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = 2061 } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
1824 false; 2062 rdev->pm.power_state[state_index].clock_info[0].voltage.type =
1825 } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) { 2063 VOLTAGE_VDDC;
1826 rdev->pm.power_state[state_index].clock_info[0].voltage.type = 2064 rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
1827 VOLTAGE_VDDC; 2065 power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex;
1828 rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
1829 power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex;
1830 }
1831 rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
1832 rdev->pm.power_state[state_index].misc = misc;
1833 /* order matters! */
1834 if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
1835 rdev->pm.power_state[state_index].type =
1836 POWER_STATE_TYPE_POWERSAVE;
1837 if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
1838 rdev->pm.power_state[state_index].type =
1839 POWER_STATE_TYPE_BATTERY;
1840 if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
1841 rdev->pm.power_state[state_index].type =
1842 POWER_STATE_TYPE_BATTERY;
1843 if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
1844 rdev->pm.power_state[state_index].type =
1845 POWER_STATE_TYPE_BALANCED;
1846 if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
1847 rdev->pm.power_state[state_index].type =
1848 POWER_STATE_TYPE_PERFORMANCE;
1849 rdev->pm.power_state[state_index].flags &=
1850 ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
1851 }
1852 if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
1853 rdev->pm.power_state[state_index].type =
1854 POWER_STATE_TYPE_DEFAULT;
1855 rdev->pm.default_power_state_index = state_index;
1856 rdev->pm.power_state[state_index].default_clock_mode =
1857 &rdev->pm.power_state[state_index].clock_info[0];
1858 rdev->pm.power_state[state_index].flags &=
1859 ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
1860 } else if (state_index == 0) {
1861 rdev->pm.power_state[state_index].clock_info[0].flags |=
1862 RADEON_PM_MODE_NO_DISPLAY;
1863 }
1864 state_index++;
1865 break;
1866 case 2:
1867 rdev->pm.power_state[state_index].num_clock_modes = 1;
1868 rdev->pm.power_state[state_index].clock_info[0].mclk =
1869 le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock);
1870 rdev->pm.power_state[state_index].clock_info[0].sclk =
1871 le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulEngineClock);
1872 /* skip invalid modes */
1873 if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
1874 (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
1875 continue;
1876 rdev->pm.power_state[state_index].pcie_lanes =
1877 power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes;
1878 misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo);
1879 misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2);
1880 if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
1881 (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
1882 rdev->pm.power_state[state_index].clock_info[0].voltage.type =
1883 VOLTAGE_GPIO;
1884 rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
1885 radeon_lookup_gpio(rdev,
1886 power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex);
1887 if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
1888 rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
1889 true;
1890 else
1891 rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
1892 false;
1893 } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
1894 rdev->pm.power_state[state_index].clock_info[0].voltage.type =
1895 VOLTAGE_VDDC;
1896 rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
1897 power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex;
1898 }
1899 rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
1900 rdev->pm.power_state[state_index].misc = misc;
1901 rdev->pm.power_state[state_index].misc2 = misc2;
1902 /* order matters! */
1903 if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
1904 rdev->pm.power_state[state_index].type =
1905 POWER_STATE_TYPE_POWERSAVE;
1906 if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
1907 rdev->pm.power_state[state_index].type =
1908 POWER_STATE_TYPE_BATTERY;
1909 if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
1910 rdev->pm.power_state[state_index].type =
1911 POWER_STATE_TYPE_BATTERY;
1912 if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
1913 rdev->pm.power_state[state_index].type =
1914 POWER_STATE_TYPE_BALANCED;
1915 if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
1916 rdev->pm.power_state[state_index].type =
1917 POWER_STATE_TYPE_PERFORMANCE;
1918 rdev->pm.power_state[state_index].flags &=
1919 ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
1920 }
1921 if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
1922 rdev->pm.power_state[state_index].type =
1923 POWER_STATE_TYPE_BALANCED;
1924 if (misc2 & ATOM_PM_MISCINFO2_MULTI_DISPLAY_SUPPORT)
1925 rdev->pm.power_state[state_index].flags &=
1926 ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
1927 if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
1928 rdev->pm.power_state[state_index].type =
1929 POWER_STATE_TYPE_DEFAULT;
1930 rdev->pm.default_power_state_index = state_index;
1931 rdev->pm.power_state[state_index].default_clock_mode =
1932 &rdev->pm.power_state[state_index].clock_info[0];
1933 rdev->pm.power_state[state_index].flags &=
1934 ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
1935 } else if (state_index == 0) {
1936 rdev->pm.power_state[state_index].clock_info[0].flags |=
1937 RADEON_PM_MODE_NO_DISPLAY;
1938 }
1939 state_index++;
1940 break;
1941 case 3:
1942 rdev->pm.power_state[state_index].num_clock_modes = 1;
1943 rdev->pm.power_state[state_index].clock_info[0].mclk =
1944 le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock);
1945 rdev->pm.power_state[state_index].clock_info[0].sclk =
1946 le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulEngineClock);
1947 /* skip invalid modes */
1948 if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
1949 (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
1950 continue;
1951 rdev->pm.power_state[state_index].pcie_lanes =
1952 power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes;
1953 misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo);
1954 misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2);
1955 if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
1956 (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
1957 rdev->pm.power_state[state_index].clock_info[0].voltage.type =
1958 VOLTAGE_GPIO;
1959 rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
1960 radeon_lookup_gpio(rdev,
1961 power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex);
1962 if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
1963 rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
1964 true;
1965 else
1966 rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
1967 false;
1968 } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
1969 rdev->pm.power_state[state_index].clock_info[0].voltage.type =
1970 VOLTAGE_VDDC;
1971 rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
1972 power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex;
1973 if (misc2 & ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN) {
1974 rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_enabled =
1975 true;
1976 rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_id =
1977 power_info->info_3.asPowerPlayInfo[i].ucVDDCI_VoltageDropIndex;
1978 }
1979 }
1980 rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
1981 rdev->pm.power_state[state_index].misc = misc;
1982 rdev->pm.power_state[state_index].misc2 = misc2;
1983 /* order matters! */
1984 if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
1985 rdev->pm.power_state[state_index].type =
1986 POWER_STATE_TYPE_POWERSAVE;
1987 if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
1988 rdev->pm.power_state[state_index].type =
1989 POWER_STATE_TYPE_BATTERY;
1990 if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
1991 rdev->pm.power_state[state_index].type =
1992 POWER_STATE_TYPE_BATTERY;
1993 if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
1994 rdev->pm.power_state[state_index].type =
1995 POWER_STATE_TYPE_BALANCED;
1996 if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
1997 rdev->pm.power_state[state_index].type =
1998 POWER_STATE_TYPE_PERFORMANCE;
1999 rdev->pm.power_state[state_index].flags &=
2000 ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
2001 }
2002 if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
2003 rdev->pm.power_state[state_index].type =
2004 POWER_STATE_TYPE_BALANCED;
2005 if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
2006 rdev->pm.power_state[state_index].type =
2007 POWER_STATE_TYPE_DEFAULT;
2008 rdev->pm.default_power_state_index = state_index;
2009 rdev->pm.power_state[state_index].default_clock_mode =
2010 &rdev->pm.power_state[state_index].clock_info[0];
2011 } else if (state_index == 0) {
2012 rdev->pm.power_state[state_index].clock_info[0].flags |=
2013 RADEON_PM_MODE_NO_DISPLAY;
2014 }
2015 state_index++;
2016 break;
2017 }
2018 } 2066 }
2019 /* last mode is usually default */ 2067 rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
2020 if (rdev->pm.default_power_state_index == -1) { 2068 radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, misc2);
2021 rdev->pm.power_state[state_index - 1].type = 2069 state_index++;
2022 POWER_STATE_TYPE_DEFAULT; 2070 break;
2023 rdev->pm.default_power_state_index = state_index - 1; 2071 case 3:
2024 rdev->pm.power_state[state_index - 1].default_clock_mode = 2072 rdev->pm.power_state[state_index].num_clock_modes = 1;
2025 &rdev->pm.power_state[state_index - 1].clock_info[0]; 2073 rdev->pm.power_state[state_index].clock_info[0].mclk =
2026 rdev->pm.power_state[state_index].flags &= 2074 le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock);
2027 ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; 2075 rdev->pm.power_state[state_index].clock_info[0].sclk =
2028 rdev->pm.power_state[state_index].misc = 0; 2076 le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulEngineClock);
2029 rdev->pm.power_state[state_index].misc2 = 0; 2077 /* skip invalid modes */
2078 if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
2079 (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
2080 continue;
2081 rdev->pm.power_state[state_index].pcie_lanes =
2082 power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes;
2083 misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo);
2084 misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2);
2085 if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
2086 (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
2087 rdev->pm.power_state[state_index].clock_info[0].voltage.type =
2088 VOLTAGE_GPIO;
2089 rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
2090 radeon_lookup_gpio(rdev,
2091 power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex);
2092 if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
2093 rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
2094 true;
2095 else
2096 rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
2097 false;
2098 } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
2099 rdev->pm.power_state[state_index].clock_info[0].voltage.type =
2100 VOLTAGE_VDDC;
2101 rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
2102 power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex;
2103 if (misc2 & ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN) {
2104 rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_enabled =
2105 true;
2106 rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_id =
2107 power_info->info_3.asPowerPlayInfo[i].ucVDDCI_VoltageDropIndex;
2108 }
2030 } 2109 }
2110 rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
2111 radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, misc2);
2112 state_index++;
2113 break;
2114 }
2115 }
2116 /* last mode is usually default */
2117 if (rdev->pm.default_power_state_index == -1) {
2118 rdev->pm.power_state[state_index - 1].type =
2119 POWER_STATE_TYPE_DEFAULT;
2120 rdev->pm.default_power_state_index = state_index - 1;
2121 rdev->pm.power_state[state_index - 1].default_clock_mode =
2122 &rdev->pm.power_state[state_index - 1].clock_info[0];
2123 rdev->pm.power_state[state_index].flags &=
2124 ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
2125 rdev->pm.power_state[state_index].misc = 0;
2126 rdev->pm.power_state[state_index].misc2 = 0;
2127 }
2128 return state_index;
2129}
2130
2131static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *rdev,
2132 ATOM_PPLIB_THERMALCONTROLLER *controller)
2133{
2134 struct radeon_i2c_bus_rec i2c_bus;
2135
2136 /* add the i2c bus for thermal/fan chip */
2137 if (controller->ucType > 0) {
2138 if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
2139 DRM_INFO("Internal thermal controller %s fan control\n",
2140 (controller->ucFanParameters &
2141 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
2142 rdev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
2143 } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
2144 DRM_INFO("Internal thermal controller %s fan control\n",
2145 (controller->ucFanParameters &
2146 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
2147 rdev->pm.int_thermal_type = THERMAL_TYPE_RV770;
2148 } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
2149 DRM_INFO("Internal thermal controller %s fan control\n",
2150 (controller->ucFanParameters &
2151 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
2152 rdev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
2153 } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
2154 DRM_INFO("Internal thermal controller %s fan control\n",
2155 (controller->ucFanParameters &
2156 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
2157 rdev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
2158 } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
2159 DRM_INFO("Internal thermal controller %s fan control\n",
2160 (controller->ucFanParameters &
2161 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
2162 rdev->pm.int_thermal_type = THERMAL_TYPE_NI;
2163 } else if ((controller->ucType ==
2164 ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
2165 (controller->ucType ==
2166 ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) ||
2167 (controller->ucType ==
2168 ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL)) {
2169 DRM_INFO("Special thermal controller config\n");
2031 } else { 2170 } else {
2032 int fw_index = GetIndexIntoMasterTable(DATA, FirmwareInfo); 2171 DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
2033 uint8_t fw_frev, fw_crev; 2172 pp_lib_thermal_controller_names[controller->ucType],
2034 uint16_t fw_data_offset, vddc = 0; 2173 controller->ucI2cAddress >> 1,
2035 union firmware_info *firmware_info; 2174 (controller->ucFanParameters &
2036 ATOM_PPLIB_THERMALCONTROLLER *controller = &power_info->info_4.sThermalController; 2175 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
2037 2176 i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine);
2038 if (atom_parse_data_header(mode_info->atom_context, fw_index, NULL, 2177 rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
2039 &fw_frev, &fw_crev, &fw_data_offset)) { 2178 if (rdev->pm.i2c_bus) {
2040 firmware_info = 2179 struct i2c_board_info info = { };
2041 (union firmware_info *)(mode_info->atom_context->bios + 2180 const char *name = pp_lib_thermal_controller_names[controller->ucType];
2042 fw_data_offset); 2181 info.addr = controller->ucI2cAddress >> 1;
2043 vddc = firmware_info->info_14.usBootUpVDDCVoltage; 2182 strlcpy(info.type, name, sizeof(info.type));
2183 i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
2044 } 2184 }
2185 }
2186 }
2187}
2045 2188
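radeon_atombios_add_pplib_thermal_controller() factors the thermal/fan i2c setup out of the old monolithic parser so the pplib table parsers can share it. Its call site is outside this hunk; a hedged sketch, using the sThermalController member visible in the removed code and the renamed pplib union field:

	/* assumed call site inside a pplib power-table parser */
	radeon_atombios_add_pplib_thermal_controller(rdev,
			&power_info->pplib.sThermalController);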
2046 /* add the i2c bus for thermal/fan chip */ 2189static u16 radeon_atombios_get_default_vddc(struct radeon_device *rdev)
2047 if (controller->ucType > 0) { 2190{
2048 if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) { 2191 struct radeon_mode_info *mode_info = &rdev->mode_info;
2049 DRM_INFO("Internal thermal controller %s fan control\n", 2192 int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
2050 (controller->ucFanParameters & 2193 u8 frev, crev;
2051 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); 2194 u16 data_offset;
2052 rdev->pm.int_thermal_type = THERMAL_TYPE_RV6XX; 2195 union firmware_info *firmware_info;
2053 } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) { 2196 u16 vddc = 0;
2054 DRM_INFO("Internal thermal controller %s fan control\n",
2055 (controller->ucFanParameters &
2056 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
2057 rdev->pm.int_thermal_type = THERMAL_TYPE_RV770;
2058 } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
2059 DRM_INFO("Internal thermal controller %s fan control\n",
2060 (controller->ucFanParameters &
2061 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
2062 rdev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
2063 } else if ((controller->ucType ==
2064 ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
2065 (controller->ucType ==
2066 ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL)) {
2067 DRM_INFO("Special thermal controller config\n");
2068 } else {
2069 DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
2070 pp_lib_thermal_controller_names[controller->ucType],
2071 controller->ucI2cAddress >> 1,
2072 (controller->ucFanParameters &
2073 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
2074 i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine);
2075 rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
2076 if (rdev->pm.i2c_bus) {
2077 struct i2c_board_info info = { };
2078 const char *name = pp_lib_thermal_controller_names[controller->ucType];
2079 info.addr = controller->ucI2cAddress >> 1;
2080 strlcpy(info.type, name, sizeof(info.type));
2081 i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
2082 }
2083 2197
2084 } 2198 if (atom_parse_data_header(mode_info->atom_context, index, NULL,
2085 } 2199 &frev, &crev, &data_offset)) {
2086 /* first mode is usually default, followed by low to high */ 2200 firmware_info =
2087 for (i = 0; i < power_info->info_4.ucNumStates; i++) { 2201 (union firmware_info *)(mode_info->atom_context->bios +
2088 mode_index = 0; 2202 data_offset);
2089 power_state = (struct _ATOM_PPLIB_STATE *) 2203 vddc = firmware_info->info_14.usBootUpVDDCVoltage;
2090 (mode_info->atom_context->bios + 2204 }
2091 data_offset + 2205
2092 le16_to_cpu(power_info->info_4.usStateArrayOffset) + 2206 return vddc;
2093 i * power_info->info_4.ucStateEntrySize); 2207}
2094 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) 2208
2095 (mode_info->atom_context->bios + 2209static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rdev,
2096 data_offset + 2210 int state_index, int mode_index,
2097 le16_to_cpu(power_info->info_4.usNonClockInfoArrayOffset) + 2211 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info)
2098 (power_state->ucNonClockStateIndex * 2212{
2099 power_info->info_4.ucNonClockSize)); 2213 int j;
2100 for (j = 0; j < (power_info->info_4.ucStateEntrySize - 1); j++) { 2214 u32 misc = le32_to_cpu(non_clock_info->ulCapsAndSettings);
2101 if (rdev->flags & RADEON_IS_IGP) { 2215 u32 misc2 = le16_to_cpu(non_clock_info->usClassification);
2102 struct _ATOM_PPLIB_RS780_CLOCK_INFO *clock_info = 2216 u16 vddc = radeon_atombios_get_default_vddc(rdev);
2103 (struct _ATOM_PPLIB_RS780_CLOCK_INFO *) 2217
2104 (mode_info->atom_context->bios + 2218 rdev->pm.power_state[state_index].misc = misc;
2105 data_offset + 2219 rdev->pm.power_state[state_index].misc2 = misc2;
2106 le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) + 2220 rdev->pm.power_state[state_index].pcie_lanes =
2107 (power_state->ucClockStateIndices[j] * 2221 ((misc & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >>
2108 power_info->info_4.ucClockInfoSize)); 2222 ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
2109 sclk = le16_to_cpu(clock_info->usLowEngineClockLow); 2223 switch (misc2 & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
2110 sclk |= clock_info->ucLowEngineClockHigh << 16; 2224 case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
2111 rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk; 2225 rdev->pm.power_state[state_index].type =
2112 /* skip invalid modes */ 2226 POWER_STATE_TYPE_BATTERY;
2113 if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0) 2227 break;
2114 continue; 2228 case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
2115 /* voltage works differently on IGPs */ 2229 rdev->pm.power_state[state_index].type =
2116 mode_index++; 2230 POWER_STATE_TYPE_BALANCED;
2117 } else if (ASIC_IS_DCE4(rdev)) { 2231 break;
2118 struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *clock_info = 2232 case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
2119 (struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *) 2233 rdev->pm.power_state[state_index].type =
2120 (mode_info->atom_context->bios + 2234 POWER_STATE_TYPE_PERFORMANCE;
2121 data_offset + 2235 break;
2122 le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) + 2236 case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
2123 (power_state->ucClockStateIndices[j] * 2237 if (misc2 & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
2124 power_info->info_4.ucClockInfoSize)); 2238 rdev->pm.power_state[state_index].type =
2125 sclk = le16_to_cpu(clock_info->usEngineClockLow); 2239 POWER_STATE_TYPE_PERFORMANCE;
2126 sclk |= clock_info->ucEngineClockHigh << 16; 2240 break;
2127 mclk = le16_to_cpu(clock_info->usMemoryClockLow); 2241 }
2128 mclk |= clock_info->ucMemoryClockHigh << 16; 2242 rdev->pm.power_state[state_index].flags = 0;
2129 rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk; 2243 if (misc & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
2130 rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk; 2244 rdev->pm.power_state[state_index].flags |=
2131 /* skip invalid modes */ 2245 RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
2132 if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) || 2246 if (misc2 & ATOM_PPLIB_CLASSIFICATION_BOOT) {
2133 (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)) 2247 rdev->pm.power_state[state_index].type =
2134 continue; 2248 POWER_STATE_TYPE_DEFAULT;
2135 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = 2249 rdev->pm.default_power_state_index = state_index;
2136 VOLTAGE_SW; 2250 rdev->pm.power_state[state_index].default_clock_mode =
2137 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = 2251 &rdev->pm.power_state[state_index].clock_info[mode_index - 1];
2138 clock_info->usVDDC; 2252 if (ASIC_IS_DCE5(rdev)) {
2139 /* XXX usVDDCI */ 2253 /* NI chips post without MC ucode, so default clocks are strobe mode only */
2140 mode_index++; 2254 rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk;
2141 } else { 2255 rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk;
2142 struct _ATOM_PPLIB_R600_CLOCK_INFO *clock_info = 2256 rdev->pm.default_vddc = rdev->pm.power_state[state_index].clock_info[0].voltage.voltage;
2143 (struct _ATOM_PPLIB_R600_CLOCK_INFO *) 2257 } else {
2144 (mode_info->atom_context->bios + 2258 /* patch the table values with the default slck/mclk from firmware info */
2145 data_offset + 2259 for (j = 0; j < mode_index; j++) {
2146 le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) + 2260 rdev->pm.power_state[state_index].clock_info[j].mclk =
2147 (power_state->ucClockStateIndices[j] * 2261 rdev->clock.default_mclk;
2148 power_info->info_4.ucClockInfoSize)); 2262 rdev->pm.power_state[state_index].clock_info[j].sclk =
2149 sclk = le16_to_cpu(clock_info->usEngineClockLow); 2263 rdev->clock.default_sclk;
2150 sclk |= clock_info->ucEngineClockHigh << 16; 2264 if (vddc)
2151 mclk = le16_to_cpu(clock_info->usMemoryClockLow); 2265 rdev->pm.power_state[state_index].clock_info[j].voltage.voltage =
2152 mclk |= clock_info->ucMemoryClockHigh << 16; 2266 vddc;
2153 rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
2154 rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
2155 /* skip invalid modes */
2156 if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) ||
2157 (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0))
2158 continue;
2159 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
2160 VOLTAGE_SW;
2161 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
2162 clock_info->usVDDC;
2163 mode_index++;
2164 }
2165 }
2166 rdev->pm.power_state[state_index].num_clock_modes = mode_index;
2167 if (mode_index) {
2168 misc = le32_to_cpu(non_clock_info->ulCapsAndSettings);
2169 misc2 = le16_to_cpu(non_clock_info->usClassification);
2170 rdev->pm.power_state[state_index].misc = misc;
2171 rdev->pm.power_state[state_index].misc2 = misc2;
2172 rdev->pm.power_state[state_index].pcie_lanes =
2173 ((misc & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >>
2174 ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
2175 switch (misc2 & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
2176 case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
2177 rdev->pm.power_state[state_index].type =
2178 POWER_STATE_TYPE_BATTERY;
2179 break;
2180 case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
2181 rdev->pm.power_state[state_index].type =
2182 POWER_STATE_TYPE_BALANCED;
2183 break;
2184 case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
2185 rdev->pm.power_state[state_index].type =
2186 POWER_STATE_TYPE_PERFORMANCE;
2187 break;
2188 case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
2189 if (misc2 & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
2190 rdev->pm.power_state[state_index].type =
2191 POWER_STATE_TYPE_PERFORMANCE;
2192 break;
2193 }
2194 rdev->pm.power_state[state_index].flags = 0;
2195 if (misc & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
2196 rdev->pm.power_state[state_index].flags |=
2197 RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
2198 if (misc2 & ATOM_PPLIB_CLASSIFICATION_BOOT) {
2199 rdev->pm.power_state[state_index].type =
2200 POWER_STATE_TYPE_DEFAULT;
2201 rdev->pm.default_power_state_index = state_index;
2202 rdev->pm.power_state[state_index].default_clock_mode =
2203 &rdev->pm.power_state[state_index].clock_info[mode_index - 1];
2204 /* patch the table values with the default sclk/mclk from firmware info */
2205 for (j = 0; j < mode_index; j++) {
2206 rdev->pm.power_state[state_index].clock_info[j].mclk =
2207 rdev->clock.default_mclk;
2208 rdev->pm.power_state[state_index].clock_info[j].sclk =
2209 rdev->clock.default_sclk;
2210 if (vddc)
2211 rdev->pm.power_state[state_index].clock_info[j].voltage.voltage =
2212 vddc;
2213 }
2214 }
2215 state_index++;
2216 }
2217 }
2218 /* if multiple clock modes, mark the lowest as no display */
2219 for (i = 0; i < state_index; i++) {
2220 if (rdev->pm.power_state[i].num_clock_modes > 1)
2221 rdev->pm.power_state[i].clock_info[0].flags |=
2222 RADEON_PM_MODE_NO_DISPLAY;
2223 }
2224 /* first mode is usually default */
2225 if (rdev->pm.default_power_state_index == -1) {
2226 rdev->pm.power_state[0].type =
2227 POWER_STATE_TYPE_DEFAULT;
2228 rdev->pm.default_power_state_index = 0;
2229 rdev->pm.power_state[0].default_clock_mode =
2230 &rdev->pm.power_state[0].clock_info[0];
2231 }
2232 }
2242 rdev->pm.power_state[state_index].flags = 0;
2243 if (misc & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
2244 rdev->pm.power_state[state_index].flags |=
2245 RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
2246 if (misc2 & ATOM_PPLIB_CLASSIFICATION_BOOT) {
2247 rdev->pm.power_state[state_index].type =
2248 POWER_STATE_TYPE_DEFAULT;
2249 rdev->pm.default_power_state_index = state_index;
2250 rdev->pm.power_state[state_index].default_clock_mode =
2251 &rdev->pm.power_state[state_index].clock_info[mode_index - 1];
2252 if (ASIC_IS_DCE5(rdev)) {
2253 /* NI chips post without MC ucode, so default clocks are strobe mode only */
2254 rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk;
2255 rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk;
2256 rdev->pm.default_vddc = rdev->pm.power_state[state_index].clock_info[0].voltage.voltage;
2257 } else {
2258 /* patch the table values with the default sclk/mclk from firmware info */
2259 for (j = 0; j < mode_index; j++) {
2260 rdev->pm.power_state[state_index].clock_info[j].mclk =
2261 rdev->clock.default_mclk;
2262 rdev->pm.power_state[state_index].clock_info[j].sclk =
2263 rdev->clock.default_sclk;
2264 if (vddc)
2265 rdev->pm.power_state[state_index].clock_info[j].voltage.voltage =
2266 vddc;
2267 }
2268 }
2269 }
2270}
2271
2272static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
2273 int state_index, int mode_index,
2274 union pplib_clock_info *clock_info)
2275{
2276 u32 sclk, mclk;
2277
2278 if (rdev->flags & RADEON_IS_IGP) {
2279 if (rdev->family >= CHIP_PALM) {
2280 sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
2281 sclk |= clock_info->sumo.ucEngineClockHigh << 16;
2282 rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
2283 } else {
2284 sclk = le16_to_cpu(clock_info->rs780.usLowEngineClockLow);
2285 sclk |= clock_info->rs780.ucLowEngineClockHigh << 16;
2286 rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
2287 }
2288 } else if (ASIC_IS_DCE4(rdev)) {
2289 sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow);
2290 sclk |= clock_info->evergreen.ucEngineClockHigh << 16;
2291 mclk = le16_to_cpu(clock_info->evergreen.usMemoryClockLow);
2292 mclk |= clock_info->evergreen.ucMemoryClockHigh << 16;
2293 rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
2294 rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
2295 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
2296 VOLTAGE_SW;
2297 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
2298 clock_info->evergreen.usVDDC;
2299 } else {
2300 sclk = le16_to_cpu(clock_info->r600.usEngineClockLow);
2301 sclk |= clock_info->r600.ucEngineClockHigh << 16;
2302 mclk = le16_to_cpu(clock_info->r600.usMemoryClockLow);
2303 mclk |= clock_info->r600.ucMemoryClockHigh << 16;
2304 rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
2305 rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
2306 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
2307 VOLTAGE_SW;
2308 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
2309 clock_info->r600.usVDDC;
2310 }
2311
2312 if (rdev->flags & RADEON_IS_IGP) {
2313 /* skip invalid modes */
2314 if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)
2315 return false;
2316 } else {
2317 /* skip invalid modes */
2318 if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) ||
2319 (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0))
2320 return false;
2321 }
2322 return true;
2323}
2324
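The clock decoding above splits each 24-bit PPLib clock into a 16-bit little-endian low word plus an 8-bit high byte. A minimal standalone sketch of that composition (the helper name and sample value are invented for illustration; a real table walk would apply le16toh() to the low word first):

#include <stdint.h>
#include <stdio.h>

/* Compose a 24-bit clock from PPLib's split low/high encoding.
 * Values are in units of 10 kHz, as in the tables parsed above. */
static uint32_t pplib_clock(uint16_t low, uint8_t high)
{
        return (uint32_t)low | ((uint32_t)high << 16);
}

int main(void)
{
        /* 0x0186A0 = 100000 x 10 kHz = 1 GHz engine clock */
        uint32_t sclk = pplib_clock(0x86A0, 0x01);
        printf("sclk = %u (x 10 kHz)\n", (unsigned)sclk);
        return 0;
}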
2325static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
2326{
2327 struct radeon_mode_info *mode_info = &rdev->mode_info;
2328 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
2329 union pplib_power_state *power_state;
2330 int i, j;
2331 int state_index = 0, mode_index = 0;
2332 union pplib_clock_info *clock_info;
2333 bool valid;
2334 union power_info *power_info;
2335 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
2336 u16 data_offset;
2337 u8 frev, crev;
2338
2339 if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
2340 &frev, &crev, &data_offset))
2341 return state_index;
2342 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
2343
2344 radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
2345 /* first mode is usually default, followed by low to high */
2346 for (i = 0; i < power_info->pplib.ucNumStates; i++) {
2347 mode_index = 0;
2348 power_state = (union pplib_power_state *)
2349 (mode_info->atom_context->bios + data_offset +
2350 le16_to_cpu(power_info->pplib.usStateArrayOffset) +
2351 i * power_info->pplib.ucStateEntrySize);
2352 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
2353 (mode_info->atom_context->bios + data_offset +
2354 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) +
2355 (power_state->v1.ucNonClockStateIndex *
2356 power_info->pplib.ucNonClockSize));
2357 for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
2358 clock_info = (union pplib_clock_info *)
2359 (mode_info->atom_context->bios + data_offset +
2360 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
2361 (power_state->v1.ucClockStateIndices[j] *
2362 power_info->pplib.ucClockInfoSize));
2363 valid = radeon_atombios_parse_pplib_clock_info(rdev,
2364 state_index, mode_index,
2365 clock_info);
2366 if (valid)
2367 mode_index++;
2368 }
2369 rdev->pm.power_state[state_index].num_clock_modes = mode_index;
2370 if (mode_index) {
2371 radeon_atombios_parse_pplib_non_clock_info(rdev, state_index, mode_index,
2372 non_clock_info);
2373 state_index++;
2374 }
2375 }
2376 /* if multiple clock modes, mark the lowest as no display */
2377 for (i = 0; i < state_index; i++) {
2378 if (rdev->pm.power_state[i].num_clock_modes > 1)
2379 rdev->pm.power_state[i].clock_info[0].flags |=
2380 RADEON_PM_MODE_NO_DISPLAY;
2381 }
2382 /* first mode is usually default */
2383 if (rdev->pm.default_power_state_index == -1) {
2384 rdev->pm.power_state[0].type =
2385 POWER_STATE_TYPE_DEFAULT;
2386 rdev->pm.default_power_state_index = 0;
2387 rdev->pm.power_state[0].default_clock_mode =
2388 &rdev->pm.power_state[0].clock_info[0];
2389 }
2390 return state_index;
2391}
2392
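Both parsers step through BIOS tables whose entry stride (ucStateEntrySize, ucClockInfoSize) is only known at run time, so addressing is raw byte arithmetic rather than C array indexing. A hypothetical reduction of that pattern, with invented names and a fake table:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Return entry i of a packed table whose stride comes from the BIOS;
 * mirrors the bios + offset + i * size arithmetic used above. */
static const void *table_entry(const uint8_t *base, uint16_t offset,
                               unsigned int i, uint8_t entry_size)
{
        return base + offset + (size_t)i * entry_size;
}

int main(void)
{
        /* fake 3-entry table at offset 4 with a 5-byte stride;
         * the first byte of each entry is an id */
        uint8_t bios[32] = { [4] = 0xaa, [9] = 0xbb, [14] = 0xcc };
        const uint8_t *e = table_entry(bios, 4, 2, 5);
        printf("entry 2 id = 0x%02x\n", *e); /* prints 0xcc */
        return 0;
}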
2393static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
2394{
2395 struct radeon_mode_info *mode_info = &rdev->mode_info;
2396 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
2397 union pplib_power_state *power_state;
2398 int i, j, non_clock_array_index, clock_array_index;
2399 int state_index = 0, mode_index = 0;
2400 union pplib_clock_info *clock_info;
2401 struct StateArray *state_array;
2402 struct ClockInfoArray *clock_info_array;
2403 struct NonClockInfoArray *non_clock_info_array;
2404 bool valid;
2405 union power_info *power_info;
2406 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
2407 u16 data_offset;
2408 u8 frev, crev;
2409
2410 if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
2411 &frev, &crev, &data_offset))
2412 return state_index;
2413 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
2414
2415 radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
2416 state_array = (struct StateArray *)
2417 (mode_info->atom_context->bios + data_offset +
2418 power_info->pplib.usStateArrayOffset);
2419 clock_info_array = (struct ClockInfoArray *)
2420 (mode_info->atom_context->bios + data_offset +
2421 power_info->pplib.usClockInfoArrayOffset);
2422 non_clock_info_array = (struct NonClockInfoArray *)
2423 (mode_info->atom_context->bios + data_offset +
2424 power_info->pplib.usNonClockInfoArrayOffset);
2425 for (i = 0; i < state_array->ucNumEntries; i++) {
2426 mode_index = 0;
2427 power_state = (union pplib_power_state *)&state_array->states[i];
2428 /* XXX this might be an inagua bug... */
2429 non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */
2430 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
2431 &non_clock_info_array->nonClockInfo[non_clock_array_index];
2432 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
2433 clock_array_index = power_state->v2.clockInfoIndex[j];
2434 /* XXX this might be an inagua bug... */
2435 if (clock_array_index >= clock_info_array->ucNumEntries)
2436 continue;
2437 clock_info = (union pplib_clock_info *)
2438 &clock_info_array->clockInfo[clock_array_index];
2439 valid = radeon_atombios_parse_pplib_clock_info(rdev,
2440 state_index, mode_index,
2441 clock_info);
2442 if (valid)
2443 mode_index++;
2444 }
2445 rdev->pm.power_state[state_index].num_clock_modes = mode_index;
2446 if (mode_index) {
2447 radeon_atombios_parse_pplib_non_clock_info(rdev, state_index, mode_index,
2448 non_clock_info);
2449 state_index++;
2450 }
2451 }
2452 /* if multiple clock modes, mark the lowest as no display */
2453 for (i = 0; i < state_index; i++) {
2454 if (rdev->pm.power_state[i].num_clock_modes > 1)
2455 rdev->pm.power_state[i].clock_info[0].flags |=
2456 RADEON_PM_MODE_NO_DISPLAY;
2457 }
2458 /* first mode is usually default */
2459 if (rdev->pm.default_power_state_index == -1) {
2460 rdev->pm.power_state[0].type =
2461 POWER_STATE_TYPE_DEFAULT;
2462 rdev->pm.default_power_state_index = 0;
2463 rdev->pm.power_state[0].default_clock_mode =
2464 &rdev->pm.power_state[0].clock_info[0];
2465 }
2466 return state_index;
2467}
2468
2469void radeon_atombios_get_power_modes(struct radeon_device *rdev)
2470{
2471 struct radeon_mode_info *mode_info = &rdev->mode_info;
2472 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
2473 u16 data_offset;
2474 u8 frev, crev;
2475 int state_index = 0;
2476
2477 rdev->pm.default_power_state_index = -1;
2478
2479 if (atom_parse_data_header(mode_info->atom_context, index, NULL,
2480 &frev, &crev, &data_offset)) {
2481 switch (frev) {
2482 case 1:
2483 case 2:
2484 case 3:
2485 state_index = radeon_atombios_parse_power_table_1_3(rdev);
2486 break;
2487 case 4:
2488 case 5:
2489 state_index = radeon_atombios_parse_power_table_4_5(rdev);
2490 break;
2491 case 6:
2492 state_index = radeon_atombios_parse_power_table_6(rdev);
2493 break;
2494 default:
2495 break;
2496 }
2233 } else { 2497 } else {
2234 /* add the default mode */ 2498 /* add the default mode */
2235 rdev->pm.power_state[state_index].type = 2499 rdev->pm.power_state[state_index].type =
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 8f2c7b50dcf5..1aba85cad1a8 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -131,6 +131,45 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
131 return true; 131 return true;
132} 132}
133 133
134static bool ni_read_disabled_bios(struct radeon_device *rdev)
135{
136 u32 bus_cntl;
137 u32 d1vga_control;
138 u32 d2vga_control;
139 u32 vga_render_control;
140 u32 rom_cntl;
141 bool r;
142
143 bus_cntl = RREG32(R600_BUS_CNTL);
144 d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
145 d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
146 vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
147 rom_cntl = RREG32(R600_ROM_CNTL);
148
149 /* enable the rom */
150 WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
151 /* Disable VGA mode */
152 WREG32(AVIVO_D1VGA_CONTROL,
153 (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
154 AVIVO_DVGA_CONTROL_TIMING_SELECT)));
155 WREG32(AVIVO_D2VGA_CONTROL,
156 (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
157 AVIVO_DVGA_CONTROL_TIMING_SELECT)));
158 WREG32(AVIVO_VGA_RENDER_CONTROL,
159 (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK));
160 WREG32(R600_ROM_CNTL, rom_cntl | R600_SCK_OVERWRITE);
161
162 r = radeon_read_bios(rdev);
163
164 /* restore regs */
165 WREG32(R600_BUS_CNTL, bus_cntl);
166 WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
167 WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
168 WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
169 WREG32(R600_ROM_CNTL, rom_cntl);
170 return r;
171}
172
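ni_read_disabled_bios() above follows the usual save/modify/restore discipline: snapshot every register it will touch, ungate the ROM and force the serial clock, read, then restore unconditionally so the ASIC is left exactly as found. A toy sketch of that shape against a fake register file (register indices and bit positions are invented; reg_read/reg_write stand in for RREG32/WREG32):

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_regs[4];
static uint32_t reg_read(int r)              { return fake_regs[r]; }
static void     reg_write(int r, uint32_t v) { fake_regs[r] = v; }

enum { BUS_CNTL, ROM_CNTL };
#define BIOS_ROM_DIS  (1u << 1)  /* assumed bit, for the sketch only */
#define SCK_OVERWRITE (1u << 1)  /* assumed bit, for the sketch only */

static int read_rom_body(void) { return 1; /* radeon_read_bios() stand-in */ }

int main(void)
{
        /* save */
        uint32_t bus_cntl = reg_read(BUS_CNTL);
        uint32_t rom_cntl = reg_read(ROM_CNTL);

        /* modify: ungate the ROM, force the serial clock */
        reg_write(BUS_CNTL, bus_cntl & ~BIOS_ROM_DIS);
        reg_write(ROM_CNTL, rom_cntl | SCK_OVERWRITE);

        int ok = read_rom_body();

        /* restore on every path */
        reg_write(BUS_CNTL, bus_cntl);
        reg_write(ROM_CNTL, rom_cntl);
        printf("rom read %s\n", ok ? "ok" : "failed");
        return 0;
}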
134static bool r700_read_disabled_bios(struct radeon_device *rdev) 173static bool r700_read_disabled_bios(struct radeon_device *rdev)
135{ 174{
136 uint32_t viph_control; 175 uint32_t viph_control;
@@ -416,6 +455,8 @@ static bool radeon_read_disabled_bios(struct radeon_device *rdev)
416{ 455{
417 if (rdev->flags & RADEON_IS_IGP) 456 if (rdev->flags & RADEON_IS_IGP)
418 return igp_read_bios_from_vram(rdev); 457 return igp_read_bios_from_vram(rdev);
458 else if (rdev->family >= CHIP_BARTS)
459 return ni_read_disabled_bios(rdev);
419 else if (rdev->family >= CHIP_RV770) 460 else if (rdev->family >= CHIP_RV770)
420 return r700_read_disabled_bios(rdev); 461 return r700_read_disabled_bios(rdev);
421 else if (rdev->family >= CHIP_R600) 462 else if (rdev->family >= CHIP_R600)
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 137b8075f6e7..591fcae8f224 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -471,8 +471,9 @@ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
471 return true; 471 return true;
472} 472}
473 473
474/* this is used for atom LCDs as well */
474struct edid * 475struct edid *
475radeon_combios_get_hardcoded_edid(struct radeon_device *rdev)
476radeon_bios_get_hardcoded_edid(struct radeon_device *rdev)
476{ 477{
477 if (rdev->mode_info.bios_hardcoded_edid) 478 if (rdev->mode_info.bios_hardcoded_edid)
478 return rdev->mode_info.bios_hardcoded_edid; 479 return rdev->mode_info.bios_hardcoded_edid;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 8afaf7a7459e..22b7e3dc0eca 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -472,6 +472,9 @@ static int radeon_lvds_get_modes(struct drm_connector *connector)
472 if (mode) { 472 if (mode) {
473 ret = 1; 473 ret = 1;
474 drm_mode_probed_add(connector, mode); 474 drm_mode_probed_add(connector, mode);
475 /* add the width/height from vbios tables if available */
476 connector->display_info.width_mm = mode->width_mm;
477 connector->display_info.height_mm = mode->height_mm;
475 /* add scaled modes */ 478 /* add scaled modes */
476 radeon_add_common_modes(encoder, connector); 479 radeon_add_common_modes(encoder, connector);
477 } 480 }
@@ -1216,7 +1219,7 @@ radeon_add_atom_connector(struct drm_device *dev,
1216 if (ASIC_IS_AVIVO(rdev)) { 1219 if (ASIC_IS_AVIVO(rdev)) {
1217 drm_connector_attach_property(&radeon_connector->base, 1220 drm_connector_attach_property(&radeon_connector->base,
1218 rdev->mode_info.underscan_property, 1221 rdev->mode_info.underscan_property,
1219 UNDERSCAN_AUTO);
1222 UNDERSCAN_OFF);
1220 drm_connector_attach_property(&radeon_connector->base, 1223 drm_connector_attach_property(&radeon_connector->base,
1221 rdev->mode_info.underscan_hborder_property, 1224 rdev->mode_info.underscan_hborder_property,
1222 0); 1225 0);
@@ -1256,7 +1259,7 @@ radeon_add_atom_connector(struct drm_device *dev,
1256 if (ASIC_IS_AVIVO(rdev)) { 1259 if (ASIC_IS_AVIVO(rdev)) {
1257 drm_connector_attach_property(&radeon_connector->base, 1260 drm_connector_attach_property(&radeon_connector->base,
1258 rdev->mode_info.underscan_property, 1261 rdev->mode_info.underscan_property,
1259 UNDERSCAN_AUTO);
1262 UNDERSCAN_OFF);
1260 drm_connector_attach_property(&radeon_connector->base, 1263 drm_connector_attach_property(&radeon_connector->base,
1261 rdev->mode_info.underscan_hborder_property, 1264 rdev->mode_info.underscan_hborder_property,
1262 0); 1265 0);
@@ -1299,7 +1302,7 @@ radeon_add_atom_connector(struct drm_device *dev,
1299 if (ASIC_IS_AVIVO(rdev)) { 1302 if (ASIC_IS_AVIVO(rdev)) {
1300 drm_connector_attach_property(&radeon_connector->base, 1303 drm_connector_attach_property(&radeon_connector->base,
1301 rdev->mode_info.underscan_property, 1304 rdev->mode_info.underscan_property,
1302 UNDERSCAN_AUTO);
1305 UNDERSCAN_OFF);
1303 drm_connector_attach_property(&radeon_connector->base, 1306 drm_connector_attach_property(&radeon_connector->base,
1304 rdev->mode_info.underscan_hborder_property, 1307 rdev->mode_info.underscan_hborder_property,
1305 0); 1308 0);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 6d64a2705f12..35b5eb8fbe2a 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -77,13 +77,13 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
77 p->relocs_ptr[i] = &p->relocs[i]; 77 p->relocs_ptr[i] = &p->relocs[i];
78 p->relocs[i].robj = p->relocs[i].gobj->driver_private; 78 p->relocs[i].robj = p->relocs[i].gobj->driver_private;
79 p->relocs[i].lobj.bo = p->relocs[i].robj; 79 p->relocs[i].lobj.bo = p->relocs[i].robj;
80 p->relocs[i].lobj.rdomain = r->read_domains;
81 p->relocs[i].lobj.wdomain = r->write_domain; 80 p->relocs[i].lobj.wdomain = r->write_domain;
81 p->relocs[i].lobj.rdomain = r->read_domains;
82 p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo;
82 p->relocs[i].handle = r->handle; 83 p->relocs[i].handle = r->handle;
83 p->relocs[i].flags = r->flags; 84 p->relocs[i].flags = r->flags;
84 INIT_LIST_HEAD(&p->relocs[i].lobj.list);
85 radeon_bo_list_add_object(&p->relocs[i].lobj, 85 radeon_bo_list_add_object(&p->relocs[i].lobj,
86 &p->validated); 86 &p->validated);
87 } 87 }
88 } 88 }
89 return radeon_bo_list_validate(&p->validated); 89 return radeon_bo_list_validate(&p->validated);
@@ -189,10 +189,13 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
189{ 189{
190 unsigned i; 190 unsigned i;
191 191
192 if (!error && parser->ib) {
193 radeon_bo_list_fence(&parser->validated, parser->ib->fence);
194 }
195 radeon_bo_list_unreserve(&parser->validated);
192
193 if (!error && parser->ib)
194 ttm_eu_fence_buffer_objects(&parser->validated,
195 parser->ib->fence);
196 else
197 ttm_eu_backoff_reservation(&parser->validated);
198
196 if (parser->relocs != NULL) { 199 if (parser->relocs != NULL) {
197 for (i = 0; i < parser->nrelocs; i++) { 200 for (i = 0; i < parser->nrelocs; i++) {
198 if (parser->relocs[i].gobj) 201 if (parser->relocs[i].gobj)
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 501966a13f48..26091d602b84 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -81,6 +81,10 @@ static const char radeon_family_name[][16] = {
81 "JUNIPER", 81 "JUNIPER",
82 "CYPRESS", 82 "CYPRESS",
83 "HEMLOCK", 83 "HEMLOCK",
84 "PALM",
85 "BARTS",
86 "TURKS",
87 "CAICOS",
84 "LAST", 88 "LAST",
85}; 89};
86 90
@@ -224,6 +228,11 @@ int radeon_wb_init(struct radeon_device *rdev)
224 rdev->wb.use_event = true; 228 rdev->wb.use_event = true;
225 } 229 }
226 } 230 }
231 /* always use writeback/events on NI */
232 if (ASIC_IS_DCE5(rdev)) {
233 rdev->wb.enabled = true;
234 rdev->wb.use_event = true;
235 }
227 236
228 dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis"); 237 dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");
229 238
@@ -335,7 +344,12 @@ bool radeon_card_posted(struct radeon_device *rdev)
335 uint32_t reg; 344 uint32_t reg;
336 345
337 /* first check CRTCs */ 346 /* first check CRTCs */
338 if (ASIC_IS_DCE4(rdev)) {
347 if (ASIC_IS_DCE41(rdev)) {
348 reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
349 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
350 if (reg & EVERGREEN_CRTC_MASTER_EN)
351 return true;
352 } else if (ASIC_IS_DCE4(rdev)) {
339 reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | 353 reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
340 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) | 354 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
341 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) | 355 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
@@ -636,20 +650,20 @@ void radeon_check_arguments(struct radeon_device *rdev)
636static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) 650static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
637{ 651{
638 struct drm_device *dev = pci_get_drvdata(pdev); 652 struct drm_device *dev = pci_get_drvdata(pdev);
639 struct radeon_device *rdev = dev->dev_private;
640 pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; 653 pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
641 if (state == VGA_SWITCHEROO_ON) { 654 if (state == VGA_SWITCHEROO_ON) {
642 printk(KERN_INFO "radeon: switched on\n"); 655 printk(KERN_INFO "radeon: switched on\n");
643 /* don't suspend or resume card normally */ 656 /* don't suspend or resume card normally */
644 rdev->powered_down = false;
657 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
645 radeon_resume_kms(dev); 658 radeon_resume_kms(dev);
659 dev->switch_power_state = DRM_SWITCH_POWER_ON;
646 drm_kms_helper_poll_enable(dev); 660 drm_kms_helper_poll_enable(dev);
647 } else { 661 } else {
648 printk(KERN_INFO "radeon: switched off\n"); 662 printk(KERN_INFO "radeon: switched off\n");
649 drm_kms_helper_poll_disable(dev); 663 drm_kms_helper_poll_disable(dev);
664 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
650 radeon_suspend_kms(dev, pmm); 665 radeon_suspend_kms(dev, pmm);
651 /* don't suspend or resume card normally */
652 rdev->powered_down = true;
666 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
653 } 667 }
654} 668}
655 669
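The switcheroo hunk above replaces the driver-private powered_down flag with the generic switch_power_state, publishing DRM_SWITCH_POWER_CHANGING before touching the hardware and the terminal state only afterwards, so the suspend/resume entry points (which now test for DRM_SWITCH_POWER_OFF) can tell an in-flight transition from a powered-off card. A compact sketch of that bracketing (enum and helper names invented):

#include <stdio.h>

enum switch_power { SWITCH_POWER_ON, SWITCH_POWER_OFF, SWITCH_POWER_CHANGING };

static enum switch_power power_state = SWITCH_POWER_OFF;

static void resume_hw(void) { /* radeon_resume_kms() stand-in */ }

/* Publish CHANGING first so concurrent paths that test for OFF
 * do not short-circuit while the hardware is being brought up. */
static void switcheroo_on(void)
{
        power_state = SWITCH_POWER_CHANGING;
        resume_hw();
        power_state = SWITCH_POWER_ON;
}

int main(void)
{
        switcheroo_on();
        printf("state = %d\n", (int)power_state);
        return 0;
}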
@@ -704,11 +718,6 @@ int radeon_device_init(struct radeon_device *rdev,
704 init_waitqueue_head(&rdev->irq.vblank_queue); 718 init_waitqueue_head(&rdev->irq.vblank_queue);
705 init_waitqueue_head(&rdev->irq.idle_queue); 719 init_waitqueue_head(&rdev->irq.idle_queue);
706 720
707 /* setup workqueue */
708 rdev->wq = create_workqueue("radeon");
709 if (rdev->wq == NULL)
710 return -ENOMEM;
711
712 /* Set asic functions */ 721 /* Set asic functions */
713 r = radeon_asic_init(rdev); 722 r = radeon_asic_init(rdev);
714 if (r) 723 if (r)
@@ -773,6 +782,7 @@ int radeon_device_init(struct radeon_device *rdev,
773 vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode); 782 vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
774 vga_switcheroo_register_client(rdev->pdev, 783 vga_switcheroo_register_client(rdev->pdev,
775 radeon_switcheroo_set_state, 784 radeon_switcheroo_set_state,
785 NULL,
776 radeon_switcheroo_can_switch); 786 radeon_switcheroo_can_switch);
777 787
778 r = radeon_init(rdev); 788 r = radeon_init(rdev);
@@ -806,7 +816,6 @@ void radeon_device_fini(struct radeon_device *rdev)
806 /* evict vram memory */ 816 /* evict vram memory */
807 radeon_bo_evict_vram(rdev); 817 radeon_bo_evict_vram(rdev);
808 radeon_fini(rdev); 818 radeon_fini(rdev);
809 destroy_workqueue(rdev->wq);
810 vga_switcheroo_unregister_client(rdev->pdev); 819 vga_switcheroo_unregister_client(rdev->pdev);
811 vga_client_register(rdev->pdev, NULL, NULL, NULL); 820 vga_client_register(rdev->pdev, NULL, NULL, NULL);
812 if (rdev->rio_mem) 821 if (rdev->rio_mem)
@@ -835,7 +844,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
835 } 844 }
836 rdev = dev->dev_private; 845 rdev = dev->dev_private;
837 846
838 if (rdev->powered_down)
847 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
839 return 0; 848 return 0;
840 849
841 /* turn off display hw */ 850 /* turn off display hw */
@@ -893,7 +902,7 @@ int radeon_resume_kms(struct drm_device *dev)
893 struct drm_connector *connector; 902 struct drm_connector *connector;
894 struct radeon_device *rdev = dev->dev_private; 903 struct radeon_device *rdev = dev->dev_private;
895 904
896 if (rdev->powered_down)
905 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
897 return 0; 906 return 0;
898 907
899 acquire_console_sem(); 908 acquire_console_sem();
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 1df4dc6c063c..d26dabf878d9 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -68,7 +68,7 @@ static void avivo_crtc_load_lut(struct drm_crtc *crtc)
68 WREG32(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id); 68 WREG32(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id);
69} 69}
70 70
71static void evergreen_crtc_load_lut(struct drm_crtc *crtc)
71static void dce4_crtc_load_lut(struct drm_crtc *crtc)
72{ 72{
73 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 73 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
74 struct drm_device *dev = crtc->dev; 74 struct drm_device *dev = crtc->dev;
@@ -98,6 +98,66 @@ static void evergreen_crtc_load_lut(struct drm_crtc *crtc)
98 } 98 }
99} 99}
100 100
101static void dce5_crtc_load_lut(struct drm_crtc *crtc)
102{
103 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
104 struct drm_device *dev = crtc->dev;
105 struct radeon_device *rdev = dev->dev_private;
106 int i;
107
108 DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
109
110 WREG32(NI_INPUT_CSC_CONTROL + radeon_crtc->crtc_offset,
111 (NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) |
112 NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS)));
113 WREG32(NI_PRESCALE_GRPH_CONTROL + radeon_crtc->crtc_offset,
114 NI_GRPH_PRESCALE_BYPASS);
115 WREG32(NI_PRESCALE_OVL_CONTROL + radeon_crtc->crtc_offset,
116 NI_OVL_PRESCALE_BYPASS);
117 WREG32(NI_INPUT_GAMMA_CONTROL + radeon_crtc->crtc_offset,
118 (NI_GRPH_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT) |
119 NI_OVL_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT)));
120
121 WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0);
122
123 WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
124 WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
125 WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);
126
127 WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
128 WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
129 WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);
130
131 WREG32(EVERGREEN_DC_LUT_RW_MODE + radeon_crtc->crtc_offset, 0);
132 WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007);
133
134 WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0);
135 for (i = 0; i < 256; i++) {
136 WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset,
137 (radeon_crtc->lut_r[i] << 20) |
138 (radeon_crtc->lut_g[i] << 10) |
139 (radeon_crtc->lut_b[i] << 0));
140 }
141
142 WREG32(NI_DEGAMMA_CONTROL + radeon_crtc->crtc_offset,
143 (NI_GRPH_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
144 NI_OVL_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
145 NI_ICON_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
146 NI_CURSOR_DEGAMMA_MODE(NI_DEGAMMA_BYPASS)));
147 WREG32(NI_GAMUT_REMAP_CONTROL + radeon_crtc->crtc_offset,
148 (NI_GRPH_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS) |
149 NI_OVL_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS)));
150 WREG32(NI_REGAMMA_CONTROL + radeon_crtc->crtc_offset,
151 (NI_GRPH_REGAMMA_MODE(NI_REGAMMA_BYPASS) |
152 NI_OVL_REGAMMA_MODE(NI_REGAMMA_BYPASS)));
153 WREG32(NI_OUTPUT_CSC_CONTROL + radeon_crtc->crtc_offset,
154 (NI_OUTPUT_CSC_GRPH_MODE(NI_OUTPUT_CSC_BYPASS) |
155 NI_OUTPUT_CSC_OVL_MODE(NI_OUTPUT_CSC_BYPASS)));
156 /* XXX match this to the depth of the crtc fmt block, move to modeset? */
157 WREG32(0x6940 + radeon_crtc->crtc_offset, 0);
158
159}
160
101static void legacy_crtc_load_lut(struct drm_crtc *crtc) 161static void legacy_crtc_load_lut(struct drm_crtc *crtc)
102{ 162{
103 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 163 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
@@ -130,8 +190,10 @@ void radeon_crtc_load_lut(struct drm_crtc *crtc)
130 if (!crtc->enabled) 190 if (!crtc->enabled)
131 return; 191 return;
132 192
133 if (ASIC_IS_DCE4(rdev))
134 evergreen_crtc_load_lut(crtc);
193 if (ASIC_IS_DCE5(rdev))
194 dce5_crtc_load_lut(crtc);
195 else if (ASIC_IS_DCE4(rdev))
196 dce4_crtc_load_lut(crtc);
135 else if (ASIC_IS_AVIVO(rdev)) 197 else if (ASIC_IS_AVIVO(rdev))
136 avivo_crtc_load_lut(crtc); 198 avivo_crtc_load_lut(crtc);
137 else 199 else
@@ -183,12 +245,272 @@ static void radeon_crtc_destroy(struct drm_crtc *crtc)
183 kfree(radeon_crtc); 245 kfree(radeon_crtc);
184} 246}
185 247
248/*
249 * Handle unpin events outside the interrupt handler proper.
250 */
251static void radeon_unpin_work_func(struct work_struct *__work)
252{
253 struct radeon_unpin_work *work =
254 container_of(__work, struct radeon_unpin_work, work);
255 int r;
256
257 /* unpin of the old buffer */
258 r = radeon_bo_reserve(work->old_rbo, false);
259 if (likely(r == 0)) {
260 r = radeon_bo_unpin(work->old_rbo);
261 if (unlikely(r != 0)) {
262 DRM_ERROR("failed to unpin buffer after flip\n");
263 }
264 radeon_bo_unreserve(work->old_rbo);
265 } else
266 DRM_ERROR("failed to reserve buffer after flip\n");
267 kfree(work);
268}
269
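radeon_unpin_work_func() recovers its radeon_unpin_work from the embedded work_struct via container_of(). A self-contained illustration of that pointer arithmetic (struct layout simplified; these are not the driver's types):

#include <stdio.h>
#include <stddef.h>

/* container_of(): step back from a member pointer to the enclosing
 * structure, exactly as the unpin work handler does below. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };
struct unpin_work {
        int crtc_id;
        struct work_struct work;
};

int main(void)
{
        struct unpin_work w = { .crtc_id = 2 };
        struct work_struct *inner = &w.work;
        struct unpin_work *outer =
                container_of(inner, struct unpin_work, work);
        printf("crtc %d\n", outer->crtc_id);
        return 0;
}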
270void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
271{
272 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
273 struct radeon_unpin_work *work;
274 struct drm_pending_vblank_event *e;
275 struct timeval now;
276 unsigned long flags;
277 u32 update_pending;
278 int vpos, hpos;
279
280 spin_lock_irqsave(&rdev->ddev->event_lock, flags);
281 work = radeon_crtc->unpin_work;
282 if (work == NULL ||
283 !radeon_fence_signaled(work->fence)) {
284 spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
285 return;
286 }
287 /* New pageflip, or just completion of a previous one? */
288 if (!radeon_crtc->deferred_flip_completion) {
289 /* do the flip (mmio) */
290 update_pending = radeon_page_flip(rdev, crtc_id, work->new_crtc_base);
291 } else {
292 /* This is just a completion of a flip queued in crtc
293 * at last invocation. Make sure we go directly to
294 * completion routine.
295 */
296 update_pending = 0;
297 radeon_crtc->deferred_flip_completion = 0;
298 }
299
300 /* Has the pageflip already completed in crtc, or is it certain
301 * to complete in this vblank?
302 */
303 if (update_pending &&
304 (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id,
305 &vpos, &hpos)) &&
306 (vpos >=0) &&
307 (vpos < (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100)) {
308 /* crtc didn't flip in this target vblank interval,
309 * but flip is pending in crtc. It will complete it
310 * in next vblank interval, so complete the flip at
311 * next vblank irq.
312 */
313 radeon_crtc->deferred_flip_completion = 1;
314 spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
315 return;
316 }
317
318 /* Pageflip (will be) certainly completed in this vblank. Clean up. */
319 radeon_crtc->unpin_work = NULL;
320
321 /* wakeup userspace */
322 if (work->event) {
323 e = work->event;
324 e->event.sequence = drm_vblank_count_and_time(rdev->ddev, crtc_id, &now);
325 e->event.tv_sec = now.tv_sec;
326 e->event.tv_usec = now.tv_usec;
327 list_add_tail(&e->base.link, &e->base.file_priv->event_list);
328 wake_up_interruptible(&e->base.file_priv->event_wait);
329 }
330 spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
331
332 drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id);
333 radeon_fence_unref(&work->fence);
334 radeon_post_page_flip(work->rdev, work->crtc_id);
335 schedule_work(&work->work);
336}
337
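The deferral test in radeon_crtc_handle_flip() can be read in isolation: if the flip is still pending in hardware and the scanout position is inside roughly the first 99% of the active area, the new base can only latch at the next vblank, so completion is postponed one interrupt. A sketch of just that predicate (function name invented):

#include <stdbool.h>
#include <stdio.h>

/* Defer completion when the flip is pending and the beam is still
 * within 99% of vdisplay, i.e. it cannot latch in this vblank. */
static bool must_defer(bool update_pending, bool pos_valid,
                       int vpos, int vdisplay)
{
        return update_pending && pos_valid &&
               vpos >= 0 && vpos < (99 * vdisplay) / 100;
}

int main(void)
{
        printf("%d\n", must_defer(true, true, 100, 1080));  /* 1: defer */
        printf("%d\n", must_defer(true, true, 1075, 1080)); /* 0: latches now */
        return 0;
}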
338static int radeon_crtc_page_flip(struct drm_crtc *crtc,
339 struct drm_framebuffer *fb,
340 struct drm_pending_vblank_event *event)
341{
342 struct drm_device *dev = crtc->dev;
343 struct radeon_device *rdev = dev->dev_private;
344 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
345 struct radeon_framebuffer *old_radeon_fb;
346 struct radeon_framebuffer *new_radeon_fb;
347 struct drm_gem_object *obj;
348 struct radeon_bo *rbo;
349 struct radeon_fence *fence;
350 struct radeon_unpin_work *work;
351 unsigned long flags;
352 u32 tiling_flags, pitch_pixels;
353 u64 base;
354 int r;
355
356 work = kzalloc(sizeof *work, GFP_KERNEL);
357 if (work == NULL)
358 return -ENOMEM;
359
360 r = radeon_fence_create(rdev, &fence);
361 if (unlikely(r != 0)) {
362 kfree(work);
363 DRM_ERROR("flip queue: failed to create fence.\n");
364 return -ENOMEM;
365 }
366 work->event = event;
367 work->rdev = rdev;
368 work->crtc_id = radeon_crtc->crtc_id;
369 work->fence = radeon_fence_ref(fence);
370 old_radeon_fb = to_radeon_framebuffer(crtc->fb);
371 new_radeon_fb = to_radeon_framebuffer(fb);
372 /* schedule unpin of the old buffer */
373 obj = old_radeon_fb->obj;
374 rbo = obj->driver_private;
375 work->old_rbo = rbo;
376 INIT_WORK(&work->work, radeon_unpin_work_func);
377
378 /* We borrow the event spin lock for protecting unpin_work */
379 spin_lock_irqsave(&dev->event_lock, flags);
380 if (radeon_crtc->unpin_work) {
381 spin_unlock_irqrestore(&dev->event_lock, flags);
382 kfree(work);
383 radeon_fence_unref(&fence);
384
385 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
386 return -EBUSY;
387 }
388 radeon_crtc->unpin_work = work;
389 radeon_crtc->deferred_flip_completion = 0;
390 spin_unlock_irqrestore(&dev->event_lock, flags);
391
392 /* pin the new buffer */
393 obj = new_radeon_fb->obj;
394 rbo = obj->driver_private;
395
396 DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n",
397 work->old_rbo, rbo);
398
399 r = radeon_bo_reserve(rbo, false);
400 if (unlikely(r != 0)) {
401 DRM_ERROR("failed to reserve new rbo buffer before flip\n");
402 goto pflip_cleanup;
403 }
404 r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &base);
405 if (unlikely(r != 0)) {
406 radeon_bo_unreserve(rbo);
407 r = -EINVAL;
408 DRM_ERROR("failed to pin new rbo buffer before flip\n");
409 goto pflip_cleanup;
410 }
411 radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
412 radeon_bo_unreserve(rbo);
413
414 if (!ASIC_IS_AVIVO(rdev)) {
415 /* crtc offset is from display base addr not FB location */
416 base -= radeon_crtc->legacy_display_base_addr;
417 pitch_pixels = fb->pitch / (fb->bits_per_pixel / 8);
418
419 if (tiling_flags & RADEON_TILING_MACRO) {
420 if (ASIC_IS_R300(rdev)) {
421 base &= ~0x7ff;
422 } else {
423 int byteshift = fb->bits_per_pixel >> 4;
424 int tile_addr = (((crtc->y >> 3) * pitch_pixels + crtc->x) >> (8 - byteshift)) << 11;
425 base += tile_addr + ((crtc->x << byteshift) % 256) + ((crtc->y % 8) << 8);
426 }
427 } else {
428 int offset = crtc->y * pitch_pixels + crtc->x;
429 switch (fb->bits_per_pixel) {
430 case 8:
431 default:
432 offset *= 1;
433 break;
434 case 15:
435 case 16:
436 offset *= 2;
437 break;
438 case 24:
439 offset *= 3;
440 break;
441 case 32:
442 offset *= 4;
443 break;
444 }
445 base += offset;
446 }
447 base &= ~7;
448 }
449
450 spin_lock_irqsave(&dev->event_lock, flags);
451 work->new_crtc_base = base;
452 spin_unlock_irqrestore(&dev->event_lock, flags);
453
454 /* update crtc fb */
455 crtc->fb = fb;
456
457 r = drm_vblank_get(dev, radeon_crtc->crtc_id);
458 if (r) {
459 DRM_ERROR("failed to get vblank before flip\n");
460 goto pflip_cleanup1;
461 }
462
463 /* 32 ought to cover us */
464 r = radeon_ring_lock(rdev, 32);
465 if (r) {
466 DRM_ERROR("failed to lock the ring before flip\n");
467 goto pflip_cleanup2;
468 }
469
470 /* emit the fence */
471 radeon_fence_emit(rdev, fence);
472 /* set the proper interrupt */
473 radeon_pre_page_flip(rdev, radeon_crtc->crtc_id);
474 /* fire the ring */
475 radeon_ring_unlock_commit(rdev);
476
477 return 0;
478
479pflip_cleanup2:
480 drm_vblank_put(dev, radeon_crtc->crtc_id);
481
482pflip_cleanup1:
483 r = radeon_bo_reserve(rbo, false);
484 if (unlikely(r != 0)) {
485 DRM_ERROR("failed to reserve new rbo in error path\n");
486 goto pflip_cleanup;
487 }
488 r = radeon_bo_unpin(rbo);
489 if (unlikely(r != 0)) {
490 radeon_bo_unreserve(rbo);
491 r = -EINVAL;
492 DRM_ERROR("failed to unpin new rbo in error path\n");
493 goto pflip_cleanup;
494 }
495 radeon_bo_unreserve(rbo);
496
497pflip_cleanup:
498 spin_lock_irqsave(&dev->event_lock, flags);
499 radeon_crtc->unpin_work = NULL;
500 spin_unlock_irqrestore(&dev->event_lock, flags);
501 radeon_fence_unref(&fence);
502 kfree(work);
503
504 return r;
505}
506
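For the untiled legacy case above, the offset folded into the scanout base is plain row-major byte arithmetic; the switch over bits_per_pixel only picks the bytes-per-pixel multiplier (with 15 bpp stored in 2 bytes). A standalone restatement (helper name invented):

#include <stdint.h>
#include <stdio.h>

/* Untiled scanout offset for the legacy (pre-AVIVO) path, in bytes.
 * pitch_pixels is the framebuffer pitch in pixels. */
static uint64_t linear_offset(int x, int y, int pitch_pixels, int bpp)
{
        int bytes = (bpp == 15) ? 2 : bpp / 8;
        return ((uint64_t)y * pitch_pixels + x) * bytes;
}

int main(void)
{
        /* pixel (10, 2) of a 1024-pixel-pitch, 32 bpp surface */
        printf("%llu\n",
               (unsigned long long)linear_offset(10, 2, 1024, 32));
        return 0;
}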
186static const struct drm_crtc_funcs radeon_crtc_funcs = { 507static const struct drm_crtc_funcs radeon_crtc_funcs = {
187 .cursor_set = radeon_crtc_cursor_set, 508 .cursor_set = radeon_crtc_cursor_set,
188 .cursor_move = radeon_crtc_cursor_move, 509 .cursor_move = radeon_crtc_cursor_move,
189 .gamma_set = radeon_crtc_gamma_set, 510 .gamma_set = radeon_crtc_gamma_set,
190 .set_config = drm_crtc_helper_set_config, 511 .set_config = drm_crtc_helper_set_config,
191 .destroy = radeon_crtc_destroy, 512 .destroy = radeon_crtc_destroy,
513 .page_flip = radeon_crtc_page_flip,
192}; 514};
193 515
194static void radeon_crtc_init(struct drm_device *dev, int index) 516static void radeon_crtc_init(struct drm_device *dev, int index)
@@ -225,7 +547,7 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
225 radeon_legacy_init_crtc(dev, radeon_crtc); 547 radeon_legacy_init_crtc(dev, radeon_crtc);
226} 548}
227 549
228static const char *encoder_names[34] = {
550static const char *encoder_names[36] = {
229 "NONE", 551 "NONE",
230 "INTERNAL_LVDS", 552 "INTERNAL_LVDS",
231 "INTERNAL_TMDS1", 553 "INTERNAL_TMDS1",
@@ -260,6 +582,8 @@ static const char *encoder_names[34] = {
260 "INTERNAL_KLDSCP_LVTMA", 582 "INTERNAL_KLDSCP_LVTMA",
261 "INTERNAL_UNIPHY1", 583 "INTERNAL_UNIPHY1",
262 "INTERNAL_UNIPHY2", 584 "INTERNAL_UNIPHY2",
585 "NUTMEG",
586 "TRAVIS",
263}; 587};
264 588
265static const char *connector_names[15] = { 589static const char *connector_names[15] = {
@@ -417,9 +741,17 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
417 if (!radeon_connector->edid) { 741 if (!radeon_connector->edid) {
418 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); 742 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
419 } 743 }
420 /* some servers provide a hardcoded edid in rom for KVMs */
421 if (!radeon_connector->edid)
422 radeon_connector->edid = radeon_combios_get_hardcoded_edid(rdev);
744
745 if (!radeon_connector->edid) {
746 if (rdev->is_atom_bios) {
747 /* some laptops provide a hardcoded edid in rom for LCDs */
748 if (((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_LVDS) ||
749 (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)))
750 radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
751 } else
752 /* some servers provide a hardcoded edid in rom for KVMs */
753 radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
754 }
423 if (radeon_connector->edid) { 755 if (radeon_connector->edid) {
424 drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid); 756 drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
425 ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid); 757 ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
@@ -849,7 +1181,10 @@ int radeon_modeset_init(struct radeon_device *rdev)
849 1181
850 rdev->ddev->mode_config.funcs = (void *)&radeon_mode_funcs; 1182 rdev->ddev->mode_config.funcs = (void *)&radeon_mode_funcs;
851 1183
852 if (ASIC_IS_AVIVO(rdev)) {
1184 if (ASIC_IS_DCE5(rdev)) {
1185 rdev->ddev->mode_config.max_width = 16384;
1186 rdev->ddev->mode_config.max_height = 16384;
1187 } else if (ASIC_IS_AVIVO(rdev)) {
853 rdev->ddev->mode_config.max_width = 8192; 1188 rdev->ddev->mode_config.max_width = 8192;
854 rdev->ddev->mode_config.max_height = 8192; 1189 rdev->ddev->mode_config.max_height = 8192;
855 } else { 1190 } else {
@@ -1019,7 +1354,7 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
1019/* 1354/*
1020 * Retrieve current video scanout position of crtc on a given gpu. 1355 * Retrieve current video scanout position of crtc on a given gpu.
1021 * 1356 *
1022 * \param rdev Device to query.
1357 * \param dev Device to query.
1023 * \param crtc Crtc to query. 1358 * \param crtc Crtc to query.
1024 * \param *vpos Location where vertical scanout position should be stored. 1359 * \param *vpos Location where vertical scanout position should be stored.
1025 * \param *hpos Location where horizontal scanout position should go. 1360 * \param *hpos Location where horizontal scanout position should go.
@@ -1031,72 +1366,74 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
1031 * 1366 *
1032 * \return Flags, or'ed together as follows: 1367 * \return Flags, or'ed together as follows:
1033 * 1368 *
1034 * RADEON_SCANOUTPOS_VALID = Query successful.
1035 * RADEON_SCANOUTPOS_INVBL = Inside vblank.
1036 * RADEON_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
1369 * DRM_SCANOUTPOS_VALID = Query successful.
1370 * DRM_SCANOUTPOS_INVBL = Inside vblank.
1371 * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
1037 * this flag means that returned position may be offset by a constant but 1372 * this flag means that returned position may be offset by a constant but
1038 * unknown small number of scanlines wrt. real scanout position. 1373 * unknown small number of scanlines wrt. real scanout position.
1039 * 1374 *
1040 */ 1375 */
1041int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, int *hpos)
1376int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, int *vpos, int *hpos)
1042{ 1377{
1043 u32 stat_crtc = 0, vbl = 0, position = 0; 1378 u32 stat_crtc = 0, vbl = 0, position = 0;
1044 int vbl_start, vbl_end, vtotal, ret = 0; 1379 int vbl_start, vbl_end, vtotal, ret = 0;
1045 bool in_vbl = true; 1380 bool in_vbl = true;
1046 1381
1382 struct radeon_device *rdev = dev->dev_private;
1383
1047 if (ASIC_IS_DCE4(rdev)) { 1384 if (ASIC_IS_DCE4(rdev)) {
1048 if (crtc == 0) { 1385 if (crtc == 0) {
1049 vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + 1386 vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
1050 EVERGREEN_CRTC0_REGISTER_OFFSET); 1387 EVERGREEN_CRTC0_REGISTER_OFFSET);
1051 position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + 1388 position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
1052 EVERGREEN_CRTC0_REGISTER_OFFSET); 1389 EVERGREEN_CRTC0_REGISTER_OFFSET);
1053 ret |= RADEON_SCANOUTPOS_VALID;
1390 ret |= DRM_SCANOUTPOS_VALID;
1054 } 1391 }
1055 if (crtc == 1) { 1392 if (crtc == 1) {
1056 vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + 1393 vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
1057 EVERGREEN_CRTC1_REGISTER_OFFSET); 1394 EVERGREEN_CRTC1_REGISTER_OFFSET);
1058 position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + 1395 position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
1059 EVERGREEN_CRTC1_REGISTER_OFFSET); 1396 EVERGREEN_CRTC1_REGISTER_OFFSET);
1060 ret |= RADEON_SCANOUTPOS_VALID;
1397 ret |= DRM_SCANOUTPOS_VALID;
1061 } 1398 }
1062 if (crtc == 2) { 1399 if (crtc == 2) {
1063 vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + 1400 vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
1064 EVERGREEN_CRTC2_REGISTER_OFFSET); 1401 EVERGREEN_CRTC2_REGISTER_OFFSET);
1065 position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + 1402 position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
1066 EVERGREEN_CRTC2_REGISTER_OFFSET); 1403 EVERGREEN_CRTC2_REGISTER_OFFSET);
1067 ret |= RADEON_SCANOUTPOS_VALID;
1404 ret |= DRM_SCANOUTPOS_VALID;
1068 } 1405 }
1069 if (crtc == 3) { 1406 if (crtc == 3) {
1070 vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + 1407 vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
1071 EVERGREEN_CRTC3_REGISTER_OFFSET); 1408 EVERGREEN_CRTC3_REGISTER_OFFSET);
1072 position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + 1409 position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
1073 EVERGREEN_CRTC3_REGISTER_OFFSET); 1410 EVERGREEN_CRTC3_REGISTER_OFFSET);
1074 ret |= RADEON_SCANOUTPOS_VALID;
1411 ret |= DRM_SCANOUTPOS_VALID;
1075 } 1412 }
1076 if (crtc == 4) { 1413 if (crtc == 4) {
1077 vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + 1414 vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
1078 EVERGREEN_CRTC4_REGISTER_OFFSET); 1415 EVERGREEN_CRTC4_REGISTER_OFFSET);
1079 position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + 1416 position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
1080 EVERGREEN_CRTC4_REGISTER_OFFSET); 1417 EVERGREEN_CRTC4_REGISTER_OFFSET);
1081 ret |= RADEON_SCANOUTPOS_VALID;
1418 ret |= DRM_SCANOUTPOS_VALID;
1082 } 1419 }
1083 if (crtc == 5) { 1420 if (crtc == 5) {
1084 vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + 1421 vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
1085 EVERGREEN_CRTC5_REGISTER_OFFSET); 1422 EVERGREEN_CRTC5_REGISTER_OFFSET);
1086 position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + 1423 position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
1087 EVERGREEN_CRTC5_REGISTER_OFFSET); 1424 EVERGREEN_CRTC5_REGISTER_OFFSET);
1088 ret |= RADEON_SCANOUTPOS_VALID;
1425 ret |= DRM_SCANOUTPOS_VALID;
1089 } 1426 }
1090 } else if (ASIC_IS_AVIVO(rdev)) { 1427 } else if (ASIC_IS_AVIVO(rdev)) {
1091 if (crtc == 0) { 1428 if (crtc == 0) {
1092 vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END); 1429 vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END);
1093 position = RREG32(AVIVO_D1CRTC_STATUS_POSITION); 1430 position = RREG32(AVIVO_D1CRTC_STATUS_POSITION);
1094 ret |= RADEON_SCANOUTPOS_VALID;
1431 ret |= DRM_SCANOUTPOS_VALID;
1095 } 1432 }
1096 if (crtc == 1) { 1433 if (crtc == 1) {
1097 vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END); 1434 vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END);
1098 position = RREG32(AVIVO_D2CRTC_STATUS_POSITION); 1435 position = RREG32(AVIVO_D2CRTC_STATUS_POSITION);
1099 ret |= RADEON_SCANOUTPOS_VALID;
1436 ret |= DRM_SCANOUTPOS_VALID;
1100 } 1437 }
1101 } else { 1438 } else {
1102 /* Pre-AVIVO: Different encoding of scanout pos and vblank interval. */ 1439 /* Pre-AVIVO: Different encoding of scanout pos and vblank interval. */
@@ -1112,7 +1449,7 @@ int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos,
1112 if (!(stat_crtc & 1)) 1449 if (!(stat_crtc & 1))
1113 in_vbl = false; 1450 in_vbl = false;
1114 1451
1115 ret |= RADEON_SCANOUTPOS_VALID;
1452 ret |= DRM_SCANOUTPOS_VALID;
1116 } 1453 }
1117 if (crtc == 1) { 1454 if (crtc == 1) {
1118 vbl = (RREG32(RADEON_CRTC2_V_TOTAL_DISP) & 1455 vbl = (RREG32(RADEON_CRTC2_V_TOTAL_DISP) &
@@ -1122,7 +1459,7 @@ int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos,
1122 if (!(stat_crtc & 1)) 1459 if (!(stat_crtc & 1))
1123 in_vbl = false; 1460 in_vbl = false;
1124 1461
1125 ret |= RADEON_SCANOUTPOS_VALID;
1462 ret |= DRM_SCANOUTPOS_VALID;
1126 } 1463 }
1127 } 1464 }
1128 1465
@@ -1133,13 +1470,13 @@ int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos,
1133 /* Valid vblank area boundaries from gpu retrieved? */ 1470 /* Valid vblank area boundaries from gpu retrieved? */
1134 if (vbl > 0) { 1471 if (vbl > 0) {
1135 /* Yes: Decode. */ 1472 /* Yes: Decode. */
1136 ret |= RADEON_SCANOUTPOS_ACCURATE;
1473 ret |= DRM_SCANOUTPOS_ACCURATE;
1137 vbl_start = vbl & 0x1fff; 1474 vbl_start = vbl & 0x1fff;
1138 vbl_end = (vbl >> 16) & 0x1fff; 1475 vbl_end = (vbl >> 16) & 0x1fff;
1139 } 1476 }
1140 else { 1477 else {
1141 /* No: Fake something reasonable which gives at least ok results. */ 1478 /* No: Fake something reasonable which gives at least ok results. */
1142 vbl_start = rdev->mode_info.crtcs[crtc]->base.mode.crtc_vdisplay;
1479 vbl_start = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay;
1143 vbl_end = 0; 1480 vbl_end = 0;
1144 } 1481 }
1145 1482
@@ -1155,7 +1492,7 @@ int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos,
1155 1492
1156 /* Inside "upper part" of vblank area? Apply corrective offset if so: */ 1493 /* Inside "upper part" of vblank area? Apply corrective offset if so: */
1157 if (in_vbl && (*vpos >= vbl_start)) { 1494 if (in_vbl && (*vpos >= vbl_start)) {
1158 vtotal = rdev->mode_info.crtcs[crtc]->base.mode.crtc_vtotal;
1495 vtotal = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal;
1159 *vpos = *vpos - vtotal; 1496 *vpos = *vpos - vtotal;
1160 } 1497 }
1161 1498
@@ -1164,7 +1501,7 @@ int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos,
1164 1501
1165 /* In vblank? */ 1502 /* In vblank? */
1166 if (in_vbl) 1503 if (in_vbl)
1167 ret |= RADEON_SCANOUTPOS_INVBL;
1504 ret |= DRM_SCANOUTPOS_INVBL;
1168 1505
1169 return ret; 1506 return ret;
1170} 1507}
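The vblank decode at the end of radeon_get_crtc_scanoutpos() unpacks two 13-bit scanline numbers from one register and reports positions inside the upper part of vblank as negative distances to the start of active scanout. A minimal sketch of both steps (register layout as used in the hunk; helper names invented):

#include <stdint.h>
#include <stdio.h>

/* V_BLANK_START_END packs vbl_start in the low half and vbl_end in
 * the high half, 13 bits each. */
static void decode_vbl(uint32_t vbl, int *vbl_start, int *vbl_end)
{
        *vbl_start = vbl & 0x1fff;
        *vbl_end = (vbl >> 16) & 0x1fff;
}

/* Positions at or past vbl_start become negative offsets relative to
 * the start of the next active scanout, as the corrective step does. */
static int normalize_vpos(int vpos, int vbl_start, int vtotal)
{
        return (vpos >= vbl_start) ? vpos - vtotal : vpos;
}

int main(void)
{
        int start, end;
        decode_vbl((1125u << 16) | 1080u, &start, &end);
        printf("vbl %d..%d, vpos 1100 -> %d\n",
               start, end, normalize_vpos(1100, start, 1125));
        return 0;
}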
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 60e689f2d048..be5cb4f28c29 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -48,9 +48,10 @@
48 * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen 48 * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen
49 * - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500) 49 * - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500)
50 * 2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs 50 * 2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs
51 * 2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf, r300->r500 CMASK
51 */ 52 */
52#define KMS_DRIVER_MAJOR 2 53#define KMS_DRIVER_MAJOR 2
53#define KMS_DRIVER_MINOR 7
54#define KMS_DRIVER_MINOR 8
54#define KMS_DRIVER_PATCHLEVEL 0 55#define KMS_DRIVER_PATCHLEVEL 0
55int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 56int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
56int radeon_driver_unload_kms(struct drm_device *dev); 57int radeon_driver_unload_kms(struct drm_device *dev);
@@ -66,6 +67,10 @@ int radeon_resume_kms(struct drm_device *dev);
66u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc); 67u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc);
67int radeon_enable_vblank_kms(struct drm_device *dev, int crtc); 68int radeon_enable_vblank_kms(struct drm_device *dev, int crtc);
68void radeon_disable_vblank_kms(struct drm_device *dev, int crtc); 69void radeon_disable_vblank_kms(struct drm_device *dev, int crtc);
70int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
71 int *max_error,
72 struct timeval *vblank_time,
73 unsigned flags);
69void radeon_driver_irq_preinstall_kms(struct drm_device *dev); 74void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
70int radeon_driver_irq_postinstall_kms(struct drm_device *dev); 75int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
71void radeon_driver_irq_uninstall_kms(struct drm_device *dev); 76void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
@@ -74,6 +79,8 @@ int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
74 struct drm_file *file_priv); 79 struct drm_file *file_priv);
75int radeon_gem_object_init(struct drm_gem_object *obj); 80int radeon_gem_object_init(struct drm_gem_object *obj);
76void radeon_gem_object_free(struct drm_gem_object *obj); 81void radeon_gem_object_free(struct drm_gem_object *obj);
82extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
83 int *vpos, int *hpos);
77extern struct drm_ioctl_desc radeon_ioctls_kms[]; 84extern struct drm_ioctl_desc radeon_ioctls_kms[];
78extern int radeon_max_kms_ioctl; 85extern int radeon_max_kms_ioctl;
79int radeon_mmap(struct file *filp, struct vm_area_struct *vma); 86int radeon_mmap(struct file *filp, struct vm_area_struct *vma);
@@ -296,6 +303,8 @@ static struct drm_driver kms_driver = {
296 .get_vblank_counter = radeon_get_vblank_counter_kms, 303 .get_vblank_counter = radeon_get_vblank_counter_kms,
297 .enable_vblank = radeon_enable_vblank_kms, 304 .enable_vblank = radeon_enable_vblank_kms,
298 .disable_vblank = radeon_disable_vblank_kms, 305 .disable_vblank = radeon_disable_vblank_kms,
306 .get_vblank_timestamp = radeon_get_vblank_timestamp_kms,
307 .get_scanout_position = radeon_get_crtc_scanoutpos,
299#if defined(CONFIG_DEBUG_FS) 308#if defined(CONFIG_DEBUG_FS)
300 .debugfs_init = radeon_debugfs_init, 309 .debugfs_init = radeon_debugfs_init,
301 .debugfs_cleanup = radeon_debugfs_cleanup, 310 .debugfs_cleanup = radeon_debugfs_cleanup,
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 041943df966b..8fd184286c0b 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -641,7 +641,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
641 switch (connector->connector_type) { 641 switch (connector->connector_type) {
642 case DRM_MODE_CONNECTOR_DVII: 642 case DRM_MODE_CONNECTOR_DVII:
643 case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */ 643 case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
644 if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
644 if (drm_detect_monitor_audio(radeon_connector->edid)) {
645 /* fix me */ 645 /* fix me */
646 if (ASIC_IS_DCE4(rdev)) 646 if (ASIC_IS_DCE4(rdev))
647 return ATOM_ENCODER_MODE_DVI; 647 return ATOM_ENCODER_MODE_DVI;
@@ -655,7 +655,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
655 case DRM_MODE_CONNECTOR_DVID: 655 case DRM_MODE_CONNECTOR_DVID:
656 case DRM_MODE_CONNECTOR_HDMIA: 656 case DRM_MODE_CONNECTOR_HDMIA:
657 default: 657 default:
658 if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
658 if (drm_detect_monitor_audio(radeon_connector->edid)) {
659 /* fix me */ 659 /* fix me */
660 if (ASIC_IS_DCE4(rdev)) 660 if (ASIC_IS_DCE4(rdev))
661 return ATOM_ENCODER_MODE_DVI; 661 return ATOM_ENCODER_MODE_DVI;
@@ -673,7 +673,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
673 if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || 673 if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
674 (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) 674 (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
675 return ATOM_ENCODER_MODE_DP; 675 return ATOM_ENCODER_MODE_DP;
676 else if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
676 else if (drm_detect_monitor_audio(radeon_connector->edid)) {
677 /* fix me */ 677 /* fix me */
678 if (ASIC_IS_DCE4(rdev)) 678 if (ASIC_IS_DCE4(rdev))
679 return ATOM_ENCODER_MODE_DVI; 679 return ATOM_ENCODER_MODE_DVI;
@@ -712,8 +712,8 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
712 * - 2 DIG encoder blocks. 712 * - 2 DIG encoder blocks.
713 * DIG1/2 can drive UNIPHY0/1/2 link A or link B 713 * DIG1/2 can drive UNIPHY0/1/2 link A or link B
714 * 714 *
715 * DCE 4.0
716 * - 3 DIG transmitter blocks UNPHY0/1/2 (links A and B).
715 * DCE 4.0/5.0
716 * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
717 * Supports up to 6 digital outputs 717 * Supports up to 6 digital outputs
718 * - 6 DIG encoder blocks. 718 * - 6 DIG encoder blocks.
719 * - DIG to PHY mapping is hardcoded 719 * - DIG to PHY mapping is hardcoded
@@ -724,6 +724,12 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
724 * DIG5 drives UNIPHY2 link A, A+B 724 * DIG5 drives UNIPHY2 link A, A+B
725 * DIG6 drives UNIPHY2 link B 725 * DIG6 drives UNIPHY2 link B
726 * 726 *
727 * DCE 4.1
728 * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
729 * Supports up to 6 digital outputs
730 * - 2 DIG encoder blocks.
731 * DIG1/2 can drive UNIPHY0/1/2 link A or link B
732 *
727 * Routing 733 * Routing
728 * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links) 734 * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links)
729 * Examples: 735 * Examples:
@@ -737,6 +743,7 @@ union dig_encoder_control {
737 DIG_ENCODER_CONTROL_PS_ALLOCATION v1; 743 DIG_ENCODER_CONTROL_PS_ALLOCATION v1;
738 DIG_ENCODER_CONTROL_PARAMETERS_V2 v2; 744 DIG_ENCODER_CONTROL_PARAMETERS_V2 v2;
739 DIG_ENCODER_CONTROL_PARAMETERS_V3 v3; 745 DIG_ENCODER_CONTROL_PARAMETERS_V3 v3;
746 DIG_ENCODER_CONTROL_PARAMETERS_V4 v4;
740}; 747};
741 748
742void 749void
@@ -752,6 +759,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
752 uint8_t frev, crev; 759 uint8_t frev, crev;
753 int dp_clock = 0; 760 int dp_clock = 0;
754 int dp_lane_count = 0; 761 int dp_lane_count = 0;
762 int hpd_id = RADEON_HPD_NONE;
755 763
756 if (connector) { 764 if (connector) {
757 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 765 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -760,6 +768,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
760 768
761 dp_clock = dig_connector->dp_clock; 769 dp_clock = dig_connector->dp_clock;
762 dp_lane_count = dig_connector->dp_lane_count; 770 dp_lane_count = dig_connector->dp_lane_count;
771 hpd_id = radeon_connector->hpd.hpd;
763 } 772 }
764 773
765 /* no dig encoder assigned */ 774 /* no dig encoder assigned */
@@ -784,19 +793,36 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
784 args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); 793 args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
785 args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder); 794 args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder);
786 795
787 if (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) { 796 if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) ||
788 if (dp_clock == 270000) 797 (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP_MST))
789 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
790 args.v1.ucLaneNum = dp_lane_count; 798 args.v1.ucLaneNum = dp_lane_count;
791 } else if (radeon_encoder->pixel_clock > 165000) 799 else if (radeon_encoder->pixel_clock > 165000)
792 args.v1.ucLaneNum = 8; 800 args.v1.ucLaneNum = 8;
793 else 801 else
794 args.v1.ucLaneNum = 4; 802 args.v1.ucLaneNum = 4;
795 803
796 if (ASIC_IS_DCE4(rdev)) { 804 if (ASIC_IS_DCE5(rdev)) {
805 if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) ||
806 (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP_MST)) {
807 if (dp_clock == 270000)
808 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ;
809 else if (dp_clock == 540000)
810 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ;
811 }
812 args.v4.acConfig.ucDigSel = dig->dig_encoder;
813 args.v4.ucBitPerColor = PANEL_8BIT_PER_COLOR;
814 if (hpd_id == RADEON_HPD_NONE)
815 args.v4.ucHPD_ID = 0;
816 else
817 args.v4.ucHPD_ID = hpd_id + 1;
818 } else if (ASIC_IS_DCE4(rdev)) {
819 if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) && (dp_clock == 270000))
820 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
797 args.v3.acConfig.ucDigSel = dig->dig_encoder; 821 args.v3.acConfig.ucDigSel = dig->dig_encoder;
798 args.v3.ucBitPerColor = PANEL_8BIT_PER_COLOR; 822 args.v3.ucBitPerColor = PANEL_8BIT_PER_COLOR;
799 } else { 823 } else {
824 if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) && (dp_clock == 270000))
825 args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
800 switch (radeon_encoder->encoder_id) { 826 switch (radeon_encoder->encoder_id) {
801 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 827 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
802 args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1; 828 args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
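
The link-rate handling differs per table revision: the v1 and v3 paths only know the 2.70 GHz flag, while the DCE5 v4 table adds 5.40 GHz. A standalone sketch of the selection (the 1.62 GHz fallback is an assumption: it is what the tables default to when no rate flag is set):

    enum dp_rate { DP_RATE_1_62, DP_RATE_2_70, DP_RATE_5_40 };

    static enum dp_rate dp_link_rate(int dp_clock, int has_v4_table)
    {
            if (has_v4_table && dp_clock == 540000)
                    return DP_RATE_5_40;    /* DCE5/v4 table only */
            if (dp_clock == 270000)
                    return DP_RATE_2_70;
            return DP_RATE_1_62;            /* table default, no flag set */
    }
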
@@ -823,6 +849,7 @@ union dig_transmitter_control {
823 DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1; 849 DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1;
824 DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2; 850 DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
825 DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3; 851 DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3;
852 DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 v4;
826}; 853};
827 854
828void 855void
@@ -917,10 +944,18 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
917 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); 944 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
918 pll_id = radeon_crtc->pll_id; 945 pll_id = radeon_crtc->pll_id;
919 } 946 }
920 if (is_dp && rdev->clock.dp_extclk) 947
921 args.v3.acConfig.ucRefClkSource = 2; /* external src */ 948 if (ASIC_IS_DCE5(rdev)) {
922 else 949 if (is_dp && rdev->clock.dp_extclk)
923 args.v3.acConfig.ucRefClkSource = pll_id; 950 args.v4.acConfig.ucRefClkSource = 3; /* external src */
951 else
952 args.v4.acConfig.ucRefClkSource = pll_id;
953 } else {
954 if (is_dp && rdev->clock.dp_extclk)
955 args.v3.acConfig.ucRefClkSource = 2; /* external src */
956 else
957 args.v3.acConfig.ucRefClkSource = pll_id;
958 }
924 959
925 switch (radeon_encoder->encoder_id) { 960 switch (radeon_encoder->encoder_id) {
926 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 961 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
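
The reference-clock selection above boils down to: DP with an external clock uses a dedicated source id (which moved from 2 in the v3 table to 3 in the DCE5 v4 table), and everything else reuses the PLL already driving the CRTC. As a standalone sketch:

    static int ref_clk_source(int is_dp, int has_ext_clk, int is_v4, int pll_id)
    {
            if (is_dp && has_ext_clk)
                    return is_v4 ? 3 : 2;   /* external source id per table rev */
            return pll_id;                  /* reuse the CRTC's display PLL */
    }
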
@@ -1044,6 +1079,7 @@ atombios_set_edp_panel_power(struct drm_connector *connector, int action)
1044 1079
1045union external_encoder_control { 1080union external_encoder_control {
1046 EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION v1; 1081 EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION v1;
1082 EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3 v3;
1047}; 1083};
1048 1084
1049static void 1085static void
@@ -1054,6 +1090,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
1054 struct drm_device *dev = encoder->dev; 1090 struct drm_device *dev = encoder->dev;
1055 struct radeon_device *rdev = dev->dev_private; 1091 struct radeon_device *rdev = dev->dev_private;
1056 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1092 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1093 struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder);
1057 union external_encoder_control args; 1094 union external_encoder_control args;
1058 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 1095 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
1059 int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl); 1096 int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl);
@@ -1061,6 +1098,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
1061 int dp_clock = 0; 1098 int dp_clock = 0;
1062 int dp_lane_count = 0; 1099 int dp_lane_count = 0;
1063 int connector_object_id = 0; 1100 int connector_object_id = 0;
1101 u32 ext_enum = (ext_radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
1064 1102
1065 if (connector) { 1103 if (connector) {
1066 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 1104 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -1099,6 +1137,37 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
1099 else 1137 else
1100 args.v1.sDigEncoder.ucLaneNum = 4; 1138 args.v1.sDigEncoder.ucLaneNum = 4;
1101 break; 1139 break;
1140 case 3:
1141 args.v3.sExtEncoder.ucAction = action;
1142 if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
1143 args.v3.sExtEncoder.usConnectorId = connector_object_id;
1144 else
1145 args.v3.sExtEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
1146 args.v3.sExtEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);
1147
1148 if (args.v3.sExtEncoder.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
1149 if (dp_clock == 270000)
1150 args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
1151 else if (dp_clock == 540000)
1152 args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ;
1153 args.v3.sExtEncoder.ucLaneNum = dp_lane_count;
1154 } else if (radeon_encoder->pixel_clock > 165000)
1155 args.v3.sExtEncoder.ucLaneNum = 8;
1156 else
1157 args.v3.sExtEncoder.ucLaneNum = 4;
1158 switch (ext_enum) {
1159 case GRAPH_OBJECT_ENUM_ID1:
1160 args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER1;
1161 break;
1162 case GRAPH_OBJECT_ENUM_ID2:
1163 args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER2;
1164 break;
1165 case GRAPH_OBJECT_ENUM_ID3:
1166 args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER3;
1167 break;
1168 }
1169 args.v3.sExtEncoder.ucBitPerColor = PANEL_8BIT_PER_COLOR;
1170 break;
1102 default: 1171 default:
1103 DRM_ERROR("Unknown table version: %d, %d\n", frev, crev); 1172 DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
1104 return; 1173 return;
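
Note how the v3 external-encoder table reuses its first parameter word: for EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT it carries the connector object id, while every other action passes the pixel clock in 10 kHz units; the target encoder slot (ENCODER1/2/3) is then picked from the enum id packed into encoder_enum.
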
@@ -1158,6 +1227,8 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
1158 DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args; 1227 DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args;
1159 int index = 0; 1228 int index = 0;
1160 bool is_dig = false; 1229 bool is_dig = false;
1230 bool is_dce5_dac = false;
1231 bool is_dce5_dvo = false;
1161 1232
1162 memset(&args, 0, sizeof(args)); 1233 memset(&args, 0, sizeof(args));
1163 1234
@@ -1180,7 +1251,9 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
1180 index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl); 1251 index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
1181 break; 1252 break;
1182 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: 1253 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
1183 if (ASIC_IS_DCE3(rdev)) 1254 if (ASIC_IS_DCE5(rdev))
1255 is_dce5_dvo = true;
1256 else if (ASIC_IS_DCE3(rdev))
1184 is_dig = true; 1257 is_dig = true;
1185 else 1258 else
1186 index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl); 1259 index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
@@ -1196,12 +1269,16 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
1196 break; 1269 break;
1197 case ENCODER_OBJECT_ID_INTERNAL_DAC1: 1270 case ENCODER_OBJECT_ID_INTERNAL_DAC1:
1198 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: 1271 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
1199 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) 1272 if (ASIC_IS_DCE5(rdev))
1200 index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl); 1273 is_dce5_dac = true;
1201 else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) 1274 else {
1202 index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl); 1275 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
1203 else 1276 index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
1204 index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl); 1277 else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
1278 index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
1279 else
1280 index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl);
1281 }
1205 break; 1282 break;
1206 case ENCODER_OBJECT_ID_INTERNAL_DAC2: 1283 case ENCODER_OBJECT_ID_INTERNAL_DAC2:
1207 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: 1284 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
@@ -1260,6 +1337,28 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
1260 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0); 1337 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
1261 break; 1338 break;
1262 } 1339 }
1340 } else if (is_dce5_dac) {
1341 switch (mode) {
1342 case DRM_MODE_DPMS_ON:
1343 atombios_dac_setup(encoder, ATOM_ENABLE);
1344 break;
1345 case DRM_MODE_DPMS_STANDBY:
1346 case DRM_MODE_DPMS_SUSPEND:
1347 case DRM_MODE_DPMS_OFF:
1348 atombios_dac_setup(encoder, ATOM_DISABLE);
1349 break;
1350 }
1351 } else if (is_dce5_dvo) {
1352 switch (mode) {
1353 case DRM_MODE_DPMS_ON:
1354 atombios_dvo_setup(encoder, ATOM_ENABLE);
1355 break;
1356 case DRM_MODE_DPMS_STANDBY:
1357 case DRM_MODE_DPMS_SUSPEND:
1358 case DRM_MODE_DPMS_OFF:
1359 atombios_dvo_setup(encoder, ATOM_DISABLE);
1360 break;
1361 }
1263 } else { 1362 } else {
1264 switch (mode) { 1363 switch (mode) {
1265 case DRM_MODE_DPMS_ON: 1364 case DRM_MODE_DPMS_ON:
@@ -1289,12 +1388,18 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
1289 switch (mode) { 1388 switch (mode) {
1290 case DRM_MODE_DPMS_ON: 1389 case DRM_MODE_DPMS_ON:
1291 default: 1390 default:
1292 action = ATOM_ENABLE; 1391 if (ASIC_IS_DCE41(rdev))
1392 action = EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT;
1393 else
1394 action = ATOM_ENABLE;
1293 break; 1395 break;
1294 case DRM_MODE_DPMS_STANDBY: 1396 case DRM_MODE_DPMS_STANDBY:
1295 case DRM_MODE_DPMS_SUSPEND: 1397 case DRM_MODE_DPMS_SUSPEND:
1296 case DRM_MODE_DPMS_OFF: 1398 case DRM_MODE_DPMS_OFF:
1297 action = ATOM_DISABLE; 1399 if (ASIC_IS_DCE41(rdev))
1400 action = EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT;
1401 else
1402 action = ATOM_DISABLE;
1298 break; 1403 break;
1299 } 1404 }
1300 atombios_external_encoder_setup(encoder, ext_encoder, action); 1405 atombios_external_encoder_setup(encoder, ext_encoder, action);
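
For DPMS the three low-power states always collapse into a single disable; only the action token differs between the legacy tables and the DCE4.1 v3 external-encoder table. A standalone sketch with local stand-ins for the ATOM constants:

    enum atom_action { ACT_ENABLE, ACT_DISABLE,
                       ACT_V3_ENABLE_OUTPUT, ACT_V3_DISABLE_OUTPUT };

    static enum atom_action dpms_to_action(int dpms_on, int is_dce41)
    {
            if (is_dce41)   /* v3 external encoder table */
                    return dpms_on ? ACT_V3_ENABLE_OUTPUT : ACT_V3_DISABLE_OUTPUT;
            return dpms_on ? ACT_ENABLE : ACT_DISABLE;
    }
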
@@ -1483,27 +1588,35 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
1483 struct radeon_encoder_atom_dig *dig; 1588 struct radeon_encoder_atom_dig *dig;
1484 uint32_t dig_enc_in_use = 0; 1589 uint32_t dig_enc_in_use = 0;
1485 1590
1591 /* DCE4/5 */
1486 if (ASIC_IS_DCE4(rdev)) { 1592 if (ASIC_IS_DCE4(rdev)) {
1487 dig = radeon_encoder->enc_priv; 1593 dig = radeon_encoder->enc_priv;
1488 switch (radeon_encoder->encoder_id) { 1594 if (ASIC_IS_DCE41(rdev)) {
1489 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
1490 if (dig->linkb) 1595 if (dig->linkb)
1491 return 1; 1596 return 1;
1492 else 1597 else
1493 return 0; 1598 return 0;
1494 break; 1599 } else {
1495 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 1600 switch (radeon_encoder->encoder_id) {
1496 if (dig->linkb) 1601 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
1497 return 3; 1602 if (dig->linkb)
1498 else 1603 return 1;
1499 return 2; 1604 else
1500 break; 1605 return 0;
1501 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 1606 break;
1502 if (dig->linkb) 1607 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
1503 return 5; 1608 if (dig->linkb)
1504 else 1609 return 3;
1505 return 4; 1610 else
1506 break; 1611 return 2;
1612 break;
1613 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
1614 if (dig->linkb)
1615 return 5;
1616 else
1617 return 4;
1618 break;
1619 }
1507 } 1620 }
1508 } 1621 }
1509 1622
@@ -1610,7 +1723,13 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1610 } 1723 }
1611 1724
1612 if (ext_encoder) { 1725 if (ext_encoder) {
1613 atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE); 1726 if (ASIC_IS_DCE41(rdev)) {
1727 atombios_external_encoder_setup(encoder, ext_encoder,
1728 EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT);
1729 atombios_external_encoder_setup(encoder, ext_encoder,
1730 EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
1731 } else
1732 atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
1614 } 1733 }
1615 1734
1616 atombios_apply_encoder_quirks(encoder, adjusted_mode); 1735 atombios_apply_encoder_quirks(encoder, adjusted_mode);
@@ -1927,7 +2046,10 @@ radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
1927} 2046}
1928 2047
1929void 2048void
1930radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device) 2049radeon_add_atom_encoder(struct drm_device *dev,
2050 uint32_t encoder_enum,
2051 uint32_t supported_device,
2052 u16 caps)
1931{ 2053{
1932 struct radeon_device *rdev = dev->dev_private; 2054 struct radeon_device *rdev = dev->dev_private;
1933 struct drm_encoder *encoder; 2055 struct drm_encoder *encoder;
@@ -1970,6 +2092,7 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t
1970 radeon_encoder->rmx_type = RMX_OFF; 2092 radeon_encoder->rmx_type = RMX_OFF;
1971 radeon_encoder->underscan_type = UNDERSCAN_OFF; 2093 radeon_encoder->underscan_type = UNDERSCAN_OFF;
1972 radeon_encoder->is_ext_encoder = false; 2094 radeon_encoder->is_ext_encoder = false;
2095 radeon_encoder->caps = caps;
1973 2096
1974 switch (radeon_encoder->encoder_id) { 2097 switch (radeon_encoder->encoder_id) {
1975 case ENCODER_OBJECT_ID_INTERNAL_LVDS: 2098 case ENCODER_OBJECT_ID_INTERNAL_LVDS:
@@ -2029,6 +2152,8 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t
2029 case ENCODER_OBJECT_ID_TITFP513: 2152 case ENCODER_OBJECT_ID_TITFP513:
2030 case ENCODER_OBJECT_ID_VT1623: 2153 case ENCODER_OBJECT_ID_VT1623:
2031 case ENCODER_OBJECT_ID_HDMI_SI1930: 2154 case ENCODER_OBJECT_ID_HDMI_SI1930:
2155 case ENCODER_OBJECT_ID_TRAVIS:
2156 case ENCODER_OBJECT_ID_NUTMEG:
2032 /* these are handled by the primary encoders */ 2157 /* these are handled by the primary encoders */
2033 radeon_encoder->is_ext_encoder = true; 2158 radeon_encoder->is_ext_encoder = true;
2034 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) 2159 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h
index e329066dcabd..1ca55eb09ad3 100644
--- a/drivers/gpu/drm/radeon/radeon_family.h
+++ b/drivers/gpu/drm/radeon/radeon_family.h
@@ -80,6 +80,10 @@ enum radeon_family {
80 CHIP_JUNIPER, 80 CHIP_JUNIPER,
81 CHIP_CYPRESS, 81 CHIP_CYPRESS,
82 CHIP_HEMLOCK, 82 CHIP_HEMLOCK,
83 CHIP_PALM,
84 CHIP_BARTS,
85 CHIP_TURKS,
86 CHIP_CAICOS,
83 CHIP_LAST, 87 CHIP_LAST,
84}; 88};
85 89
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 6abea32be5e8..ca32e9c1e91d 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -225,8 +225,6 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
225 225
226 strcpy(info->fix.id, "radeondrmfb"); 226 strcpy(info->fix.id, "radeondrmfb");
227 227
228 drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
229
230 info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; 228 info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
231 info->fbops = &radeonfb_ops; 229 info->fbops = &radeonfb_ops;
232 230
@@ -247,8 +245,6 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
247 info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base; 245 info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base;
248 info->apertures->ranges[0].size = rdev->mc.aper_size; 246 info->apertures->ranges[0].size = rdev->mc.aper_size;
249 247
250 info->fix.mmio_start = 0;
251 info->fix.mmio_len = 0;
252 info->pixmap.size = 64*1024; 248 info->pixmap.size = 64*1024;
253 info->pixmap.buf_align = 8; 249 info->pixmap.buf_align = 8;
254 info->pixmap.access_align = 32; 250 info->pixmap.access_align = 32;
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index daacb281dfaf..171b0b2e3a64 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -38,6 +38,7 @@
38#include "drm.h" 38#include "drm.h"
39#include "radeon_reg.h" 39#include "radeon_reg.h"
40#include "radeon.h" 40#include "radeon.h"
41#include "radeon_trace.h"
41 42
42int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence) 43int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
43{ 44{
@@ -57,6 +58,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
57 } else 58 } else
58 radeon_fence_ring_emit(rdev, fence); 59 radeon_fence_ring_emit(rdev, fence);
59 60
61 trace_radeon_fence_emit(rdev->ddev, fence->seq);
60 fence->emited = true; 62 fence->emited = true;
61 list_del(&fence->list); 63 list_del(&fence->list);
62 list_add_tail(&fence->list, &rdev->fence_drv.emited); 64 list_add_tail(&fence->list, &rdev->fence_drv.emited);
@@ -213,6 +215,7 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
213retry: 215retry:
214 /* save current sequence used to check for GPU lockup */ 216 /* save current sequence used to check for GPU lockup */
215 seq = rdev->fence_drv.last_seq; 217 seq = rdev->fence_drv.last_seq;
218 trace_radeon_fence_wait_begin(rdev->ddev, seq);
216 if (intr) { 219 if (intr) {
217 radeon_irq_kms_sw_irq_get(rdev); 220 radeon_irq_kms_sw_irq_get(rdev);
218 r = wait_event_interruptible_timeout(rdev->fence_drv.queue, 221 r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
@@ -227,6 +230,7 @@ retry:
227 radeon_fence_signaled(fence), timeout); 230 radeon_fence_signaled(fence), timeout);
228 radeon_irq_kms_sw_irq_put(rdev); 231 radeon_irq_kms_sw_irq_put(rdev);
229 } 232 }
233 trace_radeon_fence_wait_end(rdev->ddev, seq);
230 if (unlikely(!radeon_fence_signaled(fence))) { 234 if (unlikely(!radeon_fence_signaled(fence))) {
231 /* we were interrupted for some reason and the fence 235 /* we were interrupted for some reason and the fence
232 * isn't signaled yet, resume wait 236 * isn't signaled yet, resume wait
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index a108c7ed14f5..a289646e8aa4 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -64,15 +64,15 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
64 struct radeon_device *rdev = dev->dev_private; 64 struct radeon_device *rdev = dev->dev_private;
65 unsigned i; 65 unsigned i;
66 66
67 INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
68
69 /* Disable *all* interrupts */ 67 /* Disable *all* interrupts */
70 rdev->irq.sw_int = false; 68 rdev->irq.sw_int = false;
71 rdev->irq.gui_idle = false; 69 rdev->irq.gui_idle = false;
72 for (i = 0; i < rdev->num_crtc; i++) 70 for (i = 0; i < rdev->num_crtc; i++)
73 rdev->irq.crtc_vblank_int[i] = false; 71 rdev->irq.crtc_vblank_int[i] = false;
74 for (i = 0; i < 6; i++) 72 for (i = 0; i < 6; i++) {
75 rdev->irq.hpd[i] = false; 73 rdev->irq.hpd[i] = false;
74 rdev->irq.pflip[i] = false;
75 }
76 radeon_irq_set(rdev); 76 radeon_irq_set(rdev);
77 /* Clear bits */ 77 /* Clear bits */
78 radeon_irq_process(rdev); 78 radeon_irq_process(rdev);
@@ -101,8 +101,10 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
101 rdev->irq.gui_idle = false; 101 rdev->irq.gui_idle = false;
102 for (i = 0; i < rdev->num_crtc; i++) 102 for (i = 0; i < rdev->num_crtc; i++)
103 rdev->irq.crtc_vblank_int[i] = false; 103 rdev->irq.crtc_vblank_int[i] = false;
104 for (i = 0; i < 6; i++) 104 for (i = 0; i < 6; i++) {
105 rdev->irq.hpd[i] = false; 105 rdev->irq.hpd[i] = false;
106 rdev->irq.pflip[i] = false;
107 }
106 radeon_irq_set(rdev); 108 radeon_irq_set(rdev);
107} 109}
108 110
@@ -110,6 +112,8 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
110{ 112{
111 int r = 0; 113 int r = 0;
112 114
115 INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
116
113 spin_lock_init(&rdev->irq.sw_lock); 117 spin_lock_init(&rdev->irq.sw_lock);
114 r = drm_vblank_init(rdev->ddev, rdev->num_crtc); 118 r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
115 if (r) { 119 if (r) {
@@ -121,7 +125,7 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
121 * chips. Disable MSI on them for now. 125 * chips. Disable MSI on them for now.
122 */ 126 */
123 if ((rdev->family >= CHIP_RV380) && 127 if ((rdev->family >= CHIP_RV380) &&
124 (!(rdev->flags & RADEON_IS_IGP)) && 128 ((!(rdev->flags & RADEON_IS_IGP)) || (rdev->family >= CHIP_PALM)) &&
125 (!(rdev->flags & RADEON_IS_AGP))) { 129 (!(rdev->flags & RADEON_IS_AGP))) {
126 int ret = pci_enable_msi(rdev->pdev); 130 int ret = pci_enable_msi(rdev->pdev);
127 if (!ret) { 131 if (!ret) {
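
The reworked condition keeps the RV380+ and non-AGP requirements but relaxes the IGP exclusion: families from PALM (the new Fusion IGPs added by this series) onward get MSI even though they are IGPs, presumably because the MSI problems the comment refers to only affect the older IGP chipsets.
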
@@ -148,6 +152,7 @@ void radeon_irq_kms_fini(struct radeon_device *rdev)
148 if (rdev->msi_enabled) 152 if (rdev->msi_enabled)
149 pci_disable_msi(rdev->pdev); 153 pci_disable_msi(rdev->pdev);
150 } 154 }
155 flush_work_sync(&rdev->hotplug_work);
151} 156}
152 157
153void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev) 158void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev)
@@ -175,3 +180,34 @@ void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev)
175 spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags); 180 spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
176} 181}
177 182
183void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc)
184{
185 unsigned long irqflags;
186
187 if (crtc < 0 || crtc >= rdev->num_crtc)
188 return;
189
190 spin_lock_irqsave(&rdev->irq.pflip_lock[crtc], irqflags);
191 if (rdev->ddev->irq_enabled && (++rdev->irq.pflip_refcount[crtc] == 1)) {
192 rdev->irq.pflip[crtc] = true;
193 radeon_irq_set(rdev);
194 }
195 spin_unlock_irqrestore(&rdev->irq.pflip_lock[crtc], irqflags);
196}
197
198void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc)
199{
200 unsigned long irqflags;
201
202 if (crtc < 0 || crtc >= rdev->num_crtc)
203 return;
204
205 spin_lock_irqsave(&rdev->irq.pflip_lock[crtc], irqflags);
206 BUG_ON(rdev->ddev->irq_enabled && rdev->irq.pflip_refcount[crtc] <= 0);
207 if (rdev->ddev->irq_enabled && (--rdev->irq.pflip_refcount[crtc] == 0)) {
208 rdev->irq.pflip[crtc] = false;
209 radeon_irq_set(rdev);
210 }
211 spin_unlock_irqrestore(&rdev->irq.pflip_lock[crtc], irqflags);
212}
213
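
The get/put pair is a plain reference count on the per-crtc pageflip vblank interrupt: the flip path is expected to call radeon_irq_kms_pflip_irq_get() when it arms a flip (see rs600_pre_page_flip() later in this series) and radeon_irq_kms_pflip_irq_put() once the flip has completed, so the interrupt stays enabled exactly as long as at least one flip is pending on that crtc.
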
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 8fbbe1c6ebbd..28a53e4a925f 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -96,9 +96,27 @@ out:
96 return r; 96 return r;
97} 97}
98 98
99static void radeon_set_filp_rights(struct drm_device *dev,
100 struct drm_file **owner,
101 struct drm_file *applier,
102 uint32_t *value)
103{
104 mutex_lock(&dev->struct_mutex);
105 if (*value == 1) {
106 /* wants rights */
107 if (!*owner)
108 *owner = applier;
109 } else if (*value == 0) {
110 /* revokes rights */
111 if (*owner == applier)
112 *owner = NULL;
113 }
114 *value = *owner == applier ? 1 : 0;
115 mutex_unlock(&dev->struct_mutex);
116}
99 117
100/* 118/*
101 * Userspace get informations ioctl 119 * Userspace get information ioctl
102 */ 120 */
103int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) 121int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
104{ 122{
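
radeon_set_filp_rights() factors out the grant/revoke pattern previously open-coded for Hyper-Z. A standalone model of its semantics (locking omitted; the driver holds dev->struct_mutex): *owner tracks which file currently holds the privilege, and *value is both the request (1 grant, 0 revoke) and, on return, whether the caller now holds it.

    static void set_rights(void **owner, void *applier, unsigned int *value)
    {
            if (*value == 1) {
                    if (!*owner)
                            *owner = applier;       /* first requester wins */
            } else if (*value == 0) {
                    if (*owner == applier)
                            *owner = NULL;          /* only the holder can revoke */
            }
            *value = (*owner == applier);           /* report current ownership */
    }
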
@@ -173,18 +191,15 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
173 DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", value); 191 DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", value);
174 return -EINVAL; 192 return -EINVAL;
175 } 193 }
176 mutex_lock(&dev->struct_mutex); 194 radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, &value);
177 if (value == 1) { 195 break;
178 /* wants hyper-z */ 196 case RADEON_INFO_WANT_CMASK:
179 if (!rdev->hyperz_filp) 197 /* The same logic as Hyper-Z. */
180 rdev->hyperz_filp = filp; 198 if (value >= 2) {
181 } else if (value == 0) { 199 DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", value);
182 /* revokes hyper-z */ 200 return -EINVAL;
183 if (rdev->hyperz_filp == filp)
184 rdev->hyperz_filp = NULL;
185 } 201 }
186 value = rdev->hyperz_filp == filp ? 1 : 0; 202 radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, &value);
187 mutex_unlock(&dev->struct_mutex);
188 break; 203 break;
189 default: 204 default:
190 DRM_DEBUG_KMS("Invalid request %d\n", info->request); 205 DRM_DEBUG_KMS("Invalid request %d\n", info->request);
@@ -203,10 +218,6 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
203 */ 218 */
204int radeon_driver_firstopen_kms(struct drm_device *dev) 219int radeon_driver_firstopen_kms(struct drm_device *dev)
205{ 220{
206 struct radeon_device *rdev = dev->dev_private;
207
208 if (rdev->powered_down)
209 return -EINVAL;
210 return 0; 221 return 0;
211} 222}
212 223
@@ -277,6 +288,27 @@ void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
277 radeon_irq_set(rdev); 288 radeon_irq_set(rdev);
278} 289}
279 290
291int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
292 int *max_error,
293 struct timeval *vblank_time,
294 unsigned flags)
295{
296 struct drm_crtc *drmcrtc;
297 struct radeon_device *rdev = dev->dev_private;
298
299 if (crtc < 0 || crtc >= dev->num_crtcs) {
300 DRM_ERROR("Invalid crtc %d\n", crtc);
301 return -EINVAL;
302 }
303
304 /* Get associated drm_crtc: */
305 drmcrtc = &rdev->mode_info.crtcs[crtc]->base;
306
307 /* Helper routine in DRM core does all the work: */
308 return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
309 vblank_time, flags,
310 drmcrtc);
311}
280 312
281/* 313/*
282 * IOCTL. 314 * IOCTL.
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index e301c6f9e059..12bdeab91c86 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -277,6 +277,9 @@ struct radeon_crtc {
277 fixed20_12 hsc; 277 fixed20_12 hsc;
278 struct drm_display_mode native_mode; 278 struct drm_display_mode native_mode;
279 int pll_id; 279 int pll_id;
280 /* page flipping */
281 struct radeon_unpin_work *unpin_work;
282 int deferred_flip_completion;
280}; 283};
281 284
282struct radeon_encoder_primary_dac { 285struct radeon_encoder_primary_dac {
@@ -376,6 +379,7 @@ struct radeon_encoder {
376 int hdmi_audio_workaround; 379 int hdmi_audio_workaround;
377 int hdmi_buffer_status; 380 int hdmi_buffer_status;
378 bool is_ext_encoder; 381 bool is_ext_encoder;
382 u16 caps;
379}; 383};
380 384
381struct radeon_connector_atom_dig { 385struct radeon_connector_atom_dig {
@@ -442,10 +446,6 @@ struct radeon_framebuffer {
442 struct drm_gem_object *obj; 446 struct drm_gem_object *obj;
443}; 447};
444 448
445/* radeon_get_crtc_scanoutpos() return flags */
446#define RADEON_SCANOUTPOS_VALID (1 << 0)
447#define RADEON_SCANOUTPOS_INVBL (1 << 1)
448#define RADEON_SCANOUTPOS_ACCURATE (1 << 2)
449 449
450extern enum radeon_tv_std 450extern enum radeon_tv_std
451radeon_combios_get_tv_info(struct radeon_device *rdev); 451radeon_combios_get_tv_info(struct radeon_device *rdev);
@@ -562,11 +562,12 @@ extern int radeon_crtc_cursor_set(struct drm_crtc *crtc,
562extern int radeon_crtc_cursor_move(struct drm_crtc *crtc, 562extern int radeon_crtc_cursor_move(struct drm_crtc *crtc,
563 int x, int y); 563 int x, int y);
564 564
565extern int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, int *hpos); 565extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
566 int *vpos, int *hpos);
566 567
567extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev); 568extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev);
568extern struct edid * 569extern struct edid *
569radeon_combios_get_hardcoded_edid(struct radeon_device *rdev); 570radeon_bios_get_hardcoded_edid(struct radeon_device *rdev);
570extern bool radeon_atom_get_clock_info(struct drm_device *dev); 571extern bool radeon_atom_get_clock_info(struct drm_device *dev);
571extern bool radeon_combios_get_clock_info(struct drm_device *dev); 572extern bool radeon_combios_get_clock_info(struct drm_device *dev);
572extern struct radeon_encoder_atom_dig * 573extern struct radeon_encoder_atom_dig *
@@ -662,4 +663,7 @@ int radeon_fbdev_total_size(struct radeon_device *rdev);
662bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj); 663bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj);
663 664
664void radeon_fb_output_poll_changed(struct radeon_device *rdev); 665void radeon_fb_output_poll_changed(struct radeon_device *rdev);
666
667void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id);
668
665#endif 669#endif
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index a598d0049aa5..7d6b8e88f746 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -34,6 +34,7 @@
34#include <drm/drmP.h> 34#include <drm/drmP.h>
35#include "radeon_drm.h" 35#include "radeon_drm.h"
36#include "radeon.h" 36#include "radeon.h"
37#include "radeon_trace.h"
37 38
38 39
39int radeon_ttm_init(struct radeon_device *rdev); 40int radeon_ttm_init(struct radeon_device *rdev);
@@ -146,6 +147,7 @@ retry:
146 list_add_tail(&bo->list, &rdev->gem.objects); 147 list_add_tail(&bo->list, &rdev->gem.objects);
147 mutex_unlock(&bo->rdev->gem.mutex); 148 mutex_unlock(&bo->rdev->gem.mutex);
148 } 149 }
150 trace_radeon_bo_create(bo);
149 return 0; 151 return 0;
150} 152}
151 153
@@ -302,34 +304,9 @@ void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
302 struct list_head *head) 304 struct list_head *head)
303{ 305{
304 if (lobj->wdomain) { 306 if (lobj->wdomain) {
305 list_add(&lobj->list, head); 307 list_add(&lobj->tv.head, head);
306 } else { 308 } else {
307 list_add_tail(&lobj->list, head); 309 list_add_tail(&lobj->tv.head, head);
308 }
309}
310
311int radeon_bo_list_reserve(struct list_head *head)
312{
313 struct radeon_bo_list *lobj;
314 int r;
315
316 list_for_each_entry(lobj, head, list){
317 r = radeon_bo_reserve(lobj->bo, false);
318 if (unlikely(r != 0))
319 return r;
320 lobj->reserved = true;
321 }
322 return 0;
323}
324
325void radeon_bo_list_unreserve(struct list_head *head)
326{
327 struct radeon_bo_list *lobj;
328
329 list_for_each_entry(lobj, head, list) {
330 /* only unreserve object we successfully reserved */
331 if (lobj->reserved && radeon_bo_is_reserved(lobj->bo))
332 radeon_bo_unreserve(lobj->bo);
333 } 310 }
334} 311}
335 312
@@ -340,14 +317,11 @@ int radeon_bo_list_validate(struct list_head *head)
340 u32 domain; 317 u32 domain;
341 int r; 318 int r;
342 319
343 list_for_each_entry(lobj, head, list) { 320 r = ttm_eu_reserve_buffers(head);
344 lobj->reserved = false;
345 }
346 r = radeon_bo_list_reserve(head);
347 if (unlikely(r != 0)) { 321 if (unlikely(r != 0)) {
348 return r; 322 return r;
349 } 323 }
350 list_for_each_entry(lobj, head, list) { 324 list_for_each_entry(lobj, head, tv.head) {
351 bo = lobj->bo; 325 bo = lobj->bo;
352 if (!bo->pin_count) { 326 if (!bo->pin_count) {
353 domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain; 327 domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain;
@@ -370,25 +344,6 @@ int radeon_bo_list_validate(struct list_head *head)
370 return 0; 344 return 0;
371} 345}
372 346
373void radeon_bo_list_fence(struct list_head *head, void *fence)
374{
375 struct radeon_bo_list *lobj;
376 struct radeon_bo *bo;
377 struct radeon_fence *old_fence = NULL;
378
379 list_for_each_entry(lobj, head, list) {
380 bo = lobj->bo;
381 spin_lock(&bo->tbo.lock);
382 old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
383 bo->tbo.sync_obj = radeon_fence_ref(fence);
384 bo->tbo.sync_obj_arg = NULL;
385 spin_unlock(&bo->tbo.lock);
386 if (old_fence) {
387 radeon_fence_unref(&old_fence);
388 }
389 }
390}
391
392int radeon_bo_fbdev_mmap(struct radeon_bo *bo, 347int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
393 struct vm_area_struct *vma) 348 struct vm_area_struct *vma)
394{ 349{
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index d143702b244a..22d4c237dea5 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -126,12 +126,12 @@ static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
126 r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0); 126 r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
127 if (unlikely(r != 0)) 127 if (unlikely(r != 0))
128 return r; 128 return r;
129 spin_lock(&bo->tbo.lock); 129 spin_lock(&bo->tbo.bdev->fence_lock);
130 if (mem_type) 130 if (mem_type)
131 *mem_type = bo->tbo.mem.mem_type; 131 *mem_type = bo->tbo.mem.mem_type;
132 if (bo->tbo.sync_obj) 132 if (bo->tbo.sync_obj)
133 r = ttm_bo_wait(&bo->tbo, true, true, no_wait); 133 r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
134 spin_unlock(&bo->tbo.lock); 134 spin_unlock(&bo->tbo.bdev->fence_lock);
135 ttm_bo_unreserve(&bo->tbo); 135 ttm_bo_unreserve(&bo->tbo);
136 return r; 136 return r;
137} 137}
@@ -152,10 +152,7 @@ extern int radeon_bo_init(struct radeon_device *rdev);
152extern void radeon_bo_fini(struct radeon_device *rdev); 152extern void radeon_bo_fini(struct radeon_device *rdev);
153extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj, 153extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
154 struct list_head *head); 154 struct list_head *head);
155extern int radeon_bo_list_reserve(struct list_head *head);
156extern void radeon_bo_list_unreserve(struct list_head *head);
157extern int radeon_bo_list_validate(struct list_head *head); 155extern int radeon_bo_list_validate(struct list_head *head);
158extern void radeon_bo_list_fence(struct list_head *head, void *fence);
159extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo, 156extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
160 struct vm_area_struct *vma); 157 struct vm_area_struct *vma);
161extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo, 158extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 8c9b2ef32c68..3b1b2bf9cdd5 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -167,13 +167,13 @@ static void radeon_set_power_state(struct radeon_device *rdev)
167 if (radeon_gui_idle(rdev)) { 167 if (radeon_gui_idle(rdev)) {
168 sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index]. 168 sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
169 clock_info[rdev->pm.requested_clock_mode_index].sclk; 169 clock_info[rdev->pm.requested_clock_mode_index].sclk;
170 if (sclk > rdev->clock.default_sclk) 170 if (sclk > rdev->pm.default_sclk)
171 sclk = rdev->clock.default_sclk; 171 sclk = rdev->pm.default_sclk;
172 172
173 mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index]. 173 mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
174 clock_info[rdev->pm.requested_clock_mode_index].mclk; 174 clock_info[rdev->pm.requested_clock_mode_index].mclk;
175 if (mclk > rdev->clock.default_mclk) 175 if (mclk > rdev->pm.default_mclk)
176 mclk = rdev->clock.default_mclk; 176 mclk = rdev->pm.default_mclk;
177 177
178 /* upvolt before raising clocks, downvolt after lowering clocks */ 178 /* upvolt before raising clocks, downvolt after lowering clocks */
179 if (sclk < rdev->pm.current_sclk) 179 if (sclk < rdev->pm.current_sclk)
@@ -405,20 +405,13 @@ static ssize_t radeon_set_pm_method(struct device *dev,
405 rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; 405 rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
406 mutex_unlock(&rdev->pm.mutex); 406 mutex_unlock(&rdev->pm.mutex);
407 } else if (strncmp("profile", buf, strlen("profile")) == 0) { 407 } else if (strncmp("profile", buf, strlen("profile")) == 0) {
408 bool flush_wq = false;
409
410 mutex_lock(&rdev->pm.mutex); 408 mutex_lock(&rdev->pm.mutex);
411 if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
412 cancel_delayed_work(&rdev->pm.dynpm_idle_work);
413 flush_wq = true;
414 }
415 /* disable dynpm */ 409 /* disable dynpm */
416 rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; 410 rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
417 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; 411 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
418 rdev->pm.pm_method = PM_METHOD_PROFILE; 412 rdev->pm.pm_method = PM_METHOD_PROFILE;
419 mutex_unlock(&rdev->pm.mutex); 413 mutex_unlock(&rdev->pm.mutex);
420 if (flush_wq) 414 cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
421 flush_workqueue(rdev->wq);
422 } else { 415 } else {
423 DRM_ERROR("invalid power method!\n"); 416 DRM_ERROR("invalid power method!\n");
424 goto fail; 417 goto fail;
@@ -447,8 +440,12 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
447 temp = rv770_get_temp(rdev); 440 temp = rv770_get_temp(rdev);
448 break; 441 break;
449 case THERMAL_TYPE_EVERGREEN: 442 case THERMAL_TYPE_EVERGREEN:
443 case THERMAL_TYPE_NI:
450 temp = evergreen_get_temp(rdev); 444 temp = evergreen_get_temp(rdev);
451 break; 445 break;
446 case THERMAL_TYPE_SUMO:
447 temp = sumo_get_temp(rdev);
448 break;
452 default: 449 default:
453 temp = 0; 450 temp = 0;
454 break; 451 break;
@@ -487,6 +484,7 @@ static int radeon_hwmon_init(struct radeon_device *rdev)
487 case THERMAL_TYPE_RV6XX: 484 case THERMAL_TYPE_RV6XX:
488 case THERMAL_TYPE_RV770: 485 case THERMAL_TYPE_RV770:
489 case THERMAL_TYPE_EVERGREEN: 486 case THERMAL_TYPE_EVERGREEN:
487 case THERMAL_TYPE_SUMO:
490 rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev); 488 rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev);
491 if (IS_ERR(rdev->pm.int_hwmon_dev)) { 489 if (IS_ERR(rdev->pm.int_hwmon_dev)) {
492 err = PTR_ERR(rdev->pm.int_hwmon_dev); 490 err = PTR_ERR(rdev->pm.int_hwmon_dev);
@@ -520,34 +518,39 @@ static void radeon_hwmon_fini(struct radeon_device *rdev)
520 518
521void radeon_pm_suspend(struct radeon_device *rdev) 519void radeon_pm_suspend(struct radeon_device *rdev)
522{ 520{
523 bool flush_wq = false;
524
525 mutex_lock(&rdev->pm.mutex); 521 mutex_lock(&rdev->pm.mutex);
526 if (rdev->pm.pm_method == PM_METHOD_DYNPM) { 522 if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
527 cancel_delayed_work(&rdev->pm.dynpm_idle_work);
528 if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) 523 if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
529 rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED; 524 rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
530 flush_wq = true;
531 } 525 }
532 mutex_unlock(&rdev->pm.mutex); 526 mutex_unlock(&rdev->pm.mutex);
533 if (flush_wq) 527
534 flush_workqueue(rdev->wq); 528 cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
535} 529}
536 530
537void radeon_pm_resume(struct radeon_device *rdev) 531void radeon_pm_resume(struct radeon_device *rdev)
538{ 532{
533 /* set up the default clocks if the MC ucode is loaded */
534 if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) {
535 if (rdev->pm.default_vddc)
536 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc);
537 if (rdev->pm.default_sclk)
538 radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
539 if (rdev->pm.default_mclk)
540 radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
541 }
539 /* asic init will reset the default power state */ 542 /* asic init will reset the default power state */
540 mutex_lock(&rdev->pm.mutex); 543 mutex_lock(&rdev->pm.mutex);
541 rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; 544 rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
542 rdev->pm.current_clock_mode_index = 0; 545 rdev->pm.current_clock_mode_index = 0;
543 rdev->pm.current_sclk = rdev->clock.default_sclk; 546 rdev->pm.current_sclk = rdev->pm.default_sclk;
544 rdev->pm.current_mclk = rdev->clock.default_mclk; 547 rdev->pm.current_mclk = rdev->pm.default_mclk;
545 rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage; 548 rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
546 if (rdev->pm.pm_method == PM_METHOD_DYNPM 549 if (rdev->pm.pm_method == PM_METHOD_DYNPM
547 && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) { 550 && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
548 rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; 551 rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
549 queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, 552 schedule_delayed_work(&rdev->pm.dynpm_idle_work,
550 msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); 553 msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
551 } 554 }
552 mutex_unlock(&rdev->pm.mutex); 555 mutex_unlock(&rdev->pm.mutex);
553 radeon_pm_compute_clocks(rdev); 556 radeon_pm_compute_clocks(rdev);
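
The DCE5 block at the top of radeon_pm_resume() mirrors the one added to radeon_pm_init() below: asic init resets the clocks, so the default voltage and engine/memory clocks have to be re-asserted on resume, and only when the MC microcode (rdev->mc_fw) is actually loaded.
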
@@ -564,6 +567,8 @@ int radeon_pm_init(struct radeon_device *rdev)
564 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; 567 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
565 rdev->pm.dynpm_can_upclock = true; 568 rdev->pm.dynpm_can_upclock = true;
566 rdev->pm.dynpm_can_downclock = true; 569 rdev->pm.dynpm_can_downclock = true;
570 rdev->pm.default_sclk = rdev->clock.default_sclk;
571 rdev->pm.default_mclk = rdev->clock.default_mclk;
567 rdev->pm.current_sclk = rdev->clock.default_sclk; 572 rdev->pm.current_sclk = rdev->clock.default_sclk;
568 rdev->pm.current_mclk = rdev->clock.default_mclk; 573 rdev->pm.current_mclk = rdev->clock.default_mclk;
569 rdev->pm.int_thermal_type = THERMAL_TYPE_NONE; 574 rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;
@@ -575,12 +580,24 @@ int radeon_pm_init(struct radeon_device *rdev)
575 radeon_combios_get_power_modes(rdev); 580 radeon_combios_get_power_modes(rdev);
576 radeon_pm_print_states(rdev); 581 radeon_pm_print_states(rdev);
577 radeon_pm_init_profile(rdev); 582 radeon_pm_init_profile(rdev);
583 /* set up the default clocks if the MC ucode is loaded */
584 if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) {
585 if (rdev->pm.default_vddc)
586 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc);
587 if (rdev->pm.default_sclk)
588 radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
589 if (rdev->pm.default_mclk)
590 radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
591 }
578 } 592 }
579 593
580 /* set up the internal thermal sensor if applicable */ 594 /* set up the internal thermal sensor if applicable */
581 ret = radeon_hwmon_init(rdev); 595 ret = radeon_hwmon_init(rdev);
582 if (ret) 596 if (ret)
583 return ret; 597 return ret;
598
599 INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
600
584 if (rdev->pm.num_power_states > 1) { 601 if (rdev->pm.num_power_states > 1) {
585 /* where's the best place to put these? */ 602 /* where's the best place to put these? */
586 ret = device_create_file(rdev->dev, &dev_attr_power_profile); 603 ret = device_create_file(rdev->dev, &dev_attr_power_profile);
@@ -594,8 +611,6 @@ int radeon_pm_init(struct radeon_device *rdev)
594 rdev->acpi_nb.notifier_call = radeon_acpi_event; 611 rdev->acpi_nb.notifier_call = radeon_acpi_event;
595 register_acpi_notifier(&rdev->acpi_nb); 612 register_acpi_notifier(&rdev->acpi_nb);
596#endif 613#endif
597 INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
598
599 if (radeon_debugfs_pm_init(rdev)) { 614 if (radeon_debugfs_pm_init(rdev)) {
600 DRM_ERROR("Failed to register debugfs file for PM!\n"); 615 DRM_ERROR("Failed to register debugfs file for PM!\n");
601 } 616 }
@@ -609,25 +624,20 @@ int radeon_pm_init(struct radeon_device *rdev)
609void radeon_pm_fini(struct radeon_device *rdev) 624void radeon_pm_fini(struct radeon_device *rdev)
610{ 625{
611 if (rdev->pm.num_power_states > 1) { 626 if (rdev->pm.num_power_states > 1) {
612 bool flush_wq = false;
613
614 mutex_lock(&rdev->pm.mutex); 627 mutex_lock(&rdev->pm.mutex);
615 if (rdev->pm.pm_method == PM_METHOD_PROFILE) { 628 if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
616 rdev->pm.profile = PM_PROFILE_DEFAULT; 629 rdev->pm.profile = PM_PROFILE_DEFAULT;
617 radeon_pm_update_profile(rdev); 630 radeon_pm_update_profile(rdev);
618 radeon_pm_set_clocks(rdev); 631 radeon_pm_set_clocks(rdev);
619 } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) { 632 } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
620 /* cancel work */
621 cancel_delayed_work(&rdev->pm.dynpm_idle_work);
622 flush_wq = true;
623 /* reset default clocks */ 633 /* reset default clocks */
624 rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; 634 rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
625 rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; 635 rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
626 radeon_pm_set_clocks(rdev); 636 radeon_pm_set_clocks(rdev);
627 } 637 }
628 mutex_unlock(&rdev->pm.mutex); 638 mutex_unlock(&rdev->pm.mutex);
629 if (flush_wq) 639
630 flush_workqueue(rdev->wq); 640 cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
631 641
632 device_remove_file(rdev->dev, &dev_attr_power_profile); 642 device_remove_file(rdev->dev, &dev_attr_power_profile);
633 device_remove_file(rdev->dev, &dev_attr_power_method); 643 device_remove_file(rdev->dev, &dev_attr_power_method);
@@ -686,12 +696,12 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev)
686 radeon_pm_get_dynpm_state(rdev); 696 radeon_pm_get_dynpm_state(rdev);
687 radeon_pm_set_clocks(rdev); 697 radeon_pm_set_clocks(rdev);
688 698
689 queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, 699 schedule_delayed_work(&rdev->pm.dynpm_idle_work,
690 msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); 700 msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
691 } else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) { 701 } else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
692 rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; 702 rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
693 queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, 703 schedule_delayed_work(&rdev->pm.dynpm_idle_work,
694 msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); 704 msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
695 DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n"); 705 DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n");
696 } 706 }
697 } else { /* count == 0 */ 707 } else { /* count == 0 */
@@ -720,9 +730,9 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev)
720 */ 730 */
721 for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) { 731 for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
722 if (rdev->pm.active_crtcs & (1 << crtc)) { 732 if (rdev->pm.active_crtcs & (1 << crtc)) {
723 vbl_status = radeon_get_crtc_scanoutpos(rdev, crtc, &vpos, &hpos); 733 vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, &vpos, &hpos);
724 if ((vbl_status & RADEON_SCANOUTPOS_VALID) && 734 if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
725 !(vbl_status & RADEON_SCANOUTPOS_INVBL)) 735 !(vbl_status & DRM_SCANOUTPOS_INVBL))
726 in_vbl = false; 736 in_vbl = false;
727 } 737 }
728 } 738 }
@@ -796,8 +806,8 @@ static void radeon_dynpm_idle_work_handler(struct work_struct *work)
796 radeon_pm_set_clocks(rdev); 806 radeon_pm_set_clocks(rdev);
797 } 807 }
798 808
799 queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, 809 schedule_delayed_work(&rdev->pm.dynpm_idle_work,
800 msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); 810 msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
801 } 811 }
802 mutex_unlock(&rdev->pm.mutex); 812 mutex_unlock(&rdev->pm.mutex);
803 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); 813 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
@@ -814,9 +824,9 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
814 struct drm_device *dev = node->minor->dev; 824 struct drm_device *dev = node->minor->dev;
815 struct radeon_device *rdev = dev->dev_private; 825 struct radeon_device *rdev = dev->dev_private;
816 826
817 seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk); 827 seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
818 seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev)); 828 seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
819 seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk); 829 seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
820 if (rdev->asic->get_memory_clock) 830 if (rdev->asic->get_memory_clock)
821 seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev)); 831 seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
822 if (rdev->pm.current_vddc) 832 if (rdev->pm.current_vddc)
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index 64928814de53..3cd4dace57c7 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -55,6 +55,7 @@
55#include "r500_reg.h" 55#include "r500_reg.h"
56#include "r600_reg.h" 56#include "r600_reg.h"
57#include "evergreen_reg.h" 57#include "evergreen_reg.h"
58#include "ni_reg.h"
58 59
59#define RADEON_MC_AGP_LOCATION 0x014c 60#define RADEON_MC_AGP_LOCATION 0x014c
60#define RADEON_MC_AGP_START_MASK 0x0000FFFF 61#define RADEON_MC_AGP_START_MASK 0x0000FFFF
@@ -320,6 +321,15 @@
320# define RADEON_PCIE_LC_RECONFIG_NOW (1 << 8) 321# define RADEON_PCIE_LC_RECONFIG_NOW (1 << 8)
321# define RADEON_PCIE_LC_RECONFIG_LATER (1 << 9) 322# define RADEON_PCIE_LC_RECONFIG_LATER (1 << 9)
322# define RADEON_PCIE_LC_SHORT_RECONFIG_EN (1 << 10) 323# define RADEON_PCIE_LC_SHORT_RECONFIG_EN (1 << 10)
324# define R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7)
325# define R600_PCIE_LC_RENEGOTIATION_SUPPORT (1 << 9)
326# define R600_PCIE_LC_RENEGOTIATE_EN (1 << 10)
327# define R600_PCIE_LC_SHORT_RECONFIG_EN (1 << 11)
328# define R600_PCIE_LC_UPCONFIGURE_SUPPORT (1 << 12)
329# define R600_PCIE_LC_UPCONFIGURE_DIS (1 << 13)
330
331#define R600_TARGET_AND_CURRENT_PROFILE_INDEX 0x70c
332#define R700_TARGET_AND_CURRENT_PROFILE_INDEX 0x66c
323 333
324#define RADEON_CACHE_CNTL 0x1724 334#define RADEON_CACHE_CNTL 0x1724
325#define RADEON_CACHE_LINE 0x0f0c /* PCI */ 335#define RADEON_CACHE_LINE 0x0f0c /* PCI */
@@ -422,6 +432,7 @@
422# define RADEON_CRTC_CSYNC_EN (1 << 4) 432# define RADEON_CRTC_CSYNC_EN (1 << 4)
423# define RADEON_CRTC_ICON_EN (1 << 15) 433# define RADEON_CRTC_ICON_EN (1 << 15)
424# define RADEON_CRTC_CUR_EN (1 << 16) 434# define RADEON_CRTC_CUR_EN (1 << 16)
435# define RADEON_CRTC_VSTAT_MODE_MASK (3 << 17)
425# define RADEON_CRTC_CUR_MODE_MASK (7 << 20) 436# define RADEON_CRTC_CUR_MODE_MASK (7 << 20)
426# define RADEON_CRTC_CUR_MODE_SHIFT 20 437# define RADEON_CRTC_CUR_MODE_SHIFT 20
427# define RADEON_CRTC_CUR_MODE_MONO 0 438# define RADEON_CRTC_CUR_MODE_MONO 0
@@ -509,6 +520,8 @@
509# define RADEON_CRTC_TILE_EN (1 << 15) 520# define RADEON_CRTC_TILE_EN (1 << 15)
510# define RADEON_CRTC_OFFSET_FLIP_CNTL (1 << 16) 521# define RADEON_CRTC_OFFSET_FLIP_CNTL (1 << 16)
511# define RADEON_CRTC_STEREO_OFFSET_EN (1 << 17) 522# define RADEON_CRTC_STEREO_OFFSET_EN (1 << 17)
523# define RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN (1 << 28)
524# define RADEON_CRTC_GUI_TRIG_OFFSET_RIGHT_EN (1 << 29)
512 525
513#define R300_CRTC_TILE_X0_Y0 0x0350 526#define R300_CRTC_TILE_X0_Y0 0x0350
514#define R300_CRTC2_TILE_X0_Y0 0x0358 527#define R300_CRTC2_TILE_X0_Y0 0x0358
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
new file mode 100644
index 000000000000..eafd8160a155
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -0,0 +1,82 @@
1#if !defined(_RADEON_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
2#define _RADEON_TRACE_H_
3
4#include <linux/stringify.h>
5#include <linux/types.h>
6#include <linux/tracepoint.h>
7
8#include <drm/drmP.h>
9
10#undef TRACE_SYSTEM
11#define TRACE_SYSTEM radeon
12#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
13#define TRACE_INCLUDE_FILE radeon_trace
14
15TRACE_EVENT(radeon_bo_create,
16 TP_PROTO(struct radeon_bo *bo),
17 TP_ARGS(bo),
18 TP_STRUCT__entry(
19 __field(struct radeon_bo *, bo)
20 __field(u32, pages)
21 ),
22
23 TP_fast_assign(
24 __entry->bo = bo;
25 __entry->pages = bo->tbo.num_pages;
26 ),
27 TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
28);
29
30DECLARE_EVENT_CLASS(radeon_fence_request,
31
32 TP_PROTO(struct drm_device *dev, u32 seqno),
33
34 TP_ARGS(dev, seqno),
35
36 TP_STRUCT__entry(
37 __field(u32, dev)
38 __field(u32, seqno)
39 ),
40
41 TP_fast_assign(
42 __entry->dev = dev->primary->index;
43 __entry->seqno = seqno;
44 ),
45
46 TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
47);
48
49DEFINE_EVENT(radeon_fence_request, radeon_fence_emit,
50
51 TP_PROTO(struct drm_device *dev, u32 seqno),
52
53 TP_ARGS(dev, seqno)
54);
55
56DEFINE_EVENT(radeon_fence_request, radeon_fence_retire,
57
58 TP_PROTO(struct drm_device *dev, u32 seqno),
59
60 TP_ARGS(dev, seqno)
61);
62
63DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_begin,
64
65 TP_PROTO(struct drm_device *dev, u32 seqno),
66
67 TP_ARGS(dev, seqno)
68);
69
70DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_end,
71
72 TP_PROTO(struct drm_device *dev, u32 seqno),
73
74 TP_ARGS(dev, seqno)
75);
76
77#endif
78
79/* This part must be outside protection */
80#undef TRACE_INCLUDE_PATH
81#define TRACE_INCLUDE_PATH .
82#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/radeon/radeon_trace_points.c b/drivers/gpu/drm/radeon/radeon_trace_points.c
new file mode 100644
index 000000000000..8175993df84d
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_trace_points.c
@@ -0,0 +1,9 @@
1/* Copyright Red Hat Inc 2010.
2 * Author : Dave Airlie <airlied@redhat.com>
3 */
4#include <drm/drmP.h>
5#include "radeon_drm.h"
6#include "radeon.h"
7
8#define CREATE_TRACE_POINTS
9#include "radeon_trace.h"
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515
index b3f9f1d92005..ef422bbacfc1 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rv515
+++ b/drivers/gpu/drm/radeon/reg_srcs/rv515
@@ -304,6 +304,22 @@ rv515 0x6d40
3040x4630 US_CODE_ADDR 3040x4630 US_CODE_ADDR
3050x4634 US_CODE_RANGE 3050x4634 US_CODE_RANGE
3060x4638 US_CODE_OFFSET 3060x4638 US_CODE_OFFSET
3070x4640 US_FORMAT0_0
3080x4644 US_FORMAT0_1
3090x4648 US_FORMAT0_2
3100x464C US_FORMAT0_3
3110x4650 US_FORMAT0_4
3120x4654 US_FORMAT0_5
3130x4658 US_FORMAT0_6
3140x465C US_FORMAT0_7
3150x4660 US_FORMAT0_8
3160x4664 US_FORMAT0_9
3170x4668 US_FORMAT0_10
3180x466C US_FORMAT0_11
3190x4670 US_FORMAT0_12
3200x4674 US_FORMAT0_13
3210x4678 US_FORMAT0_14
3220x467C US_FORMAT0_15
3070x46A4 US_OUT_FMT_0 3230x46A4 US_OUT_FMT_0
3080x46A8 US_OUT_FMT_1 3240x46A8 US_OUT_FMT_1
3090x46AC US_OUT_FMT_2 3250x46AC US_OUT_FMT_2
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index f1c6e02c2e6b..b4192acaab5f 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -46,6 +46,56 @@
 void rs600_gpu_init(struct radeon_device *rdev);
 int rs600_mc_wait_for_idle(struct radeon_device *rdev);
 
+void rs600_pre_page_flip(struct radeon_device *rdev, int crtc)
+{
+	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
+	u32 tmp;
+
+	/* make sure flip is at vb rather than hb */
+	tmp = RREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset);
+	tmp &= ~AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN;
+	WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
+
+	/* set pageflip to happen anywhere in vblank interval */
+	WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
+
+	/* enable the pflip int */
+	radeon_irq_kms_pflip_irq_get(rdev, crtc);
+}
+
+void rs600_post_page_flip(struct radeon_device *rdev, int crtc)
+{
+	/* disable the pflip int */
+	radeon_irq_kms_pflip_irq_put(rdev, crtc);
+}
+
+u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+{
+	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+	u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
+
+	/* Lock the graphics update lock */
+	tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
+	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+	/* update the scanout addresses */
+	WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+	       (u32)crtc_base);
+	WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+	       (u32)crtc_base);
+
+	/* Wait for update_pending to go high. */
+	while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING));
+	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
+
+	/* Unlock the lock, so double-buffering can take place inside vblank */
+	tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
+	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+	/* Return current update_pending status: */
+	return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
+}
+
 void rs600_pm_misc(struct radeon_device *rdev)
 {
 	int requested_index = rdev->pm.requested_power_state_index;
@@ -515,10 +565,12 @@ int rs600_irq_set(struct radeon_device *rdev)
 	if (rdev->irq.gui_idle) {
 		tmp |= S_000040_GUI_IDLE(1);
 	}
-	if (rdev->irq.crtc_vblank_int[0]) {
+	if (rdev->irq.crtc_vblank_int[0] ||
+	    rdev->irq.pflip[0]) {
 		mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
 	}
-	if (rdev->irq.crtc_vblank_int[1]) {
+	if (rdev->irq.crtc_vblank_int[1] ||
+	    rdev->irq.pflip[1]) {
 		mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
 	}
 	if (rdev->irq.hpd[0]) {
@@ -534,7 +586,7 @@ int rs600_irq_set(struct radeon_device *rdev)
 	return 0;
 }
 
-static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int)
+static inline u32 rs600_irq_ack(struct radeon_device *rdev)
 {
 	uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS);
 	uint32_t irq_mask = S_000044_SW_INT(1);
@@ -547,27 +599,27 @@ static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_
 	}
 
 	if (G_000044_DISPLAY_INT_STAT(irqs)) {
-		*r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
-		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(*r500_disp_int)) {
+		rdev->irq.stat_regs.r500.disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
+		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
 			WREG32(R_006534_D1MODE_VBLANK_STATUS,
 				S_006534_D1MODE_VBLANK_ACK(1));
 		}
-		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(*r500_disp_int)) {
+		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
 			WREG32(R_006D34_D2MODE_VBLANK_STATUS,
 				S_006D34_D2MODE_VBLANK_ACK(1));
 		}
-		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(*r500_disp_int)) {
+		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
 			tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
 			tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1);
 			WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
 		}
-		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(*r500_disp_int)) {
+		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
 			tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
 			tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1);
 			WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
 		}
 	} else {
-		*r500_disp_int = 0;
+		rdev->irq.stat_regs.r500.disp_int = 0;
 	}
 
 	if (irqs) {
@@ -578,32 +630,30 @@ static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_
 
 void rs600_irq_disable(struct radeon_device *rdev)
 {
-	u32 tmp;
-
 	WREG32(R_000040_GEN_INT_CNTL, 0);
 	WREG32(R_006540_DxMODE_INT_MASK, 0);
 	/* Wait and acknowledge irq */
 	mdelay(1);
-	rs600_irq_ack(rdev, &tmp);
+	rs600_irq_ack(rdev);
 }
 
 int rs600_irq_process(struct radeon_device *rdev)
 {
-	uint32_t status, msi_rearm;
-	uint32_t r500_disp_int;
+	u32 status, msi_rearm;
 	bool queue_hotplug = false;
 
 	/* reset gui idle ack. the status bit is broken */
 	rdev->irq.gui_idle_acked = false;
 
-	status = rs600_irq_ack(rdev, &r500_disp_int);
-	if (!status && !r500_disp_int) {
+	status = rs600_irq_ack(rdev);
+	if (!status && !rdev->irq.stat_regs.r500.disp_int) {
 		return IRQ_NONE;
 	}
-	while (status || r500_disp_int) {
+	while (status || rdev->irq.stat_regs.r500.disp_int) {
 		/* SW interrupt */
-		if (G_000044_SW_INT(status))
+		if (G_000044_SW_INT(status)) {
 			radeon_fence_process(rdev);
+		}
 		/* GUI idle */
 		if (G_000040_GUI_IDLE(status)) {
 			rdev->irq.gui_idle_acked = true;
@@ -611,30 +661,38 @@ int rs600_irq_process(struct radeon_device *rdev)
 			wake_up(&rdev->irq.idle_queue);
 		}
 		/* Vertical blank interrupts */
-		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) {
-			drm_handle_vblank(rdev->ddev, 0);
-			rdev->pm.vblank_sync = true;
-			wake_up(&rdev->irq.vblank_queue);
+		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
+			if (rdev->irq.crtc_vblank_int[0]) {
+				drm_handle_vblank(rdev->ddev, 0);
+				rdev->pm.vblank_sync = true;
+				wake_up(&rdev->irq.vblank_queue);
+			}
+			if (rdev->irq.pflip[0])
+				radeon_crtc_handle_flip(rdev, 0);
 		}
-		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int)) {
-			drm_handle_vblank(rdev->ddev, 1);
-			rdev->pm.vblank_sync = true;
-			wake_up(&rdev->irq.vblank_queue);
+		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
+			if (rdev->irq.crtc_vblank_int[1]) {
+				drm_handle_vblank(rdev->ddev, 1);
+				rdev->pm.vblank_sync = true;
+				wake_up(&rdev->irq.vblank_queue);
+			}
+			if (rdev->irq.pflip[1])
+				radeon_crtc_handle_flip(rdev, 1);
 		}
-		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) {
+		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
 			queue_hotplug = true;
 			DRM_DEBUG("HPD1\n");
 		}
-		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(r500_disp_int)) {
+		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
 			queue_hotplug = true;
 			DRM_DEBUG("HPD2\n");
 		}
-		status = rs600_irq_ack(rdev, &r500_disp_int);
+		status = rs600_irq_ack(rdev);
 	}
 	/* reset gui idle ack. the status bit is broken */
 	rdev->irq.gui_idle_acked = false;
 	if (queue_hotplug)
-		queue_work(rdev->wq, &rdev->hotplug_work);
+		schedule_work(&rdev->hotplug_work);
 	if (rdev->msi_enabled) {
 		switch (rdev->family) {
 		case CHIP_RS600:
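
The three rs600 functions added above split a page flip into arm/latch/disarm phases. A rough sketch of the intended sequencing (the caller shown is hypothetical; the real wiring lives in the shared radeon display code, with radeon_crtc_handle_flip() completing the flip from the vblank interrupt):

    /* Hypothetical caller, sketching the sequence only. */
    static void example_flip(struct radeon_device *rdev, int crtc_id,
                             u64 new_base)
    {
            rs600_pre_page_flip(rdev, crtc_id);       /* flip at vblank, arm pflip irq */
            rs600_page_flip(rdev, crtc_id, new_base); /* latch new scanout address */
            /* ...vblank irq -> radeon_crtc_handle_flip() completes the flip... */
            rs600_post_page_flip(rdev, crtc_id);      /* disarm pflip irq */
    }
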
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 4dfead8cee33..3a264aa3a79a 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -41,6 +41,41 @@
 
 static void rv770_gpu_init(struct radeon_device *rdev);
 void rv770_fini(struct radeon_device *rdev);
+static void rv770_pcie_gen2_enable(struct radeon_device *rdev);
+
+u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+{
+	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+	u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
+
+	/* Lock the graphics update lock */
+	tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
+	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+	/* update the scanout addresses */
+	if (radeon_crtc->crtc_id) {
+		WREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
+		WREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
+	} else {
+		WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
+		WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
+	}
+	WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+	       (u32)crtc_base);
+	WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+	       (u32)crtc_base);
+
+	/* Wait for update_pending to go high. */
+	while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING));
+	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
+
+	/* Unlock the lock, so double-buffering can take place inside vblank */
+	tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
+	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+	/* Return current update_pending status: */
+	return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
+}
 
 /* get temperature in millidegrees */
 u32 rv770_get_temp(struct radeon_device *rdev)
@@ -489,6 +524,49 @@ static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
 	return backend_map;
 }
 
+static void rv770_program_channel_remap(struct radeon_device *rdev)
+{
+	u32 tcp_chan_steer, mc_shared_chremap, tmp;
+	bool force_no_swizzle;
+
+	switch (rdev->family) {
+	case CHIP_RV770:
+	case CHIP_RV730:
+		force_no_swizzle = false;
+		break;
+	case CHIP_RV710:
+	case CHIP_RV740:
+	default:
+		force_no_swizzle = true;
+		break;
+	}
+
+	tmp = RREG32(MC_SHARED_CHMAP);
+	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+	case 0:
+	case 1:
+	default:
+		/* default mapping */
+		mc_shared_chremap = 0x00fac688;
+		break;
+	case 2:
+	case 3:
+		if (force_no_swizzle)
+			mc_shared_chremap = 0x00fac688;
+		else
+			mc_shared_chremap = 0x00bbc298;
+		break;
+	}
+
+	if (rdev->family == CHIP_RV740)
+		tcp_chan_steer = 0x00ef2a60;
+	else
+		tcp_chan_steer = 0x00fac688;
+
+	WREG32(TCP_CHAN_STEER, tcp_chan_steer);
+	WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
+}
+
 static void rv770_gpu_init(struct radeon_device *rdev)
 {
 	int i, j, num_qd_pipes;
@@ -688,6 +766,8 @@ static void rv770_gpu_init(struct radeon_device *rdev)
 	WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
 	WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
 
+	rv770_program_channel_remap(rdev);
+
 	WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
 	WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
 	WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
@@ -956,6 +1036,45 @@ static void rv770_vram_scratch_fini(struct radeon_device *rdev)
 	radeon_bo_unref(&rdev->vram_scratch.robj);
 }
 
+void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
+{
+	u64 size_bf, size_af;
+
+	if (mc->mc_vram_size > 0xE0000000) {
+		/* leave room for at least 512M GTT */
+		dev_warn(rdev->dev, "limiting VRAM\n");
+		mc->real_vram_size = 0xE0000000;
+		mc->mc_vram_size = 0xE0000000;
+	}
+	if (rdev->flags & RADEON_IS_AGP) {
+		size_bf = mc->gtt_start;
+		size_af = 0xFFFFFFFF - mc->gtt_end + 1;
+		if (size_bf > size_af) {
+			if (mc->mc_vram_size > size_bf) {
+				dev_warn(rdev->dev, "limiting VRAM\n");
+				mc->real_vram_size = size_bf;
+				mc->mc_vram_size = size_bf;
+			}
+			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
+		} else {
+			if (mc->mc_vram_size > size_af) {
+				dev_warn(rdev->dev, "limiting VRAM\n");
+				mc->real_vram_size = size_af;
+				mc->mc_vram_size = size_af;
+			}
+			mc->vram_start = mc->gtt_end;
+		}
+		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
+				mc->mc_vram_size >> 20, mc->vram_start,
+				mc->vram_end, mc->real_vram_size >> 20);
+	} else {
+		radeon_vram_location(rdev, &rdev->mc, 0);
+		rdev->mc.gtt_base_align = 0;
+		radeon_gtt_location(rdev, mc);
+	}
+}
+
 int rv770_mc_init(struct radeon_device *rdev)
 {
 	u32 tmp;
@@ -996,7 +1115,7 @@ int rv770_mc_init(struct radeon_device *rdev)
 	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
 	rdev->mc.visible_vram_size = rdev->mc.aper_size;
 	rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
-	r600_vram_gtt_location(rdev, &rdev->mc);
+	r700_vram_gtt_location(rdev, &rdev->mc);
 	radeon_update_bandwidth_info(rdev);
 
 	return 0;
@@ -1006,6 +1125,9 @@ static int rv770_startup(struct radeon_device *rdev)
 {
 	int r;
 
+	/* enable pcie gen2 link */
+	rv770_pcie_gen2_enable(rdev);
+
 	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
 		r = r600_init_microcode(rdev);
 		if (r) {
@@ -1244,3 +1366,75 @@ void rv770_fini(struct radeon_device *rdev)
 	rdev->bios = NULL;
 	radeon_dummy_page_fini(rdev);
 }
+
+static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
+{
+	u32 link_width_cntl, lanes, speed_cntl, tmp;
+	u16 link_cntl2;
+
+	if (rdev->flags & RADEON_IS_IGP)
+		return;
+
+	if (!(rdev->flags & RADEON_IS_PCIE))
+		return;
+
+	/* x2 cards have a special sequence */
+	if (ASIC_IS_X2(rdev))
+		return;
+
+	/* advertise upconfig capability */
+	link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+	link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+	WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+	link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+	if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
+		lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
+		link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
+				     LC_RECONFIG_ARC_MISSING_ESCAPE);
+		link_width_cntl |= lanes | LC_RECONFIG_NOW |
+			LC_RENEGOTIATE_EN | LC_UPCONFIGURE_SUPPORT;
+		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+	} else {
+		link_width_cntl |= LC_UPCONFIGURE_DIS;
+		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+	}
+
+	speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
+	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
+
+		tmp = RREG32(0x541c);
+		WREG32(0x541c, tmp | 0x8);
+		WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
+		link_cntl2 = RREG16(0x4088);
+		link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
+		link_cntl2 |= 0x2;
+		WREG16(0x4088, link_cntl2);
+		WREG32(MM_CFGREGS_CNTL, 0);
+
+		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+		speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
+		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+		speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
+		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+		speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
+		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+		speed_cntl |= LC_GEN2_EN_STRAP;
+		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+	} else {
+		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
+		if (1)
+			link_width_cntl |= LC_UPCONFIGURE_DIS;
+		else
+			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+	}
+}
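
To make the placement arithmetic in r700_vram_gtt_location() concrete, take a made-up AGP layout: gtt_start = 0x80000000 and gtt_end = 0x9FFFFFFF. Then size_bf = 0x80000000 (2 GiB before the aperture) and size_af = 0xFFFFFFFF - 0x9FFFFFFF + 1 = 0x60000000 (1.5 GiB after it), so VRAM is clamped to at most size_bf and placed immediately below the GTT. A standalone sketch of the same computation, mirroring the hunk above with invented values:

    /* Stand-alone sketch of the VRAM/GTT placement arithmetic. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t gtt_start = 0x80000000ULL, gtt_end = 0x9FFFFFFFULL;
            uint64_t vram_size = 0x40000000ULL; /* pretend 1 GiB card */
            uint64_t size_bf = gtt_start;                  /* room before GTT */
            uint64_t size_af = 0xFFFFFFFFULL - gtt_end + 1; /* room after GTT */
            uint64_t vram_start = (size_bf > size_af) ?
                    gtt_start - vram_size : gtt_end;

            printf("before=0x%llx after=0x%llx vram_start=0x%llx\n",
                   (unsigned long long)size_bf, (unsigned long long)size_af,
                   (unsigned long long)vram_start);
            return 0;
    }
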
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index b7a5a20e81dc..abc8cf5a3672 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -138,6 +138,7 @@
 #define MC_SHARED_CHMAP 0x2004
 #define NOOFCHAN_SHIFT 12
 #define NOOFCHAN_MASK 0x00003000
+#define MC_SHARED_CHREMAP 0x2008
 
 #define MC_ARB_RAMCFG 0x2760
 #define NOOFBANK_SHIFT 0
@@ -303,6 +304,7 @@
 #define BILINEAR_PRECISION_8_BIT (1 << 31)
 
 #define TCP_CNTL 0x9610
+#define TCP_CHAN_STEER 0x9614
 
 #define VGT_CACHE_INVALIDATION 0x88C4
 #define CACHE_INVALIDATION(x) ((x)<<0)
@@ -351,4 +353,49 @@
 
 #define SRBM_STATUS 0x0E50
 
+#define D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110
+#define D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6914
+#define D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6114
+#define D1GRPH_SECONDARY_SURFACE_ADDRESS 0x6118
+#define D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x691c
+#define D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x611c
+
+/* PCIE link stuff */
+#define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */
+#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
+#	define LC_LINK_WIDTH_SHIFT 0
+#	define LC_LINK_WIDTH_MASK 0x7
+#	define LC_LINK_WIDTH_X0 0
+#	define LC_LINK_WIDTH_X1 1
+#	define LC_LINK_WIDTH_X2 2
+#	define LC_LINK_WIDTH_X4 3
+#	define LC_LINK_WIDTH_X8 4
+#	define LC_LINK_WIDTH_X16 6
+#	define LC_LINK_WIDTH_RD_SHIFT 4
+#	define LC_LINK_WIDTH_RD_MASK 0x70
+#	define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7)
+#	define LC_RECONFIG_NOW (1 << 8)
+#	define LC_RENEGOTIATION_SUPPORT (1 << 9)
+#	define LC_RENEGOTIATE_EN (1 << 10)
+#	define LC_SHORT_RECONFIG_EN (1 << 11)
+#	define LC_UPCONFIGURE_SUPPORT (1 << 12)
+#	define LC_UPCONFIGURE_DIS (1 << 13)
+#define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */
+#	define LC_GEN2_EN_STRAP (1 << 0)
+#	define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 1)
+#	define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 5)
+#	define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 6)
+#	define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 8)
+#	define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 3
+#	define LC_CURRENT_DATA_RATE (1 << 11)
+#	define LC_VOLTAGE_TIMER_SEL_MASK (0xf << 14)
+#	define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 21)
+#	define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 23)
+#	define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 24)
+#define MM_CFGREGS_CNTL 0x544c
+#	define MM_WR_TO_CFG_EN (1 << 3)
+#define LINK_CNTL2 0x88 /* F0 */
+#	define TARGET_LINK_SPEED_MASK (0xf << 0)
+#	define SELECTABLE_DEEMPHASIS (1 << 6)
+
 #endif
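
As a worked example of the LC_LINK_WIDTH_* encoding above: a link-width register value of 0x61 requests x1 in bits 2:0 and reports index 6 in bits 6:4, i.e. a negotiated x16 link. A standalone sketch of the decode (the 12-lane entry at index 5 is an assumption inferred from the gap between X8 = 4 and X16 = 6; it is not among the defines shown):

    /* Sketch: decoding the negotiated lane count from PCIE_LC_LINK_WIDTH_CNTL. */
    #include <stdio.h>

    #define LC_LINK_WIDTH_RD_MASK  0x70
    #define LC_LINK_WIDTH_RD_SHIFT 4

    static unsigned decode_lanes(unsigned link_width_cntl)
    {
            static const unsigned lanes[] = { 0, 1, 2, 4, 8, 12, 16 };
            unsigned idx = (link_width_cntl & LC_LINK_WIDTH_RD_MASK)
                            >> LC_LINK_WIDTH_RD_SHIFT;
            return (idx < 7) ? lanes[idx] : 0;
    }

    int main(void)
    {
            printf("0x61 -> x%u negotiated\n", decode_lanes(0x61)); /* x16 */
            return 0;
    }
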
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 934a96a78540..af61fc29e843 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -169,7 +169,7 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
 }
 EXPORT_SYMBOL(ttm_bo_wait_unreserved);
 
-static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
+void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_mem_type_manager *man;
@@ -191,11 +191,7 @@ static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 	}
 }
 
-/**
- * Call with the lru_lock held.
- */
-
-static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
+int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
 {
 	int put_count = 0;
 
@@ -227,9 +223,18 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
 	/**
 	 * Deadlock avoidance for multi-bo reserving.
 	 */
-	if (use_sequence && bo->seq_valid &&
-	    (sequence - bo->val_seq < (1 << 31))) {
-		return -EAGAIN;
+	if (use_sequence && bo->seq_valid) {
+		/**
+		 * We've already reserved this one.
+		 */
+		if (unlikely(sequence == bo->val_seq))
+			return -EDEADLK;
+		/**
+		 * Already reserved by a thread that will not back
+		 * off for us. We need to back off.
+		 */
+		if (unlikely(sequence - bo->val_seq < (1 << 31)))
+			return -EAGAIN;
 	}
 
 	if (no_wait)
@@ -267,6 +272,13 @@ static void ttm_bo_ref_bug(struct kref *list_kref)
 	BUG();
 }
 
+void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
+			 bool never_free)
+{
+	kref_sub(&bo->list_kref, count,
+		 (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
+}
+
 int ttm_bo_reserve(struct ttm_buffer_object *bo,
 		   bool interruptible,
 		   bool no_wait, bool use_sequence, uint32_t sequence)
@@ -282,20 +294,24 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
 		put_count = ttm_bo_del_from_lru(bo);
 	spin_unlock(&glob->lru_lock);
 
-	while (put_count--)
-		kref_put(&bo->list_kref, ttm_bo_ref_bug);
+	ttm_bo_list_ref_sub(bo, put_count, true);
 
 	return ret;
 }
 
+void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
+{
+	ttm_bo_add_to_lru(bo);
+	atomic_set(&bo->reserved, 0);
+	wake_up_all(&bo->event_queue);
+}
+
 void ttm_bo_unreserve(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_global *glob = bo->glob;
 
 	spin_lock(&glob->lru_lock);
-	ttm_bo_add_to_lru(bo);
-	atomic_set(&bo->reserved, 0);
-	wake_up_all(&bo->event_queue);
+	ttm_bo_unreserve_locked(bo);
 	spin_unlock(&glob->lru_lock);
 }
 EXPORT_SYMBOL(ttm_bo_unreserve);
@@ -362,8 +378,13 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 	int ret = 0;
 
 	if (old_is_pci || new_is_pci ||
-	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0))
-		ttm_bo_unmap_virtual(bo);
+	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
+		ret = ttm_mem_io_lock(old_man, true);
+		if (unlikely(ret != 0))
+			goto out_err;
+		ttm_bo_unmap_virtual_locked(bo);
+		ttm_mem_io_unlock(old_man);
+	}
 
 	/*
 	 * Create and bind a ttm if required.
@@ -416,11 +437,9 @@ moved:
 	}
 
 	if (bo->mem.mm_node) {
-		spin_lock(&bo->lock);
 		bo->offset = (bo->mem.start << PAGE_SHIFT) +
 		    bdev->man[bo->mem.mem_type].gpu_offset;
 		bo->cur_placement = bo->mem.placement;
-		spin_unlock(&bo->lock);
 	} else
 		bo->offset = 0;
 
@@ -452,7 +471,6 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
 		ttm_tt_destroy(bo->ttm);
 		bo->ttm = NULL;
 	}
-
 	ttm_bo_mem_put(bo, &bo->mem);
 
 	atomic_set(&bo->reserved, 0);
@@ -474,14 +492,14 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 	int put_count;
 	int ret;
 
-	spin_lock(&bo->lock);
+	spin_lock(&bdev->fence_lock);
 	(void) ttm_bo_wait(bo, false, false, true);
 	if (!bo->sync_obj) {
 
 		spin_lock(&glob->lru_lock);
 
 		/**
-		 * Lock inversion between bo::reserve and bo::lock here,
+		 * Lock inversion between bo:reserve and bdev::fence_lock here,
 		 * but that's OK, since we're only trylocking.
 		 */
 
@@ -490,14 +508,13 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 		if (unlikely(ret == -EBUSY))
 			goto queue;
 
-		spin_unlock(&bo->lock);
+		spin_unlock(&bdev->fence_lock);
 		put_count = ttm_bo_del_from_lru(bo);
 
 		spin_unlock(&glob->lru_lock);
 		ttm_bo_cleanup_memtype_use(bo);
 
-		while (put_count--)
-			kref_put(&bo->list_kref, ttm_bo_ref_bug);
+		ttm_bo_list_ref_sub(bo, put_count, true);
 
 		return;
 	} else {
@@ -512,7 +529,7 @@ queue:
 	kref_get(&bo->list_kref);
 	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
 	spin_unlock(&glob->lru_lock);
-	spin_unlock(&bo->lock);
+	spin_unlock(&bdev->fence_lock);
 
 	if (sync_obj) {
 		driver->sync_obj_flush(sync_obj, sync_obj_arg);
@@ -537,14 +554,15 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 			       bool no_wait_reserve,
 			       bool no_wait_gpu)
 {
+	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_global *glob = bo->glob;
 	int put_count;
 	int ret = 0;
 
 retry:
-	spin_lock(&bo->lock);
+	spin_lock(&bdev->fence_lock);
 	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
-	spin_unlock(&bo->lock);
+	spin_unlock(&bdev->fence_lock);
 
 	if (unlikely(ret != 0))
 		return ret;
@@ -580,8 +598,7 @@ retry:
 	spin_unlock(&glob->lru_lock);
 	ttm_bo_cleanup_memtype_use(bo);
 
-	while (put_count--)
-		kref_put(&bo->list_kref, ttm_bo_ref_bug);
+	ttm_bo_list_ref_sub(bo, put_count, true);
 
 	return 0;
 }
@@ -652,6 +669,7 @@ static void ttm_bo_release(struct kref *kref)
 	struct ttm_buffer_object *bo =
 	    container_of(kref, struct ttm_buffer_object, kref);
 	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
 
 	if (likely(bo->vm_node != NULL)) {
 		rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
@@ -659,6 +677,9 @@ static void ttm_bo_release(struct kref *kref)
 		bo->vm_node = NULL;
 	}
 	write_unlock(&bdev->vm_lock);
+	ttm_mem_io_lock(man, false);
+	ttm_mem_io_free_vm(bo);
+	ttm_mem_io_unlock(man);
 	ttm_bo_cleanup_refs_or_queue(bo);
 	kref_put(&bo->list_kref, ttm_bo_release_list);
 	write_lock(&bdev->vm_lock);
@@ -698,9 +719,9 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 	struct ttm_placement placement;
 	int ret = 0;
 
-	spin_lock(&bo->lock);
+	spin_lock(&bdev->fence_lock);
 	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
-	spin_unlock(&bo->lock);
+	spin_unlock(&bdev->fence_lock);
 
 	if (unlikely(ret != 0)) {
 		if (ret != -ERESTARTSYS) {
@@ -715,7 +736,8 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 
 	evict_mem = bo->mem;
 	evict_mem.mm_node = NULL;
-	evict_mem.bus.io_reserved = false;
+	evict_mem.bus.io_reserved_vm = false;
+	evict_mem.bus.io_reserved_count = 0;
 
 	placement.fpfn = 0;
 	placement.lpfn = 0;
@@ -802,8 +824,7 @@ retry:
 
 	BUG_ON(ret != 0);
 
-	while (put_count--)
-		kref_put(&bo->list_kref, ttm_bo_ref_bug);
+	ttm_bo_list_ref_sub(bo, put_count, true);
 
 	ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
 	ttm_bo_unreserve(bo);
@@ -1036,6 +1057,7 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 {
 	int ret = 0;
 	struct ttm_mem_reg mem;
+	struct ttm_bo_device *bdev = bo->bdev;
 
 	BUG_ON(!atomic_read(&bo->reserved));
 
@@ -1044,15 +1066,16 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 	 * Have the driver move function wait for idle when necessary,
 	 * instead of doing it here.
 	 */
-	spin_lock(&bo->lock);
+	spin_lock(&bdev->fence_lock);
 	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
-	spin_unlock(&bo->lock);
+	spin_unlock(&bdev->fence_lock);
 	if (ret)
 		return ret;
 	mem.num_pages = bo->num_pages;
 	mem.size = mem.num_pages << PAGE_SHIFT;
 	mem.page_alignment = bo->mem.page_alignment;
-	mem.bus.io_reserved = false;
+	mem.bus.io_reserved_vm = false;
+	mem.bus.io_reserved_count = 0;
 	/*
 	 * Determine where to move the buffer.
 	 */
@@ -1163,7 +1186,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	}
 	bo->destroy = destroy;
 
-	spin_lock_init(&bo->lock);
 	kref_init(&bo->kref);
 	kref_init(&bo->list_kref);
 	atomic_set(&bo->cpu_writers, 0);
@@ -1172,6 +1194,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	INIT_LIST_HEAD(&bo->lru);
 	INIT_LIST_HEAD(&bo->ddestroy);
 	INIT_LIST_HEAD(&bo->swap);
+	INIT_LIST_HEAD(&bo->io_reserve_lru);
 	bo->bdev = bdev;
 	bo->glob = bdev->glob;
 	bo->type = type;
@@ -1181,7 +1204,8 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	bo->mem.num_pages = bo->num_pages;
 	bo->mem.mm_node = NULL;
 	bo->mem.page_alignment = page_alignment;
-	bo->mem.bus.io_reserved = false;
+	bo->mem.bus.io_reserved_vm = false;
+	bo->mem.bus.io_reserved_count = 0;
 	bo->buffer_start = buffer_start & PAGE_MASK;
 	bo->priv_flags = 0;
 	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
@@ -1355,6 +1379,10 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
 	BUG_ON(type >= TTM_NUM_MEM_TYPES);
 	man = &bdev->man[type];
 	BUG_ON(man->has_type);
+	man->io_reserve_fastpath = true;
+	man->use_io_reserve_lru = false;
+	mutex_init(&man->io_reserve_mutex);
+	INIT_LIST_HEAD(&man->io_reserve_lru);
 
 	ret = bdev->driver->init_mem_type(bdev, type, man);
 	if (ret)
@@ -1526,7 +1554,8 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
 	bdev->dev_mapping = NULL;
 	bdev->glob = glob;
 	bdev->need_dma32 = need_dma32;
-
+	bdev->val_seq = 0;
+	spin_lock_init(&bdev->fence_lock);
 	mutex_lock(&glob->device_list_mutex);
 	list_add_tail(&bdev->device_list, &glob->device_list);
 	mutex_unlock(&glob->device_list_mutex);
@@ -1560,7 +1589,7 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 	return true;
 }
 
-void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
+void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	loff_t offset = (loff_t) bo->addr_space_offset;
@@ -1569,8 +1598,20 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
 	if (!bdev->dev_mapping)
 		return;
 	unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
-	ttm_mem_io_free(bdev, &bo->mem);
+	ttm_mem_io_free_vm(bo);
+}
+
+void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
+
+	ttm_mem_io_lock(man, false);
+	ttm_bo_unmap_virtual_locked(bo);
+	ttm_mem_io_unlock(man);
 }
+
+
 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 
 static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
@@ -1650,6 +1691,7 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
 		bool lazy, bool interruptible, bool no_wait)
 {
 	struct ttm_bo_driver *driver = bo->bdev->driver;
+	struct ttm_bo_device *bdev = bo->bdev;
 	void *sync_obj;
 	void *sync_obj_arg;
 	int ret = 0;
@@ -1663,9 +1705,9 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
 		void *tmp_obj = bo->sync_obj;
 		bo->sync_obj = NULL;
 		clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
-		spin_unlock(&bo->lock);
+		spin_unlock(&bdev->fence_lock);
 		driver->sync_obj_unref(&tmp_obj);
-		spin_lock(&bo->lock);
+		spin_lock(&bdev->fence_lock);
 		continue;
 	}
 
@@ -1674,29 +1716,29 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
 
 		sync_obj = driver->sync_obj_ref(bo->sync_obj);
 		sync_obj_arg = bo->sync_obj_arg;
-		spin_unlock(&bo->lock);
+		spin_unlock(&bdev->fence_lock);
 		ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
 					    lazy, interruptible);
 		if (unlikely(ret != 0)) {
 			driver->sync_obj_unref(&sync_obj);
-			spin_lock(&bo->lock);
+			spin_lock(&bdev->fence_lock);
 			return ret;
 		}
-		spin_lock(&bo->lock);
+		spin_lock(&bdev->fence_lock);
 		if (likely(bo->sync_obj == sync_obj &&
 			   bo->sync_obj_arg == sync_obj_arg)) {
 			void *tmp_obj = bo->sync_obj;
 			bo->sync_obj = NULL;
 			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
 				  &bo->priv_flags);
-			spin_unlock(&bo->lock);
+			spin_unlock(&bdev->fence_lock);
 			driver->sync_obj_unref(&sync_obj);
 			driver->sync_obj_unref(&tmp_obj);
-			spin_lock(&bo->lock);
+			spin_lock(&bdev->fence_lock);
 		} else {
-			spin_unlock(&bo->lock);
+			spin_unlock(&bdev->fence_lock);
 			driver->sync_obj_unref(&sync_obj);
-			spin_lock(&bo->lock);
+			spin_lock(&bdev->fence_lock);
 		}
 	}
 	return 0;
@@ -1705,6 +1747,7 @@ EXPORT_SYMBOL(ttm_bo_wait);
 
 int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
 {
+	struct ttm_bo_device *bdev = bo->bdev;
 	int ret = 0;
 
 	/*
@@ -1714,9 +1757,9 @@ int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
 	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
 	if (unlikely(ret != 0))
 		return ret;
-	spin_lock(&bo->lock);
+	spin_lock(&bdev->fence_lock);
 	ret = ttm_bo_wait(bo, false, true, no_wait);
-	spin_unlock(&bo->lock);
+	spin_unlock(&bdev->fence_lock);
 	if (likely(ret == 0))
 		atomic_inc(&bo->cpu_writers);
 	ttm_bo_unreserve(bo);
@@ -1782,16 +1825,15 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 	put_count = ttm_bo_del_from_lru(bo);
 	spin_unlock(&glob->lru_lock);
 
-	while (put_count--)
-		kref_put(&bo->list_kref, ttm_bo_ref_bug);
+	ttm_bo_list_ref_sub(bo, put_count, true);
 
 	/**
 	 * Wait for GPU, then move to system cached.
 	 */
 
-	spin_lock(&bo->lock);
+	spin_lock(&bo->bdev->fence_lock);
 	ret = ttm_bo_wait(bo, false, false, false);
-	spin_unlock(&bo->lock);
+	spin_unlock(&bo->bdev->fence_lock);
 
 	if (unlikely(ret != 0))
 		goto out;
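
The reworked test in ttm_bo_reserve_locked() earlier in this file (-EDEADLK when sequence == bo->val_seq, -EAGAIN when sequence - bo->val_seq < (1 << 31)) leans on the usual unsigned-wraparound ordering trick: for u32 sequence numbers, (u32)(a - b) < 2^31 holds exactly when a is at or after b modulo 2^32. A self-contained illustration:

    /* Sketch: sequence ordering across u32 wraparound, as used above. */
    #include <stdint.h>
    #include <assert.h>

    static int seq_after_or_eq(uint32_t a, uint32_t b)
    {
            return (uint32_t)(a - b) < (1u << 31);
    }

    int main(void)
    {
            assert(seq_after_or_eq(5, 3));            /* plainly newer */
            assert(seq_after_or_eq(2, 0xFFFFFFF0u));  /* newer across the wrap */
            assert(!seq_after_or_eq(0xFFFFFFF0u, 2)); /* older across the wrap */
            return 0;
    }
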
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 3106d5bcce32..77dbf408c0d0 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -75,37 +75,123 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_move_ttm);
 
-int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
 {
-	int ret;
+	if (likely(man->io_reserve_fastpath))
+		return 0;
+
+	if (interruptible)
+		return mutex_lock_interruptible(&man->io_reserve_mutex);
+
+	mutex_lock(&man->io_reserve_mutex);
+	return 0;
+}
 
-	if (!mem->bus.io_reserved) {
-		mem->bus.io_reserved = true;
+void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
+{
+	if (likely(man->io_reserve_fastpath))
+		return;
+
+	mutex_unlock(&man->io_reserve_mutex);
+}
+
+static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
+{
+	struct ttm_buffer_object *bo;
+
+	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
+		return -EAGAIN;
+
+	bo = list_first_entry(&man->io_reserve_lru,
+			      struct ttm_buffer_object,
+			      io_reserve_lru);
+	list_del_init(&bo->io_reserve_lru);
+	ttm_bo_unmap_virtual_locked(bo);
+
+	return 0;
+}
+
+static int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
+			      struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+	int ret = 0;
+
+	if (!bdev->driver->io_mem_reserve)
+		return 0;
+	if (likely(man->io_reserve_fastpath))
+		return bdev->driver->io_mem_reserve(bdev, mem);
+
+	if (bdev->driver->io_mem_reserve &&
+	    mem->bus.io_reserved_count++ == 0) {
+retry:
 		ret = bdev->driver->io_mem_reserve(bdev, mem);
+		if (ret == -EAGAIN) {
+			ret = ttm_mem_io_evict(man);
+			if (ret == 0)
+				goto retry;
+		}
+	}
+	return ret;
+}
+
+static void ttm_mem_io_free(struct ttm_bo_device *bdev,
+			    struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+
+	if (likely(man->io_reserve_fastpath))
+		return;
+
+	if (bdev->driver->io_mem_reserve &&
+	    --mem->bus.io_reserved_count == 0 &&
+	    bdev->driver->io_mem_free)
+		bdev->driver->io_mem_free(bdev, mem);
+
+}
+
+int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
+{
+	struct ttm_mem_reg *mem = &bo->mem;
+	int ret;
+
+	if (!mem->bus.io_reserved_vm) {
+		struct ttm_mem_type_manager *man =
+			&bo->bdev->man[mem->mem_type];
+
+		ret = ttm_mem_io_reserve(bo->bdev, mem);
 		if (unlikely(ret != 0))
 			return ret;
+		mem->bus.io_reserved_vm = true;
+		if (man->use_io_reserve_lru)
+			list_add_tail(&bo->io_reserve_lru,
+				      &man->io_reserve_lru);
 	}
 	return 0;
 }
 
-void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
 {
-	if (bdev->driver->io_mem_reserve) {
-		if (mem->bus.io_reserved) {
-			mem->bus.io_reserved = false;
-			bdev->driver->io_mem_free(bdev, mem);
-		}
+	struct ttm_mem_reg *mem = &bo->mem;
+
+	if (mem->bus.io_reserved_vm) {
+		mem->bus.io_reserved_vm = false;
+		list_del_init(&bo->io_reserve_lru);
+		ttm_mem_io_free(bo->bdev, mem);
 	}
 }
 
 int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
 			void **virtual)
 {
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 	int ret;
 	void *addr;
 
 	*virtual = NULL;
+	(void) ttm_mem_io_lock(man, false);
 	ret = ttm_mem_io_reserve(bdev, mem);
+	ttm_mem_io_unlock(man);
 	if (ret || !mem->bus.is_iomem)
 		return ret;
 
@@ -117,7 +203,9 @@ int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
 	else
 		addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
 	if (!addr) {
+		(void) ttm_mem_io_lock(man, false);
 		ttm_mem_io_free(bdev, mem);
+		ttm_mem_io_unlock(man);
 		return -ENOMEM;
 	}
 }
@@ -134,7 +222,9 @@ void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
 
 	if (virtual && mem->bus.addr == NULL)
 		iounmap(virtual);
+	(void) ttm_mem_io_lock(man, false);
 	ttm_mem_io_free(bdev, mem);
+	ttm_mem_io_unlock(man);
 }
 
 static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
@@ -231,7 +321,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
 	struct ttm_tt *ttm = bo->ttm;
 	struct ttm_mem_reg *old_mem = &bo->mem;
-	struct ttm_mem_reg old_copy = *old_mem;
+	struct ttm_mem_reg old_copy;
 	void *old_iomap;
 	void *new_iomap;
 	int ret;
@@ -280,8 +370,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 	}
 	mb();
 out2:
-	ttm_bo_free_old_node(bo);
-
+	old_copy = *old_mem;
 	*old_mem = *new_mem;
 	new_mem->mm_node = NULL;
 
@@ -292,9 +381,10 @@ out2:
 	}
 
 out1:
-	ttm_mem_reg_iounmap(bdev, new_mem, new_iomap);
+	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
 out:
 	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
+	ttm_bo_mem_put(bo, &old_copy);
 	return ret;
 }
 EXPORT_SYMBOL(ttm_bo_move_memcpy);
@@ -337,11 +427,11 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	 * TODO: Explicit member copy would probably be better here.
 	 */
 
-	spin_lock_init(&fbo->lock);
 	init_waitqueue_head(&fbo->event_queue);
 	INIT_LIST_HEAD(&fbo->ddestroy);
 	INIT_LIST_HEAD(&fbo->lru);
 	INIT_LIST_HEAD(&fbo->swap);
+	INIT_LIST_HEAD(&fbo->io_reserve_lru);
 	fbo->vm_node = NULL;
 	atomic_set(&fbo->cpu_writers, 0);
 
@@ -453,6 +543,8 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
 		unsigned long start_page, unsigned long num_pages,
 		struct ttm_bo_kmap_obj *map)
 {
+	struct ttm_mem_type_manager *man =
+		&bo->bdev->man[bo->mem.mem_type];
 	unsigned long offset, size;
 	int ret;
 
@@ -467,7 +559,9 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
 	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
 		return -EPERM;
 #endif
+	(void) ttm_mem_io_lock(man, false);
 	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
+	ttm_mem_io_unlock(man);
 	if (ret)
 		return ret;
 	if (!bo->mem.bus.is_iomem) {
@@ -482,12 +576,15 @@ EXPORT_SYMBOL(ttm_bo_kmap);
 
 void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 {
+	struct ttm_buffer_object *bo = map->bo;
+	struct ttm_mem_type_manager *man =
+		&bo->bdev->man[bo->mem.mem_type];
+
 	if (!map->virtual)
 		return;
 	switch (map->bo_kmap_type) {
 	case ttm_bo_map_iomap:
 		iounmap(map->virtual);
-		ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
 		break;
 	case ttm_bo_map_vmap:
 		vunmap(map->virtual);
@@ -500,6 +597,9 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 	default:
 		BUG();
 	}
+	(void) ttm_mem_io_lock(man, false);
+	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
+	ttm_mem_io_unlock(man);
 	map->virtual = NULL;
 	map->page = NULL;
 }
@@ -520,7 +620,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 	struct ttm_buffer_object *ghost_obj;
 	void *tmp_obj = NULL;
 
-	spin_lock(&bo->lock);
+	spin_lock(&bdev->fence_lock);
 	if (bo->sync_obj) {
 		tmp_obj = bo->sync_obj;
 		bo->sync_obj = NULL;
@@ -529,7 +629,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 	bo->sync_obj_arg = sync_obj_arg;
 	if (evict) {
 		ret = ttm_bo_wait(bo, false, false, false);
-		spin_unlock(&bo->lock);
+		spin_unlock(&bdev->fence_lock);
 		if (tmp_obj)
 			driver->sync_obj_unref(&tmp_obj);
 		if (ret)
@@ -552,7 +652,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 	 */
 
 	set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
-	spin_unlock(&bo->lock);
+	spin_unlock(&bdev->fence_lock);
 	if (tmp_obj)
 		driver->sync_obj_unref(&tmp_obj);
 
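
The new ttm_mem_io_reserve()/ttm_mem_io_free() pair in this file turns the driver's io_mem_reserve callback into a refcounted resource on the slow path: the first reserve (io_reserved_count going 0 to 1) calls into the driver, nested reserves only bump the count, and the last free calls io_mem_free. A toy model of that discipline (names invented, no TTM types involved):

    /* Toy model of the refcounted reserve/free pairing used above. */
    #include <assert.h>

    static int driver_reservations;

    static void resource_get(int *count)
    {
            if ((*count)++ == 0)
                    driver_reservations++; /* first user reserves the aperture */
    }

    static void resource_put(int *count)
    {
            if (--(*count) == 0)
                    driver_reservations--; /* last user frees it */
    }

    int main(void)
    {
            int count = 0;
            resource_get(&count);
            resource_get(&count); /* nested user: no extra driver call */
            resource_put(&count);
            resource_put(&count);
            assert(count == 0 && driver_reservations == 0);
            return 0;
    }
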
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index fe6cb77899f4..221b924acebe 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -83,6 +83,8 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	int i;
 	unsigned long address = (unsigned long)vmf->virtual_address;
 	int retval = VM_FAULT_NOPAGE;
+	struct ttm_mem_type_manager *man =
+		&bdev->man[bo->mem.mem_type];
 
 	/*
 	 * Work around locking order reversal in fault / nopfn
@@ -118,24 +120,28 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	 * move.
 	 */
 
-	spin_lock(&bo->lock);
+	spin_lock(&bdev->fence_lock);
 	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
 		ret = ttm_bo_wait(bo, false, true, false);
-		spin_unlock(&bo->lock);
+		spin_unlock(&bdev->fence_lock);
 		if (unlikely(ret != 0)) {
 			retval = (ret != -ERESTARTSYS) ?
 			    VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
 			goto out_unlock;
 		}
 	} else
-		spin_unlock(&bo->lock);
+		spin_unlock(&bdev->fence_lock);
 
-
-	ret = ttm_mem_io_reserve(bdev, &bo->mem);
-	if (ret) {
-		retval = VM_FAULT_SIGBUS;
+	ret = ttm_mem_io_lock(man, true);
+	if (unlikely(ret != 0)) {
+		retval = VM_FAULT_NOPAGE;
 		goto out_unlock;
 	}
+	ret = ttm_mem_io_reserve_vm(bo);
+	if (unlikely(ret != 0)) {
+		retval = VM_FAULT_SIGBUS;
+		goto out_io_unlock;
+	}
 
 	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
 	    bo->vm_node->start - vma->vm_pgoff;
@@ -144,7 +150,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
 	if (unlikely(page_offset >= bo->num_pages)) {
 		retval = VM_FAULT_SIGBUS;
-		goto out_unlock;
+		goto out_io_unlock;
 	}
 
 	/*
@@ -182,7 +188,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		page = ttm_tt_get_page(ttm, page_offset);
 		if (unlikely(!page && i == 0)) {
 			retval = VM_FAULT_OOM;
-			goto out_unlock;
+			goto out_io_unlock;
 		} else if (unlikely(!page)) {
 			break;
 		}
@@ -200,14 +206,15 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		else if (unlikely(ret != 0)) {
 			retval =
 			    (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
-			goto out_unlock;
+			goto out_io_unlock;
 		}
 
 		address += PAGE_SIZE;
 		if (unlikely(++page_offset >= page_last))
 			break;
 	}
-
+out_io_unlock:
+	ttm_mem_io_unlock(man);
 out_unlock:
 	ttm_bo_unreserve(bo);
 	return retval;
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index c285c2902d15..3832fe10b4df 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -32,7 +32,7 @@
32#include <linux/sched.h> 32#include <linux/sched.h>
33#include <linux/module.h> 33#include <linux/module.h>
34 34
35void ttm_eu_backoff_reservation(struct list_head *list) 35static void ttm_eu_backoff_reservation_locked(struct list_head *list)
36{ 36{
37 struct ttm_validate_buffer *entry; 37 struct ttm_validate_buffer *entry;
38 38
@@ -41,10 +41,77 @@ void ttm_eu_backoff_reservation(struct list_head *list)
41 if (!entry->reserved) 41 if (!entry->reserved)
42 continue; 42 continue;
43 43
44 if (entry->removed) {
45 ttm_bo_add_to_lru(bo);
46 entry->removed = false;
47
48 }
44 entry->reserved = false; 49 entry->reserved = false;
45 ttm_bo_unreserve(bo); 50 atomic_set(&bo->reserved, 0);
51 wake_up_all(&bo->event_queue);
52 }
53}
54
55static void ttm_eu_del_from_lru_locked(struct list_head *list)
56{
57 struct ttm_validate_buffer *entry;
58
59 list_for_each_entry(entry, list, head) {
60 struct ttm_buffer_object *bo = entry->bo;
61 if (!entry->reserved)
62 continue;
63
64 if (!entry->removed) {
65 entry->put_count = ttm_bo_del_from_lru(bo);
66 entry->removed = true;
67 }
46 } 68 }
47} 69}
70
71static void ttm_eu_list_ref_sub(struct list_head *list)
72{
73 struct ttm_validate_buffer *entry;
74
75 list_for_each_entry(entry, list, head) {
76 struct ttm_buffer_object *bo = entry->bo;
77
78 if (entry->put_count) {
79 ttm_bo_list_ref_sub(bo, entry->put_count, true);
80 entry->put_count = 0;
81 }
82 }
83}
84
85static int ttm_eu_wait_unreserved_locked(struct list_head *list,
86 struct ttm_buffer_object *bo)
87{
88 struct ttm_bo_global *glob = bo->glob;
89 int ret;
90
91 ttm_eu_del_from_lru_locked(list);
92 spin_unlock(&glob->lru_lock);
93 ret = ttm_bo_wait_unreserved(bo, true);
94 spin_lock(&glob->lru_lock);
95 if (unlikely(ret != 0))
96 ttm_eu_backoff_reservation_locked(list);
97 return ret;
98}
99
100
101void ttm_eu_backoff_reservation(struct list_head *list)
102{
103 struct ttm_validate_buffer *entry;
104 struct ttm_bo_global *glob;
105
106 if (list_empty(list))
107 return;
108
109 entry = list_first_entry(list, struct ttm_validate_buffer, head);
110 glob = entry->bo->glob;
111 spin_lock(&glob->lru_lock);
112 ttm_eu_backoff_reservation_locked(list);
113 spin_unlock(&glob->lru_lock);
114}
48EXPORT_SYMBOL(ttm_eu_backoff_reservation); 115EXPORT_SYMBOL(ttm_eu_backoff_reservation);
49 116
50/* 117/*
@@ -59,37 +126,76 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
59 * buffers in different orders. 126 * buffers in different orders.
60 */ 127 */
61 128
62int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq) 129int ttm_eu_reserve_buffers(struct list_head *list)
63{ 130{
131 struct ttm_bo_global *glob;
64 struct ttm_validate_buffer *entry; 132 struct ttm_validate_buffer *entry;
65 int ret; 133 int ret;
134 uint32_t val_seq;
135
136 if (list_empty(list))
137 return 0;
138
139 list_for_each_entry(entry, list, head) {
140 entry->reserved = false;
141 entry->put_count = 0;
142 entry->removed = false;
143 }
144
145 entry = list_first_entry(list, struct ttm_validate_buffer, head);
146 glob = entry->bo->glob;
66 147
67retry: 148retry:
149 spin_lock(&glob->lru_lock);
150 val_seq = entry->bo->bdev->val_seq++;
151
68 list_for_each_entry(entry, list, head) { 152 list_for_each_entry(entry, list, head) {
69 struct ttm_buffer_object *bo = entry->bo; 153 struct ttm_buffer_object *bo = entry->bo;
70 154
71 entry->reserved = false; 155retry_this_bo:
72 ret = ttm_bo_reserve(bo, true, false, true, val_seq); 156 ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq);
73 if (ret != 0) { 157 switch (ret) {
74 ttm_eu_backoff_reservation(list); 158 case 0:
75 if (ret == -EAGAIN) { 159 break;
76 ret = ttm_bo_wait_unreserved(bo, true); 160 case -EBUSY:
77 if (unlikely(ret != 0)) 161 ret = ttm_eu_wait_unreserved_locked(list, bo);
78 return ret; 162 if (unlikely(ret != 0)) {
79 goto retry; 163 spin_unlock(&glob->lru_lock);
80 } else 164 ttm_eu_list_ref_sub(list);
81 return ret; 165 return ret;
166 }
167 goto retry_this_bo;
168 case -EAGAIN:
169 ttm_eu_backoff_reservation_locked(list);
170 spin_unlock(&glob->lru_lock);
171 ttm_eu_list_ref_sub(list);
172 ret = ttm_bo_wait_unreserved(bo, true);
173 if (unlikely(ret != 0))
174 return ret;
175 goto retry;
176 default:
177 ttm_eu_backoff_reservation_locked(list);
178 spin_unlock(&glob->lru_lock);
179 ttm_eu_list_ref_sub(list);
180 return ret;
82 } 181 }
83 182
84 entry->reserved = true; 183 entry->reserved = true;
85 if (unlikely(atomic_read(&bo->cpu_writers) > 0)) { 184 if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
86 ttm_eu_backoff_reservation(list); 185 ttm_eu_backoff_reservation_locked(list);
186 spin_unlock(&glob->lru_lock);
187 ttm_eu_list_ref_sub(list);
87 ret = ttm_bo_wait_cpu(bo, false); 188 ret = ttm_bo_wait_cpu(bo, false);
88 if (ret) 189 if (ret)
89 return ret; 190 return ret;
90 goto retry; 191 goto retry;
91 } 192 }
92 } 193 }
194
195 ttm_eu_del_from_lru_locked(list);
196 spin_unlock(&glob->lru_lock);
197 ttm_eu_list_ref_sub(list);
198
93 return 0; 199 return 0;
94} 200}
95EXPORT_SYMBOL(ttm_eu_reserve_buffers); 201EXPORT_SYMBOL(ttm_eu_reserve_buffers);
@@ -97,21 +203,36 @@ EXPORT_SYMBOL(ttm_eu_reserve_buffers);
97void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj) 203void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
98{ 204{
99 struct ttm_validate_buffer *entry; 205 struct ttm_validate_buffer *entry;
206 struct ttm_buffer_object *bo;
207 struct ttm_bo_global *glob;
208 struct ttm_bo_device *bdev;
209 struct ttm_bo_driver *driver;
100 210
101 list_for_each_entry(entry, list, head) { 211 if (list_empty(list))
102 struct ttm_buffer_object *bo = entry->bo; 212 return;
103 struct ttm_bo_driver *driver = bo->bdev->driver; 213
104 void *old_sync_obj; 214 bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
215 bdev = bo->bdev;
216 driver = bdev->driver;
217 glob = bo->glob;
105 218
106 spin_lock(&bo->lock); 219 spin_lock(&bdev->fence_lock);
107 old_sync_obj = bo->sync_obj; 220 spin_lock(&glob->lru_lock);
221
222 list_for_each_entry(entry, list, head) {
223 bo = entry->bo;
224 entry->old_sync_obj = bo->sync_obj;
108 bo->sync_obj = driver->sync_obj_ref(sync_obj); 225 bo->sync_obj = driver->sync_obj_ref(sync_obj);
109 bo->sync_obj_arg = entry->new_sync_obj_arg; 226 bo->sync_obj_arg = entry->new_sync_obj_arg;
110 spin_unlock(&bo->lock); 227 ttm_bo_unreserve_locked(bo);
111 ttm_bo_unreserve(bo);
112 entry->reserved = false; 228 entry->reserved = false;
113 if (old_sync_obj) 229 }
114 driver->sync_obj_unref(&old_sync_obj); 230 spin_unlock(&glob->lru_lock);
231 spin_unlock(&bdev->fence_lock);
232
233 list_for_each_entry(entry, list, head) {
234 if (entry->old_sync_obj)
235 driver->sync_obj_unref(&entry->old_sync_obj);
115 } 236 }
116} 237}
117EXPORT_SYMBOL(ttm_eu_fence_buffer_objects); 238EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
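The reworked helpers above change the driver-facing contract: ttm_eu_reserve_buffers() no longer takes a caller-supplied validation sequence (the device-global val_seq is now consumed under the LRU lock), and on any failure the whole list is left backed off. Below is a minimal sketch of the resulting submit path, assuming a validate list the caller has already populated with ttm_validate_buffer entries; the function name is hypothetical and the include path is the one used by the in-tree drivers:

	#include "ttm/ttm_execbuf_util.h"

	static int example_submit(struct list_head *validate_list, void *fence)
	{
		int ret;

		/* Reserve all buffers; the sequence number is now internal. */
		ret = ttm_eu_reserve_buffers(validate_list);
		if (unlikely(ret != 0))
			return ret;	/* nothing stays reserved on failure */

		/* ... validate placements, build and submit the command stream;
		 * on an error here, call ttm_eu_backoff_reservation(validate_list)
		 * instead of fencing ... */

		/* Attach the fence and unreserve everything in one pass. */
		ttm_eu_fence_buffer_objects(validate_list, fence);
		return 0;
	}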
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index e7a58d055041..10fc01f69c40 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -264,7 +264,6 @@ struct vmw_private {
264 */ 264 */
265 265
266 struct vmw_sw_context ctx; 266 struct vmw_sw_context ctx;
267 uint32_t val_seq;
268 struct mutex cmdbuf_mutex; 267 struct mutex cmdbuf_mutex;
269 268
270 /** 269 /**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 76954e3528c1..41b95ed6dbcd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -653,8 +653,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
653 ret = vmw_cmd_check_all(dev_priv, sw_context, cmd, arg->command_size); 653 ret = vmw_cmd_check_all(dev_priv, sw_context, cmd, arg->command_size);
654 if (unlikely(ret != 0)) 654 if (unlikely(ret != 0))
655 goto out_err; 655 goto out_err;
656 ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes, 656 ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes);
657 dev_priv->val_seq++);
658 if (unlikely(ret != 0)) 657 if (unlikely(ret != 0))
659 goto out_err; 658 goto out_err;
660 659
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index fe096a7cc0d7..bfab60c938ac 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -480,9 +480,6 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
480 info->fix.smem_start = 0; 480 info->fix.smem_start = 0;
481 info->fix.smem_len = fb_size; 481 info->fix.smem_len = fb_size;
482 482
483 info->fix.mmio_start = 0;
484 info->fix.mmio_len = 0;
485
486 info->pseudo_palette = par->pseudo_palette; 483 info->pseudo_palette = par->pseudo_palette;
487 info->screen_base = par->vmalloc; 484 info->screen_base = par->vmalloc;
488 info->screen_size = fb_size; 485 info->screen_size = fb_size;
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index c8768f38511e..e01cacba685f 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -33,6 +33,7 @@ struct vga_switcheroo_client {
33 struct fb_info *fb_info; 33 struct fb_info *fb_info;
34 int pwr_state; 34 int pwr_state;
35 void (*set_gpu_state)(struct pci_dev *pdev, enum vga_switcheroo_state); 35 void (*set_gpu_state)(struct pci_dev *pdev, enum vga_switcheroo_state);
36 void (*reprobe)(struct pci_dev *pdev);
36 bool (*can_switch)(struct pci_dev *pdev); 37 bool (*can_switch)(struct pci_dev *pdev);
37 int id; 38 int id;
38 bool active; 39 bool active;
@@ -103,6 +104,7 @@ static void vga_switcheroo_enable(void)
103 104
104int vga_switcheroo_register_client(struct pci_dev *pdev, 105int vga_switcheroo_register_client(struct pci_dev *pdev,
105 void (*set_gpu_state)(struct pci_dev *pdev, enum vga_switcheroo_state), 106 void (*set_gpu_state)(struct pci_dev *pdev, enum vga_switcheroo_state),
107 void (*reprobe)(struct pci_dev *pdev),
106 bool (*can_switch)(struct pci_dev *pdev)) 108 bool (*can_switch)(struct pci_dev *pdev))
107{ 109{
108 int index; 110 int index;
@@ -117,6 +119,7 @@ int vga_switcheroo_register_client(struct pci_dev *pdev,
117 vgasr_priv.clients[index].pwr_state = VGA_SWITCHEROO_ON; 119 vgasr_priv.clients[index].pwr_state = VGA_SWITCHEROO_ON;
118 vgasr_priv.clients[index].pdev = pdev; 120 vgasr_priv.clients[index].pdev = pdev;
119 vgasr_priv.clients[index].set_gpu_state = set_gpu_state; 121 vgasr_priv.clients[index].set_gpu_state = set_gpu_state;
122 vgasr_priv.clients[index].reprobe = reprobe;
120 vgasr_priv.clients[index].can_switch = can_switch; 123 vgasr_priv.clients[index].can_switch = can_switch;
121 vgasr_priv.clients[index].id = -1; 124 vgasr_priv.clients[index].id = -1;
122 if (pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW) 125 if (pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW)
@@ -174,7 +177,8 @@ static int vga_switcheroo_show(struct seq_file *m, void *v)
174 int i; 177 int i;
175 mutex_lock(&vgasr_mutex); 178 mutex_lock(&vgasr_mutex);
176 for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) { 179 for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
177 seq_printf(m, "%d:%c:%s:%s\n", i, 180 seq_printf(m, "%d:%s:%c:%s:%s\n", i,
181 vgasr_priv.clients[i].id == VGA_SWITCHEROO_DIS ? "DIS" : "IGD",
178 vgasr_priv.clients[i].active ? '+' : ' ', 182 vgasr_priv.clients[i].active ? '+' : ' ',
179 vgasr_priv.clients[i].pwr_state ? "Pwr" : "Off", 183 vgasr_priv.clients[i].pwr_state ? "Pwr" : "Off",
180 pci_name(vgasr_priv.clients[i].pdev)); 184 pci_name(vgasr_priv.clients[i].pdev));
@@ -190,9 +194,8 @@ static int vga_switcheroo_debugfs_open(struct inode *inode, struct file *file)
190 194
191static int vga_switchon(struct vga_switcheroo_client *client) 195static int vga_switchon(struct vga_switcheroo_client *client)
192{ 196{
193 int ret; 197 if (vgasr_priv.handler->power_state)
194 198 vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_ON);
195 ret = vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_ON);
196 /* call the driver callback to turn on device */ 199 /* call the driver callback to turn on device */
197 client->set_gpu_state(client->pdev, VGA_SWITCHEROO_ON); 200 client->set_gpu_state(client->pdev, VGA_SWITCHEROO_ON);
198 client->pwr_state = VGA_SWITCHEROO_ON; 201 client->pwr_state = VGA_SWITCHEROO_ON;
@@ -203,12 +206,14 @@ static int vga_switchoff(struct vga_switcheroo_client *client)
203{ 206{
204 /* call the driver callback to turn off device */ 207 /* call the driver callback to turn off device */
205 client->set_gpu_state(client->pdev, VGA_SWITCHEROO_OFF); 208 client->set_gpu_state(client->pdev, VGA_SWITCHEROO_OFF);
206 vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_OFF); 209 if (vgasr_priv.handler->power_state)
210 vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_OFF);
207 client->pwr_state = VGA_SWITCHEROO_OFF; 211 client->pwr_state = VGA_SWITCHEROO_OFF;
208 return 0; 212 return 0;
209} 213}
210 214
211static int vga_switchto(struct vga_switcheroo_client *new_client) 215/* stage one happens before delay */
216static int vga_switchto_stage1(struct vga_switcheroo_client *new_client)
212{ 217{
213 int ret; 218 int ret;
214 int i; 219 int i;
@@ -235,10 +240,28 @@ static int vga_switchto(struct vga_switcheroo_client *new_client)
235 vga_switchon(new_client); 240 vga_switchon(new_client);
236 241
237 /* swap shadow resource to denote boot VGA device has changed so X starts on new device */ 242 /* swap shadow resource to denote boot VGA device has changed so X starts on new device */
238 active->active = false;
239
240 active->pdev->resource[PCI_ROM_RESOURCE].flags &= ~IORESOURCE_ROM_SHADOW; 243 active->pdev->resource[PCI_ROM_RESOURCE].flags &= ~IORESOURCE_ROM_SHADOW;
241 new_client->pdev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_SHADOW; 244 new_client->pdev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_SHADOW;
245 return 0;
246}
247
248/* post delay */
249static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)
250{
251 int ret;
252 int i;
253 struct vga_switcheroo_client *active = NULL;
254
255 for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
256 if (vgasr_priv.clients[i].active == true) {
257 active = &vgasr_priv.clients[i];
258 break;
259 }
260 }
261 if (!active)
262 return 0;
263
264 active->active = false;
242 265
243 if (new_client->fb_info) { 266 if (new_client->fb_info) {
244 struct fb_event event; 267 struct fb_event event;
@@ -250,6 +273,9 @@ static int vga_switchto(struct vga_switcheroo_client *new_client)
250 if (ret) 273 if (ret)
251 return ret; 274 return ret;
252 275
276 if (new_client->reprobe)
277 new_client->reprobe(new_client->pdev);
278
253 if (active->pwr_state == VGA_SWITCHEROO_ON) 279 if (active->pwr_state == VGA_SWITCHEROO_ON)
254 vga_switchoff(active); 280 vga_switchoff(active);
255 281
@@ -265,6 +291,7 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
265 const char *pdev_name; 291 const char *pdev_name;
266 int i, ret; 292 int i, ret;
267 bool delay = false, can_switch; 293 bool delay = false, can_switch;
294 bool just_mux = false;
268 int client_id = -1; 295 int client_id = -1;
269 struct vga_switcheroo_client *client = NULL; 296 struct vga_switcheroo_client *client = NULL;
270 297
@@ -319,6 +346,15 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
319 if (strncmp(usercmd, "DIS", 3) == 0) 346 if (strncmp(usercmd, "DIS", 3) == 0)
320 client_id = VGA_SWITCHEROO_DIS; 347 client_id = VGA_SWITCHEROO_DIS;
321 348
349 if (strncmp(usercmd, "MIGD", 4) == 0) {
350 just_mux = true;
351 client_id = VGA_SWITCHEROO_IGD;
352 }
353 if (strncmp(usercmd, "MDIS", 4) == 0) {
354 just_mux = true;
355 client_id = VGA_SWITCHEROO_DIS;
356 }
357
322 if (client_id == -1) 358 if (client_id == -1)
323 goto out; 359 goto out;
324 360
@@ -330,6 +366,12 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
330 } 366 }
331 367
332 vgasr_priv.delayed_switch_active = false; 368 vgasr_priv.delayed_switch_active = false;
369
370 if (just_mux) {
371 ret = vgasr_priv.handler->switchto(client_id);
372 goto out;
373 }
374
333 /* okay we want a switch - test if devices are willing to switch */ 375 /* okay we want a switch - test if devices are willing to switch */
334 can_switch = true; 376 can_switch = true;
335 for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) { 377 for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
@@ -345,18 +387,22 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
345 387
346 if (can_switch == true) { 388 if (can_switch == true) {
347 pdev_name = pci_name(client->pdev); 389 pdev_name = pci_name(client->pdev);
348 ret = vga_switchto(client); 390 ret = vga_switchto_stage1(client);
349 if (ret) 391 if (ret)
350 printk(KERN_ERR "vga_switcheroo: switching failed %d\n", ret); 392 printk(KERN_ERR "vga_switcheroo: switching failed stage 1 %d\n", ret);
393
394 ret = vga_switchto_stage2(client);
395 if (ret)
396 printk(KERN_ERR "vga_switcheroo: switching failed stage 2 %d\n", ret);
397
351 } else { 398 } else {
352 printk(KERN_INFO "vga_switcheroo: setting delayed switch to client %d\n", client->id); 399 printk(KERN_INFO "vga_switcheroo: setting delayed switch to client %d\n", client->id);
353 vgasr_priv.delayed_switch_active = true; 400 vgasr_priv.delayed_switch_active = true;
354 vgasr_priv.delayed_client_id = client_id; 401 vgasr_priv.delayed_client_id = client_id;
355 402
356 /* we should at least power up the card to 403 ret = vga_switchto_stage1(client);
357 make the switch faster */ 404 if (ret)
358 if (client->pwr_state == VGA_SWITCHEROO_OFF) 405 printk(KERN_ERR "vga_switcheroo: delayed switching stage 1 failed %d\n", ret);
359 vga_switchon(client);
360 } 406 }
361 407
362out: 408out:
@@ -438,9 +484,9 @@ int vga_switcheroo_process_delayed_switch(void)
438 goto err; 484 goto err;
439 485
440 pdev_name = pci_name(client->pdev); 486 pdev_name = pci_name(client->pdev);
441 ret = vga_switchto(client); 487 ret = vga_switchto_stage2(client);
442 if (ret) 488 if (ret)
443 printk(KERN_ERR "vga_switcheroo: delayed switching failed %d\n", ret); 489 printk(KERN_ERR "vga_switcheroo: delayed switching failed stage 2 %d\n", ret);
444 490
445 vgasr_priv.delayed_switch_active = false; 491 vgasr_priv.delayed_switch_active = false;
446 err = 0; 492 err = 0;
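Besides the stage1/stage2 split, the new MIGD/MDIS commands flip only the multiplexer via handler->switchto(), skipping the driver handover and power transitions entirely. A hedged userspace sketch of driving the control file follows; the debugfs path matches the file vga_switcheroo registers in mainline but is not shown in this diff:

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		/* Mux-only switch to the discrete GPU; no fb or driver handover. */
		int fd = open("/sys/kernel/debug/vgaswitcheroo/switch", O_WRONLY);

		if (fd < 0)
			return 1;
		if (write(fd, "MDIS", 4) != 4) {
			close(fd);
			return 1;
		}
		return close(fd) == 0 ? 0 : 1;
	}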
diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c
index a39e6cff86e7..38319a69bd0a 100644
--- a/drivers/i2c/algos/i2c-algo-bit.c
+++ b/drivers/i2c/algos/i2c-algo-bit.c
@@ -600,12 +600,14 @@ static const struct i2c_algorithm i2c_bit_algo = {
600/* 600/*
601 * registering functions to load algorithms at runtime 601 * registering functions to load algorithms at runtime
602 */ 602 */
603static int i2c_bit_prepare_bus(struct i2c_adapter *adap) 603static int __i2c_bit_add_bus(struct i2c_adapter *adap,
604 int (*add_adapter)(struct i2c_adapter *))
604{ 605{
605 struct i2c_algo_bit_data *bit_adap = adap->algo_data; 606 struct i2c_algo_bit_data *bit_adap = adap->algo_data;
607 int ret;
606 608
607 if (bit_test) { 609 if (bit_test) {
608 int ret = test_bus(bit_adap, adap->name); 610 ret = test_bus(bit_adap, adap->name);
609 if (ret < 0) 611 if (ret < 0)
610 return -ENODEV; 612 return -ENODEV;
611 } 613 }
@@ -614,30 +616,27 @@ static int i2c_bit_prepare_bus(struct i2c_adapter *adap)
614 adap->algo = &i2c_bit_algo; 616 adap->algo = &i2c_bit_algo;
615 adap->retries = 3; 617 adap->retries = 3;
616 618
619 ret = add_adapter(adap);
620 if (ret < 0)
621 return ret;
622
623 /* Complain if SCL can't be read */
624 if (bit_adap->getscl == NULL) {
625 dev_warn(&adap->dev, "Not I2C compliant: can't read SCL\n");
626 dev_warn(&adap->dev, "Bus may be unreliable\n");
627 }
617 return 0; 628 return 0;
618} 629}
619 630
620int i2c_bit_add_bus(struct i2c_adapter *adap) 631int i2c_bit_add_bus(struct i2c_adapter *adap)
621{ 632{
622 int err; 633 return __i2c_bit_add_bus(adap, i2c_add_adapter);
623
624 err = i2c_bit_prepare_bus(adap);
625 if (err)
626 return err;
627
628 return i2c_add_adapter(adap);
629} 634}
630EXPORT_SYMBOL(i2c_bit_add_bus); 635EXPORT_SYMBOL(i2c_bit_add_bus);
631 636
632int i2c_bit_add_numbered_bus(struct i2c_adapter *adap) 637int i2c_bit_add_numbered_bus(struct i2c_adapter *adap)
633{ 638{
634 int err; 639 return __i2c_bit_add_bus(adap, i2c_add_numbered_adapter);
635
636 err = i2c_bit_prepare_bus(adap);
637 if (err)
638 return err;
639
640 return i2c_add_numbered_adapter(adap);
641} 640}
642EXPORT_SYMBOL(i2c_bit_add_numbered_bus); 641EXPORT_SYMBOL(i2c_bit_add_numbered_bus);
643 642
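After this refactor both entry points share __i2c_bit_add_bus(), which registers the adapter first and only then warns when SCL cannot be read back. A minimal sketch of a bit-banging bus driver using the API; all example_* names are placeholders and the line callbacks are stubs:

	#include <linux/i2c.h>
	#include <linux/i2c-algo-bit.h>

	static void example_setsda(void *data, int state) { /* drive SDA */ }
	static void example_setscl(void *data, int state) { /* drive SCL */ }
	static int example_getsda(void *data) { return 1; /* read SDA */ }
	/* No .getscl: the core now registers the bus anyway and then warns
	 * that it may be unreliable. */

	static struct i2c_algo_bit_data example_bit_data = {
		.setsda		= example_setsda,
		.setscl		= example_setscl,
		.getsda		= example_getsda,
		.udelay		= 5,	/* half-period in us, ~100 kHz */
		.timeout	= 100,	/* jiffies */
	};

	static struct i2c_adapter example_adapter = {
		.owner		= THIS_MODULE,
		.name		= "example bit-bang bus",
		.algo_data	= &example_bit_data,
	};

	/* In the bus driver's probe: ret = i2c_bit_add_bus(&example_adapter); */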
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 02835ce7ff4b..7979aef7ee7b 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -72,6 +72,7 @@
72#include <linux/acpi.h> 72#include <linux/acpi.h>
73#include <linux/io.h> 73#include <linux/io.h>
74#include <linux/dmi.h> 74#include <linux/dmi.h>
75#include <linux/slab.h>
75 76
76/* I801 SMBus address offsets */ 77/* I801 SMBus address offsets */
77#define SMBHSTSTS(p) (0 + (p)->smba) 78#define SMBHSTSTS(p) (0 + (p)->smba)
diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
index a605a5029cfe..ff1e127dfea8 100644
--- a/drivers/i2c/busses/i2c-nforce2.c
+++ b/drivers/i2c/busses/i2c-nforce2.c
@@ -432,7 +432,7 @@ static int __devinit nforce2_probe(struct pci_dev *dev, const struct pci_device_
432 432
433static void __devexit nforce2_remove(struct pci_dev *dev) 433static void __devexit nforce2_remove(struct pci_dev *dev)
434{ 434{
435 struct nforce2_smbus *smbuses = (void*) pci_get_drvdata(dev); 435 struct nforce2_smbus *smbuses = pci_get_drvdata(dev);
436 436
437 nforce2_set_reference(NULL); 437 nforce2_set_reference(NULL);
438 if (smbuses[0].base) { 438 if (smbuses[0].base) {
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 6b4cc567645b..c7db6980e3a3 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -1362,7 +1362,7 @@ EXPORT_SYMBOL(i2c_transfer);
1362 * 1362 *
1363 * Returns negative errno, or else the number of bytes written. 1363 * Returns negative errno, or else the number of bytes written.
1364 */ 1364 */
1365int i2c_master_send(struct i2c_client *client, const char *buf, int count) 1365int i2c_master_send(const struct i2c_client *client, const char *buf, int count)
1366{ 1366{
1367 int ret; 1367 int ret;
1368 struct i2c_adapter *adap = client->adapter; 1368 struct i2c_adapter *adap = client->adapter;
@@ -1389,7 +1389,7 @@ EXPORT_SYMBOL(i2c_master_send);
1389 * 1389 *
1390 * Returns negative errno, or else the number of bytes read. 1390 * Returns negative errno, or else the number of bytes read.
1391 */ 1391 */
1392int i2c_master_recv(struct i2c_client *client, char *buf, int count) 1392int i2c_master_recv(const struct i2c_client *client, char *buf, int count)
1393{ 1393{
1394 struct i2c_adapter *adap = client->adapter; 1394 struct i2c_adapter *adap = client->adapter;
1395 struct i2c_msg msg; 1395 struct i2c_msg msg;
@@ -1679,7 +1679,7 @@ static int i2c_smbus_check_pec(u8 cpec, struct i2c_msg *msg)
1679 * This executes the SMBus "receive byte" protocol, returning negative errno 1679 * This executes the SMBus "receive byte" protocol, returning negative errno
1680 * else the byte received from the device. 1680 * else the byte received from the device.
1681 */ 1681 */
1682s32 i2c_smbus_read_byte(struct i2c_client *client) 1682s32 i2c_smbus_read_byte(const struct i2c_client *client)
1683{ 1683{
1684 union i2c_smbus_data data; 1684 union i2c_smbus_data data;
1685 int status; 1685 int status;
@@ -1699,7 +1699,7 @@ EXPORT_SYMBOL(i2c_smbus_read_byte);
1699 * This executes the SMBus "send byte" protocol, returning negative errno 1699 * This executes the SMBus "send byte" protocol, returning negative errno
1700 * else zero on success. 1700 * else zero on success.
1701 */ 1701 */
1702s32 i2c_smbus_write_byte(struct i2c_client *client, u8 value) 1702s32 i2c_smbus_write_byte(const struct i2c_client *client, u8 value)
1703{ 1703{
1704 return i2c_smbus_xfer(client->adapter, client->addr, client->flags, 1704 return i2c_smbus_xfer(client->adapter, client->addr, client->flags,
1705 I2C_SMBUS_WRITE, value, I2C_SMBUS_BYTE, NULL); 1705 I2C_SMBUS_WRITE, value, I2C_SMBUS_BYTE, NULL);
@@ -1714,7 +1714,7 @@ EXPORT_SYMBOL(i2c_smbus_write_byte);
1714 * This executes the SMBus "read byte" protocol, returning negative errno 1714 * This executes the SMBus "read byte" protocol, returning negative errno
1715 * else a data byte received from the device. 1715 * else a data byte received from the device.
1716 */ 1716 */
1717s32 i2c_smbus_read_byte_data(struct i2c_client *client, u8 command) 1717s32 i2c_smbus_read_byte_data(const struct i2c_client *client, u8 command)
1718{ 1718{
1719 union i2c_smbus_data data; 1719 union i2c_smbus_data data;
1720 int status; 1720 int status;
@@ -1735,7 +1735,8 @@ EXPORT_SYMBOL(i2c_smbus_read_byte_data);
1735 * This executes the SMBus "write byte" protocol, returning negative errno 1735 * This executes the SMBus "write byte" protocol, returning negative errno
1736 * else zero on success. 1736 * else zero on success.
1737 */ 1737 */
1738s32 i2c_smbus_write_byte_data(struct i2c_client *client, u8 command, u8 value) 1738s32 i2c_smbus_write_byte_data(const struct i2c_client *client, u8 command,
1739 u8 value)
1739{ 1740{
1740 union i2c_smbus_data data; 1741 union i2c_smbus_data data;
1741 data.byte = value; 1742 data.byte = value;
@@ -1753,7 +1754,7 @@ EXPORT_SYMBOL(i2c_smbus_write_byte_data);
1753 * This executes the SMBus "read word" protocol, returning negative errno 1754 * This executes the SMBus "read word" protocol, returning negative errno
1754 * else a 16-bit unsigned "word" received from the device. 1755 * else a 16-bit unsigned "word" received from the device.
1755 */ 1756 */
1756s32 i2c_smbus_read_word_data(struct i2c_client *client, u8 command) 1757s32 i2c_smbus_read_word_data(const struct i2c_client *client, u8 command)
1757{ 1758{
1758 union i2c_smbus_data data; 1759 union i2c_smbus_data data;
1759 int status; 1760 int status;
@@ -1774,7 +1775,8 @@ EXPORT_SYMBOL(i2c_smbus_read_word_data);
1774 * This executes the SMBus "write word" protocol, returning negative errno 1775 * This executes the SMBus "write word" protocol, returning negative errno
1775 * else zero on success. 1776 * else zero on success.
1776 */ 1777 */
1777s32 i2c_smbus_write_word_data(struct i2c_client *client, u8 command, u16 value) 1778s32 i2c_smbus_write_word_data(const struct i2c_client *client, u8 command,
1779 u16 value)
1778{ 1780{
1779 union i2c_smbus_data data; 1781 union i2c_smbus_data data;
1780 data.word = value; 1782 data.word = value;
@@ -1793,7 +1795,8 @@ EXPORT_SYMBOL(i2c_smbus_write_word_data);
1793 * This executes the SMBus "process call" protocol, returning negative errno 1795 * This executes the SMBus "process call" protocol, returning negative errno
1794 * else a 16-bit unsigned "word" received from the device. 1796 * else a 16-bit unsigned "word" received from the device.
1795 */ 1797 */
1796s32 i2c_smbus_process_call(struct i2c_client *client, u8 command, u16 value) 1798s32 i2c_smbus_process_call(const struct i2c_client *client, u8 command,
1799 u16 value)
1797{ 1800{
1798 union i2c_smbus_data data; 1801 union i2c_smbus_data data;
1799 int status; 1802 int status;
@@ -1821,7 +1824,7 @@ EXPORT_SYMBOL(i2c_smbus_process_call);
1821 * support this; its emulation through I2C messaging relies on a specific 1824 * support this; its emulation through I2C messaging relies on a specific
1822 * mechanism (I2C_M_RECV_LEN) which may not be implemented. 1825 * mechanism (I2C_M_RECV_LEN) which may not be implemented.
1823 */ 1826 */
1824s32 i2c_smbus_read_block_data(struct i2c_client *client, u8 command, 1827s32 i2c_smbus_read_block_data(const struct i2c_client *client, u8 command,
1825 u8 *values) 1828 u8 *values)
1826{ 1829{
1827 union i2c_smbus_data data; 1830 union i2c_smbus_data data;
@@ -1848,7 +1851,7 @@ EXPORT_SYMBOL(i2c_smbus_read_block_data);
1848 * This executes the SMBus "block write" protocol, returning negative errno 1851 * This executes the SMBus "block write" protocol, returning negative errno
1849 * else zero on success. 1852 * else zero on success.
1850 */ 1853 */
1851s32 i2c_smbus_write_block_data(struct i2c_client *client, u8 command, 1854s32 i2c_smbus_write_block_data(const struct i2c_client *client, u8 command,
1852 u8 length, const u8 *values) 1855 u8 length, const u8 *values)
1853{ 1856{
1854 union i2c_smbus_data data; 1857 union i2c_smbus_data data;
@@ -1864,7 +1867,7 @@ s32 i2c_smbus_write_block_data(struct i2c_client *client, u8 command,
1864EXPORT_SYMBOL(i2c_smbus_write_block_data); 1867EXPORT_SYMBOL(i2c_smbus_write_block_data);
1865 1868
1866/* Returns the number of read bytes */ 1869/* Returns the number of read bytes */
1867s32 i2c_smbus_read_i2c_block_data(struct i2c_client *client, u8 command, 1870s32 i2c_smbus_read_i2c_block_data(const struct i2c_client *client, u8 command,
1868 u8 length, u8 *values) 1871 u8 length, u8 *values)
1869{ 1872{
1870 union i2c_smbus_data data; 1873 union i2c_smbus_data data;
@@ -1884,7 +1887,7 @@ s32 i2c_smbus_read_i2c_block_data(struct i2c_client *client, u8 command,
1884} 1887}
1885EXPORT_SYMBOL(i2c_smbus_read_i2c_block_data); 1888EXPORT_SYMBOL(i2c_smbus_read_i2c_block_data);
1886 1889
1887s32 i2c_smbus_write_i2c_block_data(struct i2c_client *client, u8 command, 1890s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client, u8 command,
1888 u8 length, const u8 *values) 1891 u8 length, const u8 *values)
1889{ 1892{
1890 union i2c_smbus_data data; 1893 union i2c_smbus_data data;
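The hunks above constify the client argument of the SMBus helpers, so purely read-only paths can carry a const pointer end to end. A small illustration, assuming a hypothetical status register at offset 0x00:

	#include <linux/i2c.h>

	#define EXAMPLE_REG_STATUS	0x00	/* illustrative register offset */

	static int example_read_status(const struct i2c_client *client)
	{
		s32 val = i2c_smbus_read_byte_data(client, EXAMPLE_REG_STATUS);

		if (val < 0)
			return val;	/* negative errno from the i2c core */
		return val & 0xff;
	}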
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index 4d91d80bfd23..90b7a0163899 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -5,6 +5,18 @@
5menu "Multiplexer I2C Chip support" 5menu "Multiplexer I2C Chip support"
6 depends on I2C_MUX 6 depends on I2C_MUX
7 7
8config I2C_MUX_GPIO
9 tristate "GPIO-based I2C multiplexer"
10 depends on GENERIC_GPIO
11 help
12 If you say yes to this option, support will be included for a
13 GPIO based I2C multiplexer. This driver provides access to
14 I2C busses connected through a MUX, which is controlled
15 through GPIO pins.
16
17 This driver can also be built as a module. If so, the module
18 will be called gpio-i2cmux.
19
8config I2C_MUX_PCA9541 20config I2C_MUX_PCA9541
9 tristate "NXP PCA9541 I2C Master Selector" 21 tristate "NXP PCA9541 I2C Master Selector"
10 depends on EXPERIMENTAL 22 depends on EXPERIMENTAL
diff --git a/drivers/i2c/muxes/Makefile b/drivers/i2c/muxes/Makefile
index d743806d9b42..4640436ea61f 100644
--- a/drivers/i2c/muxes/Makefile
+++ b/drivers/i2c/muxes/Makefile
@@ -1,6 +1,7 @@
1# 1#
2# Makefile for multiplexer I2C chip drivers. 2# Makefile for multiplexer I2C chip drivers.
3 3
4obj-$(CONFIG_I2C_MUX_GPIO) += gpio-i2cmux.o
4obj-$(CONFIG_I2C_MUX_PCA9541) += pca9541.o 5obj-$(CONFIG_I2C_MUX_PCA9541) += pca9541.o
5obj-$(CONFIG_I2C_MUX_PCA954x) += pca954x.o 6obj-$(CONFIG_I2C_MUX_PCA954x) += pca954x.o
6 7
diff --git a/drivers/i2c/muxes/gpio-i2cmux.c b/drivers/i2c/muxes/gpio-i2cmux.c
new file mode 100644
index 000000000000..7b6ce624cd6e
--- /dev/null
+++ b/drivers/i2c/muxes/gpio-i2cmux.c
@@ -0,0 +1,184 @@
1/*
2 * I2C multiplexer using GPIO API
3 *
4 * Peter Korsgaard <peter.korsgaard@barco.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/i2c.h>
12#include <linux/i2c-mux.h>
13#include <linux/gpio-i2cmux.h>
14#include <linux/platform_device.h>
15#include <linux/init.h>
16#include <linux/module.h>
17#include <linux/slab.h>
18#include <linux/gpio.h>
19
20struct gpiomux {
21 struct i2c_adapter *parent;
22 struct i2c_adapter **adap; /* child busses */
23 struct gpio_i2cmux_platform_data data;
24};
25
26static void gpiomux_set(const struct gpiomux *mux, unsigned val)
27{
28 int i;
29
30 for (i = 0; i < mux->data.n_gpios; i++)
31 gpio_set_value(mux->data.gpios[i], val & (1 << i));
32}
33
34static int gpiomux_select(struct i2c_adapter *adap, void *data, u32 chan)
35{
36 struct gpiomux *mux = data;
37
38 gpiomux_set(mux, mux->data.values[chan]);
39
40 return 0;
41}
42
43static int gpiomux_deselect(struct i2c_adapter *adap, void *data, u32 chan)
44{
45 struct gpiomux *mux = data;
46
47 gpiomux_set(mux, mux->data.idle);
48
49 return 0;
50}
51
52static int __devinit gpiomux_probe(struct platform_device *pdev)
53{
54 struct gpiomux *mux;
55 struct gpio_i2cmux_platform_data *pdata;
56 struct i2c_adapter *parent;
57 int (*deselect) (struct i2c_adapter *, void *, u32);
58 unsigned initial_state;
59 int i, ret;
60
61 pdata = pdev->dev.platform_data;
62 if (!pdata) {
63 dev_err(&pdev->dev, "Missing platform data\n");
64 return -ENODEV;
65 }
66
67 parent = i2c_get_adapter(pdata->parent);
68 if (!parent) {
69 dev_err(&pdev->dev, "Parent adapter (%d) not found\n",
70 pdata->parent);
71 return -ENODEV;
72 }
73
74 mux = kzalloc(sizeof(*mux), GFP_KERNEL);
75 if (!mux) {
76 ret = -ENOMEM;
77 goto alloc_failed;
78 }
79
80 mux->parent = parent;
81 mux->data = *pdata;
82 mux->adap = kzalloc(sizeof(struct i2c_adapter *) * pdata->n_values,
83 GFP_KERNEL);
84 if (!mux->adap) {
85 ret = -ENOMEM;
86 goto alloc_failed2;
87 }
88
89 if (pdata->idle != GPIO_I2CMUX_NO_IDLE) {
90 initial_state = pdata->idle;
91 deselect = gpiomux_deselect;
92 } else {
93 initial_state = pdata->values[0];
94 deselect = NULL;
95 }
96
97 for (i = 0; i < pdata->n_gpios; i++) {
98 ret = gpio_request(pdata->gpios[i], "gpio-i2cmux");
99 if (ret)
100 goto err_request_gpio;
101 gpio_direction_output(pdata->gpios[i],
102 initial_state & (1 << i));
103 }
104
105 for (i = 0; i < pdata->n_values; i++) {
106 u32 nr = pdata->base_nr ? (pdata->base_nr + i) : 0;
107
108 mux->adap[i] = i2c_add_mux_adapter(parent, mux, nr, i,
109 gpiomux_select, deselect);
110 if (!mux->adap[i]) {
111 ret = -ENODEV;
112 dev_err(&pdev->dev, "Failed to add adapter %d\n", i);
113 goto add_adapter_failed;
114 }
115 }
116
117 dev_info(&pdev->dev, "%d port mux on %s adapter\n",
118 pdata->n_values, parent->name);
119
120 platform_set_drvdata(pdev, mux);
121
122 return 0;
123
124add_adapter_failed:
125 for (; i > 0; i--)
126 i2c_del_mux_adapter(mux->adap[i - 1]);
127 i = pdata->n_gpios;
128err_request_gpio:
129 for (; i > 0; i--)
130 gpio_free(pdata->gpios[i - 1]);
131 kfree(mux->adap);
132alloc_failed2:
133 kfree(mux);
134alloc_failed:
135 i2c_put_adapter(parent);
136
137 return ret;
138}
139
140static int __devexit gpiomux_remove(struct platform_device *pdev)
141{
142 struct gpiomux *mux = platform_get_drvdata(pdev);
143 int i;
144
145 for (i = 0; i < mux->data.n_values; i++)
146 i2c_del_mux_adapter(mux->adap[i]);
147
148 for (i = 0; i < mux->data.n_gpios; i++)
149 gpio_free(mux->data.gpios[i]);
150
151 platform_set_drvdata(pdev, NULL);
152 i2c_put_adapter(mux->parent);
153 kfree(mux->adap);
154 kfree(mux);
155
156 return 0;
157}
158
159static struct platform_driver gpiomux_driver = {
160 .probe = gpiomux_probe,
161 .remove = __devexit_p(gpiomux_remove),
162 .driver = {
163 .owner = THIS_MODULE,
164 .name = "gpio-i2cmux",
165 },
166};
167
168static int __init gpiomux_init(void)
169{
170 return platform_driver_register(&gpiomux_driver);
171}
172
173static void __exit gpiomux_exit(void)
174{
175 platform_driver_unregister(&gpiomux_driver);
176}
177
178module_init(gpiomux_init);
179module_exit(gpiomux_exit);
180
181MODULE_DESCRIPTION("GPIO-based I2C multiplexer driver");
182MODULE_AUTHOR("Peter Korsgaard <peter.korsgaard@barco.com>");
183MODULE_LICENSE("GPL");
184MODULE_ALIAS("platform:gpio-i2cmux");
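Board code instantiates this driver as a "gpio-i2cmux" platform device carrying gpio_i2cmux_platform_data. The field names in the sketch below are taken from their use in the probe routine above; the exact declarations live in <linux/gpio-i2cmux.h>, which is not part of this diff, and all GPIO and channel numbers are board-specific placeholders:

	#include <linux/platform_device.h>
	#include <linux/gpio-i2cmux.h>

	static const unsigned example_mux_gpios[] = { 58, 59 };
	static const unsigned example_mux_values[] = { 0, 1, 2, 3 };

	static struct gpio_i2cmux_platform_data example_mux_data = {
		.parent		= 0,	/* bus number of the parent adapter */
		.base_nr	= 0,	/* 0 lets the core pick child bus numbers */
		.values		= example_mux_values,
		.n_values	= ARRAY_SIZE(example_mux_values),
		.gpios		= example_mux_gpios,
		.n_gpios	= ARRAY_SIZE(example_mux_gpios),
		.idle		= GPIO_I2CMUX_NO_IDLE,	/* skip the deselect step */
	};

	static struct platform_device example_mux_device = {
		.name	= "gpio-i2cmux",
		.id	= 0,
		.dev	= {
			.platform_data = &example_mux_data,
		},
	};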
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 09dda0b8740e..c3f5aca4ef00 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -189,6 +189,7 @@ int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq, int kernel)
189 return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup)); 189 return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
190} 190}
191 191
192#ifdef notyet
192int cxio_resize_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq) 193int cxio_resize_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
193{ 194{
194 struct rdma_cq_setup setup; 195 struct rdma_cq_setup setup;
@@ -200,6 +201,7 @@ int cxio_resize_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
200 setup.ovfl_mode = 1; 201 setup.ovfl_mode = 1;
201 return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup)); 202 return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
202} 203}
204#endif
203 205
204static u32 get_qpid(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx) 206static u32 get_qpid(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
205{ 207{
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h
index a237d49bdcc9..c5406da3f4cd 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.h
@@ -335,8 +335,6 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg);
335int iwch_post_zb_read(struct iwch_qp *qhp); 335int iwch_post_zb_read(struct iwch_qp *qhp);
336int iwch_register_device(struct iwch_dev *dev); 336int iwch_register_device(struct iwch_dev *dev);
337void iwch_unregister_device(struct iwch_dev *dev); 337void iwch_unregister_device(struct iwch_dev *dev);
338int iwch_quiesce_qps(struct iwch_cq *chp);
339int iwch_resume_qps(struct iwch_cq *chp);
340void stop_read_rep_timer(struct iwch_qp *qhp); 338void stop_read_rep_timer(struct iwch_qp *qhp);
341int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php, 339int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
342 struct iwch_mr *mhp, int shift); 340 struct iwch_mr *mhp, int shift);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 0993137181d7..1b4cd09f74dc 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -1149,59 +1149,3 @@ out:
1149 PDBG("%s exit state %d\n", __func__, qhp->attr.state); 1149 PDBG("%s exit state %d\n", __func__, qhp->attr.state);
1150 return ret; 1150 return ret;
1151} 1151}
1152
1153static int quiesce_qp(struct iwch_qp *qhp)
1154{
1155 spin_lock_irq(&qhp->lock);
1156 iwch_quiesce_tid(qhp->ep);
1157 qhp->flags |= QP_QUIESCED;
1158 spin_unlock_irq(&qhp->lock);
1159 return 0;
1160}
1161
1162static int resume_qp(struct iwch_qp *qhp)
1163{
1164 spin_lock_irq(&qhp->lock);
1165 iwch_resume_tid(qhp->ep);
1166 qhp->flags &= ~QP_QUIESCED;
1167 spin_unlock_irq(&qhp->lock);
1168 return 0;
1169}
1170
1171int iwch_quiesce_qps(struct iwch_cq *chp)
1172{
1173 int i;
1174 struct iwch_qp *qhp;
1175
1176 for (i=0; i < T3_MAX_NUM_QP; i++) {
1177 qhp = get_qhp(chp->rhp, i);
1178 if (!qhp)
1179 continue;
1180 if ((qhp->attr.rcq == chp->cq.cqid) && !qp_quiesced(qhp)) {
1181 quiesce_qp(qhp);
1182 continue;
1183 }
1184 if ((qhp->attr.scq == chp->cq.cqid) && !qp_quiesced(qhp))
1185 quiesce_qp(qhp);
1186 }
1187 return 0;
1188}
1189
1190int iwch_resume_qps(struct iwch_cq *chp)
1191{
1192 int i;
1193 struct iwch_qp *qhp;
1194
1195 for (i=0; i < T3_MAX_NUM_QP; i++) {
1196 qhp = get_qhp(chp->rhp, i);
1197 if (!qhp)
1198 continue;
1199 if ((qhp->attr.rcq == chp->cq.cqid) && qp_quiesced(qhp)) {
1200 resume_qp(qhp);
1201 continue;
1202 }
1203 if ((qhp->attr.scq == chp->cq.cqid) && qp_quiesced(qhp))
1204 resume_qp(qhp);
1205 }
1206 return 0;
1207}
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 16032cdb4337..cc600c2dd0b3 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -760,7 +760,6 @@ int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
760int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count); 760int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count);
761int c4iw_ev_handler(struct c4iw_dev *rnicp, u32 qid); 761int c4iw_ev_handler(struct c4iw_dev *rnicp, u32 qid);
762u16 c4iw_rqes_posted(struct c4iw_qp *qhp); 762u16 c4iw_rqes_posted(struct c4iw_qp *qhp);
763int c4iw_post_zb_read(struct c4iw_qp *qhp);
764int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe); 763int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
765u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx); 764u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
766void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid, 765void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 057cb2505ea1..20800900ef3f 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -892,36 +892,6 @@ static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
892 } 892 }
893} 893}
894 894
895int c4iw_post_zb_read(struct c4iw_qp *qhp)
896{
897 union t4_wr *wqe;
898 struct sk_buff *skb;
899 u8 len16;
900
901 PDBG("%s enter\n", __func__);
902 skb = alloc_skb(40, GFP_KERNEL);
903 if (!skb) {
904 printk(KERN_ERR "%s cannot send zb_read!!\n", __func__);
905 return -ENOMEM;
906 }
907 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
908
909 wqe = (union t4_wr *)skb_put(skb, sizeof wqe->read);
910 memset(wqe, 0, sizeof wqe->read);
911 wqe->read.r2 = cpu_to_be64(0);
912 wqe->read.stag_sink = cpu_to_be32(1);
913 wqe->read.to_sink_hi = cpu_to_be32(0);
914 wqe->read.to_sink_lo = cpu_to_be32(1);
915 wqe->read.stag_src = cpu_to_be32(1);
916 wqe->read.plen = cpu_to_be32(0);
917 wqe->read.to_src_hi = cpu_to_be32(0);
918 wqe->read.to_src_lo = cpu_to_be32(1);
919 len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
920 init_wr_hdr(wqe, 0, FW_RI_RDMA_READ_WR, FW_RI_COMPLETION_FLAG, len16);
921
922 return c4iw_ofld_send(&qhp->rhp->rdev, skb);
923}
924
925static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe, 895static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
926 gfp_t gfp) 896 gfp_t gfp)
927{ 897{
@@ -1029,7 +999,6 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
1029 wqe->cookie = (unsigned long) &ep->com.wr_wait; 999 wqe->cookie = (unsigned long) &ep->com.wr_wait;
1030 1000
1031 wqe->u.fini.type = FW_RI_TYPE_FINI; 1001 wqe->u.fini.type = FW_RI_TYPE_FINI;
1032 c4iw_init_wr_wait(&ep->com.wr_wait);
1033 ret = c4iw_ofld_send(&rhp->rdev, skb); 1002 ret = c4iw_ofld_send(&rhp->rdev, skb);
1034 if (ret) 1003 if (ret)
1035 goto out; 1004 goto out;
@@ -1125,7 +1094,6 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
1125 if (qhp->attr.mpa_attr.initiator) 1094 if (qhp->attr.mpa_attr.initiator)
1126 build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init); 1095 build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);
1127 1096
1128 c4iw_init_wr_wait(&qhp->ep->com.wr_wait);
1129 ret = c4iw_ofld_send(&rhp->rdev, skb); 1097 ret = c4iw_ofld_send(&rhp->rdev, skb);
1130 if (ret) 1098 if (ret)
1131 goto out; 1099 goto out;
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 765f0fc1da76..b33f0457a1ff 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -530,9 +530,8 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
530 for (j = 0; j < 6; j++) { 530 for (j = 0; j < 6; j++) {
531 if (!pdev->resource[j].start) 531 if (!pdev->resource[j].start)
532 continue; 532 continue;
533 ipath_cdbg(VERBOSE, "BAR %d start %llx, end %llx, len %llx\n", 533 ipath_cdbg(VERBOSE, "BAR %d %pR, len %llx\n",
534 j, (unsigned long long)pdev->resource[j].start, 534 j, &pdev->resource[j],
535 (unsigned long long)pdev->resource[j].end,
536 (unsigned long long)pci_resource_len(pdev, j)); 535 (unsigned long long)pci_resource_len(pdev, j));
537 } 536 }
538 537
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 5a219a2fdf16..e8df155bc3b0 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -397,10 +397,14 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
397 cq->resize_buf = NULL; 397 cq->resize_buf = NULL;
398 cq->resize_umem = NULL; 398 cq->resize_umem = NULL;
399 } else { 399 } else {
400 struct mlx4_ib_cq_buf tmp_buf;
401 int tmp_cqe = 0;
402
400 spin_lock_irq(&cq->lock); 403 spin_lock_irq(&cq->lock);
401 if (cq->resize_buf) { 404 if (cq->resize_buf) {
402 mlx4_ib_cq_resize_copy_cqes(cq); 405 mlx4_ib_cq_resize_copy_cqes(cq);
403 mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe); 406 tmp_buf = cq->buf;
407 tmp_cqe = cq->ibcq.cqe;
404 cq->buf = cq->resize_buf->buf; 408 cq->buf = cq->resize_buf->buf;
405 cq->ibcq.cqe = cq->resize_buf->cqe; 409 cq->ibcq.cqe = cq->resize_buf->cqe;
406 410
@@ -408,6 +412,9 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
408 cq->resize_buf = NULL; 412 cq->resize_buf = NULL;
409 } 413 }
410 spin_unlock_irq(&cq->lock); 414 spin_unlock_irq(&cq->lock);
415
416 if (tmp_cqe)
417 mlx4_ib_free_cq_buf(dev, &tmp_buf, tmp_cqe);
411 } 418 }
412 419
413 goto out; 420 goto out;
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index c9a8dd63b9e2..57ffa50f509e 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -211,6 +211,8 @@ static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *ma
211 if (agent) { 211 if (agent) {
212 send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR, 212 send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
213 IB_MGMT_MAD_DATA, GFP_ATOMIC); 213 IB_MGMT_MAD_DATA, GFP_ATOMIC);
214 if (IS_ERR(send_buf))
215 return;
214 /* 216 /*
215 * We rely here on the fact that MLX QPs don't use the 217 * We rely here on the fact that MLX QPs don't use the
216 * address handle after the send is posted (this is 218 * address handle after the send is posted (this is
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index 5648659ff0b0..03a59534f59e 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -171,6 +171,8 @@ static void forward_trap(struct mthca_dev *dev,
171 if (agent) { 171 if (agent) {
172 send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR, 172 send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
173 IB_MGMT_MAD_DATA, GFP_ATOMIC); 173 IB_MGMT_MAD_DATA, GFP_ATOMIC);
174 if (IS_ERR(send_buf))
175 return;
174 /* 176 /*
175 * We rely here on the fact that MLX QPs don't use the 177 * We rely here on the fact that MLX QPs don't use the
176 * address handle after the send is posted (this is 178 * address handle after the send is posted (this is
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 3892e2c0e95a..5a4c36484722 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -908,8 +908,8 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
908 nesvnic->nic_index && 908 nesvnic->nic_index &&
909 mc_index < max_pft_entries_avaiable) { 909 mc_index < max_pft_entries_avaiable) {
910 nes_debug(NES_DBG_NIC_RX, 910 nes_debug(NES_DBG_NIC_RX,
911 "mc_index=%d skipping nic_index=%d,\ 911 "mc_index=%d skipping nic_index=%d, "
912 used for=%d \n", mc_index, 912 "used for=%d \n", mc_index,
913 nesvnic->nic_index, 913 nesvnic->nic_index,
914 nesadapter->pft_mcast_map[mc_index]); 914 nesadapter->pft_mcast_map[mc_index]);
915 mc_index++; 915 mc_index++;
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 64c9e7d02d4a..73225eee3cc6 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -766,7 +766,7 @@ struct qib_devdata {
766 void (*f_sdma_hw_start_up)(struct qib_pportdata *); 766 void (*f_sdma_hw_start_up)(struct qib_pportdata *);
767 void (*f_sdma_init_early)(struct qib_pportdata *); 767 void (*f_sdma_init_early)(struct qib_pportdata *);
768 void (*f_set_cntr_sample)(struct qib_pportdata *, u32, u32); 768 void (*f_set_cntr_sample)(struct qib_pportdata *, u32, u32);
769 void (*f_update_usrhead)(struct qib_ctxtdata *, u64, u32, u32); 769 void (*f_update_usrhead)(struct qib_ctxtdata *, u64, u32, u32, u32);
770 u32 (*f_hdrqempty)(struct qib_ctxtdata *); 770 u32 (*f_hdrqempty)(struct qib_ctxtdata *);
771 u64 (*f_portcntr)(struct qib_pportdata *, u32); 771 u64 (*f_portcntr)(struct qib_pportdata *, u32);
772 u32 (*f_read_cntrs)(struct qib_devdata *, loff_t, char **, 772 u32 (*f_read_cntrs)(struct qib_devdata *, loff_t, char **,
diff --git a/drivers/infiniband/hw/qib/qib_cq.c b/drivers/infiniband/hw/qib/qib_cq.c
index a86cbf880f98..5246aa486bbe 100644
--- a/drivers/infiniband/hw/qib/qib_cq.c
+++ b/drivers/infiniband/hw/qib/qib_cq.c
@@ -100,7 +100,8 @@ void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int solicited)
100 wc->head = next; 100 wc->head = next;
101 101
102 if (cq->notify == IB_CQ_NEXT_COMP || 102 if (cq->notify == IB_CQ_NEXT_COMP ||
103 (cq->notify == IB_CQ_SOLICITED && solicited)) { 103 (cq->notify == IB_CQ_SOLICITED &&
104 (solicited || entry->status != IB_WC_SUCCESS))) {
104 cq->notify = IB_CQ_NONE; 105 cq->notify = IB_CQ_NONE;
105 cq->triggered++; 106 cq->triggered++;
106 /* 107 /*
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index 9cd193603fb1..23e584f4c36c 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -71,6 +71,11 @@ MODULE_DESCRIPTION("QLogic IB driver");
71 */ 71 */
72#define QIB_PIO_MAXIBHDR 128 72#define QIB_PIO_MAXIBHDR 128
73 73
74/*
 75 * QIB_MAX_PKT_RECV is the max # of packets processed per receive interrupt.
76 */
77#define QIB_MAX_PKT_RECV 64
78
74struct qlogic_ib_stats qib_stats; 79struct qlogic_ib_stats qib_stats;
75 80
76const char *qib_get_unit_name(int unit) 81const char *qib_get_unit_name(int unit)
@@ -284,14 +289,147 @@ static inline void *qib_get_egrbuf(const struct qib_ctxtdata *rcd, u32 etail)
284 * Returns 1 if error was a CRC, else 0. 289 * Returns 1 if error was a CRC, else 0.
285 * Needed for some chip's synthesized error counters. 290 * Needed for some chip's synthesized error counters.
286 */ 291 */
287static u32 qib_rcv_hdrerr(struct qib_pportdata *ppd, u32 ctxt, 292static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
288 u32 eflags, u32 l, u32 etail, __le32 *rhf_addr, 293 u32 ctxt, u32 eflags, u32 l, u32 etail,
289 struct qib_message_header *hdr) 294 __le32 *rhf_addr, struct qib_message_header *rhdr)
290{ 295{
291 u32 ret = 0; 296 u32 ret = 0;
292 297
293 if (eflags & (QLOGIC_IB_RHF_H_ICRCERR | QLOGIC_IB_RHF_H_VCRCERR)) 298 if (eflags & (QLOGIC_IB_RHF_H_ICRCERR | QLOGIC_IB_RHF_H_VCRCERR))
294 ret = 1; 299 ret = 1;
300 else if (eflags == QLOGIC_IB_RHF_H_TIDERR) {
 302 /* For TIDERR and RC QPs, preemptively schedule a NAK */
302 struct qib_ib_header *hdr = (struct qib_ib_header *) rhdr;
303 struct qib_other_headers *ohdr = NULL;
304 struct qib_ibport *ibp = &ppd->ibport_data;
305 struct qib_qp *qp = NULL;
306 u32 tlen = qib_hdrget_length_in_bytes(rhf_addr);
307 u16 lid = be16_to_cpu(hdr->lrh[1]);
308 int lnh = be16_to_cpu(hdr->lrh[0]) & 3;
309 u32 qp_num;
310 u32 opcode;
311 u32 psn;
312 int diff;
313 unsigned long flags;
314
315 /* Sanity check packet */
316 if (tlen < 24)
317 goto drop;
318
319 if (lid < QIB_MULTICAST_LID_BASE) {
320 lid &= ~((1 << ppd->lmc) - 1);
321 if (unlikely(lid != ppd->lid))
322 goto drop;
323 }
324
325 /* Check for GRH */
326 if (lnh == QIB_LRH_BTH)
327 ohdr = &hdr->u.oth;
328 else if (lnh == QIB_LRH_GRH) {
329 u32 vtf;
330
331 ohdr = &hdr->u.l.oth;
332 if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
333 goto drop;
334 vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
335 if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
336 goto drop;
337 } else
338 goto drop;
339
340 /* Get opcode and PSN from packet */
341 opcode = be32_to_cpu(ohdr->bth[0]);
342 opcode >>= 24;
343 psn = be32_to_cpu(ohdr->bth[2]);
344
345 /* Get the destination QP number. */
346 qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
347 if (qp_num != QIB_MULTICAST_QPN) {
348 int ruc_res;
349 qp = qib_lookup_qpn(ibp, qp_num);
350 if (!qp)
351 goto drop;
352
353 /*
354 * Handle only RC QPs - for other QP types drop error
355 * packet.
356 */
357 spin_lock(&qp->r_lock);
358
359 /* Check for valid receive state. */
360 if (!(ib_qib_state_ops[qp->state] &
361 QIB_PROCESS_RECV_OK)) {
362 ibp->n_pkt_drops++;
363 goto unlock;
364 }
365
366 switch (qp->ibqp.qp_type) {
367 case IB_QPT_RC:
368 spin_lock_irqsave(&qp->s_lock, flags);
369 ruc_res =
370 qib_ruc_check_hdr(
371 ibp, hdr,
372 lnh == QIB_LRH_GRH,
373 qp,
374 be32_to_cpu(ohdr->bth[0]));
375 if (ruc_res) {
376 spin_unlock_irqrestore(&qp->s_lock,
377 flags);
378 goto unlock;
379 }
380 spin_unlock_irqrestore(&qp->s_lock, flags);
381
382 /* Only deal with RDMA Writes for now */
383 if (opcode <
384 IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
385 diff = qib_cmp24(psn, qp->r_psn);
386 if (!qp->r_nak_state && diff >= 0) {
387 ibp->n_rc_seqnak++;
388 qp->r_nak_state =
389 IB_NAK_PSN_ERROR;
390 /* Use the expected PSN. */
391 qp->r_ack_psn = qp->r_psn;
392 /*
393 * Wait to send the sequence
394 * NAK until all packets
395 * in the receive queue have
396 * been processed.
397 * Otherwise, we end up
398 * propagating congestion.
399 */
400 if (list_empty(&qp->rspwait)) {
401 qp->r_flags |=
402 QIB_R_RSP_NAK;
403 atomic_inc(
404 &qp->refcount);
405 list_add_tail(
406 &qp->rspwait,
407 &rcd->qp_wait_list);
408 }
409 } /* Out of sequence NAK */
410 } /* QP Request NAKs */
411 break;
412 case IB_QPT_SMI:
413 case IB_QPT_GSI:
414 case IB_QPT_UD:
415 case IB_QPT_UC:
416 default:
417 /* For now don't handle any other QP types */
418 break;
419 }
420
421unlock:
422 spin_unlock(&qp->r_lock);
423 /*
424 * Notify qib_destroy_qp() if it is waiting
425 * for us to finish.
426 */
427 if (atomic_dec_and_test(&qp->refcount))
428 wake_up(&qp->wait);
429 } /* Unicast QP */
430 } /* Valid packet with TIDErr */
431
432drop:
295 return ret; 433 return ret;
296} 434}
297 435
@@ -335,7 +473,7 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
335 smp_rmb(); /* prevent speculative reads of dma'ed hdrq */ 473 smp_rmb(); /* prevent speculative reads of dma'ed hdrq */
336 } 474 }
337 475
338 for (last = 0, i = 1; !last && i <= 64; i += !last) { 476 for (last = 0, i = 1; !last; i += !last) {
339 hdr = dd->f_get_msgheader(dd, rhf_addr); 477 hdr = dd->f_get_msgheader(dd, rhf_addr);
340 eflags = qib_hdrget_err_flags(rhf_addr); 478 eflags = qib_hdrget_err_flags(rhf_addr);
341 etype = qib_hdrget_rcv_type(rhf_addr); 479 etype = qib_hdrget_rcv_type(rhf_addr);
@@ -371,7 +509,7 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
371 * packets; only qibhdrerr should be set. 509 * packets; only qibhdrerr should be set.
372 */ 510 */
373 if (unlikely(eflags)) 511 if (unlikely(eflags))
374 crcs += qib_rcv_hdrerr(ppd, rcd->ctxt, eflags, l, 512 crcs += qib_rcv_hdrerr(rcd, ppd, rcd->ctxt, eflags, l,
375 etail, rhf_addr, hdr); 513 etail, rhf_addr, hdr);
376 else if (etype == RCVHQ_RCV_TYPE_NON_KD) { 514 else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
377 qib_ib_rcv(rcd, hdr, ebuf, tlen); 515 qib_ib_rcv(rcd, hdr, ebuf, tlen);
@@ -384,6 +522,9 @@ move_along:
384 l += rsize; 522 l += rsize;
385 if (l >= maxcnt) 523 if (l >= maxcnt)
386 l = 0; 524 l = 0;
525 if (i == QIB_MAX_PKT_RECV)
526 last = 1;
527
387 rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset; 528 rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
388 if (dd->flags & QIB_NODMA_RTAIL) { 529 if (dd->flags & QIB_NODMA_RTAIL) {
389 u32 seq = qib_hdrget_seq(rhf_addr); 530 u32 seq = qib_hdrget_seq(rhf_addr);
@@ -402,7 +543,7 @@ move_along:
402 */ 543 */
403 lval = l; 544 lval = l;
404 if (!last && !(i & 0xf)) { 545 if (!last && !(i & 0xf)) {
405 dd->f_update_usrhead(rcd, lval, updegr, etail); 546 dd->f_update_usrhead(rcd, lval, updegr, etail, i);
406 updegr = 0; 547 updegr = 0;
407 } 548 }
408 } 549 }
@@ -444,7 +585,7 @@ bail:
444 * if no packets were processed. 585 * if no packets were processed.
445 */ 586 */
446 lval = (u64)rcd->head | dd->rhdrhead_intr_off; 587 lval = (u64)rcd->head | dd->rhdrhead_intr_off;
447 dd->f_update_usrhead(rcd, lval, updegr, etail); 588 dd->f_update_usrhead(rcd, lval, updegr, etail, i);
448 return crcs; 589 return crcs;
449} 590}
450 591
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 79d9971aff1f..75bfad16c114 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -1379,17 +1379,17 @@ static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
1379 /* find device (with ACTIVE ports) with fewest ctxts in use */ 1379 /* find device (with ACTIVE ports) with fewest ctxts in use */
1380 for (ndev = 0; ndev < devmax; ndev++) { 1380 for (ndev = 0; ndev < devmax; ndev++) {
1381 struct qib_devdata *dd = qib_lookup(ndev); 1381 struct qib_devdata *dd = qib_lookup(ndev);
1382 unsigned cused = 0, cfree = 0; 1382 unsigned cused = 0, cfree = 0, pusable = 0;
1383 if (!dd) 1383 if (!dd)
1384 continue; 1384 continue;
1385 if (port && port <= dd->num_pports && 1385 if (port && port <= dd->num_pports &&
1386 usable(dd->pport + port - 1)) 1386 usable(dd->pport + port - 1))
1387 dusable = 1; 1387 pusable = 1;
1388 else 1388 else
1389 for (i = 0; i < dd->num_pports; i++) 1389 for (i = 0; i < dd->num_pports; i++)
1390 if (usable(dd->pport + i)) 1390 if (usable(dd->pport + i))
1391 dusable++; 1391 pusable++;
1392 if (!dusable) 1392 if (!pusable)
1393 continue; 1393 continue;
1394 for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; 1394 for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts;
1395 ctxt++) 1395 ctxt++)
@@ -1397,7 +1397,7 @@ static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
1397 cused++; 1397 cused++;
1398 else 1398 else
1399 cfree++; 1399 cfree++;
1400 if (cfree && cused < inuse) { 1400 if (pusable && cfree && cused < inuse) {
1401 udd = dd; 1401 udd = dd;
1402 inuse = cused; 1402 inuse = cused;
1403 } 1403 }
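
The rename from dusable to pusable above is more than cosmetic: dusable was never reset between devices, so a device with no usable port could inherit the previous device's count, and the final comparison did not require usability at all. A small sketch of the corrected selection loop, with an invented struct dev standing in for qib_devdata:

#include <stdio.h>

struct dev {
	int nports;
	int port_up[2];		/* 1 if the port is usable */
	int ctxts_used;
	int ctxts_free;
};

/* Pick the device with a usable port and the fewest contexts in use.
 * The per-device usable count must be reset each iteration; carrying
 * it across devices (as the old dusable did) lets a dead device
 * inherit the previous device's usability. */
static int pick_device(struct dev *devs, int ndev)
{
	int best = -1, inuse = 1 << 30;

	for (int i = 0; i < ndev; i++) {
		int usable = 0;

		for (int p = 0; p < devs[i].nports; p++)
			usable += devs[i].port_up[p];
		if (!usable || !devs[i].ctxts_free)
			continue;
		if (devs[i].ctxts_used < inuse) {
			inuse = devs[i].ctxts_used;
			best = i;
		}
	}
	return best;
}

int main(void)
{
	struct dev devs[] = {
		{ 2, { 1, 0 }, 5, 3 },	/* usable, 5 in use */
		{ 2, { 0, 0 }, 1, 7 },	/* no usable port: skipped */
		{ 2, { 1, 1 }, 2, 6 },	/* usable, 2 in use: wins */
	};
	printf("chose device %d\n", pick_device(devs, 3));
	return 0;
}
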
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index a5e29dbb9537..774dea897e9c 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -2074,7 +2074,7 @@ static void qib_6120_config_ctxts(struct qib_devdata *dd)
2074} 2074}
2075 2075
2076static void qib_update_6120_usrhead(struct qib_ctxtdata *rcd, u64 hd, 2076static void qib_update_6120_usrhead(struct qib_ctxtdata *rcd, u64 hd,
2077 u32 updegr, u32 egrhd) 2077 u32 updegr, u32 egrhd, u32 npkts)
2078{ 2078{
2079 qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); 2079 qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
2080 if (updegr) 2080 if (updegr)
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 6fd8d74e7392..127a0d5069f0 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -2297,7 +2297,7 @@ static void qib_7220_config_ctxts(struct qib_devdata *dd)
2297 nchipctxts = qib_read_kreg32(dd, kr_portcnt); 2297 nchipctxts = qib_read_kreg32(dd, kr_portcnt);
2298 dd->cspec->numctxts = nchipctxts; 2298 dd->cspec->numctxts = nchipctxts;
2299 if (qib_n_krcv_queues > 1) { 2299 if (qib_n_krcv_queues > 1) {
2300 dd->qpn_mask = 0x3f; 2300 dd->qpn_mask = 0x3e;
2301 dd->first_user_ctxt = qib_n_krcv_queues * dd->num_pports; 2301 dd->first_user_ctxt = qib_n_krcv_queues * dd->num_pports;
2302 if (dd->first_user_ctxt > nchipctxts) 2302 if (dd->first_user_ctxt > nchipctxts)
2303 dd->first_user_ctxt = nchipctxts; 2303 dd->first_user_ctxt = nchipctxts;
@@ -2703,7 +2703,7 @@ static int qib_7220_set_loopback(struct qib_pportdata *ppd, const char *what)
2703} 2703}
2704 2704
2705static void qib_update_7220_usrhead(struct qib_ctxtdata *rcd, u64 hd, 2705static void qib_update_7220_usrhead(struct qib_ctxtdata *rcd, u64 hd,
2706 u32 updegr, u32 egrhd) 2706 u32 updegr, u32 egrhd, u32 npkts)
2707{ 2707{
2708 qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); 2708 qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
2709 if (updegr) 2709 if (updegr)
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 584d443b5335..dbbb0e85afe4 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -71,6 +71,9 @@ static void qib_7322_mini_pcs_reset(struct qib_pportdata *);
71 71
72static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32); 72static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32);
73static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned); 73static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned);
74static void serdes_7322_los_enable(struct qib_pportdata *, int);
75static int serdes_7322_init_old(struct qib_pportdata *);
76static int serdes_7322_init_new(struct qib_pportdata *);
74 77
75#define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb)) 78#define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
76 79
@@ -111,6 +114,21 @@ static ushort qib_singleport;
111module_param_named(singleport, qib_singleport, ushort, S_IRUGO); 114module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
112MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space"); 115MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");
113 116
117/*
118 * Receive header queue sizes
119 */
120static unsigned qib_rcvhdrcnt;
121module_param_named(rcvhdrcnt, qib_rcvhdrcnt, uint, S_IRUGO);
122MODULE_PARM_DESC(rcvhdrcnt, "receive header count");
123
124static unsigned qib_rcvhdrsize;
125module_param_named(rcvhdrsize, qib_rcvhdrsize, uint, S_IRUGO);
126MODULE_PARM_DESC(rcvhdrsize, "receive header size in 32-bit words");
127
128static unsigned qib_rcvhdrentsize;
129module_param_named(rcvhdrentsize, qib_rcvhdrentsize, uint, S_IRUGO);
130MODULE_PARM_DESC(rcvhdrentsize, "receive header entry size in 32-bit words");
131
114#define MAX_ATTEN_LEN 64 /* plenty for any real system */ 132#define MAX_ATTEN_LEN 64 /* plenty for any real system */
115/* for read back, default index is ~5m copper cable */ 133/* for read back, default index is ~5m copper cable */
116static char txselect_list[MAX_ATTEN_LEN] = "10"; 134static char txselect_list[MAX_ATTEN_LEN] = "10";
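
The three new knobs follow the stock module-parameter pattern: a static variable, module_param_named() exposing it read-only under /sys/module/<mod>/parameters/, and a MODULE_PARM_DESC string for modinfo. A minimal out-of-tree module sketch of the same pattern; the demo_ names are illustrative:

#include <linux/module.h>
#include <linux/init.h>

static unsigned demo_hdrcnt;
module_param_named(hdrcnt, demo_hdrcnt, uint, S_IRUGO);
MODULE_PARM_DESC(hdrcnt, "receive header count (0 = driver default)");

static int __init demo_init(void)
{
	/* A zero value means "use the built-in default", matching the
	 * qib_rcvhdrcnt handling in qib_7322_config_ctxts(). */
	pr_info("demo: hdrcnt=%u\n", demo_hdrcnt);
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
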
@@ -544,6 +562,7 @@ static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);
544 562
545#define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */ 563#define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
546#define TXDDS_EXTRA_SZ 13 /* number of extra tx settings entries */ 564#define TXDDS_EXTRA_SZ 13 /* number of extra tx settings entries */
565#define TXDDS_MFG_SZ 2 /* number of mfg tx settings entries */
547#define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */ 566#define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */
548 567
549#define H1_FORCE_VAL 8 568#define H1_FORCE_VAL 8
@@ -604,6 +623,7 @@ struct qib_chippport_specific {
604 u8 ibmalfusesnap; 623 u8 ibmalfusesnap;
605 struct qib_qsfp_data qsfp_data; 624 struct qib_qsfp_data qsfp_data;
606 char epmsgbuf[192]; /* for port error interrupt msg buffer */ 625 char epmsgbuf[192]; /* for port error interrupt msg buffer */
626 u8 bounced;
607}; 627};
608 628
609static struct { 629static struct {
@@ -1677,6 +1697,8 @@ static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
1677 (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) { 1697 (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
1678 force_h1(ppd); 1698 force_h1(ppd);
1679 ppd->cpspec->qdr_reforce = 1; 1699 ppd->cpspec->qdr_reforce = 1;
1700 if (!ppd->dd->cspec->r1)
1701 serdes_7322_los_enable(ppd, 0);
1680 } else if (ppd->cpspec->qdr_reforce && 1702 } else if (ppd->cpspec->qdr_reforce &&
1681 (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) && 1703 (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) &&
1682 (ibclt == IB_7322_LT_STATE_CFGENH || 1704 (ibclt == IB_7322_LT_STATE_CFGENH ||
@@ -1692,18 +1714,37 @@ static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
1692 ibclt <= IB_7322_LT_STATE_SLEEPQUIET))) 1714 ibclt <= IB_7322_LT_STATE_SLEEPQUIET)))
1693 adj_tx_serdes(ppd); 1715 adj_tx_serdes(ppd);
1694 1716
1695 if (!ppd->cpspec->qdr_dfe_on && ibclt != IB_7322_LT_STATE_LINKUP && 1717 if (ibclt != IB_7322_LT_STATE_LINKUP) {
1696 ibclt <= IB_7322_LT_STATE_SLEEPQUIET) { 1718 u8 ltstate = qib_7322_phys_portstate(ibcst);
1697 ppd->cpspec->qdr_dfe_on = 1; 1719 u8 pibclt = (u8)SYM_FIELD(ppd->lastibcstat, IBCStatusA_0,
1698 ppd->cpspec->qdr_dfe_time = 0; 1720 LinkTrainingState);
1699 /* On link down, reenable QDR adaptation */ 1721 if (!ppd->dd->cspec->r1 &&
1700 qib_write_kreg_port(ppd, krp_static_adapt_dis(2), 1722 pibclt == IB_7322_LT_STATE_LINKUP &&
1701 ppd->dd->cspec->r1 ? 1723 ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
1702 QDR_STATIC_ADAPT_DOWN_R1 : 1724 ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
1703 QDR_STATIC_ADAPT_DOWN); 1725 ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
1726 ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
1727 /* If the link went down (but not into recovery),
1728 * turn LOS back on */
1729 serdes_7322_los_enable(ppd, 1);
1730 if (!ppd->cpspec->qdr_dfe_on &&
1731 ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
1732 ppd->cpspec->qdr_dfe_on = 1;
1733 ppd->cpspec->qdr_dfe_time = 0;
1734 /* On link down, reenable QDR adaptation */
1735 qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
1736 ppd->dd->cspec->r1 ?
1737 QDR_STATIC_ADAPT_DOWN_R1 :
1738 QDR_STATIC_ADAPT_DOWN);
1739 printk(KERN_INFO QIB_DRV_NAME
1740 " IB%u:%u re-enabled QDR adaptation "
1741 "ibclt %x\n", ppd->dd->unit, ppd->port, ibclt);
1742 }
1704 } 1743 }
1705} 1744}
1706 1745
1746static int qib_7322_set_ib_cfg(struct qib_pportdata *, int, u32);
1747
1707/* 1748/*
1708 * This is per-pport error handling. 1749 * This is per-pport error handling.
1709 * will likely get its own MSIx interrupt (one for each port, 1750
@@ -1840,7 +1881,23 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
1840 IB_PHYSPORTSTATE_DISABLED) 1881 IB_PHYSPORTSTATE_DISABLED)
1841 qib_set_ib_7322_lstate(ppd, 0, 1882 qib_set_ib_7322_lstate(ppd, 0,
1842 QLOGIC_IB_IBCC_LINKINITCMD_DISABLE); 1883 QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1843 else 1884 else {
1885 u32 lstate;
1886 /*
1887 * We need the current logical link state before
1888 * lflags are set in handle_e_ibstatuschanged.
1889 */
1890 lstate = qib_7322_iblink_state(ibcs);
1891
1892 if (IS_QMH(dd) && !ppd->cpspec->bounced &&
1893 ltstate == IB_PHYSPORTSTATE_LINKUP &&
1894 (lstate >= IB_PORT_INIT &&
1895 lstate <= IB_PORT_ACTIVE)) {
1896 ppd->cpspec->bounced = 1;
1897 qib_7322_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
1898 IB_LINKCMD_DOWN | IB_LINKINITCMD_POLL);
1899 }
1900
1844 /* 1901 /*
1845 * Since going into a recovery state causes the link 1902 * Since going into a recovery state causes the link
1846 * state to go down and since recovery is transitory, 1903 * state to go down and since recovery is transitory,
@@ -1854,6 +1911,7 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
1854 ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT && 1911 ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
1855 ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE) 1912 ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
1856 qib_handle_e_ibstatuschanged(ppd, ibcs); 1913 qib_handle_e_ibstatuschanged(ppd, ibcs);
1914 }
1857 } 1915 }
1858 if (*msg && iserr) 1916 if (*msg && iserr)
1859 qib_dev_porterr(dd, ppd->port, "%s error\n", msg); 1917 qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
@@ -2785,7 +2843,6 @@ static irqreturn_t qib_7322intr(int irq, void *data)
2785 ctxtrbits &= ~rmask; 2843 ctxtrbits &= ~rmask;
2786 if (dd->rcd[i]) { 2844 if (dd->rcd[i]) {
2787 qib_kreceive(dd->rcd[i], NULL, &npkts); 2845 qib_kreceive(dd->rcd[i], NULL, &npkts);
2788 adjust_rcv_timeout(dd->rcd[i], npkts);
2789 } 2846 }
2790 } 2847 }
2791 rmask <<= 1; 2848 rmask <<= 1;
@@ -2835,7 +2892,6 @@ static irqreturn_t qib_7322pintr(int irq, void *data)
2835 (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt); 2892 (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);
2836 2893
2837 qib_kreceive(rcd, NULL, &npkts); 2894 qib_kreceive(rcd, NULL, &npkts);
2838 adjust_rcv_timeout(rcd, npkts);
2839 2895
2840 return IRQ_HANDLED; 2896 return IRQ_HANDLED;
2841} 2897}
@@ -3157,6 +3213,10 @@ static unsigned qib_7322_boardname(struct qib_devdata *dd)
3157 case BOARD_QME7342: 3213 case BOARD_QME7342:
3158 n = "InfiniPath_QME7342"; 3214 n = "InfiniPath_QME7342";
3159 break; 3215 break;
3216 case 8:
3217 n = "InfiniPath_QME7362";
3218 dd->flags |= QIB_HAS_QSFP;
3219 break;
3160 case 15: 3220 case 15:
3161 n = "InfiniPath_QLE7342_TEST"; 3221 n = "InfiniPath_QLE7342_TEST";
3162 dd->flags |= QIB_HAS_QSFP; 3222 dd->flags |= QIB_HAS_QSFP;
@@ -3475,11 +3535,6 @@ static void qib_7322_config_ctxts(struct qib_devdata *dd)
3475 nchipctxts = qib_read_kreg32(dd, kr_contextcnt); 3535 nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
3476 dd->cspec->numctxts = nchipctxts; 3536 dd->cspec->numctxts = nchipctxts;
3477 if (qib_n_krcv_queues > 1 && dd->num_pports) { 3537 if (qib_n_krcv_queues > 1 && dd->num_pports) {
3478 /*
3479 * Set the mask for which bits from the QPN are used
3480 * to select a context number.
3481 */
3482 dd->qpn_mask = 0x3f;
3483 dd->first_user_ctxt = NUM_IB_PORTS + 3538 dd->first_user_ctxt = NUM_IB_PORTS +
3484 (qib_n_krcv_queues - 1) * dd->num_pports; 3539 (qib_n_krcv_queues - 1) * dd->num_pports;
3485 if (dd->first_user_ctxt > nchipctxts) 3540 if (dd->first_user_ctxt > nchipctxts)
@@ -3530,8 +3585,11 @@ static void qib_7322_config_ctxts(struct qib_devdata *dd)
3530 3585
3531 /* kr_rcvegrcnt changes based on the number of contexts enabled */ 3586 /* kr_rcvegrcnt changes based on the number of contexts enabled */
3532 dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt); 3587 dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
3533 dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, 3588 if (qib_rcvhdrcnt)
3534 dd->num_pports > 1 ? 1024U : 2048U); 3589 dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt);
3590 else
3591 dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt,
3592 dd->num_pports > 1 ? 1024U : 2048U);
3535} 3593}
3536 3594
3537static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which) 3595static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which)
@@ -4002,8 +4060,14 @@ static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t)
4002} 4060}
4003 4061
4004static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd, 4062static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
4005 u32 updegr, u32 egrhd) 4063 u32 updegr, u32 egrhd, u32 npkts)
4006{ 4064{
4065 /*
4066 * Need to write timeout register before updating rcvhdrhead to ensure
4067 * that the timer is enabled on reception of a packet.
4068 */
4069 if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT)
4070 adjust_rcv_timeout(rcd, npkts);
4007 qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); 4071 qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
4008 qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); 4072 qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
4009 if (updegr) 4073 if (updegr)
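
The ordering constraint in the new comment is worth spelling out: the head-pointer write is what re-arms the receive interrupt, so the timeout must already be programmed when it lands. A sketch of that ordering with invented register offsets (64-bit MMIO target assumed):

#include <linux/io.h>

#define REG_TIMEOUT	0x100		/* hypothetical offsets */
#define REG_HDRHEAD	0x108

static void update_head(void __iomem *base, u32 timeout, u64 head)
{
	writel(timeout, base + REG_TIMEOUT);	/* arm the timer first */
	wmb();					/* keep the order over posted writes */
	writeq(head, base + REG_HDRHEAD);	/* this re-enables the interrupt */
}
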
@@ -5522,7 +5586,7 @@ static void qsfp_7322_event(struct work_struct *work)
5522 u64 now = get_jiffies_64(); 5586 u64 now = get_jiffies_64();
5523 if (time_after64(now, pwrup)) 5587 if (time_after64(now, pwrup))
5524 break; 5588 break;
5525 msleep(1); 5589 msleep(20);
5526 } 5590 }
5527 ret = qib_refresh_qsfp_cache(ppd, &qd->cache); 5591 ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
5528 /* 5592 /*
@@ -5579,6 +5643,7 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
5579 u32 pidx, unit, port, deflt, h1; 5643 u32 pidx, unit, port, deflt, h1;
5580 unsigned long val; 5644 unsigned long val;
5581 int any = 0, seth1; 5645 int any = 0, seth1;
5646 int txdds_size;
5582 5647
5583 str = txselect_list; 5648 str = txselect_list;
5584 5649
@@ -5587,6 +5652,10 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
5587 for (pidx = 0; pidx < dd->num_pports; ++pidx) 5652 for (pidx = 0; pidx < dd->num_pports; ++pidx)
5588 dd->pport[pidx].cpspec->no_eep = deflt; 5653 dd->pport[pidx].cpspec->no_eep = deflt;
5589 5654
5655 txdds_size = TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ;
5656 if (IS_QME(dd) || IS_QMH(dd))
5657 txdds_size += TXDDS_MFG_SZ;
5658
5590 while (*nxt && nxt[1]) { 5659 while (*nxt && nxt[1]) {
5591 str = ++nxt; 5660 str = ++nxt;
5592 unit = simple_strtoul(str, &nxt, 0); 5661 unit = simple_strtoul(str, &nxt, 0);
@@ -5609,7 +5678,7 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
5609 ; 5678 ;
5610 continue; 5679 continue;
5611 } 5680 }
5612 if (val >= TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ) 5681 if (val >= txdds_size)
5613 continue; 5682 continue;
5614 seth1 = 0; 5683 seth1 = 0;
5615 h1 = 0; /* gcc thinks it might be used uninitted */ 5684 h1 = 0; /* gcc thinks it might be used uninitted */
@@ -5661,10 +5730,11 @@ static int setup_txselect(const char *str, struct kernel_param *kp)
5661 return -ENOSPC; 5730 return -ENOSPC;
5662 } 5731 }
5663 val = simple_strtoul(str, &n, 0); 5732 val = simple_strtoul(str, &n, 0);
5664 if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) { 5733 if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
5734 TXDDS_MFG_SZ)) {
5665 printk(KERN_INFO QIB_DRV_NAME 5735 printk(KERN_INFO QIB_DRV_NAME
5666 "txselect_values must start with a number < %d\n", 5736 "txselect_values must start with a number < %d\n",
5667 TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ); 5737 TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ);
5668 return -EINVAL; 5738 return -EINVAL;
5669 } 5739 }
5670 strcpy(txselect_list, str); 5740 strcpy(txselect_list, str);
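
setup_txselect() now accepts indices into the manufacturing table as well, so the range check grows by TXDDS_MFG_SZ. A userspace sketch of the same bounds-checked numeric-prefix parse, with the table sizes copied from the hunk:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define TABLE_SZ 16
#define EXTRA_SZ 13
#define MFG_SZ    2	/* newly accepted range, as in the hunk above */

/* Accept "N" or "N,unit=val,..." where N indexes the combined tx
 * settings tables; reject anything past the last table entry. */
static int parse_txselect(const char *str, unsigned long *out)
{
	char *end;
	unsigned long val;

	errno = 0;
	val = strtoul(str, &end, 0);
	if (end == str || errno ||
	    val >= TABLE_SZ + EXTRA_SZ + MFG_SZ)
		return -EINVAL;
	*out = val;
	return 0;
}

int main(void)
{
	unsigned long v;

	printf("\"10\" -> %d\n", parse_txselect("10", &v));	/* ok */
	printf("\"30\" -> %d\n", parse_txselect("30", &v));	/* ok, mfg entry */
	printf("\"31\" -> %d\n", parse_txselect("31", &v));	/* rejected */
	return 0;
}
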
@@ -5810,7 +5880,8 @@ static void write_7322_initregs(struct qib_devdata *dd)
5810 unsigned n, regno; 5880 unsigned n, regno;
5811 unsigned long flags; 5881 unsigned long flags;
5812 5882
5813 if (!dd->qpn_mask || !dd->pport[pidx].link_speed_supported) 5883 if (dd->n_krcv_queues < 2 ||
5884 !dd->pport[pidx].link_speed_supported)
5814 continue; 5885 continue;
5815 5886
5816 ppd = &dd->pport[pidx]; 5887 ppd = &dd->pport[pidx];
@@ -6097,8 +6168,10 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
6097 ppd++; 6168 ppd++;
6098 } 6169 }
6099 6170
6100 dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE; 6171 dd->rcvhdrentsize = qib_rcvhdrentsize ?
6101 dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE; 6172 qib_rcvhdrentsize : QIB_RCVHDR_ENTSIZE;
6173 dd->rcvhdrsize = qib_rcvhdrsize ?
6174 qib_rcvhdrsize : QIB_DFLT_RCVHDRSIZE;
6102 dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32); 6175 dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
6103 6176
6104 /* we always allocate at least 2048 bytes for eager buffers */ 6177 /* we always allocate at least 2048 bytes for eager buffers */
@@ -6495,7 +6568,7 @@ static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
6495 /* make sure we see an updated copy next time around */ 6568 /* make sure we see an updated copy next time around */
6496 sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP); 6569 sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
6497 sleeps++; 6570 sleeps++;
6498 msleep(1); 6571 msleep(20);
6499 } 6572 }
6500 6573
6501 switch (which) { 6574 switch (which) {
@@ -6993,6 +7066,12 @@ static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = {
6993 { 0, 1, 0, 12 }, /* QMH7342 backplane settings */ 7066 { 0, 1, 0, 12 }, /* QMH7342 backplane settings */
6994}; 7067};
6995 7068
7069static const struct txdds_ent txdds_extra_mfg[TXDDS_MFG_SZ] = {
7070 /* amp, pre, main, post */
7071 { 0, 0, 0, 0 }, /* QME7342 mfg settings */
7072 { 0, 0, 0, 6 }, /* QME7342 P2 mfg settings */
7073};
7074
6996static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds, 7075static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
6997 unsigned atten) 7076 unsigned atten)
6998{ 7077{
@@ -7066,6 +7145,16 @@ static void find_best_ent(struct qib_pportdata *ppd,
7066 *sdr_dds = &txdds_extra_sdr[idx]; 7145 *sdr_dds = &txdds_extra_sdr[idx];
7067 *ddr_dds = &txdds_extra_ddr[idx]; 7146 *ddr_dds = &txdds_extra_ddr[idx];
7068 *qdr_dds = &txdds_extra_qdr[idx]; 7147 *qdr_dds = &txdds_extra_qdr[idx];
7148 } else if ((IS_QME(ppd->dd) || IS_QMH(ppd->dd)) &&
7149 ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
7150 TXDDS_MFG_SZ)) {
7151 idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
7152 printk(KERN_INFO QIB_DRV_NAME
7153 " IB%u:%u use idx %u into txdds_mfg\n",
7154 ppd->dd->unit, ppd->port, idx);
7155 *sdr_dds = &txdds_extra_mfg[idx];
7156 *ddr_dds = &txdds_extra_mfg[idx];
7157 *qdr_dds = &txdds_extra_mfg[idx];
7069 } else { 7158 } else {
7070 /* this shouldn't happen, it's range checked */ 7159 /* this shouldn't happen, it's range checked */
7071 *sdr_dds = txdds_sdr + qib_long_atten; 7160 *sdr_dds = txdds_sdr + qib_long_atten;
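
find_best_ent() treats the three tx-settings tables as one flat index space: [0, TXDDS_TABLE_SZ) hits the on-chip table, the next TXDDS_EXTRA_SZ entries the extra table, and (on QME/QMH boards) the final TXDDS_MFG_SZ entries the new manufacturing table. A sketch of that index resolution; the table names are illustrative:

#include <stdio.h>

#define TABLE_SZ 16
#define EXTRA_SZ 13
#define MFG_SZ    2

/* Resolve a flat tx-settings index into (table, offset), mirroring the
 * three stacked tables consulted by find_best_ent() above. */
static const char *resolve(unsigned idx, unsigned *off)
{
	if (idx < TABLE_SZ) {
		*off = idx;
		return "onchip";
	}
	if (idx < TABLE_SZ + EXTRA_SZ) {
		*off = idx - TABLE_SZ;
		return "extra";
	}
	if (idx < TABLE_SZ + EXTRA_SZ + MFG_SZ) {
		*off = idx - (TABLE_SZ + EXTRA_SZ);
		return "mfg";
	}
	*off = 0;
	return "out-of-range";	/* caller falls back to defaults */
}

int main(void)
{
	unsigned off;

	for (unsigned idx = 15; idx <= 31; idx += 8) {
		const char *t = resolve(idx, &off);
		printf("idx %2u -> %s[%u]\n", idx, t, off);
	}
	return 0;
}
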
@@ -7210,9 +7299,30 @@ static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
7210 } 7299 }
7211} 7300}
7212 7301
7302static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
7303{
7304 u64 data = qib_read_kreg_port(ppd, krp_serdesctrl);
7305 printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS %s\n",
7306 ppd->dd->unit, ppd->port, (enable ? "on" : "off"));
7307 if (enable)
7308 data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
7309 else
7310 data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
7311 qib_write_kreg_port(ppd, krp_serdesctrl, data);
7312}
7313
7213static int serdes_7322_init(struct qib_pportdata *ppd) 7314static int serdes_7322_init(struct qib_pportdata *ppd)
7214{ 7315{
7215 u64 data; 7316 int ret = 0;
7317 if (ppd->dd->cspec->r1)
7318 ret = serdes_7322_init_old(ppd);
7319 else
7320 ret = serdes_7322_init_new(ppd);
7321 return ret;
7322}
7323
7324static int serdes_7322_init_old(struct qib_pportdata *ppd)
7325{
7216 u32 le_val; 7326 u32 le_val;
7217 7327
7218 /* 7328 /*
@@ -7270,11 +7380,7 @@ static int serdes_7322_init(struct qib_pportdata *ppd)
7270 ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */ 7380 ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
7271 ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */ 7381 ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
7272 7382
7273 data = qib_read_kreg_port(ppd, krp_serdesctrl); 7383 serdes_7322_los_enable(ppd, 1);
7274 /* Turn off IB latency mode */
7275 data &= ~SYM_MASK(IBSerdesCtrl_0, IB_LAT_MODE);
7276 qib_write_kreg_port(ppd, krp_serdesctrl, data |
7277 SYM_MASK(IBSerdesCtrl_0, RXLOSEN));
7278 7384
7279 /* rxbistena; set 0 to avoid effects of it switch later */ 7385 /* rxbistena; set 0 to avoid effects of it switch later */
7280 ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15); 7386 ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15);
@@ -7314,6 +7420,205 @@ static int serdes_7322_init(struct qib_pportdata *ppd)
7314 return 0; 7420 return 0;
7315} 7421}
7316 7422
7423static int serdes_7322_init_new(struct qib_pportdata *ppd)
7424{
7425 u64 tstart;
7426 u32 le_val, rxcaldone;
7427 int chan, chan_done = (1 << SERDES_CHANS) - 1;
7428
7429 /*
7430 * Initialize the Tx DDS tables. Also done every QSFP event,
7431 * for adapters with QSFP
7432 */
7433 init_txdds_table(ppd, 0);
7434
7435 /* Clear cmode-override, may be set from older driver */
7436 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
7437
7438 /* ensure no tx overrides from earlier driver loads */
7439 qib_write_kreg_port(ppd, krp_tx_deemph_override,
7440 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7441 reset_tx_deemphasis_override));
7442
7443 /* START OF LSI SUGGESTED SERDES BRINGUP */
7444 /* Reset - Calibration Setup */
7445 /* Stop DFE adaptation */
7446 ibsd_wr_allchans(ppd, 1, 0, BMASK(9, 1));
7447 /* Disable LE1 */
7448 ibsd_wr_allchans(ppd, 13, 0, BMASK(5, 5));
7449 /* Disable autoadapt for LE1 */
7450 ibsd_wr_allchans(ppd, 1, 0, BMASK(15, 15));
7451 /* Disable LE2 */
7452 ibsd_wr_allchans(ppd, 13, 0, BMASK(6, 6));
7453 /* Disable VGA */
7454 ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
7455 /* Disable AFE Offset Cancel */
7456 ibsd_wr_allchans(ppd, 12, 0, BMASK(12, 12));
7457 /* Disable Timing Loop */
7458 ibsd_wr_allchans(ppd, 2, 0, BMASK(3, 3));
7459 /* Disable Frequency Loop */
7460 ibsd_wr_allchans(ppd, 2, 0, BMASK(4, 4));
7461 /* Disable Baseline Wander Correction */
7462 ibsd_wr_allchans(ppd, 13, 0, BMASK(13, 13));
7463 /* Disable RX Calibration */
7464 ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
7465 /* Disable RX Offset Calibration */
7466 ibsd_wr_allchans(ppd, 12, 0, BMASK(4, 4));
7467 /* Select BB CDR */
7468 ibsd_wr_allchans(ppd, 2, (1 << 15), BMASK(15, 15));
7469 /* CDR Step Size */
7470 ibsd_wr_allchans(ppd, 5, 0, BMASK(9, 8));
7471 /* Enable phase Calibration */
7472 ibsd_wr_allchans(ppd, 12, (1 << 5), BMASK(5, 5));
7473 /* DFE Bandwidth [2:14-12] */
7474 ibsd_wr_allchans(ppd, 2, (4 << 12), BMASK(14, 12));
7475 /* DFE Config (4 taps only) */
7476 ibsd_wr_allchans(ppd, 16, 0, BMASK(1, 0));
7477 /* Gain Loop Bandwidth */
7478 if (!ppd->dd->cspec->r1) {
7479 ibsd_wr_allchans(ppd, 12, 1 << 12, BMASK(12, 12));
7480 ibsd_wr_allchans(ppd, 12, 2 << 8, BMASK(11, 8));
7481 } else {
7482 ibsd_wr_allchans(ppd, 19, (3 << 11), BMASK(13, 11));
7483 }
7484 /* Baseline Wander Correction Gain [13:4-0] (leave as default) */
7485 /* Baseline Wander Correction Gain [3:7-5] (leave as default) */
7486 /* Data Rate Select [5:7-6] (leave as default) */
7487 /* RX Parallel Word Width [3:10-8] (leave as default) */
7488
7489 /* RX RESET */
7490 /* Single- or Multi-channel reset */
7491 /* RX Analog reset */
7492 /* RX Digital reset */
7493 ibsd_wr_allchans(ppd, 0, 0, BMASK(15, 13));
7494 msleep(20);
7495 /* RX Analog reset */
7496 ibsd_wr_allchans(ppd, 0, (1 << 14), BMASK(14, 14));
7497 msleep(20);
7498 /* RX Digital reset */
7499 ibsd_wr_allchans(ppd, 0, (1 << 13), BMASK(13, 13));
7500 msleep(20);
7501
7502 /* setup LoS params; these are subsystem, so chan == 5 */
7503 /* LoS filter threshold_count on, ch 0-3, set to 8 */
7504 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
7505 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
7506 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
7507 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
7508
7509 /* LoS filter threshold_count off, ch 0-3, set to 4 */
7510 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
7511 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
7512 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
7513 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
7514
7515 /* LoS filter select enabled */
7516 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
7517
7518 /* LoS target data: SDR=4, DDR=2, QDR=1 */
7519 ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
7520 ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
7521 ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
7522
7523 /* Turn on LOS on initial SERDES init */
7524 serdes_7322_los_enable(ppd, 1);
7525 /* FLoop LOS gate: PPM filter enabled */
7526 ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
7527
7528 /* RX LATCH CALIBRATION */
7529 /* Enable Eyefinder Phase Calibration latch */
7530 ibsd_wr_allchans(ppd, 15, 1, BMASK(0, 0));
7531 /* Enable RX Offset Calibration latch */
7532 ibsd_wr_allchans(ppd, 12, (1 << 4), BMASK(4, 4));
7533 msleep(20);
7534 /* Start Calibration */
7535 ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10));
7536 tstart = get_jiffies_64();
7537 while (chan_done &&
7538 !time_after64(get_jiffies_64(), tstart + msecs_to_jiffies(500))) {
7539 msleep(20);
7540 for (chan = 0; chan < SERDES_CHANS; ++chan) {
7541 rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
7542 (chan + (chan >> 1)),
7543 25, 0, 0);
7544 if ((~rxcaldone & (u32)BMASK(9, 9)) == 0 &&
7545 (~chan_done & (1 << chan)) == 0)
7546 chan_done &= ~(1 << chan);
7547 }
7548 }
7549 if (chan_done) {
7550 printk(KERN_INFO QIB_DRV_NAME
7551 " Serdes %d calibration not done after .5 sec: 0x%x\n",
7552 IBSD(ppd->hw_pidx), chan_done);
7553 } else {
7554 for (chan = 0; chan < SERDES_CHANS; ++chan) {
7555 rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
7556 (chan + (chan >> 1)),
7557 25, 0, 0);
7558 if ((~rxcaldone & (u32)BMASK(10, 10)) == 0)
7559 printk(KERN_INFO QIB_DRV_NAME
7560 " Serdes %d chan %d calibration "
7561 "failed\n", IBSD(ppd->hw_pidx), chan);
7562 }
7563 }
7564
7565 /* Turn off Calibration */
7566 ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
7567 msleep(20);
7568
7569 /* BRING RX UP */
7570 /* Set LE2 value (May be overridden in qsfp_7322_event) */
7571 le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
7572 ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
7573 /* Set LE2 Loop bandwidth */
7574 ibsd_wr_allchans(ppd, 3, (7 << 5), BMASK(7, 5));
7575 /* Enable LE2 */
7576 ibsd_wr_allchans(ppd, 13, (1 << 6), BMASK(6, 6));
7577 msleep(20);
7578 /* Enable H0 only */
7579 ibsd_wr_allchans(ppd, 1, 1, BMASK(9, 1));
7580 /* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
7581 le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
7582 ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
7583 /* Enable VGA */
7584 ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
7585 msleep(20);
7586 /* Set Frequency Loop Bandwidth */
7587 ibsd_wr_allchans(ppd, 2, (7 << 5), BMASK(8, 5));
7588 /* Enable Frequency Loop */
7589 ibsd_wr_allchans(ppd, 2, (1 << 4), BMASK(4, 4));
7590 /* Set Timing Loop Bandwidth */
7591 ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
7592 /* Enable Timing Loop */
7593 ibsd_wr_allchans(ppd, 2, (1 << 3), BMASK(3, 3));
7594 msleep(50);
7595 /* Enable DFE
7596 * Set receive adaptation mode. SDR and DDR adaptation are
7597 * always on, and QDR is initially enabled; later disabled.
7598 */
7599 qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
7600 qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
7601 qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
7602 ppd->dd->cspec->r1 ?
7603 QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
7604 ppd->cpspec->qdr_dfe_on = 1;
7605 /* Disable LE1 */
7606 ibsd_wr_allchans(ppd, 13, (0 << 5), (1 << 5));
7607 /* Disable auto adapt for LE1 */
7608 ibsd_wr_allchans(ppd, 1, (0 << 15), BMASK(15, 15));
7609 msleep(20);
7610 /* Enable AFE Offset Cancel */
7611 ibsd_wr_allchans(ppd, 12, (1 << 12), BMASK(12, 12));
7612 /* Enable Baseline Wander Correction */
7613 ibsd_wr_allchans(ppd, 12, (1 << 13), BMASK(13, 13));
7614 /* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
7615 ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
7616 /* VGA output common mode */
7617 ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2));
7618
7619 return 0;
7620}
7621
7317/* start adjust QMH serdes parameters */ 7622/* start adjust QMH serdes parameters */
7318 7623
7319static void set_man_code(struct qib_pportdata *ppd, int chan, int code) 7624static void set_man_code(struct qib_pportdata *ppd, int chan, int code)
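
serdes_7322_init_new() polls the per-channel calibration-done bits with chan_done serving as both pending-mask and loop condition, bounded by a 500 ms deadline. A userspace analog of that poll-with-deadline pattern, using CLOCK_MONOTONIC in place of jiffies and a stub cal_done(); all names here are illustrative:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define NCHANS 4

static long long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

/* Stand-in for reading a per-channel "calibration done" bit. */
static bool cal_done(int chan, int polls)
{
	return polls > chan;	/* each channel finishes one poll later */
}

/* Poll all channels until each reports done or the deadline passes;
 * the pending-channel bitmask doubles as the loop condition, exactly
 * like chan_done above. Returns the channels that timed out. */
static unsigned wait_calibration(long long budget_ms)
{
	unsigned pending = (1u << NCHANS) - 1;
	long long deadline = now_ms() + budget_ms;
	int polls = 0;

	while (pending && now_ms() < deadline) {
		polls++;
		for (int chan = 0; chan < NCHANS; chan++)
			if ((pending & (1u << chan)) && cal_done(chan, polls))
				pending &= ~(1u << chan);
	}
	return pending;
}

int main(void)
{
	printf("pending after wait: 0x%x\n", wait_calibration(500));
	return 0;
}
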
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index f3b503936043..7896afbb9ce8 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -92,9 +92,11 @@ unsigned long *qib_cpulist;
92/* set number of contexts we'll actually use */ 92/* set number of contexts we'll actually use */
93void qib_set_ctxtcnt(struct qib_devdata *dd) 93void qib_set_ctxtcnt(struct qib_devdata *dd)
94{ 94{
95 if (!qib_cfgctxts) 95 if (!qib_cfgctxts) {
96 dd->cfgctxts = dd->first_user_ctxt + num_online_cpus(); 96 dd->cfgctxts = dd->first_user_ctxt + num_online_cpus();
97 else if (qib_cfgctxts < dd->num_pports) 97 if (dd->cfgctxts > dd->ctxtcnt)
98 dd->cfgctxts = dd->ctxtcnt;
99 } else if (qib_cfgctxts < dd->num_pports)
98 dd->cfgctxts = dd->ctxtcnt; 100 dd->cfgctxts = dd->ctxtcnt;
99 else if (qib_cfgctxts <= dd->ctxtcnt) 101 else if (qib_cfgctxts <= dd->ctxtcnt)
100 dd->cfgctxts = qib_cfgctxts; 102 dd->cfgctxts = qib_cfgctxts;
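
The qib_set_ctxtcnt() change clamps the auto-sized value (first user context plus one per online CPU) to the chip's context count, which could previously overrun on large machines. A simplified sketch of the clamping policy; the real function also treats explicit requests below the port count specially:

#include <stdio.h>

/* 0 means "auto": first user context + one per online CPU, then every
 * path is clamped to the contexts the chip actually has. */
static unsigned set_ctxtcnt(unsigned requested, unsigned first_user,
			    unsigned online_cpus, unsigned chip_ctxts)
{
	unsigned n;

	if (!requested)
		n = first_user + online_cpus;	/* auto-size */
	else
		n = requested;
	if (n > chip_ctxts)
		n = chip_ctxts;			/* never exceed hardware */
	return n;
}

int main(void)
{
	/* 64-CPU box, 18 chip contexts: auto must clamp to 18. */
	printf("auto: %u\n", set_ctxtcnt(0, 9, 64, 18));
	printf("explicit: %u\n", set_ctxtcnt(12, 9, 64, 18));
	return 0;
}
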
diff --git a/drivers/infiniband/hw/qib/qib_intr.c b/drivers/infiniband/hw/qib/qib_intr.c
index 54a40828a106..a693c56ec8a6 100644
--- a/drivers/infiniband/hw/qib/qib_intr.c
+++ b/drivers/infiniband/hw/qib/qib_intr.c
@@ -131,7 +131,8 @@ void qib_handle_e_ibstatuschanged(struct qib_pportdata *ppd, u64 ibcs)
131 /* start a 75msec timer to clear symbol errors */ 131 /* start a 75msec timer to clear symbol errors */
132 mod_timer(&ppd->symerr_clear_timer, 132 mod_timer(&ppd->symerr_clear_timer,
133 msecs_to_jiffies(75)); 133 msecs_to_jiffies(75));
134 } else if (ltstate == IB_PHYSPORTSTATE_LINKUP) { 134 } else if (ltstate == IB_PHYSPORTSTATE_LINKUP &&
135 !(ppd->lflags & QIBL_LINKACTIVE)) {
135 /* active, but not active deferred */ 136
136 qib_hol_up(ppd); /* useful only for 6120 now */ 137 qib_hol_up(ppd); /* useful only for 6120 now */
137 *ppd->statusp |= 138 *ppd->statusp |=
diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c
index 4b80eb153d57..8fd19a47df0c 100644
--- a/drivers/infiniband/hw/qib/qib_keys.c
+++ b/drivers/infiniband/hw/qib/qib_keys.c
@@ -136,7 +136,6 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
136 struct qib_mregion *mr; 136 struct qib_mregion *mr;
137 unsigned n, m; 137 unsigned n, m;
138 size_t off; 138 size_t off;
139 int ret = 0;
140 unsigned long flags; 139 unsigned long flags;
141 140
142 /* 141 /*
@@ -152,6 +151,8 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
152 if (!dev->dma_mr) 151 if (!dev->dma_mr)
153 goto bail; 152 goto bail;
154 atomic_inc(&dev->dma_mr->refcount); 153 atomic_inc(&dev->dma_mr->refcount);
154 spin_unlock_irqrestore(&rkt->lock, flags);
155
155 isge->mr = dev->dma_mr; 156 isge->mr = dev->dma_mr;
156 isge->vaddr = (void *) sge->addr; 157 isge->vaddr = (void *) sge->addr;
157 isge->length = sge->length; 158 isge->length = sge->length;
@@ -170,19 +171,34 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
170 off + sge->length > mr->length || 171 off + sge->length > mr->length ||
171 (mr->access_flags & acc) != acc)) 172 (mr->access_flags & acc) != acc))
172 goto bail; 173 goto bail;
174 atomic_inc(&mr->refcount);
175 spin_unlock_irqrestore(&rkt->lock, flags);
173 176
174 off += mr->offset; 177 off += mr->offset;
175 m = 0; 178 if (mr->page_shift) {
176 n = 0; 179 /*
177 while (off >= mr->map[m]->segs[n].length) { 180 * Page sizes are a uniform power of 2, so no loop is necessary;
178 off -= mr->map[m]->segs[n].length; 181 * entries_spanned_by_off is the number of times the loop below
179 n++; 182 * would have executed.
180 if (n >= QIB_SEGSZ) { 183 */
181 m++; 184 size_t entries_spanned_by_off;
182 n = 0; 185
186 entries_spanned_by_off = off >> mr->page_shift;
187 off -= (entries_spanned_by_off << mr->page_shift);
188 m = entries_spanned_by_off/QIB_SEGSZ;
189 n = entries_spanned_by_off%QIB_SEGSZ;
190 } else {
191 m = 0;
192 n = 0;
193 while (off >= mr->map[m]->segs[n].length) {
194 off -= mr->map[m]->segs[n].length;
195 n++;
196 if (n >= QIB_SEGSZ) {
197 m++;
198 n = 0;
199 }
183 } 200 }
184 } 201 }
185 atomic_inc(&mr->refcount);
186 isge->mr = mr; 202 isge->mr = mr;
187 isge->vaddr = mr->map[m]->segs[n].vaddr + off; 203 isge->vaddr = mr->map[m]->segs[n].vaddr + off;
188 isge->length = mr->map[m]->segs[n].length - off; 204 isge->length = mr->map[m]->segs[n].length - off;
@@ -190,10 +206,10 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
190 isge->m = m; 206 isge->m = m;
191 isge->n = n; 207 isge->n = n;
192ok: 208ok:
193 ret = 1; 209 return 1;
194bail: 210bail:
195 spin_unlock_irqrestore(&rkt->lock, flags); 211 spin_unlock_irqrestore(&rkt->lock, flags);
196 return ret; 212 return 0;
197} 213}
198 214
199/** 215/**
@@ -214,7 +230,6 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
214 struct qib_mregion *mr; 230 struct qib_mregion *mr;
215 unsigned n, m; 231 unsigned n, m;
216 size_t off; 232 size_t off;
217 int ret = 0;
218 unsigned long flags; 233 unsigned long flags;
219 234
220 /* 235 /*
@@ -231,6 +246,8 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
231 if (!dev->dma_mr) 246 if (!dev->dma_mr)
232 goto bail; 247 goto bail;
233 atomic_inc(&dev->dma_mr->refcount); 248 atomic_inc(&dev->dma_mr->refcount);
249 spin_unlock_irqrestore(&rkt->lock, flags);
250
234 sge->mr = dev->dma_mr; 251 sge->mr = dev->dma_mr;
235 sge->vaddr = (void *) vaddr; 252 sge->vaddr = (void *) vaddr;
236 sge->length = len; 253 sge->length = len;
@@ -248,19 +265,34 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
248 if (unlikely(vaddr < mr->iova || off + len > mr->length || 265 if (unlikely(vaddr < mr->iova || off + len > mr->length ||
249 (mr->access_flags & acc) == 0)) 266 (mr->access_flags & acc) == 0))
250 goto bail; 267 goto bail;
268 atomic_inc(&mr->refcount);
269 spin_unlock_irqrestore(&rkt->lock, flags);
251 270
252 off += mr->offset; 271 off += mr->offset;
253 m = 0; 272 if (mr->page_shift) {
254 n = 0; 273 /*
255 while (off >= mr->map[m]->segs[n].length) { 274 * Page sizes are a uniform power of 2, so no loop is necessary;
256 off -= mr->map[m]->segs[n].length; 275 * entries_spanned_by_off is the number of times the loop below
257 n++; 276 * would have executed.
258 if (n >= QIB_SEGSZ) { 277 */
259 m++; 278 size_t entries_spanned_by_off;
260 n = 0; 279
280 entries_spanned_by_off = off >> mr->page_shift;
281 off -= (entries_spanned_by_off << mr->page_shift);
282 m = entries_spanned_by_off/QIB_SEGSZ;
283 n = entries_spanned_by_off%QIB_SEGSZ;
284 } else {
285 m = 0;
286 n = 0;
287 while (off >= mr->map[m]->segs[n].length) {
288 off -= mr->map[m]->segs[n].length;
289 n++;
290 if (n >= QIB_SEGSZ) {
291 m++;
292 n = 0;
293 }
261 } 294 }
262 } 295 }
263 atomic_inc(&mr->refcount);
264 sge->mr = mr; 296 sge->mr = mr;
265 sge->vaddr = mr->map[m]->segs[n].vaddr + off; 297 sge->vaddr = mr->map[m]->segs[n].vaddr + off;
266 sge->length = mr->map[m]->segs[n].length - off; 298 sge->length = mr->map[m]->segs[n].length - off;
@@ -268,10 +300,10 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
268 sge->m = m; 300 sge->m = m;
269 sge->n = n; 301 sge->n = n;
270ok: 302ok:
271 ret = 1; 303 return 1;
272bail: 304bail:
273 spin_unlock_irqrestore(&rkt->lock, flags); 305 spin_unlock_irqrestore(&rkt->lock, flags);
274 return ret; 306 return 0;
275} 307}
276 308
277/* 309/*
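
Two things happen in the qib_lkey_ok()/qib_rkey_ok() rework: the refcount is taken and the lock dropped before the segment walk, and a uniform power-of-two page size lets the (map chunk, segment) pair be computed directly instead of walking segment lengths. A sketch of that direct computation, with SEGSZ standing in for QIB_SEGSZ:

#include <stdio.h>

#define SEGSZ 512	/* segments per map chunk, like QIB_SEGSZ */

/* With a uniform power-of-two page size, shift the byte offset down
 * to whole entries, then split by SEGSZ: no per-segment loop needed.
 * Mirrors the fast path added above. */
static void locate(size_t off, unsigned page_shift,
		   size_t *m, size_t *n, size_t *rem)
{
	size_t entries = off >> page_shift;

	*rem = off - (entries << page_shift);	/* offset within page */
	*m = entries / SEGSZ;			/* map chunk index */
	*n = entries % SEGSZ;			/* segment within chunk */
}

int main(void)
{
	size_t m, n, rem;

	/* 4 KiB pages: byte offset 2101248 = entry 513, +0 bytes,
	 * which lands in chunk 1, segment 1. */
	locate(2101248, 12, &m, &n, &rem);
	printf("m=%zu n=%zu rem=%zu\n", m, n, rem);
	return 0;
}
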
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index 94b0d1f3a8f0..5ad224e4a38b 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -668,8 +668,8 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
668 lid = be16_to_cpu(pip->lid); 668 lid = be16_to_cpu(pip->lid);
669 /* Must be a valid unicast LID address. */ 669 /* Must be a valid unicast LID address. */
670 if (lid == 0 || lid >= QIB_MULTICAST_LID_BASE) 670 if (lid == 0 || lid >= QIB_MULTICAST_LID_BASE)
671 goto err; 671 smp->status |= IB_SMP_INVALID_FIELD;
672 if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) { 672 else if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) {
673 if (ppd->lid != lid) 673 if (ppd->lid != lid)
674 qib_set_uevent_bits(ppd, _QIB_EVENT_LID_CHANGE_BIT); 674 qib_set_uevent_bits(ppd, _QIB_EVENT_LID_CHANGE_BIT);
675 if (ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) 675 if (ppd->lmc != (pip->mkeyprot_resv_lmc & 7))
@@ -683,8 +683,8 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
683 msl = pip->neighbormtu_mastersmsl & 0xF; 683 msl = pip->neighbormtu_mastersmsl & 0xF;
684 /* Must be a valid unicast LID address. */ 684 /* Must be a valid unicast LID address. */
685 if (smlid == 0 || smlid >= QIB_MULTICAST_LID_BASE) 685 if (smlid == 0 || smlid >= QIB_MULTICAST_LID_BASE)
686 goto err; 686 smp->status |= IB_SMP_INVALID_FIELD;
687 if (smlid != ibp->sm_lid || msl != ibp->sm_sl) { 687 else if (smlid != ibp->sm_lid || msl != ibp->sm_sl) {
688 spin_lock_irqsave(&ibp->lock, flags); 688 spin_lock_irqsave(&ibp->lock, flags);
689 if (ibp->sm_ah) { 689 if (ibp->sm_ah) {
690 if (smlid != ibp->sm_lid) 690 if (smlid != ibp->sm_lid)
@@ -707,8 +707,9 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
707 if (lwe == 0xFF) 707 if (lwe == 0xFF)
708 lwe = ppd->link_width_supported; 708 lwe = ppd->link_width_supported;
709 else if (lwe >= 16 || (lwe & ~ppd->link_width_supported)) 709 else if (lwe >= 16 || (lwe & ~ppd->link_width_supported))
710 goto err; 710 smp->status |= IB_SMP_INVALID_FIELD;
711 set_link_width_enabled(ppd, lwe); 711 else if (lwe != ppd->link_width_enabled)
712 set_link_width_enabled(ppd, lwe);
712 } 713 }
713 714
714 lse = pip->linkspeedactive_enabled & 0xF; 715 lse = pip->linkspeedactive_enabled & 0xF;
@@ -721,8 +722,9 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
721 if (lse == 15) 722 if (lse == 15)
722 lse = ppd->link_speed_supported; 723 lse = ppd->link_speed_supported;
723 else if (lse >= 8 || (lse & ~ppd->link_speed_supported)) 724 else if (lse >= 8 || (lse & ~ppd->link_speed_supported))
724 goto err; 725 smp->status |= IB_SMP_INVALID_FIELD;
725 set_link_speed_enabled(ppd, lse); 726 else if (lse != ppd->link_speed_enabled)
727 set_link_speed_enabled(ppd, lse);
726 } 728 }
727 729
728 /* Set link down default state. */ 730 /* Set link down default state. */
@@ -738,7 +740,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
738 IB_LINKINITCMD_POLL); 740 IB_LINKINITCMD_POLL);
739 break; 741 break;
740 default: 742 default:
741 goto err; 743 smp->status |= IB_SMP_INVALID_FIELD;
742 } 744 }
743 745
744 ibp->mkeyprot = pip->mkeyprot_resv_lmc >> 6; 746 ibp->mkeyprot = pip->mkeyprot_resv_lmc >> 6;
@@ -748,15 +750,17 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
748 750
749 mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF); 751 mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF);
750 if (mtu == -1) 752 if (mtu == -1)
751 goto err; 753 smp->status |= IB_SMP_INVALID_FIELD;
752 qib_set_mtu(ppd, mtu); 754 else
755 qib_set_mtu(ppd, mtu);
753 756
754 /* Set operational VLs */ 757 /* Set operational VLs */
755 vls = (pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF; 758 vls = (pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF;
756 if (vls) { 759 if (vls) {
757 if (vls > ppd->vls_supported) 760 if (vls > ppd->vls_supported)
758 goto err; 761 smp->status |= IB_SMP_INVALID_FIELD;
759 (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls); 762 else
763 (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls);
760 } 764 }
761 765
762 if (pip->mkey_violations == 0) 766 if (pip->mkey_violations == 0)
@@ -770,10 +774,10 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
770 774
771 ore = pip->localphyerrors_overrunerrors; 775 ore = pip->localphyerrors_overrunerrors;
772 if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF)) 776 if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF))
773 goto err; 777 smp->status |= IB_SMP_INVALID_FIELD;
774 778
775 if (set_overrunthreshold(ppd, (ore & 0xF))) 779 if (set_overrunthreshold(ppd, (ore & 0xF)))
776 goto err; 780 smp->status |= IB_SMP_INVALID_FIELD;
777 781
778 ibp->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F; 782 ibp->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;
779 783
@@ -792,7 +796,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
792 state = pip->linkspeed_portstate & 0xF; 796 state = pip->linkspeed_portstate & 0xF;
793 lstate = (pip->portphysstate_linkdown >> 4) & 0xF; 797 lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
794 if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP)) 798 if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
795 goto err; 799 smp->status |= IB_SMP_INVALID_FIELD;
796 800
797 /* 801 /*
798 * Only state changes of DOWN, ARM, and ACTIVE are valid 802 * Only state changes of DOWN, ARM, and ACTIVE are valid
@@ -812,8 +816,10 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
812 lstate = QIB_IB_LINKDOWN; 816 lstate = QIB_IB_LINKDOWN;
813 else if (lstate == 3) 817 else if (lstate == 3)
814 lstate = QIB_IB_LINKDOWN_DISABLE; 818 lstate = QIB_IB_LINKDOWN_DISABLE;
815 else 819 else {
816 goto err; 820 smp->status |= IB_SMP_INVALID_FIELD;
821 break;
822 }
817 spin_lock_irqsave(&ppd->lflags_lock, flags); 823 spin_lock_irqsave(&ppd->lflags_lock, flags);
818 ppd->lflags &= ~QIBL_LINKV; 824 ppd->lflags &= ~QIBL_LINKV;
819 spin_unlock_irqrestore(&ppd->lflags_lock, flags); 825 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
@@ -835,8 +841,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
835 qib_set_linkstate(ppd, QIB_IB_LINKACTIVE); 841 qib_set_linkstate(ppd, QIB_IB_LINKACTIVE);
836 break; 842 break;
837 default: 843 default:
838 /* XXX We have already partially updated our state! */ 844 smp->status |= IB_SMP_INVALID_FIELD;
839 goto err;
840 } 845 }
841 846
842 ret = subn_get_portinfo(smp, ibdev, port); 847 ret = subn_get_portinfo(smp, ibdev, port);
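
The subn_set_portinfo() rework replaces goto err (which aborted with state already partially updated, as the deleted XXX comment admitted) with accumulating IB_SMP_INVALID_FIELD in smp->status and continuing. A sketch of that validate-and-apply pattern over two invented fields; the status constant is illustrative:

#include <stdio.h>

#define SMP_INVALID_FIELD 0x001c	/* illustrative status bits */

struct port { unsigned mtu, vls, vls_supported; };

/* Instead of aborting on the first bad field (leaving earlier fields
 * already applied), flag the field as invalid and keep applying the
 * rest, as subn_set_portinfo() now does. */
static unsigned apply_portinfo(struct port *p, int mtu, unsigned vls)
{
	unsigned status = 0;

	if (mtu == -1)
		status |= SMP_INVALID_FIELD;
	else
		p->mtu = mtu;

	if (vls) {
		if (vls > p->vls_supported)
			status |= SMP_INVALID_FIELD;
		else
			p->vls = vls;
	}
	return status;
}

int main(void)
{
	struct port p = { 2048, 1, 4 };

	/* Bad MTU, good VL count: VLs still applied, status flagged. */
	unsigned st = apply_portinfo(&p, -1, 2);
	printf("status=0x%x mtu=%u vls=%u\n", st, p.mtu, p.vls);
	return 0;
}
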
diff --git a/drivers/infiniband/hw/qib/qib_mr.c b/drivers/infiniband/hw/qib/qib_mr.c
index 5f95f0f6385d..08944e2ee334 100644
--- a/drivers/infiniband/hw/qib/qib_mr.c
+++ b/drivers/infiniband/hw/qib/qib_mr.c
@@ -39,7 +39,6 @@
39/* Fast memory region */ 39/* Fast memory region */
40struct qib_fmr { 40struct qib_fmr {
41 struct ib_fmr ibfmr; 41 struct ib_fmr ibfmr;
42 u8 page_shift;
43 struct qib_mregion mr; /* must be last */ 42 struct qib_mregion mr; /* must be last */
44}; 43};
45 44
@@ -107,6 +106,7 @@ static struct qib_mr *alloc_mr(int count, struct qib_lkey_table *lk_table)
107 goto bail; 106 goto bail;
108 } 107 }
109 mr->mr.mapsz = m; 108 mr->mr.mapsz = m;
109 mr->mr.page_shift = 0;
110 mr->mr.max_segs = count; 110 mr->mr.max_segs = count;
111 111
112 /* 112 /*
@@ -231,6 +231,8 @@ struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
231 mr->mr.access_flags = mr_access_flags; 231 mr->mr.access_flags = mr_access_flags;
232 mr->umem = umem; 232 mr->umem = umem;
233 233
234 if (is_power_of_2(umem->page_size))
235 mr->mr.page_shift = ilog2(umem->page_size);
234 m = 0; 236 m = 0;
235 n = 0; 237 n = 0;
236 list_for_each_entry(chunk, &umem->chunk_list, list) { 238 list_for_each_entry(chunk, &umem->chunk_list, list) {
@@ -390,7 +392,7 @@ struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
390 fmr->mr.offset = 0; 392 fmr->mr.offset = 0;
391 fmr->mr.access_flags = mr_access_flags; 393 fmr->mr.access_flags = mr_access_flags;
392 fmr->mr.max_segs = fmr_attr->max_pages; 394 fmr->mr.max_segs = fmr_attr->max_pages;
393 fmr->page_shift = fmr_attr->page_shift; 395 fmr->mr.page_shift = fmr_attr->page_shift;
394 396
395 atomic_set(&fmr->mr.refcount, 0); 397 atomic_set(&fmr->mr.refcount, 0);
396 ret = &fmr->ibfmr; 398 ret = &fmr->ibfmr;
@@ -437,7 +439,7 @@ int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
437 spin_lock_irqsave(&rkt->lock, flags); 439 spin_lock_irqsave(&rkt->lock, flags);
438 fmr->mr.user_base = iova; 440 fmr->mr.user_base = iova;
439 fmr->mr.iova = iova; 441 fmr->mr.iova = iova;
440 ps = 1 << fmr->page_shift; 442 ps = 1 << fmr->mr.page_shift;
441 fmr->mr.length = list_len * ps; 443 fmr->mr.length = list_len * ps;
442 m = 0; 444 m = 0;
443 n = 0; 445 n = 0;
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 6c39851d2ded..e16751f8639e 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -48,13 +48,12 @@ static inline unsigned mk_qpn(struct qib_qpn_table *qpt,
48 48
49static inline unsigned find_next_offset(struct qib_qpn_table *qpt, 49static inline unsigned find_next_offset(struct qib_qpn_table *qpt,
50 struct qpn_map *map, unsigned off, 50 struct qpn_map *map, unsigned off,
51 unsigned r) 51 unsigned n)
52{ 52{
53 if (qpt->mask) { 53 if (qpt->mask) {
54 off++; 54 off++;
55 if ((off & qpt->mask) >> 1 != r) 55 if (((off & qpt->mask) >> 1) >= n)
56 off = ((off & qpt->mask) ? 56 off = (off | qpt->mask) + 2;
57 (off | qpt->mask) + 1 : off) | (r << 1);
58 } else 57 } else
59 off = find_next_zero_bit(map->page, BITS_PER_PAGE, off); 58 off = find_next_zero_bit(map->page, BITS_PER_PAGE, off);
60 return off; 59 return off;
@@ -123,7 +122,6 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
123 u32 i, offset, max_scan, qpn; 122 u32 i, offset, max_scan, qpn;
124 struct qpn_map *map; 123 struct qpn_map *map;
125 u32 ret; 124 u32 ret;
126 int r;
127 125
128 if (type == IB_QPT_SMI || type == IB_QPT_GSI) { 126 if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
129 unsigned n; 127 unsigned n;
@@ -139,15 +137,11 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
139 goto bail; 137 goto bail;
140 } 138 }
141 139
142 r = smp_processor_id(); 140 qpn = qpt->last + 2;
143 if (r >= dd->n_krcv_queues)
144 r %= dd->n_krcv_queues;
145 qpn = qpt->last + 1;
146 if (qpn >= QPN_MAX) 141 if (qpn >= QPN_MAX)
147 qpn = 2; 142 qpn = 2;
148 if (qpt->mask && ((qpn & qpt->mask) >> 1) != r) 143 if (qpt->mask && ((qpn & qpt->mask) >> 1) >= dd->n_krcv_queues)
149 qpn = ((qpn & qpt->mask) ? (qpn | qpt->mask) + 1 : qpn) | 144 qpn = (qpn | qpt->mask) + 2;
150 (r << 1);
151 offset = qpn & BITS_PER_PAGE_MASK; 145 offset = qpn & BITS_PER_PAGE_MASK;
152 map = &qpt->map[qpn / BITS_PER_PAGE]; 146 map = &qpt->map[qpn / BITS_PER_PAGE];
153 max_scan = qpt->nmaps - !offset; 147 max_scan = qpt->nmaps - !offset;
@@ -163,7 +157,8 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
163 ret = qpn; 157 ret = qpn;
164 goto bail; 158 goto bail;
165 } 159 }
166 offset = find_next_offset(qpt, map, offset, r); 160 offset = find_next_offset(qpt, map, offset,
161 dd->n_krcv_queues);
167 qpn = mk_qpn(qpt, map, offset); 162 qpn = mk_qpn(qpt, map, offset);
168 /* 163 /*
169 * This test differs from alloc_pidmap(). 164 * This test differs from alloc_pidmap().
@@ -183,13 +178,13 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
183 if (qpt->nmaps == QPNMAP_ENTRIES) 178 if (qpt->nmaps == QPNMAP_ENTRIES)
184 break; 179 break;
185 map = &qpt->map[qpt->nmaps++]; 180 map = &qpt->map[qpt->nmaps++];
186 offset = qpt->mask ? (r << 1) : 0; 181 offset = 0;
187 } else if (map < &qpt->map[qpt->nmaps]) { 182 } else if (map < &qpt->map[qpt->nmaps]) {
188 ++map; 183 ++map;
189 offset = qpt->mask ? (r << 1) : 0; 184 offset = 0;
190 } else { 185 } else {
191 map = &qpt->map[0]; 186 map = &qpt->map[0];
192 offset = qpt->mask ? (r << 1) : 2; 187 offset = 2;
193 } 188 }
194 qpn = mk_qpn(qpt, map, offset); 189 qpn = mk_qpn(qpt, map, offset);
195 } 190 }
@@ -468,6 +463,10 @@ int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
468 qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR); 463 qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
469 del_timer(&qp->s_timer); 464 del_timer(&qp->s_timer);
470 } 465 }
466
467 if (qp->s_flags & QIB_S_ANY_WAIT_SEND)
468 qp->s_flags &= ~QIB_S_ANY_WAIT_SEND;
469
471 spin_lock(&dev->pending_lock); 470 spin_lock(&dev->pending_lock);
472 if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) { 471 if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) {
473 qp->s_flags &= ~QIB_S_ANY_WAIT_IO; 472 qp->s_flags &= ~QIB_S_ANY_WAIT_IO;
@@ -1061,7 +1060,6 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
1061 } 1060 }
1062 qp->ibqp.qp_num = err; 1061 qp->ibqp.qp_num = err;
1063 qp->port_num = init_attr->port_num; 1062 qp->port_num = init_attr->port_num;
1064 qp->processor_id = smp_processor_id();
1065 qib_reset_qp(qp, init_attr->qp_type); 1063 qib_reset_qp(qp, init_attr->qp_type);
1066 break; 1064 break;
1067 1065
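
With the processor-id affinity gone, alloc_qpn() only has to keep the QPN's context-steering bits below n_krcv_queues; setting the masked bits and adding 2 skips straight past the invalid range. A sketch of the skip arithmetic, using the 7220's new 0x3e mask:

#include <stdio.h>

/* With qpn_mask selecting the context-steering bits, skip any QPN
 * whose steered context would exceed the number of kernel receive
 * queues: (qpn | mask) + 2 jumps to the next QPN whose masked bits
 * wrap back to context 0, as in alloc_qpn() above. */
static unsigned next_valid_qpn(unsigned qpn, unsigned mask, unsigned nq)
{
	if (mask && ((qpn & mask) >> 1) >= nq)
		qpn = (qpn | mask) + 2;
	return qpn;
}

int main(void)
{
	unsigned mask = 0x3e;	/* bits 5:1 pick the context */

	for (unsigned q = 2; q < 140; q += 32)
		printf("%3u -> %3u\n", q, next_valid_qpn(q, mask, 4));
	return 0;
}
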
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 955fb7157793..8245237b67ce 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -1407,6 +1407,7 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
1407 struct qib_ctxtdata *rcd) 1407 struct qib_ctxtdata *rcd)
1408{ 1408{
1409 struct qib_swqe *wqe; 1409 struct qib_swqe *wqe;
1410 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1410 enum ib_wc_status status; 1411 enum ib_wc_status status;
1411 unsigned long flags; 1412 unsigned long flags;
1412 int diff; 1413 int diff;
@@ -1414,6 +1415,29 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
1414 u32 aeth; 1415 u32 aeth;
1415 u64 val; 1416 u64 val;
1416 1417
1418 if (opcode != OP(RDMA_READ_RESPONSE_MIDDLE)) {
1419 /*
1420 * If ACK'd PSN on SDMA busy list try to make progress to
1421 * reclaim SDMA credits.
1422 */
1423 if ((qib_cmp24(psn, qp->s_sending_psn) >= 0) &&
1424 (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)) {
1425
1426 /*
1427 * If send tasklet not running attempt to progress
1428 * SDMA queue.
1429 */
1430 if (!(qp->s_flags & QIB_S_BUSY)) {
1431 /* Acquire SDMA Lock */
1432 spin_lock_irqsave(&ppd->sdma_lock, flags);
1433 /* Invoke sdma make progress */
1434 qib_sdma_make_progress(ppd);
1435 /* Release SDMA Lock */
1436 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1437 }
1438 }
1439 }
1440
1417 spin_lock_irqsave(&qp->s_lock, flags); 1441 spin_lock_irqsave(&qp->s_lock, flags);
1418 1442
1419 /* Ignore invalid responses. */ 1443 /* Ignore invalid responses. */
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index e1b3da2a1f85..4a51fd1e9cb7 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -445,13 +445,14 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
445 qkey = be32_to_cpu(ohdr->u.ud.deth[0]); 445 qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
446 src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & QIB_QPN_MASK; 446 src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & QIB_QPN_MASK;
447 447
448 /* Get the number of bytes the message was padded by. */ 448 /*
449 * Get the number of bytes the message was padded by
450 * and drop incomplete packets.
451 */
449 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3; 452 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
450 if (unlikely(tlen < (hdrsize + pad + 4))) { 453 if (unlikely(tlen < (hdrsize + pad + 4)))
451 /* Drop incomplete packets. */ 454 goto drop;
452 ibp->n_pkt_drops++; 455
453 goto bail;
454 }
455 tlen -= hdrsize + pad + 4; 456 tlen -= hdrsize + pad + 4;
456 457
457 /* 458 /*
@@ -460,10 +461,8 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
460 */ 461 */
461 if (qp->ibqp.qp_num) { 462 if (qp->ibqp.qp_num) {
462 if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE || 463 if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
463 hdr->lrh[3] == IB_LID_PERMISSIVE)) { 464 hdr->lrh[3] == IB_LID_PERMISSIVE))
464 ibp->n_pkt_drops++; 465 goto drop;
465 goto bail;
466 }
467 if (qp->ibqp.qp_num > 1) { 466 if (qp->ibqp.qp_num > 1) {
468 u16 pkey1, pkey2; 467 u16 pkey1, pkey2;
469 468
@@ -476,7 +475,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
476 0xF, 475 0xF,
477 src_qp, qp->ibqp.qp_num, 476 src_qp, qp->ibqp.qp_num,
478 hdr->lrh[3], hdr->lrh[1]); 477 hdr->lrh[3], hdr->lrh[1]);
479 goto bail; 478 return;
480 } 479 }
481 } 480 }
482 if (unlikely(qkey != qp->qkey)) { 481 if (unlikely(qkey != qp->qkey)) {
@@ -484,30 +483,24 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
484 (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF, 483 (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
485 src_qp, qp->ibqp.qp_num, 484 src_qp, qp->ibqp.qp_num,
486 hdr->lrh[3], hdr->lrh[1]); 485 hdr->lrh[3], hdr->lrh[1]);
487 goto bail; 486 return;
488 } 487 }
489 /* Drop invalid MAD packets (see 13.5.3.1). */ 488 /* Drop invalid MAD packets (see 13.5.3.1). */
490 if (unlikely(qp->ibqp.qp_num == 1 && 489 if (unlikely(qp->ibqp.qp_num == 1 &&
491 (tlen != 256 || 490 (tlen != 256 ||
492 (be16_to_cpu(hdr->lrh[0]) >> 12) == 15))) { 491 (be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))
493 ibp->n_pkt_drops++; 492 goto drop;
494 goto bail;
495 }
496 } else { 493 } else {
497 struct ib_smp *smp; 494 struct ib_smp *smp;
498 495
499 /* Drop invalid MAD packets (see 13.5.3.1). */ 496 /* Drop invalid MAD packets (see 13.5.3.1). */
500 if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15) { 497 if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15)
501 ibp->n_pkt_drops++; 498 goto drop;
502 goto bail;
503 }
504 smp = (struct ib_smp *) data; 499 smp = (struct ib_smp *) data;
505 if ((hdr->lrh[1] == IB_LID_PERMISSIVE || 500 if ((hdr->lrh[1] == IB_LID_PERMISSIVE ||
506 hdr->lrh[3] == IB_LID_PERMISSIVE) && 501 hdr->lrh[3] == IB_LID_PERMISSIVE) &&
507 smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { 502 smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
508 ibp->n_pkt_drops++; 503 goto drop;
509 goto bail;
510 }
511 } 504 }
512 505
513 /* 506 /*
@@ -519,14 +512,12 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
519 opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) { 512 opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
520 wc.ex.imm_data = ohdr->u.ud.imm_data; 513 wc.ex.imm_data = ohdr->u.ud.imm_data;
521 wc.wc_flags = IB_WC_WITH_IMM; 514 wc.wc_flags = IB_WC_WITH_IMM;
522 hdrsize += sizeof(u32); 515 tlen -= sizeof(u32);
523 } else if (opcode == IB_OPCODE_UD_SEND_ONLY) { 516 } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
524 wc.ex.imm_data = 0; 517 wc.ex.imm_data = 0;
525 wc.wc_flags = 0; 518 wc.wc_flags = 0;
526 } else { 519 } else
527 ibp->n_pkt_drops++; 520 goto drop;
528 goto bail;
529 }
530 521
531 /* 522 /*
 532 * A GRH is expected to precede the data even if not 523 * A GRH is expected to precede the data even if not
@@ -556,8 +547,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
556 /* Silently drop packets which are too big. */ 547 /* Silently drop packets which are too big. */
557 if (unlikely(wc.byte_len > qp->r_len)) { 548 if (unlikely(wc.byte_len > qp->r_len)) {
558 qp->r_flags |= QIB_R_REUSE_SGE; 549 qp->r_flags |= QIB_R_REUSE_SGE;
559 ibp->n_pkt_drops++; 550 goto drop;
560 return;
561 } 551 }
562 if (has_grh) { 552 if (has_grh) {
563 qib_copy_sge(&qp->r_sge, &hdr->u.l.grh, 553 qib_copy_sge(&qp->r_sge, &hdr->u.l.grh,
@@ -594,5 +584,8 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
594 qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 584 qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
595 (ohdr->bth[0] & 585 (ohdr->bth[0] &
596 cpu_to_be32(IB_BTH_SOLICITED)) != 0); 586 cpu_to_be32(IB_BTH_SOLICITED)) != 0);
597bail:; 587 return;
588
589drop:
590 ibp->n_pkt_drops++;
598} 591}
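
The qib_ud.c hunk above collapses several copies of "ibp->n_pkt_drops++; goto bail;" into a single drop: label, so the drop counter is bumped in exactly one place. A minimal sketch of the same centralized-exit idiom; everything in it (parse_packet, pkt_stats) is hypothetical, not taken from the driver:

    #include <stdbool.h>
    #include <stddef.h>

    struct pkt_stats { unsigned long n_pkt_drops; };

    static bool parse_packet(struct pkt_stats *st, const unsigned char *buf,
                             size_t len)
    {
            if (len < 4)
                    goto drop;              /* truncated header */
            if ((buf[0] >> 4) != 4)
                    goto drop;              /* unexpected version */
            return true;

    drop:
            st->n_pkt_drops++;              /* counted in one place */
            return false;
    }

Adding or removing a failure case then touches only the test, never the accounting.
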
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c
index 4c19e06b5e85..66208bcd7c13 100644
--- a/drivers/infiniband/hw/qib/qib_user_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_user_sdma.c
@@ -382,6 +382,7 @@ static void qib_user_sdma_free_pkt_list(struct device *dev,
382 382
383 kmem_cache_free(pq->pkt_slab, pkt); 383 kmem_cache_free(pq->pkt_slab, pkt);
384 } 384 }
385 INIT_LIST_HEAD(list);
385} 386}
386 387
387/* 388/*
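
The one-line qib_user_sdma.c fix re-initializes the list head after all of its entries have been freed, so a caller that reuses the list does not walk freed memory. A hedged sketch of the pattern with a hypothetical item type:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct item { struct list_head list; };

    static void free_all(struct list_head *head)
    {
            struct item *it, *tmp;

            list_for_each_entry_safe(it, tmp, head, list) {
                    list_del(&it->list);
                    kfree(it);
            }
            /* Reset the head: the list is now empty and safe to
             * reuse instead of still pointing at freed nodes. */
            INIT_LIST_HEAD(head);
    }
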
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index bd57c1273225..63b22a9a7feb 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -301,6 +301,7 @@ struct qib_mregion {
301 int access_flags; 301 int access_flags;
302 u32 max_segs; /* number of qib_segs in all the arrays */ 302 u32 max_segs; /* number of qib_segs in all the arrays */
303 u32 mapsz; /* size of the map array */ 303 u32 mapsz; /* size of the map array */
 304 u8 page_shift; /* 0 - non-uniform/non power-of-2 sizes */
304 atomic_t refcount; 305 atomic_t refcount;
305 struct qib_segarray *map[0]; /* the segments */ 306 struct qib_segarray *map[0]; /* the segments */
306}; 307};
@@ -435,7 +436,6 @@ struct qib_qp {
435 spinlock_t r_lock; /* used for APM */ 436 spinlock_t r_lock; /* used for APM */
436 spinlock_t s_lock; 437 spinlock_t s_lock;
437 atomic_t s_dma_busy; 438 atomic_t s_dma_busy;
438 unsigned processor_id; /* Processor ID QP is bound to */
439 u32 s_flags; 439 u32 s_flags;
440 u32 s_cur_size; /* size of send packet in bytes */ 440 u32 s_cur_size; /* size of send packet in bytes */
441 u32 s_len; /* total length of s_sge */ 441 u32 s_len; /* total length of s_sge */
@@ -813,13 +813,8 @@ extern struct workqueue_struct *qib_cq_wq;
813 */ 813 */
814static inline void qib_schedule_send(struct qib_qp *qp) 814static inline void qib_schedule_send(struct qib_qp *qp)
815{ 815{
816 if (qib_send_ok(qp)) { 816 if (qib_send_ok(qp))
817 if (qp->processor_id == smp_processor_id()) 817 queue_work(qib_wq, &qp->s_work);
818 queue_work(qib_wq, &qp->s_work);
819 else
820 queue_work_on(qp->processor_id,
821 qib_wq, &qp->s_work);
822 }
823} 818}
824 819
825static inline int qib_pkey_ok(u16 pkey1, u16 pkey2) 820static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
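
With the per-QP processor binding removed, qib_schedule_send() reduces to a plain queue_work() call. A sketch of deferring send processing to a workqueue; my_qp, send_handler and the wq argument are assumptions for illustration:

    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    struct my_qp { struct work_struct s_work; };

    static void send_handler(struct work_struct *work)
    {
            struct my_qp *qp = container_of(work, struct my_qp, s_work);

            /* drain the QP's send queue here */
            (void)qp;
    }

    static void my_schedule_send(struct workqueue_struct *wq, struct my_qp *qp)
    {
            /* queue_work() is a no-op while the work is already
             * pending, so callers need no CPU bookkeeping. */
            queue_work(wq, &qp->s_work);
    }
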
diff --git a/drivers/infiniband/ulp/ipoib/Kconfig b/drivers/infiniband/ulp/ipoib/Kconfig
index 9d9a9dc51f18..55855eeabae7 100644
--- a/drivers/infiniband/ulp/ipoib/Kconfig
+++ b/drivers/infiniband/ulp/ipoib/Kconfig
@@ -1,7 +1,6 @@
1config INFINIBAND_IPOIB 1config INFINIBAND_IPOIB
2 tristate "IP-over-InfiniBand" 2 tristate "IP-over-InfiniBand"
3 depends on NETDEVICES && INET && (IPV6 || IPV6=n) 3 depends on NETDEVICES && INET && (IPV6 || IPV6=n)
4 select INET_LRO
5 ---help--- 4 ---help---
6 Support for the IP-over-InfiniBand protocol (IPoIB). This 5 Support for the IP-over-InfiniBand protocol (IPoIB). This
7 transports IP packets over InfiniBand so you can use your IB 6 transports IP packets over InfiniBand so you can use your IB
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 753a983a5fdc..ab97f92fc257 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -50,7 +50,7 @@
50#include <rdma/ib_verbs.h> 50#include <rdma/ib_verbs.h>
51#include <rdma/ib_pack.h> 51#include <rdma/ib_pack.h>
52#include <rdma/ib_sa.h> 52#include <rdma/ib_sa.h>
53#include <linux/inet_lro.h> 53#include <linux/sched.h>
54 54
55/* constants */ 55/* constants */
56 56
@@ -100,9 +100,6 @@ enum {
100 IPOIB_MCAST_FLAG_BUSY = 2, /* joining or already joined */ 100 IPOIB_MCAST_FLAG_BUSY = 2, /* joining or already joined */
101 IPOIB_MCAST_FLAG_ATTACHED = 3, 101 IPOIB_MCAST_FLAG_ATTACHED = 3,
102 102
103 IPOIB_MAX_LRO_DESCRIPTORS = 8,
104 IPOIB_LRO_MAX_AGGR = 64,
105
106 MAX_SEND_CQE = 16, 103 MAX_SEND_CQE = 16,
107 IPOIB_CM_COPYBREAK = 256, 104 IPOIB_CM_COPYBREAK = 256,
108}; 105};
@@ -262,11 +259,6 @@ struct ipoib_ethtool_st {
262 u16 max_coalesced_frames; 259 u16 max_coalesced_frames;
263}; 260};
264 261
265struct ipoib_lro {
266 struct net_lro_mgr lro_mgr;
267 struct net_lro_desc lro_desc[IPOIB_MAX_LRO_DESCRIPTORS];
268};
269
270/* 262/*
271 * Device private locking: network stack tx_lock protects members used 263 * Device private locking: network stack tx_lock protects members used
272 * in TX fast path, lock protects everything else. lock nests inside 264 * in TX fast path, lock protects everything else. lock nests inside
@@ -352,8 +344,6 @@ struct ipoib_dev_priv {
352 int hca_caps; 344 int hca_caps;
353 struct ipoib_ethtool_st ethtool; 345 struct ipoib_ethtool_st ethtool;
354 struct timer_list poll_timer; 346 struct timer_list poll_timer;
355
356 struct ipoib_lro lro;
357}; 347};
358 348
359struct ipoib_ah { 349struct ipoib_ah {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index bb1004114dec..c1c49f2d35b5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1480,6 +1480,7 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
1480 1480
1481 if (test_bit(IPOIB_FLAG_CSUM, &priv->flags)) { 1481 if (test_bit(IPOIB_FLAG_CSUM, &priv->flags)) {
1482 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; 1482 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
1483 priv->dev->features |= NETIF_F_GRO;
1483 if (priv->hca_caps & IB_DEVICE_UD_TSO) 1484 if (priv->hca_caps & IB_DEVICE_UD_TSO)
1484 dev->features |= NETIF_F_TSO; 1485 dev->features |= NETIF_F_TSO;
1485 } 1486 }
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index 1a1657c82edd..19f7f5206f78 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -106,63 +106,12 @@ static int ipoib_set_coalesce(struct net_device *dev,
106 return 0; 106 return 0;
107} 107}
108 108
109static const char ipoib_stats_keys[][ETH_GSTRING_LEN] = {
110 "LRO aggregated", "LRO flushed",
111 "LRO avg aggr", "LRO no desc"
112};
113
114static void ipoib_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
115{
116 switch (stringset) {
117 case ETH_SS_STATS:
118 memcpy(data, *ipoib_stats_keys, sizeof(ipoib_stats_keys));
119 break;
120 }
121}
122
123static int ipoib_get_sset_count(struct net_device *dev, int sset)
124{
125 switch (sset) {
126 case ETH_SS_STATS:
127 return ARRAY_SIZE(ipoib_stats_keys);
128 default:
129 return -EOPNOTSUPP;
130 }
131}
132
133static void ipoib_get_ethtool_stats(struct net_device *dev,
134 struct ethtool_stats *stats, uint64_t *data)
135{
136 struct ipoib_dev_priv *priv = netdev_priv(dev);
137 int index = 0;
138
139 /* Get LRO statistics */
140 data[index++] = priv->lro.lro_mgr.stats.aggregated;
141 data[index++] = priv->lro.lro_mgr.stats.flushed;
142 if (priv->lro.lro_mgr.stats.flushed)
143 data[index++] = priv->lro.lro_mgr.stats.aggregated /
144 priv->lro.lro_mgr.stats.flushed;
145 else
146 data[index++] = 0;
147 data[index++] = priv->lro.lro_mgr.stats.no_desc;
148}
149
150static int ipoib_set_flags(struct net_device *dev, u32 flags)
151{
152 return ethtool_op_set_flags(dev, flags, ETH_FLAG_LRO);
153}
154
155static const struct ethtool_ops ipoib_ethtool_ops = { 109static const struct ethtool_ops ipoib_ethtool_ops = {
156 .get_drvinfo = ipoib_get_drvinfo, 110 .get_drvinfo = ipoib_get_drvinfo,
157 .get_rx_csum = ipoib_get_rx_csum, 111 .get_rx_csum = ipoib_get_rx_csum,
158 .set_tso = ipoib_set_tso, 112 .set_tso = ipoib_set_tso,
159 .get_coalesce = ipoib_get_coalesce, 113 .get_coalesce = ipoib_get_coalesce,
160 .set_coalesce = ipoib_set_coalesce, 114 .set_coalesce = ipoib_set_coalesce,
161 .get_flags = ethtool_op_get_flags,
162 .set_flags = ipoib_set_flags,
163 .get_strings = ipoib_get_strings,
164 .get_sset_count = ipoib_get_sset_count,
165 .get_ethtool_stats = ipoib_get_ethtool_stats,
166}; 115};
167 116
168void ipoib_set_ethtool_ops(struct net_device *dev) 117void ipoib_set_ethtool_ops(struct net_device *dev)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index dfa71903d6e4..806d0292dc39 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -295,10 +295,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
295 if (test_bit(IPOIB_FLAG_CSUM, &priv->flags) && likely(wc->csum_ok)) 295 if (test_bit(IPOIB_FLAG_CSUM, &priv->flags) && likely(wc->csum_ok))
296 skb->ip_summed = CHECKSUM_UNNECESSARY; 296 skb->ip_summed = CHECKSUM_UNNECESSARY;
297 297
298 if (dev->features & NETIF_F_LRO) 298 napi_gro_receive(&priv->napi, skb);
299 lro_receive_skb(&priv->lro.lro_mgr, skb, NULL);
300 else
301 netif_receive_skb(skb);
302 299
303repost: 300repost:
304 if (unlikely(ipoib_ib_post_receive(dev, wr_id))) 301 if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
@@ -450,9 +447,6 @@ poll_more:
450 } 447 }
451 448
452 if (done < budget) { 449 if (done < budget) {
453 if (dev->features & NETIF_F_LRO)
454 lro_flush_all(&priv->lro.lro_mgr);
455
456 napi_complete(napi); 450 napi_complete(napi);
457 if (unlikely(ib_req_notify_cq(priv->recv_cq, 451 if (unlikely(ib_req_notify_cq(priv->recv_cq,
458 IB_CQ_NEXT_COMP | 452 IB_CQ_NEXT_COMP |
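
Replacing lro_receive_skb()/lro_flush_all() with napi_gro_receive() deletes all driver-private aggregation state; GRO-held packets are flushed when napi_complete() runs. A hedged sketch of the receive half of a NAPI poll handler; fetch_rx_skb() is an assumed helper, not an IPoIB function:

    #include <linux/netdevice.h>

    /* Assumed for this sketch: returns the next completed skb from
     * the device's receive ring, or NULL when the ring is empty. */
    struct sk_buff *fetch_rx_skb(struct napi_struct *napi);

    static int my_poll(struct napi_struct *napi, int budget)
    {
            int done = 0;

            while (done < budget) {
                    struct sk_buff *skb = fetch_rx_skb(napi);

                    if (!skb)
                            break;
                    /* GRO replaces the per-driver LRO manager. */
                    napi_gro_receive(napi, skb);
                    done++;
            }

            if (done < budget)
                    napi_complete(napi);    /* also flushes GRO */
            return done;
    }
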
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 9ff7bc73ed95..7a07a728fe0d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -60,15 +60,6 @@ MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
60module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444); 60module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
61MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue"); 61MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");
62 62
63static int lro;
64module_param(lro, bool, 0444);
65MODULE_PARM_DESC(lro, "Enable LRO (Large Receive Offload)");
66
67static int lro_max_aggr = IPOIB_LRO_MAX_AGGR;
68module_param(lro_max_aggr, int, 0644);
69MODULE_PARM_DESC(lro_max_aggr, "LRO: Max packets to be aggregated "
70 "(default = 64)");
71
72#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG 63#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
73int ipoib_debug_level; 64int ipoib_debug_level;
74 65
@@ -976,54 +967,6 @@ static const struct header_ops ipoib_header_ops = {
976 .create = ipoib_hard_header, 967 .create = ipoib_hard_header,
977}; 968};
978 969
979static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
980 void **tcph, u64 *hdr_flags, void *priv)
981{
982 unsigned int ip_len;
983 struct iphdr *iph;
984
985 if (unlikely(skb->protocol != htons(ETH_P_IP)))
986 return -1;
987
988 /*
989 * In the future we may add an else clause that verifies the
990 * checksum and allows devices which do not calculate checksum
991 * to use LRO.
992 */
993 if (unlikely(skb->ip_summed != CHECKSUM_UNNECESSARY))
994 return -1;
995
996 /* Check for non-TCP packet */
997 skb_reset_network_header(skb);
998 iph = ip_hdr(skb);
999 if (iph->protocol != IPPROTO_TCP)
1000 return -1;
1001
1002 ip_len = ip_hdrlen(skb);
1003 skb_set_transport_header(skb, ip_len);
1004 *tcph = tcp_hdr(skb);
1005
1006 /* check if IP header and TCP header are complete */
1007 if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
1008 return -1;
1009
1010 *hdr_flags = LRO_IPV4 | LRO_TCP;
1011 *iphdr = iph;
1012
1013 return 0;
1014}
1015
1016static void ipoib_lro_setup(struct ipoib_dev_priv *priv)
1017{
1018 priv->lro.lro_mgr.max_aggr = lro_max_aggr;
1019 priv->lro.lro_mgr.max_desc = IPOIB_MAX_LRO_DESCRIPTORS;
1020 priv->lro.lro_mgr.lro_arr = priv->lro.lro_desc;
1021 priv->lro.lro_mgr.get_skb_header = get_skb_hdr;
1022 priv->lro.lro_mgr.features = LRO_F_NAPI;
1023 priv->lro.lro_mgr.dev = priv->dev;
1024 priv->lro.lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
1025}
1026
1027static const struct net_device_ops ipoib_netdev_ops = { 970static const struct net_device_ops ipoib_netdev_ops = {
1028 .ndo_open = ipoib_open, 971 .ndo_open = ipoib_open,
1029 .ndo_stop = ipoib_stop, 972 .ndo_stop = ipoib_stop,
@@ -1067,8 +1010,6 @@ static void ipoib_setup(struct net_device *dev)
1067 1010
1068 priv->dev = dev; 1011 priv->dev = dev;
1069 1012
1070 ipoib_lro_setup(priv);
1071
1072 spin_lock_init(&priv->lock); 1013 spin_lock_init(&priv->lock);
1073 1014
1074 mutex_init(&priv->vlan_mutex); 1015 mutex_init(&priv->vlan_mutex);
@@ -1218,8 +1159,7 @@ int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
1218 priv->dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; 1159 priv->dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
1219 } 1160 }
1220 1161
1221 if (lro) 1162 priv->dev->features |= NETIF_F_GRO;
1222 priv->dev->features |= NETIF_F_LRO;
1223 1163
1224 if (priv->dev->features & NETIF_F_SG && priv->hca_caps & IB_DEVICE_UD_TSO) 1164 if (priv->dev->features & NETIF_F_SG && priv->hca_caps & IB_DEVICE_UD_TSO)
1225 priv->dev->features |= NETIF_F_TSO; 1165 priv->dev->features |= NETIF_F_TSO;
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 1e1e347a7715..4b62105ed1e8 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -441,18 +441,28 @@ static void srp_disconnect_target(struct srp_target_port *target)
441 wait_for_completion(&target->done); 441 wait_for_completion(&target->done);
442} 442}
443 443
444static bool srp_change_state(struct srp_target_port *target,
445 enum srp_target_state old,
446 enum srp_target_state new)
447{
448 bool changed = false;
449
450 spin_lock_irq(&target->lock);
451 if (target->state == old) {
452 target->state = new;
453 changed = true;
454 }
455 spin_unlock_irq(&target->lock);
456 return changed;
457}
458
444static void srp_remove_work(struct work_struct *work) 459static void srp_remove_work(struct work_struct *work)
445{ 460{
446 struct srp_target_port *target = 461 struct srp_target_port *target =
447 container_of(work, struct srp_target_port, work); 462 container_of(work, struct srp_target_port, work);
448 463
449 spin_lock_irq(target->scsi_host->host_lock); 464 if (!srp_change_state(target, SRP_TARGET_DEAD, SRP_TARGET_REMOVED))
450 if (target->state != SRP_TARGET_DEAD) {
451 spin_unlock_irq(target->scsi_host->host_lock);
452 return; 465 return;
453 }
454 target->state = SRP_TARGET_REMOVED;
455 spin_unlock_irq(target->scsi_host->host_lock);
456 466
457 spin_lock(&target->srp_host->target_lock); 467 spin_lock(&target->srp_host->target_lock);
458 list_del(&target->list); 468 list_del(&target->list);
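
srp_change_state() above is a compare-and-set on the target state under the new target->lock. A sketch of the same guarded-transition idiom with a hypothetical port type and state enum:

    #include <linux/spinlock.h>
    #include <linux/types.h>

    enum my_state { ST_LIVE, ST_CONNECTING, ST_DEAD };

    struct my_port {
            spinlock_t lock;
            enum my_state state;
    };

    /* Returns true only for the caller that actually observed @old,
     * so racing callers are serialized cleanly by the lock. */
    static bool change_state(struct my_port *p, enum my_state old,
                             enum my_state next)
    {
            bool changed = false;

            spin_lock_irq(&p->lock);
            if (p->state == old) {
                    p->state = next;
                    changed = true;
            }
            spin_unlock_irq(&p->lock);
            return changed;
    }

The caller branches on the return value instead of re-checking the state, which is what lets srp_remove_work() and srp_reconnect_target() drop their open-coded lock/test/unlock sequences.
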
@@ -539,33 +549,34 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
539 scsi_sg_count(scmnd), scmnd->sc_data_direction); 549 scsi_sg_count(scmnd), scmnd->sc_data_direction);
540} 550}
541 551
542static void srp_remove_req(struct srp_target_port *target, struct srp_request *req) 552static void srp_remove_req(struct srp_target_port *target,
553 struct srp_request *req, s32 req_lim_delta)
543{ 554{
555 unsigned long flags;
556
544 srp_unmap_data(req->scmnd, target, req); 557 srp_unmap_data(req->scmnd, target, req);
545 list_move_tail(&req->list, &target->free_reqs); 558 spin_lock_irqsave(&target->lock, flags);
559 target->req_lim += req_lim_delta;
560 req->scmnd = NULL;
561 list_add_tail(&req->list, &target->free_reqs);
562 spin_unlock_irqrestore(&target->lock, flags);
546} 563}
547 564
548static void srp_reset_req(struct srp_target_port *target, struct srp_request *req) 565static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
549{ 566{
550 req->scmnd->result = DID_RESET << 16; 567 req->scmnd->result = DID_RESET << 16;
551 req->scmnd->scsi_done(req->scmnd); 568 req->scmnd->scsi_done(req->scmnd);
552 srp_remove_req(target, req); 569 srp_remove_req(target, req, 0);
553} 570}
554 571
555static int srp_reconnect_target(struct srp_target_port *target) 572static int srp_reconnect_target(struct srp_target_port *target)
556{ 573{
557 struct ib_qp_attr qp_attr; 574 struct ib_qp_attr qp_attr;
558 struct srp_request *req, *tmp;
559 struct ib_wc wc; 575 struct ib_wc wc;
560 int ret; 576 int i, ret;
561 577
562 spin_lock_irq(target->scsi_host->host_lock); 578 if (!srp_change_state(target, SRP_TARGET_LIVE, SRP_TARGET_CONNECTING))
563 if (target->state != SRP_TARGET_LIVE) {
564 spin_unlock_irq(target->scsi_host->host_lock);
565 return -EAGAIN; 579 return -EAGAIN;
566 }
567 target->state = SRP_TARGET_CONNECTING;
568 spin_unlock_irq(target->scsi_host->host_lock);
569 580
570 srp_disconnect_target(target); 581 srp_disconnect_target(target);
571 /* 582 /*
@@ -590,27 +601,23 @@ static int srp_reconnect_target(struct srp_target_port *target)
590 while (ib_poll_cq(target->send_cq, 1, &wc) > 0) 601 while (ib_poll_cq(target->send_cq, 1, &wc) > 0)
591 ; /* nothing */ 602 ; /* nothing */
592 603
593 spin_lock_irq(target->scsi_host->host_lock); 604 for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
594 list_for_each_entry_safe(req, tmp, &target->req_queue, list) 605 struct srp_request *req = &target->req_ring[i];
595 srp_reset_req(target, req); 606 if (req->scmnd)
596 spin_unlock_irq(target->scsi_host->host_lock); 607 srp_reset_req(target, req);
608 }
597 609
598 target->rx_head = 0; 610 INIT_LIST_HEAD(&target->free_tx);
599 target->tx_head = 0; 611 for (i = 0; i < SRP_SQ_SIZE; ++i)
600 target->tx_tail = 0; 612 list_add(&target->tx_ring[i]->list, &target->free_tx);
601 613
602 target->qp_in_error = 0; 614 target->qp_in_error = 0;
603 ret = srp_connect_target(target); 615 ret = srp_connect_target(target);
604 if (ret) 616 if (ret)
605 goto err; 617 goto err;
606 618
607 spin_lock_irq(target->scsi_host->host_lock); 619 if (!srp_change_state(target, SRP_TARGET_CONNECTING, SRP_TARGET_LIVE))
608 if (target->state == SRP_TARGET_CONNECTING) {
609 ret = 0;
610 target->state = SRP_TARGET_LIVE;
611 } else
612 ret = -EAGAIN; 620 ret = -EAGAIN;
613 spin_unlock_irq(target->scsi_host->host_lock);
614 621
615 return ret; 622 return ret;
616 623
@@ -620,17 +627,20 @@ err:
620 627
621 /* 628 /*
622 * We couldn't reconnect, so kill our target port off. 629 * We couldn't reconnect, so kill our target port off.
623 * However, we have to defer the real removal because we might 630 * However, we have to defer the real removal because we
624 * be in the context of the SCSI error handler now, which 631 * are in the context of the SCSI error handler now, which
625 * would deadlock if we call scsi_remove_host(). 632 * will deadlock if we call scsi_remove_host().
633 *
634 * Schedule our work inside the lock to avoid a race with
635 * the flush_scheduled_work() in srp_remove_one().
626 */ 636 */
627 spin_lock_irq(target->scsi_host->host_lock); 637 spin_lock_irq(&target->lock);
628 if (target->state == SRP_TARGET_CONNECTING) { 638 if (target->state == SRP_TARGET_CONNECTING) {
629 target->state = SRP_TARGET_DEAD; 639 target->state = SRP_TARGET_DEAD;
630 INIT_WORK(&target->work, srp_remove_work); 640 INIT_WORK(&target->work, srp_remove_work);
631 schedule_work(&target->work); 641 schedule_work(&target->work);
632 } 642 }
633 spin_unlock_irq(target->scsi_host->host_lock); 643 spin_unlock_irq(&target->lock);
634 644
635 return ret; 645 return ret;
636} 646}
@@ -758,7 +768,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
758 struct srp_direct_buf *buf = (void *) cmd->add_data; 768 struct srp_direct_buf *buf = (void *) cmd->add_data;
759 769
760 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat)); 770 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
761 buf->key = cpu_to_be32(dev->mr->rkey); 771 buf->key = cpu_to_be32(target->rkey);
762 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat)); 772 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
763 } else if (srp_map_fmr(target, scat, count, req, 773 } else if (srp_map_fmr(target, scat, count, req,
764 (void *) cmd->add_data)) { 774 (void *) cmd->add_data)) {
@@ -783,7 +793,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
783 buf->desc_list[i].va = 793 buf->desc_list[i].va =
784 cpu_to_be64(ib_sg_dma_address(ibdev, sg)); 794 cpu_to_be64(ib_sg_dma_address(ibdev, sg));
785 buf->desc_list[i].key = 795 buf->desc_list[i].key =
786 cpu_to_be32(dev->mr->rkey); 796 cpu_to_be32(target->rkey);
787 buf->desc_list[i].len = cpu_to_be32(dma_len); 797 buf->desc_list[i].len = cpu_to_be32(dma_len);
788 datalen += dma_len; 798 datalen += dma_len;
789 } 799 }
@@ -796,7 +806,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
796 buf->table_desc.va = 806 buf->table_desc.va =
797 cpu_to_be64(req->cmd->dma + sizeof *cmd + sizeof *buf); 807 cpu_to_be64(req->cmd->dma + sizeof *cmd + sizeof *buf);
798 buf->table_desc.key = 808 buf->table_desc.key =
799 cpu_to_be32(target->srp_host->srp_dev->mr->rkey); 809 cpu_to_be32(target->rkey);
800 buf->table_desc.len = 810 buf->table_desc.len =
801 cpu_to_be32(count * sizeof (struct srp_direct_buf)); 811 cpu_to_be32(count * sizeof (struct srp_direct_buf));
802 812
@@ -812,9 +822,23 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
812} 822}
813 823
814/* 824/*
815 * Must be called with target->scsi_host->host_lock held to protect 825 * Return an IU and possible credit to the free pool
816 * req_lim and tx_head. Lock cannot be dropped between call here and 826 */
817 * call to __srp_post_send(). 827static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu,
828 enum srp_iu_type iu_type)
829{
830 unsigned long flags;
831
832 spin_lock_irqsave(&target->lock, flags);
833 list_add(&iu->list, &target->free_tx);
834 if (iu_type != SRP_IU_RSP)
835 ++target->req_lim;
836 spin_unlock_irqrestore(&target->lock, flags);
837}
838
839/*
840 * Must be called with target->lock held to protect req_lim and free_tx.
841 * If IU is not sent, it must be returned using srp_put_tx_iu().
818 * 842 *
819 * Note: 843 * Note:
820 * An upper limit for the number of allocated information units for each 844 * An upper limit for the number of allocated information units for each
@@ -833,83 +857,59 @@ static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
833 857
834 srp_send_completion(target->send_cq, target); 858 srp_send_completion(target->send_cq, target);
835 859
836 if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE) 860 if (list_empty(&target->free_tx))
837 return NULL; 861 return NULL;
838 862
839 /* Initiator responses to target requests do not consume credits */ 863 /* Initiator responses to target requests do not consume credits */
840 if (target->req_lim <= rsv && iu_type != SRP_IU_RSP) { 864 if (iu_type != SRP_IU_RSP) {
841 ++target->zero_req_lim; 865 if (target->req_lim <= rsv) {
842 return NULL; 866 ++target->zero_req_lim;
867 return NULL;
868 }
869
870 --target->req_lim;
843 } 871 }
844 872
845 iu = target->tx_ring[target->tx_head & SRP_SQ_MASK]; 873 iu = list_first_entry(&target->free_tx, struct srp_iu, list);
846 iu->type = iu_type; 874 list_del(&iu->list);
847 return iu; 875 return iu;
848} 876}
849 877
850/* 878static int srp_post_send(struct srp_target_port *target,
851 * Must be called with target->scsi_host->host_lock held to protect 879 struct srp_iu *iu, int len)
852 * req_lim and tx_head.
853 */
854static int __srp_post_send(struct srp_target_port *target,
855 struct srp_iu *iu, int len)
856{ 880{
857 struct ib_sge list; 881 struct ib_sge list;
858 struct ib_send_wr wr, *bad_wr; 882 struct ib_send_wr wr, *bad_wr;
859 int ret = 0;
860 883
861 list.addr = iu->dma; 884 list.addr = iu->dma;
862 list.length = len; 885 list.length = len;
863 list.lkey = target->srp_host->srp_dev->mr->lkey; 886 list.lkey = target->lkey;
864 887
865 wr.next = NULL; 888 wr.next = NULL;
866 wr.wr_id = target->tx_head & SRP_SQ_MASK; 889 wr.wr_id = (uintptr_t) iu;
867 wr.sg_list = &list; 890 wr.sg_list = &list;
868 wr.num_sge = 1; 891 wr.num_sge = 1;
869 wr.opcode = IB_WR_SEND; 892 wr.opcode = IB_WR_SEND;
870 wr.send_flags = IB_SEND_SIGNALED; 893 wr.send_flags = IB_SEND_SIGNALED;
871 894
872 ret = ib_post_send(target->qp, &wr, &bad_wr); 895 return ib_post_send(target->qp, &wr, &bad_wr);
873
874 if (!ret) {
875 ++target->tx_head;
876 if (iu->type != SRP_IU_RSP)
877 --target->req_lim;
878 }
879
880 return ret;
881} 896}
882 897
883static int srp_post_recv(struct srp_target_port *target) 898static int srp_post_recv(struct srp_target_port *target, struct srp_iu *iu)
884{ 899{
885 unsigned long flags;
886 struct srp_iu *iu;
887 struct ib_sge list;
888 struct ib_recv_wr wr, *bad_wr; 900 struct ib_recv_wr wr, *bad_wr;
889 unsigned int next; 901 struct ib_sge list;
890 int ret;
891
892 spin_lock_irqsave(target->scsi_host->host_lock, flags);
893
894 next = target->rx_head & SRP_RQ_MASK;
895 wr.wr_id = next;
896 iu = target->rx_ring[next];
897 902
898 list.addr = iu->dma; 903 list.addr = iu->dma;
899 list.length = iu->size; 904 list.length = iu->size;
900 list.lkey = target->srp_host->srp_dev->mr->lkey; 905 list.lkey = target->lkey;
901 906
902 wr.next = NULL; 907 wr.next = NULL;
908 wr.wr_id = (uintptr_t) iu;
903 wr.sg_list = &list; 909 wr.sg_list = &list;
904 wr.num_sge = 1; 910 wr.num_sge = 1;
905 911
906 ret = ib_post_recv(target->qp, &wr, &bad_wr); 912 return ib_post_recv(target->qp, &wr, &bad_wr);
907 if (!ret)
908 ++target->rx_head;
909
910 spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
911
912 return ret;
913} 913}
914 914
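
Storing the IU pointer directly in the 64-bit wr_id, rather than a ring index, is what lets the completion handlers recover the buffer with a cast and lets rx_head/tx_head/tx_tail disappear; the BUILD_BUG_ON added in srp_init_module() guards that wr_id can actually hold a pointer. A sketch of the round trip with a hypothetical IU type:

    #include <linux/types.h>

    struct my_iu { void *buf; size_t size; };

    /* Post side: the work-request id carries the pointer itself. */
    static u64 encode_wr_id(struct my_iu *iu)
    {
            return (uintptr_t)iu;
    }

    /* Completion side: a cast recovers the buffer; no index has to
     * stay in lockstep with the hardware queue. */
    static struct my_iu *decode_wr_id(u64 wr_id)
    {
            return (struct my_iu *)(uintptr_t)wr_id;
    }
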
915static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) 915static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
@@ -917,23 +917,18 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
917 struct srp_request *req; 917 struct srp_request *req;
918 struct scsi_cmnd *scmnd; 918 struct scsi_cmnd *scmnd;
919 unsigned long flags; 919 unsigned long flags;
920 s32 delta;
921
922 delta = (s32) be32_to_cpu(rsp->req_lim_delta);
923
924 spin_lock_irqsave(target->scsi_host->host_lock, flags);
925
926 target->req_lim += delta;
927
928 req = &target->req_ring[rsp->tag & ~SRP_TAG_TSK_MGMT];
929 920
930 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) { 921 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
931 if (be32_to_cpu(rsp->resp_data_len) < 4) 922 spin_lock_irqsave(&target->lock, flags);
932 req->tsk_status = -1; 923 target->req_lim += be32_to_cpu(rsp->req_lim_delta);
933 else 924 spin_unlock_irqrestore(&target->lock, flags);
934 req->tsk_status = rsp->data[3]; 925
935 complete(&req->done); 926 target->tsk_mgmt_status = -1;
927 if (be32_to_cpu(rsp->resp_data_len) >= 4)
928 target->tsk_mgmt_status = rsp->data[3];
929 complete(&target->tsk_mgmt_done);
936 } else { 930 } else {
931 req = &target->req_ring[rsp->tag];
937 scmnd = req->scmnd; 932 scmnd = req->scmnd;
938 if (!scmnd) 933 if (!scmnd)
939 shost_printk(KERN_ERR, target->scsi_host, 934 shost_printk(KERN_ERR, target->scsi_host,
@@ -953,49 +948,42 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
953 else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER)) 948 else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
954 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt)); 949 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
955 950
956 if (!req->tsk_mgmt) { 951 srp_remove_req(target, req, be32_to_cpu(rsp->req_lim_delta));
957 scmnd->host_scribble = (void *) -1L; 952 scmnd->host_scribble = NULL;
958 scmnd->scsi_done(scmnd); 953 scmnd->scsi_done(scmnd);
959
960 srp_remove_req(target, req);
961 } else
962 req->cmd_done = 1;
963 } 954 }
964
965 spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
966} 955}
967 956
968static int srp_response_common(struct srp_target_port *target, s32 req_delta, 957static int srp_response_common(struct srp_target_port *target, s32 req_delta,
969 void *rsp, int len) 958 void *rsp, int len)
970{ 959{
971 struct ib_device *dev; 960 struct ib_device *dev = target->srp_host->srp_dev->dev;
972 unsigned long flags; 961 unsigned long flags;
973 struct srp_iu *iu; 962 struct srp_iu *iu;
974 int err = 1; 963 int err;
975 964
976 dev = target->srp_host->srp_dev->dev; 965 spin_lock_irqsave(&target->lock, flags);
977
978 spin_lock_irqsave(target->scsi_host->host_lock, flags);
979 target->req_lim += req_delta; 966 target->req_lim += req_delta;
980
981 iu = __srp_get_tx_iu(target, SRP_IU_RSP); 967 iu = __srp_get_tx_iu(target, SRP_IU_RSP);
968 spin_unlock_irqrestore(&target->lock, flags);
969
982 if (!iu) { 970 if (!iu) {
983 shost_printk(KERN_ERR, target->scsi_host, PFX 971 shost_printk(KERN_ERR, target->scsi_host, PFX
984 "no IU available to send response\n"); 972 "no IU available to send response\n");
985 goto out; 973 return 1;
986 } 974 }
987 975
988 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE); 976 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
989 memcpy(iu->buf, rsp, len); 977 memcpy(iu->buf, rsp, len);
990 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE); 978 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
991 979
992 err = __srp_post_send(target, iu, len); 980 err = srp_post_send(target, iu, len);
993 if (err) 981 if (err) {
994 shost_printk(KERN_ERR, target->scsi_host, PFX 982 shost_printk(KERN_ERR, target->scsi_host, PFX
995 "unable to post response: %d\n", err); 983 "unable to post response: %d\n", err);
984 srp_put_tx_iu(target, iu, SRP_IU_RSP);
985 }
996 986
997out:
998 spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
999 return err; 987 return err;
1000} 988}
1001 989
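
srp_put_tx_iu() centralizes the unwind for a failed post: the IU goes back on free_tx and, unless it was a response, its credit is returned. A hedged sketch of the paired get/put, with a hypothetical pool type standing in for the target port:

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct pool {
            spinlock_t lock;
            struct list_head free;
            s32 credits;
    };

    struct buf { struct list_head list; };

    static struct buf *pool_get(struct pool *p)
    {
            struct buf *b = NULL;

            spin_lock_irq(&p->lock);
            if (!list_empty(&p->free) && p->credits > 0) {
                    b = list_first_entry(&p->free, struct buf, list);
                    list_del(&b->list);
                    p->credits--;
            }
            spin_unlock_irq(&p->lock);
            return b;
    }

    /* Undo pool_get() when the buffer was never consumed, e.g.
     * after ib_post_send() fails. */
    static void pool_put(struct pool *p, struct buf *b)
    {
            spin_lock_irq(&p->lock);
            list_add(&b->list, &p->free);
            p->credits++;
            spin_unlock_irq(&p->lock);
    }
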
@@ -1032,14 +1020,11 @@ static void srp_process_aer_req(struct srp_target_port *target,
1032 1020
1033static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) 1021static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
1034{ 1022{
1035 struct ib_device *dev; 1023 struct ib_device *dev = target->srp_host->srp_dev->dev;
1036 struct srp_iu *iu; 1024 struct srp_iu *iu = (struct srp_iu *) wc->wr_id;
1037 int res; 1025 int res;
1038 u8 opcode; 1026 u8 opcode;
1039 1027
1040 iu = target->rx_ring[wc->wr_id];
1041
1042 dev = target->srp_host->srp_dev->dev;
1043 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len, 1028 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
1044 DMA_FROM_DEVICE); 1029 DMA_FROM_DEVICE);
1045 1030
@@ -1080,7 +1065,7 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
1080 ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len, 1065 ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
1081 DMA_FROM_DEVICE); 1066 DMA_FROM_DEVICE);
1082 1067
1083 res = srp_post_recv(target); 1068 res = srp_post_recv(target, iu);
1084 if (res != 0) 1069 if (res != 0)
1085 shost_printk(KERN_ERR, target->scsi_host, 1070 shost_printk(KERN_ERR, target->scsi_host,
1086 PFX "Recv failed with error code %d\n", res); 1071 PFX "Recv failed with error code %d\n", res);
@@ -1109,6 +1094,7 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
1109{ 1094{
1110 struct srp_target_port *target = target_ptr; 1095 struct srp_target_port *target = target_ptr;
1111 struct ib_wc wc; 1096 struct ib_wc wc;
1097 struct srp_iu *iu;
1112 1098
1113 while (ib_poll_cq(cq, 1, &wc) > 0) { 1099 while (ib_poll_cq(cq, 1, &wc) > 0) {
1114 if (wc.status) { 1100 if (wc.status) {
@@ -1119,18 +1105,19 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
1119 break; 1105 break;
1120 } 1106 }
1121 1107
1122 ++target->tx_tail; 1108 iu = (struct srp_iu *) wc.wr_id;
1109 list_add(&iu->list, &target->free_tx);
1123 } 1110 }
1124} 1111}
1125 1112
1126static int srp_queuecommand_lck(struct scsi_cmnd *scmnd, 1113static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
1127 void (*done)(struct scsi_cmnd *))
1128{ 1114{
1129 struct srp_target_port *target = host_to_target(scmnd->device->host); 1115 struct srp_target_port *target = host_to_target(shost);
1130 struct srp_request *req; 1116 struct srp_request *req;
1131 struct srp_iu *iu; 1117 struct srp_iu *iu;
1132 struct srp_cmd *cmd; 1118 struct srp_cmd *cmd;
1133 struct ib_device *dev; 1119 struct ib_device *dev;
1120 unsigned long flags;
1134 int len; 1121 int len;
1135 1122
1136 if (target->state == SRP_TARGET_CONNECTING) 1123 if (target->state == SRP_TARGET_CONNECTING)
@@ -1139,11 +1126,19 @@ static int srp_queuecommand_lck(struct scsi_cmnd *scmnd,
1139 if (target->state == SRP_TARGET_DEAD || 1126 if (target->state == SRP_TARGET_DEAD ||
1140 target->state == SRP_TARGET_REMOVED) { 1127 target->state == SRP_TARGET_REMOVED) {
1141 scmnd->result = DID_BAD_TARGET << 16; 1128 scmnd->result = DID_BAD_TARGET << 16;
1142 done(scmnd); 1129 scmnd->scsi_done(scmnd);
1143 return 0; 1130 return 0;
1144 } 1131 }
1145 1132
1133 spin_lock_irqsave(&target->lock, flags);
1146 iu = __srp_get_tx_iu(target, SRP_IU_CMD); 1134 iu = __srp_get_tx_iu(target, SRP_IU_CMD);
1135 if (iu) {
1136 req = list_first_entry(&target->free_reqs, struct srp_request,
1137 list);
1138 list_del(&req->list);
1139 }
1140 spin_unlock_irqrestore(&target->lock, flags);
1141
1147 if (!iu) 1142 if (!iu)
1148 goto err; 1143 goto err;
1149 1144
@@ -1151,11 +1146,8 @@ static int srp_queuecommand_lck(struct scsi_cmnd *scmnd,
1151 ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len, 1146 ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len,
1152 DMA_TO_DEVICE); 1147 DMA_TO_DEVICE);
1153 1148
1154 req = list_first_entry(&target->free_reqs, struct srp_request, list);
1155
1156 scmnd->scsi_done = done;
1157 scmnd->result = 0; 1149 scmnd->result = 0;
1158 scmnd->host_scribble = (void *) (long) req->index; 1150 scmnd->host_scribble = (void *) req;
1159 1151
1160 cmd = iu->buf; 1152 cmd = iu->buf;
1161 memset(cmd, 0, sizeof *cmd); 1153 memset(cmd, 0, sizeof *cmd);
@@ -1167,37 +1159,38 @@ static int srp_queuecommand_lck(struct scsi_cmnd *scmnd,
1167 1159
1168 req->scmnd = scmnd; 1160 req->scmnd = scmnd;
1169 req->cmd = iu; 1161 req->cmd = iu;
1170 req->cmd_done = 0;
1171 req->tsk_mgmt = NULL;
1172 1162
1173 len = srp_map_data(scmnd, target, req); 1163 len = srp_map_data(scmnd, target, req);
1174 if (len < 0) { 1164 if (len < 0) {
1175 shost_printk(KERN_ERR, target->scsi_host, 1165 shost_printk(KERN_ERR, target->scsi_host,
1176 PFX "Failed to map data\n"); 1166 PFX "Failed to map data\n");
1177 goto err; 1167 goto err_iu;
1178 } 1168 }
1179 1169
1180 ib_dma_sync_single_for_device(dev, iu->dma, srp_max_iu_len, 1170 ib_dma_sync_single_for_device(dev, iu->dma, srp_max_iu_len,
1181 DMA_TO_DEVICE); 1171 DMA_TO_DEVICE);
1182 1172
1183 if (__srp_post_send(target, iu, len)) { 1173 if (srp_post_send(target, iu, len)) {
1184 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n"); 1174 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
1185 goto err_unmap; 1175 goto err_unmap;
1186 } 1176 }
1187 1177
1188 list_move_tail(&req->list, &target->req_queue);
1189
1190 return 0; 1178 return 0;
1191 1179
1192err_unmap: 1180err_unmap:
1193 srp_unmap_data(scmnd, target, req); 1181 srp_unmap_data(scmnd, target, req);
1194 1182
1183err_iu:
1184 srp_put_tx_iu(target, iu, SRP_IU_CMD);
1185
1186 spin_lock_irqsave(&target->lock, flags);
1187 list_add(&req->list, &target->free_reqs);
1188 spin_unlock_irqrestore(&target->lock, flags);
1189
1195err: 1190err:
1196 return SCSI_MLQUEUE_HOST_BUSY; 1191 return SCSI_MLQUEUE_HOST_BUSY;
1197} 1192}
1198 1193
1199static DEF_SCSI_QCMD(srp_queuecommand)
1200
1201static int srp_alloc_iu_bufs(struct srp_target_port *target) 1194static int srp_alloc_iu_bufs(struct srp_target_port *target)
1202{ 1195{
1203 int i; 1196 int i;
@@ -1216,6 +1209,8 @@ static int srp_alloc_iu_bufs(struct srp_target_port *target)
1216 GFP_KERNEL, DMA_TO_DEVICE); 1209 GFP_KERNEL, DMA_TO_DEVICE);
1217 if (!target->tx_ring[i]) 1210 if (!target->tx_ring[i])
1218 goto err; 1211 goto err;
1212
1213 list_add(&target->tx_ring[i]->list, &target->free_tx);
1219 } 1214 }
1220 1215
1221 return 0; 1216 return 0;
@@ -1377,7 +1372,8 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
1377 break; 1372 break;
1378 1373
1379 for (i = 0; i < SRP_RQ_SIZE; i++) { 1374 for (i = 0; i < SRP_RQ_SIZE; i++) {
1380 target->status = srp_post_recv(target); 1375 struct srp_iu *iu = target->rx_ring[i];
1376 target->status = srp_post_recv(target, iu);
1381 if (target->status) 1377 if (target->status)
1382 break; 1378 break;
1383 } 1379 }
@@ -1442,25 +1438,24 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
1442} 1438}
1443 1439
1444static int srp_send_tsk_mgmt(struct srp_target_port *target, 1440static int srp_send_tsk_mgmt(struct srp_target_port *target,
1445 struct srp_request *req, u8 func) 1441 u64 req_tag, unsigned int lun, u8 func)
1446{ 1442{
1447 struct ib_device *dev = target->srp_host->srp_dev->dev; 1443 struct ib_device *dev = target->srp_host->srp_dev->dev;
1448 struct srp_iu *iu; 1444 struct srp_iu *iu;
1449 struct srp_tsk_mgmt *tsk_mgmt; 1445 struct srp_tsk_mgmt *tsk_mgmt;
1450 1446
1451 spin_lock_irq(target->scsi_host->host_lock);
1452
1453 if (target->state == SRP_TARGET_DEAD || 1447 if (target->state == SRP_TARGET_DEAD ||
1454 target->state == SRP_TARGET_REMOVED) { 1448 target->state == SRP_TARGET_REMOVED)
1455 req->scmnd->result = DID_BAD_TARGET << 16; 1449 return -1;
1456 goto out;
1457 }
1458 1450
1459 init_completion(&req->done); 1451 init_completion(&target->tsk_mgmt_done);
1460 1452
1453 spin_lock_irq(&target->lock);
1461 iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT); 1454 iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
1455 spin_unlock_irq(&target->lock);
1456
1462 if (!iu) 1457 if (!iu)
1463 goto out; 1458 return -1;
1464 1459
1465 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt, 1460 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
1466 DMA_TO_DEVICE); 1461 DMA_TO_DEVICE);
@@ -1468,70 +1463,46 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
1468 memset(tsk_mgmt, 0, sizeof *tsk_mgmt); 1463 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
1469 1464
1470 tsk_mgmt->opcode = SRP_TSK_MGMT; 1465 tsk_mgmt->opcode = SRP_TSK_MGMT;
1471 tsk_mgmt->lun = cpu_to_be64((u64) req->scmnd->device->lun << 48); 1466 tsk_mgmt->lun = cpu_to_be64((u64) lun << 48);
1472 tsk_mgmt->tag = req->index | SRP_TAG_TSK_MGMT; 1467 tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
1473 tsk_mgmt->tsk_mgmt_func = func; 1468 tsk_mgmt->tsk_mgmt_func = func;
1474 tsk_mgmt->task_tag = req->index; 1469 tsk_mgmt->task_tag = req_tag;
1475 1470
1476 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt, 1471 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
1477 DMA_TO_DEVICE); 1472 DMA_TO_DEVICE);
1478 if (__srp_post_send(target, iu, sizeof *tsk_mgmt)) 1473 if (srp_post_send(target, iu, sizeof *tsk_mgmt)) {
1479 goto out; 1474 srp_put_tx_iu(target, iu, SRP_IU_TSK_MGMT);
1480
1481 req->tsk_mgmt = iu;
1482
1483 spin_unlock_irq(target->scsi_host->host_lock);
1484
1485 if (!wait_for_completion_timeout(&req->done,
1486 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
1487 return -1; 1475 return -1;
1476 }
1488 1477
1489 return 0; 1478 if (!wait_for_completion_timeout(&target->tsk_mgmt_done,
1490 1479 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
1491out:
1492 spin_unlock_irq(target->scsi_host->host_lock);
1493 return -1;
1494}
1495
1496static int srp_find_req(struct srp_target_port *target,
1497 struct scsi_cmnd *scmnd,
1498 struct srp_request **req)
1499{
1500 if (scmnd->host_scribble == (void *) -1L)
1501 return -1; 1480 return -1;
1502 1481
1503 *req = &target->req_ring[(long) scmnd->host_scribble];
1504
1505 return 0; 1482 return 0;
1506} 1483}
1507 1484
1508static int srp_abort(struct scsi_cmnd *scmnd) 1485static int srp_abort(struct scsi_cmnd *scmnd)
1509{ 1486{
1510 struct srp_target_port *target = host_to_target(scmnd->device->host); 1487 struct srp_target_port *target = host_to_target(scmnd->device->host);
1511 struct srp_request *req; 1488 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
1512 int ret = SUCCESS; 1489 int ret = SUCCESS;
1513 1490
1514 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n"); 1491 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
1515 1492
1516 if (target->qp_in_error) 1493 if (!req || target->qp_in_error)
1517 return FAILED; 1494 return FAILED;
1518 if (srp_find_req(target, scmnd, &req)) 1495 if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
1496 SRP_TSK_ABORT_TASK))
1519 return FAILED; 1497 return FAILED;
1520 if (srp_send_tsk_mgmt(target, req, SRP_TSK_ABORT_TASK))
1521 return FAILED;
1522
1523 spin_lock_irq(target->scsi_host->host_lock);
1524 1498
1525 if (req->cmd_done) { 1499 if (req->scmnd) {
1526 srp_remove_req(target, req); 1500 if (!target->tsk_mgmt_status) {
1527 scmnd->scsi_done(scmnd); 1501 srp_remove_req(target, req, 0);
1528 } else if (!req->tsk_status) { 1502 scmnd->result = DID_ABORT << 16;
1529 srp_remove_req(target, req); 1503 } else
1530 scmnd->result = DID_ABORT << 16; 1504 ret = FAILED;
1531 } else 1505 }
1532 ret = FAILED;
1533
1534 spin_unlock_irq(target->scsi_host->host_lock);
1535 1506
1536 return ret; 1507 return ret;
1537} 1508}
@@ -1539,26 +1510,23 @@ static int srp_abort(struct scsi_cmnd *scmnd)
1539static int srp_reset_device(struct scsi_cmnd *scmnd) 1510static int srp_reset_device(struct scsi_cmnd *scmnd)
1540{ 1511{
1541 struct srp_target_port *target = host_to_target(scmnd->device->host); 1512 struct srp_target_port *target = host_to_target(scmnd->device->host);
1542 struct srp_request *req, *tmp; 1513 int i;
1543 1514
1544 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n"); 1515 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
1545 1516
1546 if (target->qp_in_error) 1517 if (target->qp_in_error)
1547 return FAILED; 1518 return FAILED;
1548 if (srp_find_req(target, scmnd, &req)) 1519 if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun,
1520 SRP_TSK_LUN_RESET))
1549 return FAILED; 1521 return FAILED;
1550 if (srp_send_tsk_mgmt(target, req, SRP_TSK_LUN_RESET)) 1522 if (target->tsk_mgmt_status)
1551 return FAILED; 1523 return FAILED;
1552 if (req->tsk_status)
1553 return FAILED;
1554
1555 spin_lock_irq(target->scsi_host->host_lock);
1556 1524
1557 list_for_each_entry_safe(req, tmp, &target->req_queue, list) 1525 for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
1558 if (req->scmnd->device == scmnd->device) 1526 struct srp_request *req = &target->req_ring[i];
1527 if (req->scmnd && req->scmnd->device == scmnd->device)
1559 srp_reset_req(target, req); 1528 srp_reset_req(target, req);
1560 1529 }
1561 spin_unlock_irq(target->scsi_host->host_lock);
1562 1530
1563 return SUCCESS; 1531 return SUCCESS;
1564} 1532}
@@ -1987,9 +1955,12 @@ static ssize_t srp_create_target(struct device *dev,
1987 target->io_class = SRP_REV16A_IB_IO_CLASS; 1955 target->io_class = SRP_REV16A_IB_IO_CLASS;
1988 target->scsi_host = target_host; 1956 target->scsi_host = target_host;
1989 target->srp_host = host; 1957 target->srp_host = host;
1958 target->lkey = host->srp_dev->mr->lkey;
1959 target->rkey = host->srp_dev->mr->rkey;
1990 1960
1961 spin_lock_init(&target->lock);
1962 INIT_LIST_HEAD(&target->free_tx);
1991 INIT_LIST_HEAD(&target->free_reqs); 1963 INIT_LIST_HEAD(&target->free_reqs);
1992 INIT_LIST_HEAD(&target->req_queue);
1993 for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) { 1964 for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
1994 target->req_ring[i].index = i; 1965 target->req_ring[i].index = i;
1995 list_add_tail(&target->req_ring[i].list, &target->free_reqs); 1966 list_add_tail(&target->req_ring[i].list, &target->free_reqs);
@@ -2217,9 +2188,9 @@ static void srp_remove_one(struct ib_device *device)
2217 */ 2188 */
2218 spin_lock(&host->target_lock); 2189 spin_lock(&host->target_lock);
2219 list_for_each_entry(target, &host->target_list, list) { 2190 list_for_each_entry(target, &host->target_list, list) {
2220 spin_lock_irq(target->scsi_host->host_lock); 2191 spin_lock_irq(&target->lock);
2221 target->state = SRP_TARGET_REMOVED; 2192 target->state = SRP_TARGET_REMOVED;
2222 spin_unlock_irq(target->scsi_host->host_lock); 2193 spin_unlock_irq(&target->lock);
2223 } 2194 }
2224 spin_unlock(&host->target_lock); 2195 spin_unlock(&host->target_lock);
2225 2196
@@ -2258,8 +2229,7 @@ static int __init srp_init_module(void)
2258{ 2229{
2259 int ret; 2230 int ret;
2260 2231
2261 BUILD_BUG_ON_NOT_POWER_OF_2(SRP_SQ_SIZE); 2232 BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
2262 BUILD_BUG_ON_NOT_POWER_OF_2(SRP_RQ_SIZE);
2263 2233
2264 if (srp_sg_tablesize > 255) { 2234 if (srp_sg_tablesize > 255) {
2265 printk(KERN_WARNING PFX "Clamping srp_sg_tablesize to 255\n"); 2235 printk(KERN_WARNING PFX "Clamping srp_sg_tablesize to 255\n");
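
The conversion to the lockless queuecommand also changes how a command is matched back to its request: scmnd->host_scribble now holds the request pointer, and NULL replaces the old (void *) -1L "already done" sentinel. A sketch of the two halves; my_req is hypothetical:

    #include <scsi/scsi_cmnd.h>

    struct my_req { short index; };

    /* Queue side: stash per-command context in the midlayer's
     * scratch pointer instead of encoding a ring index. */
    static void stash_req(struct scsi_cmnd *scmnd, struct my_req *req)
    {
            scmnd->host_scribble = (unsigned char *)req;
    }

    /* Error-handler side: NULL means the command already finished,
     * so srp_abort() can bail out with FAILED immediately. */
    static struct my_req *fetch_req(struct scsi_cmnd *scmnd)
    {
            return (struct my_req *)scmnd->host_scribble;
    }
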
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index ed0dce9e479f..9dc6fc3fd894 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -59,16 +59,15 @@ enum {
59 59
60 SRP_RQ_SHIFT = 6, 60 SRP_RQ_SHIFT = 6,
61 SRP_RQ_SIZE = 1 << SRP_RQ_SHIFT, 61 SRP_RQ_SIZE = 1 << SRP_RQ_SHIFT,
62 SRP_RQ_MASK = SRP_RQ_SIZE - 1,
63 62
64 SRP_SQ_SIZE = SRP_RQ_SIZE, 63 SRP_SQ_SIZE = SRP_RQ_SIZE,
65 SRP_SQ_MASK = SRP_SQ_SIZE - 1,
66 SRP_RSP_SQ_SIZE = 1, 64 SRP_RSP_SQ_SIZE = 1,
67 SRP_REQ_SQ_SIZE = SRP_SQ_SIZE - SRP_RSP_SQ_SIZE, 65 SRP_REQ_SQ_SIZE = SRP_SQ_SIZE - SRP_RSP_SQ_SIZE,
68 SRP_TSK_MGMT_SQ_SIZE = 1, 66 SRP_TSK_MGMT_SQ_SIZE = 1,
69 SRP_CMD_SQ_SIZE = SRP_REQ_SQ_SIZE - SRP_TSK_MGMT_SQ_SIZE, 67 SRP_CMD_SQ_SIZE = SRP_REQ_SQ_SIZE - SRP_TSK_MGMT_SQ_SIZE,
70 68
71 SRP_TAG_TSK_MGMT = 1 << (SRP_RQ_SHIFT + 1), 69 SRP_TAG_NO_REQ = ~0U,
70 SRP_TAG_TSK_MGMT = 1U << 31,
72 71
73 SRP_FMR_SIZE = 256, 72 SRP_FMR_SIZE = 256,
74 SRP_FMR_POOL_SIZE = 1024, 73 SRP_FMR_POOL_SIZE = 1024,
@@ -113,15 +112,29 @@ struct srp_request {
113 struct list_head list; 112 struct list_head list;
114 struct scsi_cmnd *scmnd; 113 struct scsi_cmnd *scmnd;
115 struct srp_iu *cmd; 114 struct srp_iu *cmd;
116 struct srp_iu *tsk_mgmt;
117 struct ib_pool_fmr *fmr; 115 struct ib_pool_fmr *fmr;
118 struct completion done;
119 short index; 116 short index;
120 u8 cmd_done;
121 u8 tsk_status;
122}; 117};
123 118
124struct srp_target_port { 119struct srp_target_port {
120 /* These are RW in the hot path, and commonly used together */
121 struct list_head free_tx;
122 struct list_head free_reqs;
123 spinlock_t lock;
124 s32 req_lim;
125
126 /* These are read-only in the hot path */
127 struct ib_cq *send_cq ____cacheline_aligned_in_smp;
128 struct ib_cq *recv_cq;
129 struct ib_qp *qp;
130 u32 lkey;
131 u32 rkey;
132 enum srp_target_state state;
133
134 /* Everything above this point is used in the hot path of
135 * command processing. Try to keep them packed into cachelines.
136 */
137
125 __be64 id_ext; 138 __be64 id_ext;
126 __be64 ioc_guid; 139 __be64 ioc_guid;
127 __be64 service_id; 140 __be64 service_id;
@@ -138,24 +151,13 @@ struct srp_target_port {
138 int path_query_id; 151 int path_query_id;
139 152
140 struct ib_cm_id *cm_id; 153 struct ib_cm_id *cm_id;
141 struct ib_cq *recv_cq;
142 struct ib_cq *send_cq;
143 struct ib_qp *qp;
144 154
145 int max_ti_iu_len; 155 int max_ti_iu_len;
146 s32 req_lim;
147 156
148 int zero_req_lim; 157 int zero_req_lim;
149 158
150 unsigned rx_head;
151 struct srp_iu *rx_ring[SRP_RQ_SIZE];
152
153 unsigned tx_head;
154 unsigned tx_tail;
155 struct srp_iu *tx_ring[SRP_SQ_SIZE]; 159 struct srp_iu *tx_ring[SRP_SQ_SIZE];
156 160 struct srp_iu *rx_ring[SRP_RQ_SIZE];
157 struct list_head free_reqs;
158 struct list_head req_queue;
159 struct srp_request req_ring[SRP_CMD_SQ_SIZE]; 161 struct srp_request req_ring[SRP_CMD_SQ_SIZE];
160 162
161 struct work_struct work; 163 struct work_struct work;
@@ -163,16 +165,18 @@ struct srp_target_port {
163 struct list_head list; 165 struct list_head list;
164 struct completion done; 166 struct completion done;
165 int status; 167 int status;
166 enum srp_target_state state;
167 int qp_in_error; 168 int qp_in_error;
169
170 struct completion tsk_mgmt_done;
171 u8 tsk_mgmt_status;
168}; 172};
169 173
170struct srp_iu { 174struct srp_iu {
175 struct list_head list;
171 u64 dma; 176 u64 dma;
172 void *buf; 177 void *buf;
173 size_t size; 178 size_t size;
174 enum dma_data_direction direction; 179 enum dma_data_direction direction;
175 enum srp_iu_type type;
176}; 180};
177 181
178#endif /* IB_SRP_H */ 182#endif /* IB_SRP_H */
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index b6e7ddc09d76..4daf9e5a7736 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -387,11 +387,10 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
387 /* Set the DMA ops to the ones from the PCI device, this could be 387 /* Set the DMA ops to the ones from the PCI device, this could be
388 * fishy if we didn't know that on PowerMac it's always direct ops 388 * fishy if we didn't know that on PowerMac it's always direct ops
389 * or iommu ops that will work fine 389 * or iommu ops that will work fine
390 *
391 * To get all the fields, copy all archdata
390 */ 392 */
391 dev->ofdev.dev.archdata.dma_ops = 393 dev->ofdev.dev.archdata = chip->lbus.pdev->dev.archdata;
392 chip->lbus.pdev->dev.archdata.dma_ops;
393 dev->ofdev.dev.archdata.dma_data =
394 chip->lbus.pdev->dev.archdata.dma_data;
395#endif /* CONFIG_PCI */ 394#endif /* CONFIG_PCI */
396 395
397#ifdef DEBUG 396#ifdef DEBUG
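
Copying the whole archdata by assignment picks up every field, present and future, where the old code hand-copied dma_ops and dma_data and would silently miss anything added later. Plain C struct assignment does exactly this; my_archdata is a stand-in:

    struct my_archdata { void *dma_ops; void *dma_data; /* ... */ };

    static void inherit_archdata(struct my_archdata *child,
                                 const struct my_archdata *parent)
    {
            /* One struct assignment copies all members at once. */
            *child = *parent;
    }
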
diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c
index 44549272333c..2e041fd0a00c 100644
--- a/drivers/macintosh/therm_pm72.c
+++ b/drivers/macintosh/therm_pm72.c
@@ -2213,6 +2213,9 @@ static void fcu_lookup_fans(struct device_node *fcu_node)
2213static int fcu_of_probe(struct platform_device* dev, const struct of_device_id *match) 2213static int fcu_of_probe(struct platform_device* dev, const struct of_device_id *match)
2214{ 2214{
2215 state = state_detached; 2215 state = state_detached;
2216 of_dev = dev;
2217
2218 dev_info(&dev->dev, "PowerMac G5 Thermal control driver %s\n", VERSION);
2216 2219
2217 /* Lookup the fans in the device tree */ 2220 /* Lookup the fans in the device tree */
2218 fcu_lookup_fans(dev->dev.of_node); 2221 fcu_lookup_fans(dev->dev.of_node);
@@ -2235,6 +2238,7 @@ static const struct of_device_id fcu_match[] =
2235 }, 2238 },
2236 {}, 2239 {},
2237}; 2240};
2241MODULE_DEVICE_TABLE(of, fcu_match);
2238 2242
2239static struct of_platform_driver fcu_of_platform_driver = 2243static struct of_platform_driver fcu_of_platform_driver =
2240{ 2244{
@@ -2252,8 +2256,6 @@ static struct of_platform_driver fcu_of_platform_driver =
2252 */ 2256 */
2253static int __init therm_pm72_init(void) 2257static int __init therm_pm72_init(void)
2254{ 2258{
2255 struct device_node *np;
2256
2257 rackmac = of_machine_is_compatible("RackMac3,1"); 2259 rackmac = of_machine_is_compatible("RackMac3,1");
2258 2260
2259 if (!of_machine_is_compatible("PowerMac7,2") && 2261 if (!of_machine_is_compatible("PowerMac7,2") &&
@@ -2261,34 +2263,12 @@ static int __init therm_pm72_init(void)
2261 !rackmac) 2263 !rackmac)
2262 return -ENODEV; 2264 return -ENODEV;
2263 2265
2264 printk(KERN_INFO "PowerMac G5 Thermal control driver %s\n", VERSION); 2266 return of_register_platform_driver(&fcu_of_platform_driver);
2265
2266 np = of_find_node_by_type(NULL, "fcu");
2267 if (np == NULL) {
2268 /* Some machines have strangely broken device-tree */
2269 np = of_find_node_by_path("/u3@0,f8000000/i2c@f8001000/fan@15e");
2270 if (np == NULL) {
2271 printk(KERN_ERR "Can't find FCU in device-tree !\n");
2272 return -ENODEV;
2273 }
2274 }
2275 of_dev = of_platform_device_create(np, "temperature", NULL);
2276 if (of_dev == NULL) {
2277 printk(KERN_ERR "Can't register FCU platform device !\n");
2278 return -ENODEV;
2279 }
2280
2281 of_register_platform_driver(&fcu_of_platform_driver);
2282
2283 return 0;
2284} 2267}
2285 2268
2286static void __exit therm_pm72_exit(void) 2269static void __exit therm_pm72_exit(void)
2287{ 2270{
2288 of_unregister_platform_driver(&fcu_of_platform_driver); 2271 of_unregister_platform_driver(&fcu_of_platform_driver);
2289
2290 if (of_dev)
2291 of_device_unregister(of_dev);
2292} 2272}
2293 2273
2294module_init(therm_pm72_init); 2274module_init(therm_pm72_init);
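
The MODULE_DEVICE_TABLE(of, ...) addition is what allows the module to autoload once the probe logic moved out of init code: the match table is embedded in the module image, udev sees the OF modalias, and modprobe loads the driver. A minimal sketch, assuming a match-by-type entry like the one above:

    #include <linux/mod_devicetable.h>
    #include <linux/module.h>

    static const struct of_device_id my_match[] = {
            { .type = "fcu" },
            { /* sentinel */ },
    };
    /* Exports the table so userspace can map device-tree nodes to
     * this module and autoload it, instead of the driver scanning
     * the tree itself at init time. */
    MODULE_DEVICE_TABLE(of, my_match);
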
diff --git a/drivers/mfd/sh_mobile_sdhi.c b/drivers/mfd/sh_mobile_sdhi.c
index f1714f93af9d..0a7df44a93c0 100644
--- a/drivers/mfd/sh_mobile_sdhi.c
+++ b/drivers/mfd/sh_mobile_sdhi.c
@@ -131,11 +131,17 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
131 */ 131 */
132 mmc_data->flags |= TMIO_MMC_BLKSZ_2BYTES; 132 mmc_data->flags |= TMIO_MMC_BLKSZ_2BYTES;
133 133
134 /*
135 * All SDHI blocks support SDIO IRQ signalling.
136 */
137 mmc_data->flags |= TMIO_MMC_SDIO_IRQ;
138
134 if (p && p->dma_slave_tx >= 0 && p->dma_slave_rx >= 0) { 139 if (p && p->dma_slave_tx >= 0 && p->dma_slave_rx >= 0) {
135 priv->param_tx.slave_id = p->dma_slave_tx; 140 priv->param_tx.slave_id = p->dma_slave_tx;
136 priv->param_rx.slave_id = p->dma_slave_rx; 141 priv->param_rx.slave_id = p->dma_slave_rx;
137 priv->dma_priv.chan_priv_tx = &priv->param_tx; 142 priv->dma_priv.chan_priv_tx = &priv->param_tx;
138 priv->dma_priv.chan_priv_rx = &priv->param_rx; 143 priv->dma_priv.chan_priv_rx = &priv->param_rx;
144 priv->dma_priv.alignment_shift = 1; /* 2-byte alignment */
139 mmc_data->dma = &priv->dma_priv; 145 mmc_data->dma = &priv->dma_priv;
140 } 146 }
141 147
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index 57e4416b9ef0..2a876c4099cd 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -16,6 +16,7 @@ config MMC_BLOCK
16 16
17config MMC_BLOCK_MINORS 17config MMC_BLOCK_MINORS
18 int "Number of minors per block device" 18 int "Number of minors per block device"
19 depends on MMC_BLOCK
19 range 4 256 20 range 4 256
20 default 8 21 default 8
21 help 22 help
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index bb22ffd76ef8..ef103871517f 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -16,3 +16,14 @@ config MMC_UNSAFE_RESUME
16 16
17 This option sets a default which can be overridden by the 17 This option sets a default which can be overridden by the
18 module parameter "removable=0" or "removable=1". 18 module parameter "removable=0" or "removable=1".
19
20config MMC_CLKGATE
21 bool "MMC host clock gating (EXPERIMENTAL)"
22 depends on EXPERIMENTAL
23 help
24 This will attempt to aggressively gate the clock to the MMC card.
 25 This is done to save power: gating the clock silences the logic
 26 and bus when the MMC card is not in use. Your host driver has to
27 support handling this in order for it to be of any use.
28
29 If unsure, say N.
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index af8dc6a2a317..63667a8f140c 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -303,14 +303,14 @@ int mmc_add_card(struct mmc_card *card)
303 type, card->rca); 303 type, card->rca);
304 } 304 }
305 305
306 ret = device_add(&card->dev);
307 if (ret)
308 return ret;
309
310#ifdef CONFIG_DEBUG_FS 306#ifdef CONFIG_DEBUG_FS
311 mmc_add_card_debugfs(card); 307 mmc_add_card_debugfs(card);
312#endif 308#endif
313 309
310 ret = device_add(&card->dev);
311 if (ret)
312 return ret;
313
314 mmc_card_set_present(card); 314 mmc_card_set_present(card);
315 315
316 return 0; 316 return 0;
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index a3a780faf85a..6625c057be05 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -22,6 +22,7 @@
22#include <linux/scatterlist.h> 22#include <linux/scatterlist.h>
23#include <linux/log2.h> 23#include <linux/log2.h>
24#include <linux/regulator/consumer.h> 24#include <linux/regulator/consumer.h>
25#include <linux/pm_runtime.h>
25 26
26#include <linux/mmc/card.h> 27#include <linux/mmc/card.h>
27#include <linux/mmc/host.h> 28#include <linux/mmc/host.h>
@@ -130,6 +131,8 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
130 131
131 if (mrq->done) 132 if (mrq->done)
132 mrq->done(mrq); 133 mrq->done(mrq);
134
135 mmc_host_clk_gate(host);
133 } 136 }
134} 137}
135 138
@@ -190,6 +193,7 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
190 mrq->stop->mrq = mrq; 193 mrq->stop->mrq = mrq;
191 } 194 }
192 } 195 }
196 mmc_host_clk_ungate(host);
193 host->ops->request(host, mrq); 197 host->ops->request(host, mrq);
194} 198}
195 199
@@ -295,8 +299,9 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
295 unsigned int timeout_us, limit_us; 299 unsigned int timeout_us, limit_us;
296 300
297 timeout_us = data->timeout_ns / 1000; 301 timeout_us = data->timeout_ns / 1000;
298 timeout_us += data->timeout_clks * 1000 / 302 if (mmc_host_clk_rate(card->host))
299 (card->host->ios.clock / 1000); 303 timeout_us += data->timeout_clks * 1000 /
304 (mmc_host_clk_rate(card->host) / 1000);
300 305
301 if (data->flags & MMC_DATA_WRITE) 306 if (data->flags & MMC_DATA_WRITE)
302 /* 307 /*
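
The mmc_host_clk_rate() guard matters because the card-clock term would otherwise divide by zero whenever the reported rate is 0 (for instance before the first real set_ios()). A worked example with assumed CSD-derived values, timeout_ns = 80,000,000 and timeout_clks = 400, at a 25 MHz host clock:

	timeout_us  = 80000000 / 1000                 =  80000
	timeout_us += 400 * 1000 / (25000000 / 1000)  =     16
	                                        total =  80016 us

Note that while the clock is gated, mmc_host_clk_rate() reports the cached pre-gating rate rather than the 0 Hz sitting in host->ios.clock, so the term is still computed correctly.
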
@@ -614,6 +619,8 @@ static inline void mmc_set_ios(struct mmc_host *host)
614 ios->power_mode, ios->chip_select, ios->vdd, 619 ios->power_mode, ios->chip_select, ios->vdd,
615 ios->bus_width, ios->timing); 620 ios->bus_width, ios->timing);
616 621
622 if (ios->clock > 0)
623 mmc_set_ungated(host);
617 host->ops->set_ios(host, ios); 624 host->ops->set_ios(host, ios);
618} 625}
619 626
@@ -641,6 +648,61 @@ void mmc_set_clock(struct mmc_host *host, unsigned int hz)
641 mmc_set_ios(host); 648 mmc_set_ios(host);
642} 649}
643 650
651#ifdef CONFIG_MMC_CLKGATE
652/*
653 * This gates the clock by setting it to 0 Hz.
654 */
655void mmc_gate_clock(struct mmc_host *host)
656{
657 unsigned long flags;
658
659 spin_lock_irqsave(&host->clk_lock, flags);
660 host->clk_old = host->ios.clock;
661 host->ios.clock = 0;
662 host->clk_gated = true;
663 spin_unlock_irqrestore(&host->clk_lock, flags);
664 mmc_set_ios(host);
665}
666
667/*
668 * This restores the clock from gating by using the cached
669 * clock value.
670 */
671void mmc_ungate_clock(struct mmc_host *host)
672{
673 /*
674 * We should previously have gated the clock, so the clock shall
675 * be 0 here! The clock may however be 0 during initialization,
676 * when some request operations are performed before setting
677 * the frequency. When ungate is requested in that situation
678 * we just ignore the call.
679 */
680 if (host->clk_old) {
681 BUG_ON(host->ios.clock);
682 /* This call will also set host->clk_gated to false */
683 mmc_set_clock(host, host->clk_old);
684 }
685}
686
687void mmc_set_ungated(struct mmc_host *host)
688{
689 unsigned long flags;
690
691 /*
692 * We've been given a new frequency while the clock is gated,
693 * so make sure we regard this as ungating it.
694 */
695 spin_lock_irqsave(&host->clk_lock, flags);
696 host->clk_gated = false;
697 spin_unlock_irqrestore(&host->clk_lock, flags);
698}
699
700#else
701void mmc_set_ungated(struct mmc_host *host)
702{
703}
704#endif
705
644/* 706/*
645 * Change the bus mode (open drain/push-pull) of a host. 707 * Change the bus mode (open drain/push-pull) of a host.
646 */ 708 */
@@ -1424,35 +1486,57 @@ int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
1424} 1486}
1425EXPORT_SYMBOL(mmc_set_blocklen); 1487EXPORT_SYMBOL(mmc_set_blocklen);
1426 1488
1489static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
1490{
1491 host->f_init = freq;
1492
1493#ifdef CONFIG_MMC_DEBUG
1494 pr_info("%s: %s: trying to init card at %u Hz\n",
1495 mmc_hostname(host), __func__, host->f_init);
1496#endif
1497 mmc_power_up(host);
1498 sdio_reset(host);
1499 mmc_go_idle(host);
1500
1501 mmc_send_if_cond(host, host->ocr_avail);
1502
1503 /* Order's important: probe SDIO, then SD, then MMC */
1504 if (!mmc_attach_sdio(host))
1505 return 0;
1506 if (!mmc_attach_sd(host))
1507 return 0;
1508 if (!mmc_attach_mmc(host))
1509 return 0;
1510
1511 mmc_power_off(host);
1512 return -EIO;
1513}
1514
1427void mmc_rescan(struct work_struct *work) 1515void mmc_rescan(struct work_struct *work)
1428{ 1516{
1517 static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
1429 struct mmc_host *host = 1518 struct mmc_host *host =
1430 container_of(work, struct mmc_host, detect.work); 1519 container_of(work, struct mmc_host, detect.work);
1431 u32 ocr;
1432 int err;
1433 unsigned long flags;
1434 int i; 1520 int i;
1435 const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
1436
1437 spin_lock_irqsave(&host->lock, flags);
1438 1521
1439 if (host->rescan_disable) { 1522 if (host->rescan_disable)
1440 spin_unlock_irqrestore(&host->lock, flags);
1441 return; 1523 return;
1442 }
1443
1444 spin_unlock_irqrestore(&host->lock, flags);
1445
1446 1524
1447 mmc_bus_get(host); 1525 mmc_bus_get(host);
1448 1526
1449 /* if there is a card registered, check whether it is still present */ 1527 /*
1450 if ((host->bus_ops != NULL) && host->bus_ops->detect && !host->bus_dead) 1528 * if there is a _removable_ card registered, check whether it is
1529 * still present
1530 */
1531 if (host->bus_ops && host->bus_ops->detect && !host->bus_dead
1532 && mmc_card_is_removable(host))
1451 host->bus_ops->detect(host); 1533 host->bus_ops->detect(host);
1452 1534
1535 /*
1536 * Let mmc_bus_put() free the bus/bus_ops if we've found that
1537 * the card is no longer present.
1538 */
1453 mmc_bus_put(host); 1539 mmc_bus_put(host);
1454
1455
1456 mmc_bus_get(host); 1540 mmc_bus_get(host);
1457 1541
1458 /* if there still is a card present, stop here */ 1542 /* if there still is a card present, stop here */
@@ -1461,8 +1545,6 @@ void mmc_rescan(struct work_struct *work)
1461 goto out; 1545 goto out;
1462 } 1546 }
1463 1547
1464 /* detect a newly inserted card */
1465
1466 /* 1548 /*
1467 * Only we can add a new handler, so it's safe to 1549 * Only we can add a new handler, so it's safe to
1468 * release the lock here. 1550 * release the lock here.
@@ -1472,72 +1554,16 @@ void mmc_rescan(struct work_struct *work)
1472 if (host->ops->get_cd && host->ops->get_cd(host) == 0) 1554 if (host->ops->get_cd && host->ops->get_cd(host) == 0)
1473 goto out; 1555 goto out;
1474 1556
1557 mmc_claim_host(host);
1475 for (i = 0; i < ARRAY_SIZE(freqs); i++) { 1558 for (i = 0; i < ARRAY_SIZE(freqs); i++) {
1476 mmc_claim_host(host); 1559 if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
1477 1560 break;
1478 if (freqs[i] >= host->f_min) 1561 if (freqs[i] < host->f_min)
1479 host->f_init = freqs[i]; 1562 break;
1480 else if (!i || freqs[i-1] > host->f_min)
1481 host->f_init = host->f_min;
1482 else {
1483 mmc_release_host(host);
1484 goto out;
1485 }
1486#ifdef CONFIG_MMC_DEBUG
1487 pr_info("%s: %s: trying to init card at %u Hz\n",
1488 mmc_hostname(host), __func__, host->f_init);
1489#endif
1490 mmc_power_up(host);
1491 sdio_reset(host);
1492 mmc_go_idle(host);
1493
1494 mmc_send_if_cond(host, host->ocr_avail);
1495
1496 /*
1497 * First we search for SDIO...
1498 */
1499 err = mmc_send_io_op_cond(host, 0, &ocr);
1500 if (!err) {
1501 if (mmc_attach_sdio(host, ocr)) {
1502 mmc_claim_host(host);
1503 /*
1504 * Try SDMEM (but not MMC) even if SDIO
1505 * is broken.
1506 */
1507 if (mmc_send_app_op_cond(host, 0, &ocr))
1508 goto out_fail;
1509
1510 if (mmc_attach_sd(host, ocr))
1511 mmc_power_off(host);
1512 }
1513 goto out;
1514 }
1515
1516 /*
1517 * ...then normal SD...
1518 */
1519 err = mmc_send_app_op_cond(host, 0, &ocr);
1520 if (!err) {
1521 if (mmc_attach_sd(host, ocr))
1522 mmc_power_off(host);
1523 goto out;
1524 }
1525
1526 /*
1527 * ...and finally MMC.
1528 */
1529 err = mmc_send_op_cond(host, 0, &ocr);
1530 if (!err) {
1531 if (mmc_attach_mmc(host, ocr))
1532 mmc_power_off(host);
1533 goto out;
1534 }
1535
1536out_fail:
1537 mmc_release_host(host);
1538 mmc_power_off(host);
1539 } 1563 }
1540out: 1564 mmc_release_host(host);
1565
1566 out:
1541 if (host->caps & MMC_CAP_NEEDS_POLL) 1567 if (host->caps & MMC_CAP_NEEDS_POLL)
1542 mmc_schedule_delayed_work(&host->detect, HZ); 1568 mmc_schedule_delayed_work(&host->detect, HZ);
1543} 1569}
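
The retry loop now clamps each attempt to the host's minimum. For a host with f_min = 250 kHz (illustrative), the sequence is:

	attempt 1: 400000 Hz
	attempt 2: 300000 Hz
	attempt 3: max(200000, 250000) = 250000 Hz
	then the "freqs[i] < host->f_min" test fires and the loop stops,
	so 100 kHz is never tried below the host's floor.

Claiming the host once around the whole loop replaces the per-iteration claim/release dance of the old code, which works because the attach paths (see below) now return with the host claimed on both success and failure.
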
@@ -1721,6 +1747,18 @@ int mmc_resume_host(struct mmc_host *host)
1721 if (!(host->pm_flags & MMC_PM_KEEP_POWER)) { 1747 if (!(host->pm_flags & MMC_PM_KEEP_POWER)) {
1722 mmc_power_up(host); 1748 mmc_power_up(host);
1723 mmc_select_voltage(host, host->ocr); 1749 mmc_select_voltage(host, host->ocr);
1750 /*
1751 * Tell runtime PM core we just powered up the card,
1752 * since it still believes the card is powered off.
1753 * Note that currently runtime PM is only enabled
1754 * for SDIO cards that are MMC_CAP_POWER_OFF_CARD
1755 */
1756 if (mmc_card_sdio(host->card) &&
1757 (host->caps & MMC_CAP_POWER_OFF_CARD)) {
1758 pm_runtime_disable(&host->card->dev);
1759 pm_runtime_set_active(&host->card->dev);
1760 pm_runtime_enable(&host->card->dev);
1761 }
1724 } 1762 }
1725 BUG_ON(!host->bus_ops->resume); 1763 BUG_ON(!host->bus_ops->resume);
1726 err = host->bus_ops->resume(host); 1764 err = host->bus_ops->resume(host);
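
The disable/set_active/enable triple used here is the standard idiom for resynchronising the runtime PM core with a device that was powered up behind its back. In sketch form, for any struct device *dev:

	pm_runtime_disable(dev);	/* freeze runtime PM transitions */
	pm_runtime_set_active(dev);	/* correct the core's notion of the state */
	pm_runtime_enable(dev);		/* resume normal runtime PM operation */

pm_runtime_set_active() is only valid while runtime PM is disabled (or in an error state), which is why the status change is bracketed this way.
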
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 77240cd11bcf..ca1fdde29df6 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -33,6 +33,9 @@ void mmc_init_erase(struct mmc_card *card);
33 33
34void mmc_set_chip_select(struct mmc_host *host, int mode); 34void mmc_set_chip_select(struct mmc_host *host, int mode);
35void mmc_set_clock(struct mmc_host *host, unsigned int hz); 35void mmc_set_clock(struct mmc_host *host, unsigned int hz);
36void mmc_gate_clock(struct mmc_host *host);
37void mmc_ungate_clock(struct mmc_host *host);
38void mmc_set_ungated(struct mmc_host *host);
36void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode); 39void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode);
37void mmc_set_bus_width(struct mmc_host *host, unsigned int width); 40void mmc_set_bus_width(struct mmc_host *host, unsigned int width);
38void mmc_set_bus_width_ddr(struct mmc_host *host, unsigned int width, 41void mmc_set_bus_width_ddr(struct mmc_host *host, unsigned int width,
@@ -54,9 +57,9 @@ void mmc_rescan(struct work_struct *work);
54void mmc_start_host(struct mmc_host *host); 57void mmc_start_host(struct mmc_host *host);
55void mmc_stop_host(struct mmc_host *host); 58void mmc_stop_host(struct mmc_host *host);
56 59
57int mmc_attach_mmc(struct mmc_host *host, u32 ocr); 60int mmc_attach_mmc(struct mmc_host *host);
58int mmc_attach_sd(struct mmc_host *host, u32 ocr); 61int mmc_attach_sd(struct mmc_host *host);
59int mmc_attach_sdio(struct mmc_host *host, u32 ocr); 62int mmc_attach_sdio(struct mmc_host *host);
60 63
61/* Module parameters */ 64/* Module parameters */
62extern int use_spi_crc; 65extern int use_spi_crc;
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index eed1405fd742..998797ed67a6 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -183,6 +183,11 @@ void mmc_add_host_debugfs(struct mmc_host *host)
183 &mmc_clock_fops)) 183 &mmc_clock_fops))
184 goto err_node; 184 goto err_node;
185 185
186#ifdef CONFIG_MMC_CLKGATE
187 if (!debugfs_create_u32("clk_delay", (S_IRUSR | S_IWUSR),
188 root, &host->clk_delay))
189 goto err_node;
190#endif
186 return; 191 return;
187 192
188err_node: 193err_node:
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 10b8af27e03a..b3ac6c5bc5c6 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -3,6 +3,7 @@
3 * 3 *
4 * Copyright (C) 2003 Russell King, All Rights Reserved. 4 * Copyright (C) 2003 Russell King, All Rights Reserved.
5 * Copyright (C) 2007-2008 Pierre Ossman 5 * Copyright (C) 2007-2008 Pierre Ossman
6 * Copyright (C) 2010 Linus Walleij
6 * 7 *
7 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as 9 * it under the terms of the GNU General Public License version 2 as
@@ -20,6 +21,7 @@
20#include <linux/suspend.h> 21#include <linux/suspend.h>
21 22
22#include <linux/mmc/host.h> 23#include <linux/mmc/host.h>
24#include <linux/mmc/card.h>
23 25
24#include "core.h" 26#include "core.h"
25#include "host.h" 27#include "host.h"
@@ -50,6 +52,205 @@ void mmc_unregister_host_class(void)
50static DEFINE_IDR(mmc_host_idr); 52static DEFINE_IDR(mmc_host_idr);
51static DEFINE_SPINLOCK(mmc_host_lock); 53static DEFINE_SPINLOCK(mmc_host_lock);
52 54
55#ifdef CONFIG_MMC_CLKGATE
56
57/*
58 * Enabling clock gating will make the core call out to the host
59 * once up and once down when it performs a request or card operation
60 * intermingled in any fashion. The driver will see this through
61 * set_ios() operations with ios.clock field set to 0 to gate (disable)
62 * the block clock, and to the old frequency to enable it again.
63 */
64static void mmc_host_clk_gate_delayed(struct mmc_host *host)
65{
66 unsigned long tick_ns;
67 unsigned long freq = host->ios.clock;
68 unsigned long flags;
69
70 if (!freq) {
71 pr_debug("%s: frequency set to 0 in disable function, "
72 "this means the clock is already disabled.\n",
73 mmc_hostname(host));
74 return;
75 }
76 /*
77 * New requests may have appeared while we were scheduling,
78 * then there is no reason to delay the check before
79 * clk_disable().
80 */
81 spin_lock_irqsave(&host->clk_lock, flags);
82
83 /*
84 * Delay n bus cycles (at least 8 from MMC spec) before attempting
85 * to disable the MCI block clock. The reference count may have
86 * gone up again after this delay due to rescheduling!
87 */
88 if (!host->clk_requests) {
89 spin_unlock_irqrestore(&host->clk_lock, flags);
90 tick_ns = DIV_ROUND_UP(1000000000, freq);
91 ndelay(host->clk_delay * tick_ns);
92 } else {
93 /* New users appeared while waiting for this work */
94 spin_unlock_irqrestore(&host->clk_lock, flags);
95 return;
96 }
97 mutex_lock(&host->clk_gate_mutex);
98 spin_lock_irqsave(&host->clk_lock, flags);
99 if (!host->clk_requests) {
100 spin_unlock_irqrestore(&host->clk_lock, flags);
101 /* This will set host->ios.clock to 0 */
102 mmc_gate_clock(host);
103 spin_lock_irqsave(&host->clk_lock, flags);
104 pr_debug("%s: gated MCI clock\n", mmc_hostname(host));
105 }
106 spin_unlock_irqrestore(&host->clk_lock, flags);
107 mutex_unlock(&host->clk_gate_mutex);
108}
109
110/*
111 * Internal work. Work to disable the clock at some later point.
112 */
113static void mmc_host_clk_gate_work(struct work_struct *work)
114{
115 struct mmc_host *host = container_of(work, struct mmc_host,
116 clk_gate_work);
117
118 mmc_host_clk_gate_delayed(host);
119}
120
121/**
122 * mmc_host_clk_ungate - ungate hardware MCI clocks
123 * @host: host to ungate.
124 *
125 * Makes sure the host ios.clock is restored to a non-zero value
126 * past this call. Increase clock reference count and ungate clock
127 * if we're the first user.
128 */
129void mmc_host_clk_ungate(struct mmc_host *host)
130{
131 unsigned long flags;
132
133 mutex_lock(&host->clk_gate_mutex);
134 spin_lock_irqsave(&host->clk_lock, flags);
135 if (host->clk_gated) {
136 spin_unlock_irqrestore(&host->clk_lock, flags);
137 mmc_ungate_clock(host);
138 spin_lock_irqsave(&host->clk_lock, flags);
139 pr_debug("%s: ungated MCI clock\n", mmc_hostname(host));
140 }
141 host->clk_requests++;
142 spin_unlock_irqrestore(&host->clk_lock, flags);
143 mutex_unlock(&host->clk_gate_mutex);
144}
145
146/**
147 * mmc_host_may_gate_card - check if this card may be gated
148 * @card: card to check.
149 */
150static bool mmc_host_may_gate_card(struct mmc_card *card)
151{
152 /* If there is no card we may gate it */
153 if (!card)
154 return true;
155 /*
156 * Don't gate SDIO cards! These need to be clocked at all times
157 * since they may be independent systems generating interrupts
158 * and other events. The clock requests counter from the core will
159 * go down to zero since the core does not need it, but we will not
160 * gate the clock, because there is somebody out there that may still
161 * be using it.
162 */
163 if (mmc_card_sdio(card))
164 return false;
165
166 return true;
167}
168
169/**
170 * mmc_host_clk_gate - gate off hardware MCI clocks
171 * @host: host to gate.
172 *
173 * Calls the host driver with ios.clock set to zero as often as possible
174 * in order to gate off hardware MCI clocks. Decrease clock reference
175 * count and schedule disabling of clock.
176 */
177void mmc_host_clk_gate(struct mmc_host *host)
178{
179 unsigned long flags;
180
181 spin_lock_irqsave(&host->clk_lock, flags);
182 host->clk_requests--;
183 if (mmc_host_may_gate_card(host->card) &&
184 !host->clk_requests)
185 schedule_work(&host->clk_gate_work);
186 spin_unlock_irqrestore(&host->clk_lock, flags);
187}
188
189/**
190 * mmc_host_clk_rate - get current clock frequency setting
191 * @host: host to get the clock frequency for.
192 *
193 * Returns current clock frequency regardless of gating.
194 */
195unsigned int mmc_host_clk_rate(struct mmc_host *host)
196{
197 unsigned long freq;
198 unsigned long flags;
199
200 spin_lock_irqsave(&host->clk_lock, flags);
201 if (host->clk_gated)
202 freq = host->clk_old;
203 else
204 freq = host->ios.clock;
205 spin_unlock_irqrestore(&host->clk_lock, flags);
206 return freq;
207}
208
209/**
210 * mmc_host_clk_init - set up clock gating code
211 * @host: host with potential clock to control
212 */
213static inline void mmc_host_clk_init(struct mmc_host *host)
214{
215 host->clk_requests = 0;
216 /* Hold MCI clock for 8 cycles by default */
217 host->clk_delay = 8;
218 host->clk_gated = false;
219 INIT_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
220 spin_lock_init(&host->clk_lock);
221 mutex_init(&host->clk_gate_mutex);
222}
223
224/**
225 * mmc_host_clk_exit - shut down clock gating code
226 * @host: host with potential clock to control
227 */
228static inline void mmc_host_clk_exit(struct mmc_host *host)
229{
230 /*
231 * Wait for any outstanding gate and then make sure we're
232 * ungated before exiting.
233 */
234 if (cancel_work_sync(&host->clk_gate_work))
235 mmc_host_clk_gate_delayed(host);
236 if (host->clk_gated)
237 mmc_host_clk_ungate(host);
238 /* There should be only one user now */
239 WARN_ON(host->clk_requests > 1);
240}
241
242#else
243
244static inline void mmc_host_clk_init(struct mmc_host *host)
245{
246}
247
248static inline void mmc_host_clk_exit(struct mmc_host *host)
249{
250}
251
252#endif
253
53/** 254/**
54 * mmc_alloc_host - initialise the per-host structure. 255 * mmc_alloc_host - initialise the per-host structure.
55 * @extra: sizeof private data structure 256 * @extra: sizeof private data structure
@@ -82,6 +283,8 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
82 host->class_dev.class = &mmc_host_class; 283 host->class_dev.class = &mmc_host_class;
83 device_initialize(&host->class_dev); 284 device_initialize(&host->class_dev);
84 285
286 mmc_host_clk_init(host);
287
85 spin_lock_init(&host->lock); 288 spin_lock_init(&host->lock);
86 init_waitqueue_head(&host->wq); 289 init_waitqueue_head(&host->wq);
87 INIT_DELAYED_WORK(&host->detect, mmc_rescan); 290 INIT_DELAYED_WORK(&host->detect, mmc_rescan);
@@ -163,6 +366,8 @@ void mmc_remove_host(struct mmc_host *host)
163 device_del(&host->class_dev); 366 device_del(&host->class_dev);
164 367
165 led_trigger_unregister_simple(host->led); 368 led_trigger_unregister_simple(host->led);
369
370 mmc_host_clk_exit(host);
166} 371}
167 372
168EXPORT_SYMBOL(mmc_remove_host); 373EXPORT_SYMBOL(mmc_remove_host);
@@ -183,4 +388,3 @@ void mmc_free_host(struct mmc_host *host)
183} 388}
184 389
185EXPORT_SYMBOL(mmc_free_host); 390EXPORT_SYMBOL(mmc_free_host);
186
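
Putting the host.c pieces together, a single request under CONFIG_MMC_CLKGATE flows like this (sketch):

	mmc_start_request()  -> mmc_host_clk_ungate()   clk_requests 0 -> 1,
	                                                clock restored if gated
	host->ops->request() ... transfer runs ...
	mmc_request_done()   -> mmc_host_clk_gate()     clk_requests 1 -> 0,
	                                                schedule_work(clk_gate_work)
	clk_gate_work        -> mmc_host_clk_gate_delayed(): wait clk_delay (8 by
	                        default) bus cycles, re-check clk_requests under
	                        clk_gate_mutex, then mmc_gate_clock() if still idle

SDIO cards are never gated (mmc_host_may_gate_card() refuses), since they may raise interrupts and other events at any time.
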
diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h
index 8c87e1109a34..de199f911928 100644
--- a/drivers/mmc/core/host.h
+++ b/drivers/mmc/core/host.h
@@ -10,10 +10,31 @@
10 */ 10 */
11#ifndef _MMC_CORE_HOST_H 11#ifndef _MMC_CORE_HOST_H
12#define _MMC_CORE_HOST_H 12#define _MMC_CORE_HOST_H
13#include <linux/mmc/host.h>
13 14
14int mmc_register_host_class(void); 15int mmc_register_host_class(void);
15void mmc_unregister_host_class(void); 16void mmc_unregister_host_class(void);
16 17
18#ifdef CONFIG_MMC_CLKGATE
19void mmc_host_clk_ungate(struct mmc_host *host);
20void mmc_host_clk_gate(struct mmc_host *host);
21unsigned int mmc_host_clk_rate(struct mmc_host *host);
22
23#else
24static inline void mmc_host_clk_ungate(struct mmc_host *host)
25{
26}
27
28static inline void mmc_host_clk_gate(struct mmc_host *host)
29{
30}
31
32static inline unsigned int mmc_host_clk_rate(struct mmc_host *host)
33{
34 return host->ios.clock;
35}
36#endif
37
17void mmc_host_deeper_disable(struct work_struct *work); 38void mmc_host_deeper_disable(struct work_struct *work);
18 39
19#endif 40#endif
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 77f93c3b8808..16006ef153fe 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -534,39 +534,57 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
534 */ 534 */
535 if ((card->csd.mmca_vsn >= CSD_SPEC_VER_4) && 535 if ((card->csd.mmca_vsn >= CSD_SPEC_VER_4) &&
536 (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) { 536 (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) {
537 unsigned ext_csd_bit, bus_width; 537 static unsigned ext_csd_bits[][2] = {
538 538 { EXT_CSD_BUS_WIDTH_8, EXT_CSD_DDR_BUS_WIDTH_8 },
539 if (host->caps & MMC_CAP_8_BIT_DATA) { 539 { EXT_CSD_BUS_WIDTH_4, EXT_CSD_DDR_BUS_WIDTH_4 },
540 if (ddr) 540 { EXT_CSD_BUS_WIDTH_1, EXT_CSD_BUS_WIDTH_1 },
541 ext_csd_bit = EXT_CSD_DDR_BUS_WIDTH_8; 541 };
542 else 542 static unsigned bus_widths[] = {
543 ext_csd_bit = EXT_CSD_BUS_WIDTH_8; 543 MMC_BUS_WIDTH_8,
544 bus_width = MMC_BUS_WIDTH_8; 544 MMC_BUS_WIDTH_4,
545 } else { 545 MMC_BUS_WIDTH_1
546 if (ddr) 546 };
547 ext_csd_bit = EXT_CSD_DDR_BUS_WIDTH_4; 547 unsigned idx, bus_width = 0;
548 else 548
549 ext_csd_bit = EXT_CSD_BUS_WIDTH_4; 549 if (host->caps & MMC_CAP_8_BIT_DATA)
550 bus_width = MMC_BUS_WIDTH_4; 550 idx = 0;
551 else
552 idx = 1;
553 for (; idx < ARRAY_SIZE(bus_widths); idx++) {
554 bus_width = bus_widths[idx];
555 if (bus_width == MMC_BUS_WIDTH_1)
556 ddr = 0; /* no DDR for 1-bit width */
557 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
558 EXT_CSD_BUS_WIDTH,
559 ext_csd_bits[idx][0]);
560 if (!err) {
561 mmc_set_bus_width_ddr(card->host,
562 bus_width, MMC_SDR_MODE);
563 /*
564 * If controller can't handle bus width test,
565 * use the highest bus width to maintain
566 * compatibility with previous MMC behavior.
567 */
568 if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
569 break;
570 err = mmc_bus_test(card, bus_width);
571 if (!err)
572 break;
573 }
551 } 574 }
552 575
553 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 576 if (!err && ddr) {
554 EXT_CSD_BUS_WIDTH, ext_csd_bit); 577 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
555 578 EXT_CSD_BUS_WIDTH,
556 if (err && err != -EBADMSG) 579 ext_csd_bits[idx][1]);
557 goto free_card; 580 }
558
559 if (err) { 581 if (err) {
560 printk(KERN_WARNING "%s: switch to bus width %d ddr %d " 582 printk(KERN_WARNING "%s: switch to bus width %d ddr %d "
561 "failed\n", mmc_hostname(card->host), 583 "failed\n", mmc_hostname(card->host),
562 1 << bus_width, ddr); 584 1 << bus_width, ddr);
563 err = 0; 585 goto free_card;
564 } else { 586 } else if (ddr) {
565 if (ddr) 587 mmc_card_set_ddr_mode(card);
566 mmc_card_set_ddr_mode(card);
567 else
568 ddr = MMC_SDR_MODE;
569
570 mmc_set_bus_width_ddr(card->host, bus_width, ddr); 588 mmc_set_bus_width_ddr(card->host, bus_width, ddr);
571 } 589 }
572 } 590 }
@@ -737,14 +755,21 @@ static void mmc_attach_bus_ops(struct mmc_host *host)
737/* 755/*
738 * Starting point for MMC card init. 756 * Starting point for MMC card init.
739 */ 757 */
740int mmc_attach_mmc(struct mmc_host *host, u32 ocr) 758int mmc_attach_mmc(struct mmc_host *host)
741{ 759{
742 int err; 760 int err;
761 u32 ocr;
743 762
744 BUG_ON(!host); 763 BUG_ON(!host);
745 WARN_ON(!host->claimed); 764 WARN_ON(!host->claimed);
746 765
766 err = mmc_send_op_cond(host, 0, &ocr);
767 if (err)
768 return err;
769
747 mmc_attach_bus_ops(host); 770 mmc_attach_bus_ops(host);
771 if (host->ocr_avail_mmc)
772 host->ocr_avail = host->ocr_avail_mmc;
748 773
749 /* 774 /*
750 * We need to get OCR a different way for SPI. 775 * We need to get OCR a different way for SPI.
@@ -784,20 +809,20 @@ int mmc_attach_mmc(struct mmc_host *host, u32 ocr)
784 goto err; 809 goto err;
785 810
786 mmc_release_host(host); 811 mmc_release_host(host);
787
788 err = mmc_add_card(host->card); 812 err = mmc_add_card(host->card);
813 mmc_claim_host(host);
789 if (err) 814 if (err)
790 goto remove_card; 815 goto remove_card;
791 816
792 return 0; 817 return 0;
793 818
794remove_card: 819remove_card:
820 mmc_release_host(host);
795 mmc_remove_card(host->card); 821 mmc_remove_card(host->card);
796 host->card = NULL;
797 mmc_claim_host(host); 822 mmc_claim_host(host);
823 host->card = NULL;
798err: 824err:
799 mmc_detach_bus(host); 825 mmc_detach_bus(host);
800 mmc_release_host(host);
801 826
802 printk(KERN_ERR "%s: error %d whilst initialising MMC card\n", 827 printk(KERN_ERR "%s: error %d whilst initialising MMC card\n",
803 mmc_hostname(host), err); 828 mmc_hostname(host), err);
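
An example pass through the new width negotiation on an 8-bit-capable host with MMC_CAP_BUS_WIDTH_TEST set (the failure mode is illustrative):

	idx 0: mmc_switch(EXT_CSD_BUS_WIDTH_8) succeeds, mmc_bus_test(card, 8)
	       fails, e.g. because only four data lines are actually routed
	idx 1: mmc_switch(EXT_CSD_BUS_WIDTH_4) succeeds, mmc_bus_test(card, 4)
	       passes -> break with bus_width = MMC_BUS_WIDTH_4

Without MMC_CAP_BUS_WIDTH_TEST the loop keeps the first (highest) width the switch command accepts, preserving the previous behaviour; if ddr was requested, the matching EXT_CSD_DDR_BUS_WIDTH_* value from ext_csd_bits[idx][1] is then programmed with a second mmc_switch().
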
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 326447c9ede8..60842f878ded 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -462,3 +462,104 @@ int mmc_send_status(struct mmc_card *card, u32 *status)
462 return 0; 462 return 0;
463} 463}
464 464
465static int
466mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
467 u8 len)
468{
469 struct mmc_request mrq;
470 struct mmc_command cmd;
471 struct mmc_data data;
472 struct scatterlist sg;
473 u8 *data_buf;
474 u8 *test_buf;
475 int i, err;
476 static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
477 static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };
478
 479 /* DMA onto the stack is unsafe/nonportable, so use a kmalloc'd
 480 * bounce buffer rather than pointing the scatterlist at stack
 481 * memory for the test pattern. */
482 data_buf = kmalloc(len, GFP_KERNEL);
483 if (!data_buf)
484 return -ENOMEM;
485
486 if (len == 8)
487 test_buf = testdata_8bit;
488 else if (len == 4)
489 test_buf = testdata_4bit;
490 else {
491 printk(KERN_ERR "%s: Invalid bus_width %d\n",
492 mmc_hostname(host), len);
493 kfree(data_buf);
494 return -EINVAL;
495 }
496
497 if (opcode == MMC_BUS_TEST_W)
498 memcpy(data_buf, test_buf, len);
499
500 memset(&mrq, 0, sizeof(struct mmc_request));
501 memset(&cmd, 0, sizeof(struct mmc_command));
502 memset(&data, 0, sizeof(struct mmc_data));
503
504 mrq.cmd = &cmd;
505 mrq.data = &data;
506 cmd.opcode = opcode;
507 cmd.arg = 0;
508
509 /* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
510 * rely on callers to never use this with "native" calls for reading
511 * CSD or CID. Native versions of those commands use the R2 type,
512 * not R1 plus a data block.
513 */
514 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
515
516 data.blksz = len;
517 data.blocks = 1;
518 if (opcode == MMC_BUS_TEST_R)
519 data.flags = MMC_DATA_READ;
520 else
521 data.flags = MMC_DATA_WRITE;
522
523 data.sg = &sg;
524 data.sg_len = 1;
525 sg_init_one(&sg, data_buf, len);
526 mmc_wait_for_req(host, &mrq);
527 err = 0;
528 if (opcode == MMC_BUS_TEST_R) {
529 for (i = 0; i < len / 4; i++)
530 if ((test_buf[i] ^ data_buf[i]) != 0xff) {
531 err = -EIO;
532 break;
533 }
534 }
535 kfree(data_buf);
536
537 if (cmd.error)
538 return cmd.error;
539 if (data.error)
540 return data.error;
541
542 return err;
543}
544
545int mmc_bus_test(struct mmc_card *card, u8 bus_width)
546{
547 int err, width;
548
549 if (bus_width == MMC_BUS_WIDTH_8)
550 width = 8;
551 else if (bus_width == MMC_BUS_WIDTH_4)
552 width = 4;
553 else if (bus_width == MMC_BUS_WIDTH_1)
554 return 0; /* no need for test */
555 else
556 return -EINVAL;
557
558 /*
559 * Ignore errors from BUS_TEST_W. BUS_TEST_R will fail if there
560 * is a problem. This improves chances that the test will work.
561 */
562 mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
563 err = mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
564 return err;
565}
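
The read-back check relies on the bus test protocol returning the inverse of the written pattern on each data line, so a healthy byte XORs to 0xff:

	written 0x55 (01010101b) ^ read 0xaa (10101010b) = 0xff

Only the first len/4 bytes of the block carry the pattern (two bytes for an 8-bit bus, one for a 4-bit bus), which is why the comparison loop stops at len / 4.
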
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
index 653eb8e84178..e6d44b8a18db 100644
--- a/drivers/mmc/core/mmc_ops.h
+++ b/drivers/mmc/core/mmc_ops.h
@@ -26,6 +26,7 @@ int mmc_send_cid(struct mmc_host *host, u32 *cid);
26int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp); 26int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp);
27int mmc_spi_set_crc(struct mmc_host *host, int use_crc); 27int mmc_spi_set_crc(struct mmc_host *host, int use_crc);
28int mmc_card_sleepawake(struct mmc_host *host, int sleep); 28int mmc_card_sleepawake(struct mmc_host *host, int sleep);
29int mmc_bus_test(struct mmc_card *card, u8 bus_width);
29 30
30#endif 31#endif
31 32
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 49da4dffd28e..d18c32bca99b 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -764,14 +764,21 @@ static void mmc_sd_attach_bus_ops(struct mmc_host *host)
764/* 764/*
765 * Starting point for SD card init. 765 * Starting point for SD card init.
766 */ 766 */
767int mmc_attach_sd(struct mmc_host *host, u32 ocr) 767int mmc_attach_sd(struct mmc_host *host)
768{ 768{
769 int err; 769 int err;
770 u32 ocr;
770 771
771 BUG_ON(!host); 772 BUG_ON(!host);
772 WARN_ON(!host->claimed); 773 WARN_ON(!host->claimed);
773 774
775 err = mmc_send_app_op_cond(host, 0, &ocr);
776 if (err)
777 return err;
778
774 mmc_sd_attach_bus_ops(host); 779 mmc_sd_attach_bus_ops(host);
780 if (host->ocr_avail_sd)
781 host->ocr_avail = host->ocr_avail_sd;
775 782
776 /* 783 /*
777 * We need to get OCR a different way for SPI. 784 * We need to get OCR a different way for SPI.
@@ -795,7 +802,8 @@ int mmc_attach_sd(struct mmc_host *host, u32 ocr)
795 ocr &= ~0x7F; 802 ocr &= ~0x7F;
796 } 803 }
797 804
798 if (ocr & MMC_VDD_165_195) { 805 if ((ocr & MMC_VDD_165_195) &&
806 !(host->ocr_avail_sd & MMC_VDD_165_195)) {
799 printk(KERN_WARNING "%s: SD card claims to support the " 807 printk(KERN_WARNING "%s: SD card claims to support the "
800 "incompletely defined 'low voltage range'. This " 808 "incompletely defined 'low voltage range'. This "
801 "will be ignored.\n", mmc_hostname(host)); 809 "will be ignored.\n", mmc_hostname(host));
@@ -820,20 +828,20 @@ int mmc_attach_sd(struct mmc_host *host, u32 ocr)
820 goto err; 828 goto err;
821 829
822 mmc_release_host(host); 830 mmc_release_host(host);
823
824 err = mmc_add_card(host->card); 831 err = mmc_add_card(host->card);
832 mmc_claim_host(host);
825 if (err) 833 if (err)
826 goto remove_card; 834 goto remove_card;
827 835
828 return 0; 836 return 0;
829 837
830remove_card: 838remove_card:
839 mmc_release_host(host);
831 mmc_remove_card(host->card); 840 mmc_remove_card(host->card);
832 host->card = NULL; 841 host->card = NULL;
833 mmc_claim_host(host); 842 mmc_claim_host(host);
834err: 843err:
835 mmc_detach_bus(host); 844 mmc_detach_bus(host);
836 mmc_release_host(host);
837 845
838 printk(KERN_ERR "%s: error %d whilst initialising SD card\n", 846 printk(KERN_ERR "%s: error %d whilst initialising SD card\n",
839 mmc_hostname(host), err); 847 mmc_hostname(host), err);
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index efef5f94ac42..5c4a54d9b6a4 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -627,15 +627,27 @@ static int mmc_sdio_suspend(struct mmc_host *host)
627 627
628static int mmc_sdio_resume(struct mmc_host *host) 628static int mmc_sdio_resume(struct mmc_host *host)
629{ 629{
630 int i, err; 630 int i, err = 0;
631 631
632 BUG_ON(!host); 632 BUG_ON(!host);
633 BUG_ON(!host->card); 633 BUG_ON(!host->card);
634 634
635 /* Basic card reinitialization. */ 635 /* Basic card reinitialization. */
636 mmc_claim_host(host); 636 mmc_claim_host(host);
637 err = mmc_sdio_init_card(host, host->ocr, host->card, 637
638 /* No need to reinitialize powered-resumed nonremovable cards */
639 if (mmc_card_is_removable(host) || !mmc_card_is_powered_resumed(host))
640 err = mmc_sdio_init_card(host, host->ocr, host->card,
638 (host->pm_flags & MMC_PM_KEEP_POWER)); 641 (host->pm_flags & MMC_PM_KEEP_POWER));
642 else if (mmc_card_is_powered_resumed(host)) {
643 /* We may have switched to 1-bit mode during suspend */
644 err = sdio_enable_4bit_bus(host->card);
645 if (err > 0) {
646 mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
647 err = 0;
648 }
649 }
650
639 if (!err && host->sdio_irqs) 651 if (!err && host->sdio_irqs)
640 mmc_signal_sdio_irq(host); 652 mmc_signal_sdio_irq(host);
641 mmc_release_host(host); 653 mmc_release_host(host);
@@ -690,16 +702,22 @@ static const struct mmc_bus_ops mmc_sdio_ops = {
690/* 702/*
691 * Starting point for SDIO card init. 703 * Starting point for SDIO card init.
692 */ 704 */
693int mmc_attach_sdio(struct mmc_host *host, u32 ocr) 705int mmc_attach_sdio(struct mmc_host *host)
694{ 706{
695 int err; 707 int err, i, funcs;
696 int i, funcs; 708 u32 ocr;
697 struct mmc_card *card; 709 struct mmc_card *card;
698 710
699 BUG_ON(!host); 711 BUG_ON(!host);
700 WARN_ON(!host->claimed); 712 WARN_ON(!host->claimed);
701 713
714 err = mmc_send_io_op_cond(host, 0, &ocr);
715 if (err)
716 return err;
717
702 mmc_attach_bus(host, &mmc_sdio_ops); 718 mmc_attach_bus(host, &mmc_sdio_ops);
719 if (host->ocr_avail_sdio)
720 host->ocr_avail = host->ocr_avail_sdio;
703 721
704 /* 722 /*
705 * Sanity check the voltages that the card claims to 723 * Sanity check the voltages that the card claims to
@@ -769,12 +787,12 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr)
769 pm_runtime_enable(&card->sdio_func[i]->dev); 787 pm_runtime_enable(&card->sdio_func[i]->dev);
770 } 788 }
771 789
772 mmc_release_host(host);
773
774 /* 790 /*
775 * First add the card to the driver model... 791 * First add the card to the driver model...
776 */ 792 */
793 mmc_release_host(host);
777 err = mmc_add_card(host->card); 794 err = mmc_add_card(host->card);
795 mmc_claim_host(host);
778 if (err) 796 if (err)
779 goto remove_added; 797 goto remove_added;
780 798
@@ -792,15 +810,17 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr)
792 810
793remove_added: 811remove_added:
794 /* Remove without lock if the device has been added. */ 812 /* Remove without lock if the device has been added. */
813 mmc_release_host(host);
795 mmc_sdio_remove(host); 814 mmc_sdio_remove(host);
796 mmc_claim_host(host); 815 mmc_claim_host(host);
797remove: 816remove:
798 /* And with lock if it hasn't been added. */ 817 /* And with lock if it hasn't been added. */
818 mmc_release_host(host);
799 if (host->card) 819 if (host->card)
800 mmc_sdio_remove(host); 820 mmc_sdio_remove(host);
821 mmc_claim_host(host);
801err: 822err:
802 mmc_detach_bus(host); 823 mmc_detach_bus(host);
803 mmc_release_host(host);
804 824
805 printk(KERN_ERR "%s: error %d whilst initialising SDIO card\n", 825 printk(KERN_ERR "%s: error %d whilst initialising SDIO card\n",
806 mmc_hostname(host), err); 826 mmc_hostname(host), err);
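
All three attach paths (mmc, sd, sdio) now follow the same claiming discipline around the driver-model call; reduced to its skeleton:

	mmc_release_host(host);		/* drop claim for device registration */
	err = mmc_add_card(host->card);
	mmc_claim_host(host);		/* attach paths return with host claimed */
	if (err)
		goto remove;

and each error label releases and re-claims symmetrically before touching host->card, so mmc_rescan_try_freq() can hold a single claim across SDIO, SD and MMC probing.
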
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 203da443e339..d29b9c36919a 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -197,44 +197,12 @@ out:
197 197
198#ifdef CONFIG_PM_RUNTIME 198#ifdef CONFIG_PM_RUNTIME
199 199
200static int sdio_bus_pm_prepare(struct device *dev)
201{
202 struct sdio_func *func = dev_to_sdio_func(dev);
203
204 /*
205 * Resume an SDIO device which was suspended at run time at this
206 * point, in order to allow standard SDIO suspend/resume paths
207 * to keep working as usual.
208 *
209 * Ultimately, the SDIO driver itself will decide (in its
210 * suspend handler, or lack thereof) whether the card should be
211 * removed or kept, and if kept, at what power state.
212 *
213 * At this point, PM core have increased our use count, so it's
214 * safe to directly resume the device. After system is resumed
215 * again, PM core will drop back its runtime PM use count, and if
216 * needed device will be suspended again.
217 *
218 * The end result is guaranteed to be a power state that is
219 * coherent with the device's runtime PM use count.
220 *
221 * The return value of pm_runtime_resume is deliberately unchecked
222 * since there is little point in failing system suspend if a
223 * device can't be resumed.
224 */
225 if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
226 pm_runtime_resume(dev);
227
228 return 0;
229}
230
231static const struct dev_pm_ops sdio_bus_pm_ops = { 200static const struct dev_pm_ops sdio_bus_pm_ops = {
232 SET_RUNTIME_PM_OPS( 201 SET_RUNTIME_PM_OPS(
233 pm_generic_runtime_suspend, 202 pm_generic_runtime_suspend,
234 pm_generic_runtime_resume, 203 pm_generic_runtime_resume,
235 pm_generic_runtime_idle 204 pm_generic_runtime_idle
236 ) 205 )
237 .prepare = sdio_bus_pm_prepare,
238}; 206};
239 207
240#define SDIO_PM_OPS_PTR (&sdio_bus_pm_ops) 208#define SDIO_PM_OPS_PTR (&sdio_bus_pm_ops)
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index e960a9300eb2..c22a4c039988 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -142,6 +142,27 @@ config MMC_SDHCI_ESDHC_IMX
142 142
143 If unsure, say N. 143 If unsure, say N.
144 144
145config MMC_SDHCI_DOVE
146 bool "SDHCI support on Marvell's Dove SoC"
147 depends on ARCH_DOVE
148 depends on MMC_SDHCI_PLTFM
149 select MMC_SDHCI_IO_ACCESSORS
150 help
151 This selects the Secure Digital Host Controller Interface in
152 Marvell's Dove SoC.
153
154 If unsure, say N.
155
156config MMC_SDHCI_TEGRA
157 tristate "SDHCI platform support for the Tegra SD/MMC Controller"
158 depends on MMC_SDHCI_PLTFM && ARCH_TEGRA
159 select MMC_SDHCI_IO_ACCESSORS
160 help
161 This selects the Tegra SD/MMC controller. If you have a Tegra
162 platform with SD or MMC devices, say Y or M here.
163
164 If unsure, say N.
165
145config MMC_SDHCI_S3C 166config MMC_SDHCI_S3C
146 tristate "SDHCI support on Samsung S3C SoC" 167 tristate "SDHCI support on Samsung S3C SoC"
147 depends on MMC_SDHCI && PLAT_SAMSUNG 168 depends on MMC_SDHCI && PLAT_SAMSUNG
@@ -460,6 +481,22 @@ config SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND
460 help 481 help
461 If you say yes here SD-Cards may work on the EZkit. 482 If you say yes here SD-Cards may work on the EZkit.
462 483
484config MMC_DW
485 tristate "Synopsys DesignWare Memory Card Interface"
486 depends on ARM
487 help
488 This selects support for the Synopsys DesignWare Mobile Storage IP
489 block, this provides host support for SD and MMC interfaces, in both
490 PIO and external DMA modes.
491
492config MMC_DW_IDMAC
493 bool "Internal DMAC interface"
494 depends on MMC_DW
495 help
496 This selects support for the internal DMAC block within the Synopsys
497 Designware Mobile Storage IP block. This disables the external DMA
498 interface.
499
463config MMC_SH_MMCIF 500config MMC_SH_MMCIF
464 tristate "SuperH Internal MMCIF support" 501 tristate "SuperH Internal MMCIF support"
465 depends on MMC_BLOCK && (SUPERH || ARCH_SHMOBILE) 502 depends on MMC_BLOCK && (SUPERH || ARCH_SHMOBILE)
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 7b645ff43b30..e834fb223e9a 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o
31obj-$(CONFIG_MMC_CB710) += cb710-mmc.o 31obj-$(CONFIG_MMC_CB710) += cb710-mmc.o
32obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o 32obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o
33obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o 33obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o
34obj-$(CONFIG_MMC_DW) += dw_mmc.o
34obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o 35obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o
35obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o 36obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o
36obj-$(CONFIG_MMC_USHC) += ushc.o 37obj-$(CONFIG_MMC_USHC) += ushc.o
@@ -39,6 +40,8 @@ obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-platform.o
39sdhci-platform-y := sdhci-pltfm.o 40sdhci-platform-y := sdhci-pltfm.o
40sdhci-platform-$(CONFIG_MMC_SDHCI_CNS3XXX) += sdhci-cns3xxx.o 41sdhci-platform-$(CONFIG_MMC_SDHCI_CNS3XXX) += sdhci-cns3xxx.o
41sdhci-platform-$(CONFIG_MMC_SDHCI_ESDHC_IMX) += sdhci-esdhc-imx.o 42sdhci-platform-$(CONFIG_MMC_SDHCI_ESDHC_IMX) += sdhci-esdhc-imx.o
43sdhci-platform-$(CONFIG_MMC_SDHCI_DOVE) += sdhci-dove.o
44sdhci-platform-$(CONFIG_MMC_SDHCI_TEGRA) += sdhci-tegra.o
42 45
43obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o 46obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o
44sdhci-of-y := sdhci-of-core.o 47sdhci-of-y := sdhci-of-core.o
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index e15547cf701f..0076c7448fe6 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -66,8 +66,8 @@
66#define DAVINCI_MMCBLNC 0x60 66#define DAVINCI_MMCBLNC 0x60
67#define DAVINCI_SDIOCTL 0x64 67#define DAVINCI_SDIOCTL 0x64
68#define DAVINCI_SDIOST0 0x68 68#define DAVINCI_SDIOST0 0x68
69#define DAVINCI_SDIOEN 0x6C 69#define DAVINCI_SDIOIEN 0x6C
70#define DAVINCI_SDIOST 0x70 70#define DAVINCI_SDIOIST 0x70
71#define DAVINCI_MMCFIFOCTL 0x74 /* FIFO Control Register */ 71#define DAVINCI_MMCFIFOCTL 0x74 /* FIFO Control Register */
72 72
73/* DAVINCI_MMCCTL definitions */ 73/* DAVINCI_MMCCTL definitions */
@@ -131,6 +131,14 @@
131#define MMCFIFOCTL_ACCWD_2 (2 << 3) /* access width of 2 bytes */ 131#define MMCFIFOCTL_ACCWD_2 (2 << 3) /* access width of 2 bytes */
132#define MMCFIFOCTL_ACCWD_1 (3 << 3) /* access width of 1 byte */ 132#define MMCFIFOCTL_ACCWD_1 (3 << 3) /* access width of 1 byte */
133 133
134/* DAVINCI_SDIOST0 definitions */
135#define SDIOST0_DAT1_HI BIT(0)
136
137/* DAVINCI_SDIOIEN definitions */
138#define SDIOIEN_IOINTEN BIT(0)
139
140/* DAVINCI_SDIOIST definitions */
141#define SDIOIST_IOINT BIT(0)
134 142
135/* MMCSD Init clock in Hz in opendrain mode */ 143/* MMCSD Init clock in Hz in opendrain mode */
136#define MMCSD_INIT_CLOCK 200000 144#define MMCSD_INIT_CLOCK 200000
@@ -164,7 +172,7 @@ struct mmc_davinci_host {
164 unsigned int mmc_input_clk; 172 unsigned int mmc_input_clk;
165 void __iomem *base; 173 void __iomem *base;
166 struct resource *mem_res; 174 struct resource *mem_res;
167 int irq; 175 int mmc_irq, sdio_irq;
168 unsigned char bus_mode; 176 unsigned char bus_mode;
169 177
170#define DAVINCI_MMC_DATADIR_NONE 0 178#define DAVINCI_MMC_DATADIR_NONE 0
@@ -184,6 +192,7 @@ struct mmc_davinci_host {
184 u32 rxdma, txdma; 192 u32 rxdma, txdma;
185 bool use_dma; 193 bool use_dma;
186 bool do_dma; 194 bool do_dma;
195 bool sdio_int;
187 196
188 /* Scatterlist DMA uses one or more parameter RAM entries: 197 /* Scatterlist DMA uses one or more parameter RAM entries:
189 * the main one (associated with rxdma or txdma) plus zero or 198 * the main one (associated with rxdma or txdma) plus zero or
@@ -480,7 +489,7 @@ static void mmc_davinci_send_dma_request(struct mmc_davinci_host *host,
480 struct scatterlist *sg; 489 struct scatterlist *sg;
481 unsigned sg_len; 490 unsigned sg_len;
482 unsigned bytes_left = host->bytes_left; 491 unsigned bytes_left = host->bytes_left;
483 const unsigned shift = ffs(rw_threshold) - 1;; 492 const unsigned shift = ffs(rw_threshold) - 1;
484 493
485 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) { 494 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
486 template = &host->tx_template; 495 template = &host->tx_template;
@@ -866,6 +875,19 @@ mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data)
866{ 875{
867 host->data = NULL; 876 host->data = NULL;
868 877
878 if (host->mmc->caps & MMC_CAP_SDIO_IRQ) {
879 /*
880 * SDIO Interrupt Detection work-around as suggested by
881 * Davinci Errata (TMS320DM355 Silicon Revision 1.1 Errata
882 * 2.1.6): Signal SDIO interrupt only if it is enabled by core
883 */
884 if (host->sdio_int && !(readl(host->base + DAVINCI_SDIOST0) &
885 SDIOST0_DAT1_HI)) {
886 writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
887 mmc_signal_sdio_irq(host->mmc);
888 }
889 }
890
869 if (host->do_dma) { 891 if (host->do_dma) {
870 davinci_abort_dma(host); 892 davinci_abort_dma(host);
871 893
@@ -932,6 +954,21 @@ davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data)
932 mmc_davinci_reset_ctrl(host, 0); 954 mmc_davinci_reset_ctrl(host, 0);
933} 955}
934 956
957static irqreturn_t mmc_davinci_sdio_irq(int irq, void *dev_id)
958{
959 struct mmc_davinci_host *host = dev_id;
960 unsigned int status;
961
962 status = readl(host->base + DAVINCI_SDIOIST);
963 if (status & SDIOIST_IOINT) {
964 dev_dbg(mmc_dev(host->mmc),
965 "SDIO interrupt status %x\n", status);
966 writel(status | SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
967 mmc_signal_sdio_irq(host->mmc);
968 }
969 return IRQ_HANDLED;
970}
971
935static irqreturn_t mmc_davinci_irq(int irq, void *dev_id) 972static irqreturn_t mmc_davinci_irq(int irq, void *dev_id)
936{ 973{
937 struct mmc_davinci_host *host = (struct mmc_davinci_host *)dev_id; 974 struct mmc_davinci_host *host = (struct mmc_davinci_host *)dev_id;
@@ -1076,11 +1113,32 @@ static int mmc_davinci_get_ro(struct mmc_host *mmc)
1076 return config->get_ro(pdev->id); 1113 return config->get_ro(pdev->id);
1077} 1114}
1078 1115
1116static void mmc_davinci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1117{
1118 struct mmc_davinci_host *host = mmc_priv(mmc);
1119
1120 if (enable) {
1121 if (!(readl(host->base + DAVINCI_SDIOST0) & SDIOST0_DAT1_HI)) {
1122 writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
1123 mmc_signal_sdio_irq(host->mmc);
1124 } else {
1125 host->sdio_int = true;
1126 writel(readl(host->base + DAVINCI_SDIOIEN) |
1127 SDIOIEN_IOINTEN, host->base + DAVINCI_SDIOIEN);
1128 }
1129 } else {
1130 host->sdio_int = false;
1131 writel(readl(host->base + DAVINCI_SDIOIEN) & ~SDIOIEN_IOINTEN,
1132 host->base + DAVINCI_SDIOIEN);
1133 }
1134}
1135
1079static struct mmc_host_ops mmc_davinci_ops = { 1136static struct mmc_host_ops mmc_davinci_ops = {
1080 .request = mmc_davinci_request, 1137 .request = mmc_davinci_request,
1081 .set_ios = mmc_davinci_set_ios, 1138 .set_ios = mmc_davinci_set_ios,
1082 .get_cd = mmc_davinci_get_cd, 1139 .get_cd = mmc_davinci_get_cd,
1083 .get_ro = mmc_davinci_get_ro, 1140 .get_ro = mmc_davinci_get_ro,
1141 .enable_sdio_irq = mmc_davinci_enable_sdio_irq,
1084}; 1142};
1085 1143
1086/*----------------------------------------------------------------------*/ 1144/*----------------------------------------------------------------------*/
@@ -1209,7 +1267,8 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
1209 host->nr_sg = MAX_NR_SG; 1267 host->nr_sg = MAX_NR_SG;
1210 1268
1211 host->use_dma = use_dma; 1269 host->use_dma = use_dma;
1212 host->irq = irq; 1270 host->mmc_irq = irq;
1271 host->sdio_irq = platform_get_irq(pdev, 1);
1213 1272
1214 if (host->use_dma && davinci_acquire_dma_channels(host) != 0) 1273 if (host->use_dma && davinci_acquire_dma_channels(host) != 0)
1215 host->use_dma = 0; 1274 host->use_dma = 0;
@@ -1270,6 +1329,13 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
1270 if (ret) 1329 if (ret)
1271 goto out; 1330 goto out;
1272 1331
1332 if (host->sdio_irq >= 0) {
1333 ret = request_irq(host->sdio_irq, mmc_davinci_sdio_irq, 0,
1334 mmc_hostname(mmc), host);
1335 if (!ret)
1336 mmc->caps |= MMC_CAP_SDIO_IRQ;
1337 }
1338
1273 rename_region(mem, mmc_hostname(mmc)); 1339 rename_region(mem, mmc_hostname(mmc));
1274 1340
1275 dev_info(mmc_dev(host->mmc), "Using %s, %d-bit mode\n", 1341 dev_info(mmc_dev(host->mmc), "Using %s, %d-bit mode\n",
@@ -1313,7 +1379,9 @@ static int __exit davinci_mmcsd_remove(struct platform_device *pdev)
1313 mmc_davinci_cpufreq_deregister(host); 1379 mmc_davinci_cpufreq_deregister(host);
1314 1380
1315 mmc_remove_host(host->mmc); 1381 mmc_remove_host(host->mmc);
1316 free_irq(host->irq, host); 1382 free_irq(host->mmc_irq, host);
1383 if (host->mmc->caps & MMC_CAP_SDIO_IRQ)
1384 free_irq(host->sdio_irq, host);
1317 1385
1318 davinci_release_dma_channels(host); 1386 davinci_release_dma_channels(host);
1319 1387
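
The DAT1 check in mmc_davinci_enable_sdio_irq() is the heart of the errata workaround: DAT1 already driven low means the card is asserting an interrupt the controller may have missed, so the driver acknowledges and signals it immediately instead of arming the hardware enable:

	if (!(readl(host->base + DAVINCI_SDIOST0) & SDIOST0_DAT1_HI)) {
		writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST); /* ack */
		mmc_signal_sdio_irq(host->mmc);	/* deliver to the SDIO core */
	}

Only when DAT1 is still high does the driver set SDIOIEN_IOINTEN and mark host->sdio_int, letting the dedicated mmc_davinci_sdio_irq() handler (wired to the second platform IRQ) take over from there.
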
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
new file mode 100644
index 000000000000..2fcc82577c1b
--- /dev/null
+++ b/drivers/mmc/host/dw_mmc.c
@@ -0,0 +1,1796 @@
1/*
2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
4 *
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <linux/blkdev.h>
15#include <linux/clk.h>
16#include <linux/debugfs.h>
17#include <linux/device.h>
18#include <linux/dma-mapping.h>
19#include <linux/err.h>
20#include <linux/init.h>
21#include <linux/interrupt.h>
22#include <linux/ioport.h>
23#include <linux/module.h>
24#include <linux/platform_device.h>
25#include <linux/scatterlist.h>
26#include <linux/seq_file.h>
27#include <linux/slab.h>
28#include <linux/stat.h>
29#include <linux/delay.h>
30#include <linux/irq.h>
31#include <linux/mmc/host.h>
32#include <linux/mmc/mmc.h>
33#include <linux/mmc/dw_mmc.h>
34#include <linux/bitops.h>
35
36#include "dw_mmc.h"
37
38/* Common flag combinations */
39#define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DTO | SDMMC_INT_DCRC | \
40 SDMMC_INT_HTO | SDMMC_INT_SBE | \
41 SDMMC_INT_EBE)
42#define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
43 SDMMC_INT_RESP_ERR)
44#define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
45 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
46#define DW_MCI_SEND_STATUS 1
47#define DW_MCI_RECV_STATUS 2
48#define DW_MCI_DMA_THRESHOLD 16
49
50#ifdef CONFIG_MMC_DW_IDMAC
51struct idmac_desc {
52 u32 des0; /* Control Descriptor */
53#define IDMAC_DES0_DIC BIT(1)
54#define IDMAC_DES0_LD BIT(2)
55#define IDMAC_DES0_FD BIT(3)
56#define IDMAC_DES0_CH BIT(4)
57#define IDMAC_DES0_ER BIT(5)
58#define IDMAC_DES0_CES BIT(30)
59#define IDMAC_DES0_OWN BIT(31)
60
61 u32 des1; /* Buffer sizes */
62#define IDMAC_SET_BUFFER1_SIZE(d, s) \
63 ((d)->des1 = ((d)->des1 & 0x03ffc000) | ((s) & 0x3fff))
64
65 u32 des2; /* buffer 1 physical address */
66
67 u32 des3; /* buffer 2 physical address */
68};
69#endif /* CONFIG_MMC_DW_IDMAC */
70
71/**
72 * struct dw_mci_slot - MMC slot state
73 * @mmc: The mmc_host representing this slot.
74 * @host: The MMC controller this slot is using.
75 * @ctype: Card type for this slot.
76 * @mrq: mmc_request currently being processed or waiting to be
77 * processed, or NULL when the slot is idle.
78 * @queue_node: List node for placing this node in the @queue list of
79 * &struct dw_mci.
80 * @clock: Clock rate configured by set_ios(). Protected by host->lock.
81 * @flags: Random state bits associated with the slot.
82 * @id: Number of this slot.
83 * @last_detect_state: Most recently observed card detect state.
84 */
85struct dw_mci_slot {
86 struct mmc_host *mmc;
87 struct dw_mci *host;
88
89 u32 ctype;
90
91 struct mmc_request *mrq;
92 struct list_head queue_node;
93
94 unsigned int clock;
95 unsigned long flags;
96#define DW_MMC_CARD_PRESENT 0
97#define DW_MMC_CARD_NEED_INIT 1
98 int id;
99 int last_detect_state;
100};
101
102#if defined(CONFIG_DEBUG_FS)
103static int dw_mci_req_show(struct seq_file *s, void *v)
104{
105 struct dw_mci_slot *slot = s->private;
106 struct mmc_request *mrq;
107 struct mmc_command *cmd;
108 struct mmc_command *stop;
109 struct mmc_data *data;
110
111 /* Make sure we get a consistent snapshot */
112 spin_lock_bh(&slot->host->lock);
113 mrq = slot->mrq;
114
115 if (mrq) {
116 cmd = mrq->cmd;
117 data = mrq->data;
118 stop = mrq->stop;
119
120 if (cmd)
121 seq_printf(s,
122 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
123 cmd->opcode, cmd->arg, cmd->flags,
124 cmd->resp[0], cmd->resp[1], cmd->resp[2],
125 cmd->resp[2], cmd->error);
126 if (data)
127 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
128 data->bytes_xfered, data->blocks,
129 data->blksz, data->flags, data->error);
130 if (stop)
131 seq_printf(s,
132 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
133 stop->opcode, stop->arg, stop->flags,
134 stop->resp[0], stop->resp[1], stop->resp[2],
135 stop->resp[2], stop->error);
136 }
137
138 spin_unlock_bh(&slot->host->lock);
139
140 return 0;
141}
142
143static int dw_mci_req_open(struct inode *inode, struct file *file)
144{
145 return single_open(file, dw_mci_req_show, inode->i_private);
146}
147
148static const struct file_operations dw_mci_req_fops = {
149 .owner = THIS_MODULE,
150 .open = dw_mci_req_open,
151 .read = seq_read,
152 .llseek = seq_lseek,
153 .release = single_release,
154};
155
156static int dw_mci_regs_show(struct seq_file *s, void *v)
157{
158 seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
159 seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
160 seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
161 seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
162 seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
163 seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
164
165 return 0;
166}
167
168static int dw_mci_regs_open(struct inode *inode, struct file *file)
169{
170 return single_open(file, dw_mci_regs_show, inode->i_private);
171}
172
173static const struct file_operations dw_mci_regs_fops = {
174 .owner = THIS_MODULE,
175 .open = dw_mci_regs_open,
176 .read = seq_read,
177 .llseek = seq_lseek,
178 .release = single_release,
179};
180
181static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
182{
183 struct mmc_host *mmc = slot->mmc;
184 struct dw_mci *host = slot->host;
185 struct dentry *root;
186 struct dentry *node;
187
188 root = mmc->debugfs_root;
189 if (!root)
190 return;
191
192 node = debugfs_create_file("regs", S_IRUSR, root, host,
193 &dw_mci_regs_fops);
194 if (!node)
195 goto err;
196
197 node = debugfs_create_file("req", S_IRUSR, root, slot,
198 &dw_mci_req_fops);
199 if (!node)
200 goto err;
201
202 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
203 if (!node)
204 goto err;
205
206 node = debugfs_create_x32("pending_events", S_IRUSR, root,
207 (u32 *)&host->pending_events);
208 if (!node)
209 goto err;
210
211 node = debugfs_create_x32("completed_events", S_IRUSR, root,
212 (u32 *)&host->completed_events);
213 if (!node)
214 goto err;
215
216 return;
217
218err:
219 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
220}
221#endif /* defined(CONFIG_DEBUG_FS) */
222
223static void dw_mci_set_timeout(struct dw_mci *host)
224{
225 /* timeout (maximum) */
226 mci_writel(host, TMOUT, 0xffffffff);
227}
228
229static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
230{
231 struct mmc_data *data;
232 u32 cmdr;
233 cmd->error = -EINPROGRESS;
234
235 cmdr = cmd->opcode;
236
237 if (cmdr == MMC_STOP_TRANSMISSION)
238 cmdr |= SDMMC_CMD_STOP;
239 else
240 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
241
242 if (cmd->flags & MMC_RSP_PRESENT) {
243 /* We expect a response, so set this bit */
244 cmdr |= SDMMC_CMD_RESP_EXP;
245 if (cmd->flags & MMC_RSP_136)
246 cmdr |= SDMMC_CMD_RESP_LONG;
247 }
248
249 if (cmd->flags & MMC_RSP_CRC)
250 cmdr |= SDMMC_CMD_RESP_CRC;
251
252 data = cmd->data;
253 if (data) {
254 cmdr |= SDMMC_CMD_DAT_EXP;
255 if (data->flags & MMC_DATA_STREAM)
256 cmdr |= SDMMC_CMD_STRM_MODE;
257 if (data->flags & MMC_DATA_WRITE)
258 cmdr |= SDMMC_CMD_DAT_WR;
259 }
260
261 return cmdr;
262}
263
264static void dw_mci_start_command(struct dw_mci *host,
265 struct mmc_command *cmd, u32 cmd_flags)
266{
267 host->cmd = cmd;
268 dev_vdbg(&host->pdev->dev,
269 "start command: ARGR=0x%08x CMDR=0x%08x\n",
270 cmd->arg, cmd_flags);
271
272 mci_writel(host, CMDARG, cmd->arg);
273 wmb();
274
275 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
276}
277
278static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
279{
280 dw_mci_start_command(host, data->stop, host->stop_cmdr);
281}
282
283/* DMA interface functions */
284static void dw_mci_stop_dma(struct dw_mci *host)
285{
286 if (host->use_dma) {
287 host->dma_ops->stop(host);
288 host->dma_ops->cleanup(host);
289 } else {
290 /* Data transfer was stopped by the interrupt handler */
291 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
292 }
293}
294
295#ifdef CONFIG_MMC_DW_IDMAC
296static void dw_mci_dma_cleanup(struct dw_mci *host)
297{
298 struct mmc_data *data = host->data;
299
300 if (data)
301 dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len,
302 ((data->flags & MMC_DATA_WRITE)
303 ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
304}
305
306static void dw_mci_idmac_stop_dma(struct dw_mci *host)
307{
308 u32 temp;
309
310 /* Disable and reset the IDMAC interface */
311 temp = mci_readl(host, CTRL);
312 temp &= ~SDMMC_CTRL_USE_IDMAC;
313 temp |= SDMMC_CTRL_DMA_RESET;
314 mci_writel(host, CTRL, temp);
315
316 /* Stop the IDMAC running */
317 temp = mci_readl(host, BMOD);
318 temp &= ~SDMMC_IDMAC_ENABLE;
319 mci_writel(host, BMOD, temp);
320}
321
322static void dw_mci_idmac_complete_dma(struct dw_mci *host)
323{
324 struct mmc_data *data = host->data;
325
326 dev_vdbg(&host->pdev->dev, "DMA complete\n");
327
328 host->dma_ops->cleanup(host);
329
330 /*
331 * If the card was removed, data will be NULL. No point in trying to
332 * send the stop command or waiting for NBUSY in this case.
333 */
334 if (data) {
335 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
336 tasklet_schedule(&host->tasklet);
337 }
338}
339
340static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
341 unsigned int sg_len)
342{
343 int i;
344 struct idmac_desc *desc = host->sg_cpu;
345
346 for (i = 0; i < sg_len; i++, desc++) {
347 unsigned int length = sg_dma_len(&data->sg[i]);
348 u32 mem_addr = sg_dma_address(&data->sg[i]);
349
350 /* Set the OWN bit and disable interrupts for this descriptor */
351 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
352
353 /* Buffer length */
354 IDMAC_SET_BUFFER1_SIZE(desc, length);
355
356 /* Physical address to DMA to/from */
357 desc->des2 = mem_addr;
358 }
359
360 /* Set first descriptor */
361 desc = host->sg_cpu;
362 desc->des0 |= IDMAC_DES0_FD;
363
364 /* Set last descriptor */
365 desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
366 desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
367 desc->des0 |= IDMAC_DES0_LD;
368
369 wmb();
370}
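/*
 * Editorial illustration: for a two-entry scatterlist the loop above
 * produces two chained descriptors, both marked OWN|DIC|CH; the first
 * additionally gets FD (first descriptor), while the last has CH and
 * DIC cleared and LD set, so the IDMAC raises its completion interrupt
 * only once the final buffer has been transferred.
 */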
371
372static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
373{
374 u32 temp;
375
376 dw_mci_translate_sglist(host, host->data, sg_len);
377
378 /* Select IDMAC interface */
379 temp = mci_readl(host, CTRL);
380 temp |= SDMMC_CTRL_USE_IDMAC;
381 mci_writel(host, CTRL, temp);
382
383 wmb();
384
385 /* Enable the IDMAC */
386 temp = mci_readl(host, BMOD);
387 temp |= SDMMC_IDMAC_ENABLE;
388 mci_writel(host, BMOD, temp);
389
390 /* Start it running */
391 mci_writel(host, PLDMND, 1);
392}
393
394static int dw_mci_idmac_init(struct dw_mci *host)
395{
396 struct idmac_desc *p;
397 int i;
398
399 /* Number of descriptors in the ring buffer */
400 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
401
402 /* Forward link the descriptor list */
403 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
404 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
405
406 /* Set the last descriptor as the end-of-ring descriptor */
407 p->des3 = host->sg_dma;
408 p->des0 = IDMAC_DES0_ER;
409
410 /* Mask out interrupts - get Tx & Rx complete only */
411 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
412 SDMMC_IDMAC_INT_TI);
413
414 /* Set the descriptor base address */
415 mci_writel(host, DBADDR, host->sg_dma);
416 return 0;
417}
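/*
 * Editorial sizing note, assuming 4 KiB pages and a 16-byte
 * struct idmac_desc (des0..des3): the ring above holds
 * 4096 / 16 = 256 descriptors, each des3 holding the DMA address of
 * the next descriptor and the last one wrapping back to sg_dma with
 * the end-of-ring bit set.
 */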
418
419static struct dw_mci_dma_ops dw_mci_idmac_ops = {
420 .init = dw_mci_idmac_init,
421 .start = dw_mci_idmac_start_dma,
422 .stop = dw_mci_idmac_stop_dma,
423 .complete = dw_mci_idmac_complete_dma,
424 .cleanup = dw_mci_dma_cleanup,
425};
426#endif /* CONFIG_MMC_DW_IDMAC */
427
428static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
429{
430 struct scatterlist *sg;
431 unsigned int i, direction, sg_len;
432 u32 temp;
433
434 /* If we don't have a channel, we can't do DMA */
435 if (!host->use_dma)
436 return -ENODEV;
437
438 /*
439 * We don't do DMA on "complex" transfers, i.e. with
440 * non-word-aligned buffers or lengths. Also, we don't bother
441 * with all the DMA setup overhead for short transfers.
442 */
443 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
444 return -EINVAL;
445 if (data->blksz & 3)
446 return -EINVAL;
447
448 for_each_sg(data->sg, sg, data->sg_len, i) {
449 if (sg->offset & 3 || sg->length & 3)
450 return -EINVAL;
451 }
452
453 if (data->flags & MMC_DATA_READ)
454 direction = DMA_FROM_DEVICE;
455 else
456 direction = DMA_TO_DEVICE;
457
458 sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len,
459 direction);
460
461 dev_vdbg(&host->pdev->dev,
462 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
463 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
464 sg_len);
465
466 /* Enable the DMA interface */
467 temp = mci_readl(host, CTRL);
468 temp |= SDMMC_CTRL_DMA_ENABLE;
469 mci_writel(host, CTRL, temp);
470
471 /* Disable RX/TX IRQs, let DMA handle it */
472 temp = mci_readl(host, INTMASK);
473 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
474 mci_writel(host, INTMASK, temp);
475
476 host->dma_ops->start(host, sg_len);
477
478 return 0;
479}
480
481static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
482{
483 u32 temp;
484
485 data->error = -EINPROGRESS;
486
487 WARN_ON(host->data);
488 host->sg = NULL;
489 host->data = data;
490
491 if (dw_mci_submit_data_dma(host, data)) {
492 host->sg = data->sg;
493 host->pio_offset = 0;
494 if (data->flags & MMC_DATA_READ)
495 host->dir_status = DW_MCI_RECV_STATUS;
496 else
497 host->dir_status = DW_MCI_SEND_STATUS;
498
499 temp = mci_readl(host, INTMASK);
500 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
501 mci_writel(host, INTMASK, temp);
502
503 temp = mci_readl(host, CTRL);
504 temp &= ~SDMMC_CTRL_DMA_ENABLE;
505 mci_writel(host, CTRL, temp);
506 }
507}
508
509static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
510{
511 struct dw_mci *host = slot->host;
512 unsigned long timeout = jiffies + msecs_to_jiffies(500);
513 unsigned int cmd_status = 0;
514
515 mci_writel(host, CMDARG, arg);
516 wmb();
517 mci_writel(host, CMD, SDMMC_CMD_START | cmd);
518
519 while (time_before(jiffies, timeout)) {
520 cmd_status = mci_readl(host, CMD);
521 if (!(cmd_status & SDMMC_CMD_START))
522 return;
523 }
524 dev_err(&slot->mmc->class_dev,
525 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
526 cmd, arg, cmd_status);
527}
528
529static void dw_mci_setup_bus(struct dw_mci_slot *slot)
530{
531 struct dw_mci *host = slot->host;
532 u32 div;
533
534 if (slot->clock != host->current_speed) {
535 if (host->bus_hz % slot->clock)
536 /*
537 * move the + 1 after the divide to prevent
538 * over-clocking the card.
539 */
540 div = ((host->bus_hz / slot->clock) >> 1) + 1;
541 else
542 div = (host->bus_hz / slot->clock) >> 1;
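		/*
		 * Worked example (editorial): with bus_hz = 80 MHz and a
		 * requested clock of 25 MHz, the remainder is non-zero, so
		 * div = ((80000000 / 25000000) >> 1) + 1 = 2 and the card
		 * clock becomes bus_hz / (2 * div) = 20 MHz. Without the
		 * "+ 1" we would get div = 1, i.e. 40 MHz - over-clocked.
		 */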
543
544 dev_info(&slot->mmc->class_dev,
545 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ"
546 " div = %d)\n", slot->id, host->bus_hz, slot->clock,
547 div ? ((host->bus_hz / div) >> 1) : host->bus_hz, div);
548
549 /* disable clock */
550 mci_writel(host, CLKENA, 0);
551 mci_writel(host, CLKSRC, 0);
552
553 /* inform CIU */
554 mci_send_cmd(slot,
555 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
556
557 /* set clock to desired speed */
558 mci_writel(host, CLKDIV, div);
559
560 /* inform CIU */
561 mci_send_cmd(slot,
562 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
563
564 /* enable clock */
565 mci_writel(host, CLKENA, SDMMC_CLKEN_ENABLE);
566
567 /* inform CIU */
568 mci_send_cmd(slot,
569 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
570
571 host->current_speed = slot->clock;
572 }
573
574 /* Set the current slot bus width */
575 mci_writel(host, CTYPE, slot->ctype);
576}
577
578static void dw_mci_start_request(struct dw_mci *host,
579 struct dw_mci_slot *slot)
580{
581 struct mmc_request *mrq;
582 struct mmc_command *cmd;
583 struct mmc_data *data;
584 u32 cmdflags;
585
586 mrq = slot->mrq;
587 if (host->pdata->select_slot)
588 host->pdata->select_slot(slot->id);
589
590 /* Slot specific timing and width adjustment */
591 dw_mci_setup_bus(slot);
592
593 host->cur_slot = slot;
594 host->mrq = mrq;
595
596 host->pending_events = 0;
597 host->completed_events = 0;
598 host->data_status = 0;
599
600 data = mrq->data;
601 if (data) {
602 dw_mci_set_timeout(host);
603 mci_writel(host, BYTCNT, data->blksz*data->blocks);
604 mci_writel(host, BLKSIZ, data->blksz);
605 }
606
607 cmd = mrq->cmd;
608 cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
609
610 /* this is the first command, send the initialization clock */
611 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
612 cmdflags |= SDMMC_CMD_INIT;
613
614 if (data) {
615 dw_mci_submit_data(host, data);
616 wmb();
617 }
618
619 dw_mci_start_command(host, cmd, cmdflags);
620
621 if (mrq->stop)
622 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
623}
624
625static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
626 struct mmc_request *mrq)
627{
628 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
629 host->state);
630
631 spin_lock_bh(&host->lock);
632 slot->mrq = mrq;
633
634 if (host->state == STATE_IDLE) {
635 host->state = STATE_SENDING_CMD;
636 dw_mci_start_request(host, slot);
637 } else {
638 list_add_tail(&slot->queue_node, &host->queue);
639 }
640
641 spin_unlock_bh(&host->lock);
642}
643
644static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
645{
646 struct dw_mci_slot *slot = mmc_priv(mmc);
647 struct dw_mci *host = slot->host;
648
649 WARN_ON(slot->mrq);
650
651 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
652 mrq->cmd->error = -ENOMEDIUM;
653 mmc_request_done(mmc, mrq);
654 return;
655 }
656
657 /* We don't support multiple blocks of weird lengths. */
658 dw_mci_queue_request(host, slot, mrq);
659}
660
661static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
662{
663 struct dw_mci_slot *slot = mmc_priv(mmc);
664
665 /* set default 1 bit mode */
666 slot->ctype = SDMMC_CTYPE_1BIT;
667
668 switch (ios->bus_width) {
669 case MMC_BUS_WIDTH_1:
670 slot->ctype = SDMMC_CTYPE_1BIT;
671 break;
672 case MMC_BUS_WIDTH_4:
673 slot->ctype = SDMMC_CTYPE_4BIT;
674 break;
675 }
676
677 if (ios->clock) {
678 /*
679 * Use mirror of ios->clock to prevent race with mmc
680 * core ios update when finding the minimum.
681 */
682 slot->clock = ios->clock;
683 }
684
685 switch (ios->power_mode) {
686 case MMC_POWER_UP:
687 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
688 break;
689 default:
690 break;
691 }
692}
693
694static int dw_mci_get_ro(struct mmc_host *mmc)
695{
696 int read_only;
697 struct dw_mci_slot *slot = mmc_priv(mmc);
698 struct dw_mci_board *brd = slot->host->pdata;
699
700 /* Use platform get_ro function, else try on board write protect */
701 if (brd->get_ro)
702 read_only = brd->get_ro(slot->id);
703 else
704 read_only =
705 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
706
707 dev_dbg(&mmc->class_dev, "card is %s\n",
708 read_only ? "read-only" : "read-write");
709
710 return read_only;
711}
712
713static int dw_mci_get_cd(struct mmc_host *mmc)
714{
715 int present;
716 struct dw_mci_slot *slot = mmc_priv(mmc);
717 struct dw_mci_board *brd = slot->host->pdata;
718
719 /* Use platform get_cd function, else try onboard card detect */
720 if (brd->get_cd)
721 present = !brd->get_cd(slot->id);
722 else
723 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
724 == 0 ? 1 : 0;
725
726 if (present)
727 dev_dbg(&mmc->class_dev, "card is present\n");
728 else
729 dev_dbg(&mmc->class_dev, "card is not present\n");
730
731 return present;
732}
733
734static const struct mmc_host_ops dw_mci_ops = {
735 .request = dw_mci_request,
736 .set_ios = dw_mci_set_ios,
737 .get_ro = dw_mci_get_ro,
738 .get_cd = dw_mci_get_cd,
739};
740
741static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
742 __releases(&host->lock)
743 __acquires(&host->lock)
744{
745 struct dw_mci_slot *slot;
746 struct mmc_host *prev_mmc = host->cur_slot->mmc;
747
748 WARN_ON(host->cmd || host->data);
749
750 host->cur_slot->mrq = NULL;
751 host->mrq = NULL;
752 if (!list_empty(&host->queue)) {
753 slot = list_entry(host->queue.next,
754 struct dw_mci_slot, queue_node);
755 list_del(&slot->queue_node);
756 dev_vdbg(&host->pdev->dev, "list not empty: %s is next\n",
757 mmc_hostname(slot->mmc));
758 host->state = STATE_SENDING_CMD;
759 dw_mci_start_request(host, slot);
760 } else {
761 dev_vdbg(&host->pdev->dev, "list empty\n");
762 host->state = STATE_IDLE;
763 }
764
765 spin_unlock(&host->lock);
766 mmc_request_done(prev_mmc, mrq);
767 spin_lock(&host->lock);
768}
769
770static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
771{
772 u32 status = host->cmd_status;
773
774 host->cmd_status = 0;
775
776 /* Read the response from the card (up to 16 bytes) */
777 if (cmd->flags & MMC_RSP_PRESENT) {
778 if (cmd->flags & MMC_RSP_136) {
779 cmd->resp[3] = mci_readl(host, RESP0);
780 cmd->resp[2] = mci_readl(host, RESP1);
781 cmd->resp[1] = mci_readl(host, RESP2);
782 cmd->resp[0] = mci_readl(host, RESP3);
783 } else {
784 cmd->resp[0] = mci_readl(host, RESP0);
785 cmd->resp[1] = 0;
786 cmd->resp[2] = 0;
787 cmd->resp[3] = 0;
788 }
789 }
790
791 if (status & SDMMC_INT_RTO)
792 cmd->error = -ETIMEDOUT;
793 else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
794 cmd->error = -EILSEQ;
795 else if (status & SDMMC_INT_RESP_ERR)
796 cmd->error = -EIO;
797 else
798 cmd->error = 0;
799
800 if (cmd->error) {
801 /* newer ip versions need a delay between retries */
802 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
803 mdelay(20);
804
805 if (cmd->data) {
806 host->data = NULL;
807 dw_mci_stop_dma(host);
808 }
809 }
810}
811
812static void dw_mci_tasklet_func(unsigned long priv)
813{
814 struct dw_mci *host = (struct dw_mci *)priv;
815 struct mmc_data *data;
816 struct mmc_command *cmd;
817 enum dw_mci_state state;
818 enum dw_mci_state prev_state;
819 u32 status;
820
821 spin_lock(&host->lock);
822
823 state = host->state;
824 data = host->data;
825
826 do {
827 prev_state = state;
828
829 switch (state) {
830 case STATE_IDLE:
831 break;
832
833 case STATE_SENDING_CMD:
834 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
835 &host->pending_events))
836 break;
837
838 cmd = host->cmd;
839 host->cmd = NULL;
840 set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
841 dw_mci_command_complete(host, host->mrq->cmd);
842 if (!host->mrq->data || cmd->error) {
843 dw_mci_request_end(host, host->mrq);
844 goto unlock;
845 }
846
847 prev_state = state = STATE_SENDING_DATA;
848 /* fall through */
849
850 case STATE_SENDING_DATA:
851 if (test_and_clear_bit(EVENT_DATA_ERROR,
852 &host->pending_events)) {
853 dw_mci_stop_dma(host);
854 if (data->stop)
855 send_stop_cmd(host, data);
856 state = STATE_DATA_ERROR;
857 break;
858 }
859
860 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
861 &host->pending_events))
862 break;
863
864 set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
865 prev_state = state = STATE_DATA_BUSY;
866 /* fall through */
867
868 case STATE_DATA_BUSY:
869 if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
870 &host->pending_events))
871 break;
872
873 host->data = NULL;
874 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
875 status = host->data_status;
876
877 if (status & DW_MCI_DATA_ERROR_FLAGS) {
878 if (status & SDMMC_INT_DTO) {
879 dev_err(&host->pdev->dev,
880 "data timeout error\n");
881 data->error = -ETIMEDOUT;
882 } else if (status & SDMMC_INT_DCRC) {
883 dev_err(&host->pdev->dev,
884 "data CRC error\n");
885 data->error = -EILSEQ;
886 } else {
887 dev_err(&host->pdev->dev,
888 "data FIFO error "
889 "(status=%08x)\n",
890 status);
891 data->error = -EIO;
892 }
893 } else {
894 data->bytes_xfered = data->blocks * data->blksz;
895 data->error = 0;
896 }
897
898 if (!data->stop) {
899 dw_mci_request_end(host, host->mrq);
900 goto unlock;
901 }
902
903 prev_state = state = STATE_SENDING_STOP;
904 if (!data->error)
905 send_stop_cmd(host, data);
906 /* fall through */
907
908 case STATE_SENDING_STOP:
909 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
910 &host->pending_events))
911 break;
912
913 host->cmd = NULL;
914 dw_mci_command_complete(host, host->mrq->stop);
915 dw_mci_request_end(host, host->mrq);
916 goto unlock;
917
918 case STATE_DATA_ERROR:
919 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
920 &host->pending_events))
921 break;
922
923 state = STATE_DATA_BUSY;
924 break;
925 }
926 } while (state != prev_state);
927
928 host->state = state;
929unlock:
930 spin_unlock(&host->lock);
931
932}
933
934static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
935{
936 u16 *pdata = (u16 *)buf;
937
938 WARN_ON(cnt % 2 != 0);
939
940 cnt = cnt >> 1;
941 while (cnt > 0) {
942 mci_writew(host, DATA, *pdata++);
943 cnt--;
944 }
945}
946
947static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
948{
949 u16 *pdata = (u16 *)buf;
950
951 WARN_ON(cnt % 2 != 0);
952
953 cnt = cnt >> 1;
954 while (cnt > 0) {
955 *pdata++ = mci_readw(host, DATA);
956 cnt--;
957 }
958}
959
960static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
961{
962 u32 *pdata = (u32 *)buf;
963
964 WARN_ON(cnt % 4 != 0);
965 WARN_ON((unsigned long)pdata & 0x3);
966
967 cnt = cnt >> 2;
968 while (cnt > 0) {
969 mci_writel(host, DATA, *pdata++);
970 cnt--;
971 }
972}
973
974static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
975{
976 u32 *pdata = (u32 *)buf;
977
978 WARN_ON(cnt % 4 != 0);
979 WARN_ON((unsigned long)pdata & 0x3);
980
981 cnt = cnt >> 2;
982 while (cnt > 0) {
983 *pdata++ = mci_readl(host, DATA);
984 cnt--;
985 }
986}
987
988static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
989{
990 u64 *pdata = (u64 *)buf;
991
992 WARN_ON(cnt % 8 != 0);
993
994 cnt = cnt >> 3;
995 while (cnt > 0) {
996 mci_writeq(host, DATA, *pdata++);
997 cnt--;
998 }
999}
1000
1001static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
1002{
1003 u64 *pdata = (u64 *)buf;
1004
1005 WARN_ON(cnt % 8 != 0);
1006
1007 cnt = cnt >> 3;
1008 while (cnt > 0) {
1009 *pdata++ = mci_readq(host, DATA);
1010 cnt--;
1011 }
1012}
1013
1014static void dw_mci_read_data_pio(struct dw_mci *host)
1015{
1016 struct scatterlist *sg = host->sg;
1017 void *buf = sg_virt(sg);
1018 unsigned int offset = host->pio_offset;
1019 struct mmc_data *data = host->data;
1020 int shift = host->data_shift;
1021 u32 status;
1022 unsigned int nbytes = 0, len, old_len, count = 0;
1023
1024 do {
1025 len = SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift;
1026 if (count == 0)
1027 old_len = len;
1028
1029 if (offset + len <= sg->length) {
1030 host->pull_data(host, (void *)(buf + offset), len);
1031
1032 offset += len;
1033 nbytes += len;
1034
1035 if (offset == sg->length) {
1036 flush_dcache_page(sg_page(sg));
1037 host->sg = sg = sg_next(sg);
1038 if (!sg)
1039 goto done;
1040
1041 offset = 0;
1042 buf = sg_virt(sg);
1043 }
1044 } else {
1045 unsigned int remaining = sg->length - offset;
1046 host->pull_data(host, (void *)(buf + offset),
1047 remaining);
1048 nbytes += remaining;
1049
1050 flush_dcache_page(sg_page(sg));
1051 host->sg = sg = sg_next(sg);
1052 if (!sg)
1053 goto done;
1054
1055 offset = len - remaining;
1056 buf = sg_virt(sg);
1057 host->pull_data(host, buf, offset);
1058 nbytes += offset;
1059 }
1060
1061 status = mci_readl(host, MINTSTS);
1062 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
1063 if (status & DW_MCI_DATA_ERROR_FLAGS) {
1064 host->data_status = status;
1065 data->bytes_xfered += nbytes;
1066 smp_wmb();
1067
1068 set_bit(EVENT_DATA_ERROR, &host->pending_events);
1069
1070 tasklet_schedule(&host->tasklet);
1071 return;
1072 }
1073 count++;
1074 } while (status & SDMMC_INT_RXDR); /* if the RXDR is ready, read again */
1075 len = SDMMC_GET_FCNT(mci_readl(host, STATUS));
1076 host->pio_offset = offset;
1077 data->bytes_xfered += nbytes;
1078 return;
1079
1080done:
1081 data->bytes_xfered += nbytes;
1082 smp_wmb();
1083 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
1084}
1085
1086static void dw_mci_write_data_pio(struct dw_mci *host)
1087{
1088 struct scatterlist *sg = host->sg;
1089 void *buf = sg_virt(sg);
1090 unsigned int offset = host->pio_offset;
1091 struct mmc_data *data = host->data;
1092 int shift = host->data_shift;
1093 u32 status;
1094 unsigned int nbytes = 0, len;
1095
1096 do {
1097 len = SDMMC_FIFO_SZ -
1098 (SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift);
1099 if (offset + len <= sg->length) {
1100 host->push_data(host, (void *)(buf + offset), len);
1101
1102 offset += len;
1103 nbytes += len;
1104 if (offset == sg->length) {
1105 host->sg = sg = sg_next(sg);
1106 if (!sg)
1107 goto done;
1108
1109 offset = 0;
1110 buf = sg_virt(sg);
1111 }
1112 } else {
1113 unsigned int remaining = sg->length - offset;
1114
1115 host->push_data(host, (void *)(buf + offset),
1116 remaining);
1117 nbytes += remaining;
1118
1119 host->sg = sg = sg_next(sg);
1120 if (!sg)
1121 goto done;
1122
1123 offset = len - remaining;
1124 buf = sg_virt(sg);
1125 host->push_data(host, (void *)buf, offset);
1126 nbytes += offset;
1127 }
1128
1129 status = mci_readl(host, MINTSTS);
1130 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
1131 if (status & DW_MCI_DATA_ERROR_FLAGS) {
1132 host->data_status = status;
1133 data->bytes_xfered += nbytes;
1134
1135 smp_wmb();
1136
1137 set_bit(EVENT_DATA_ERROR, &host->pending_events);
1138
1139 tasklet_schedule(&host->tasklet);
1140 return;
1141 }
1142 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
1143
1144 host->pio_offset = offset;
1145 data->bytes_xfered += nbytes;
1146
1147 return;
1148
1149done:
1150 data->bytes_xfered += nbytes;
1151 smp_wmb();
1152 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
1153}
1154
1155static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
1156{
1157 if (!host->cmd_status)
1158 host->cmd_status = status;
1159
1160 smp_wmb();
1161
1162 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
1163 tasklet_schedule(&host->tasklet);
1164}
1165
1166static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
1167{
1168 struct dw_mci *host = dev_id;
1169 u32 status, pending;
1170 unsigned int pass_count = 0;
1171
1172 do {
1173 status = mci_readl(host, RINTSTS);
1174 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
1175
1176 /*
1177 * DTO fix - version 2.10a and below, and only if internal DMA
1178 * is configured.
1179 */
1180 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
1181 if (!pending &&
1182 ((mci_readl(host, STATUS) >> 17) & 0x1fff))
1183 pending |= SDMMC_INT_DATA_OVER;
1184 }
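		/*
		 * Editorial note: the STATUS field read here appears to be
		 * the FIFO fill count, so on the affected IP versions
		 * "nothing pending but data left in the FIFO" is treated
		 * as an implied data-over event.
		 */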
1185
1186 if (!pending)
1187 break;
1188
1189 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
1190 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
1191 host->cmd_status = status;
1192 smp_wmb();
1193 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
1194 tasklet_schedule(&host->tasklet);
1195 }
1196
1197 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
1198 /* if there is an error report DATA_ERROR */
1199 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
1200 host->data_status = status;
1201 smp_wmb();
1202 set_bit(EVENT_DATA_ERROR, &host->pending_events);
1203 tasklet_schedule(&host->tasklet);
1204 }
1205
1206 if (pending & SDMMC_INT_DATA_OVER) {
1207 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
1208 if (!host->data_status)
1209 host->data_status = status;
1210 smp_wmb();
1211 if (host->dir_status == DW_MCI_RECV_STATUS) {
1212 if (host->sg != NULL)
1213 dw_mci_read_data_pio(host);
1214 }
1215 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
1216 tasklet_schedule(&host->tasklet);
1217 }
1218
1219 if (pending & SDMMC_INT_RXDR) {
1220 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
1221 if (host->sg)
1222 dw_mci_read_data_pio(host);
1223 }
1224
1225 if (pending & SDMMC_INT_TXDR) {
1226 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
1227 if (host->sg)
1228 dw_mci_write_data_pio(host);
1229 }
1230
1231 if (pending & SDMMC_INT_CMD_DONE) {
1232 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
1233 dw_mci_cmd_interrupt(host, status);
1234 }
1235
1236 if (pending & SDMMC_INT_CD) {
1237 mci_writel(host, RINTSTS, SDMMC_INT_CD);
1238 tasklet_schedule(&host->card_tasklet);
1239 }
1240
1241 } while (pass_count++ < 5);
1242
1243#ifdef CONFIG_MMC_DW_IDMAC
1244 /* Handle DMA interrupts */
1245 pending = mci_readl(host, IDSTS);
1246 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
1247 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
1248 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
1249 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
1250 host->dma_ops->complete(host);
1251 }
1252#endif
1253
1254 return IRQ_HANDLED;
1255}
1256
1257static void dw_mci_tasklet_card(unsigned long data)
1258{
1259 struct dw_mci *host = (struct dw_mci *)data;
1260 int i;
1261
1262 for (i = 0; i < host->num_slots; i++) {
1263 struct dw_mci_slot *slot = host->slot[i];
1264 struct mmc_host *mmc = slot->mmc;
1265 struct mmc_request *mrq;
1266 int present;
1267 u32 ctrl;
1268
1269 present = dw_mci_get_cd(mmc);
1270 while (present != slot->last_detect_state) {
1271 spin_lock(&host->lock);
1272
1273 dev_dbg(&slot->mmc->class_dev, "card %s\n",
1274 present ? "inserted" : "removed");
1275
1276 /* Card change detected */
1277 slot->last_detect_state = present;
1278
1279 /* Power up slot */
1280 if (present != 0) {
1281 if (host->pdata->setpower)
1282 host->pdata->setpower(slot->id,
1283 mmc->ocr_avail);
1284
1285 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1286 }
1287
1288 /* Clean up queue if present */
1289 mrq = slot->mrq;
1290 if (mrq) {
1291 if (mrq == host->mrq) {
1292 host->data = NULL;
1293 host->cmd = NULL;
1294
1295 switch (host->state) {
1296 case STATE_IDLE:
1297 break;
1298 case STATE_SENDING_CMD:
1299 mrq->cmd->error = -ENOMEDIUM;
1300 if (!mrq->data)
1301 break;
1302 /* fall through */
1303 case STATE_SENDING_DATA:
1304 mrq->data->error = -ENOMEDIUM;
1305 dw_mci_stop_dma(host);
1306 break;
1307 case STATE_DATA_BUSY:
1308 case STATE_DATA_ERROR:
1309 if (mrq->data->error == -EINPROGRESS)
1310 mrq->data->error = -ENOMEDIUM;
1311 if (!mrq->stop)
1312 break;
1313 /* fall through */
1314 case STATE_SENDING_STOP:
1315 mrq->stop->error = -ENOMEDIUM;
1316 break;
1317 }
1318
1319 dw_mci_request_end(host, mrq);
1320 } else {
1321 list_del(&slot->queue_node);
1322 mrq->cmd->error = -ENOMEDIUM;
1323 if (mrq->data)
1324 mrq->data->error = -ENOMEDIUM;
1325 if (mrq->stop)
1326 mrq->stop->error = -ENOMEDIUM;
1327
1328 spin_unlock(&host->lock);
1329 mmc_request_done(slot->mmc, mrq);
1330 spin_lock(&host->lock);
1331 }
1332 }
1333
1334 /* Power down slot */
1335 if (present == 0) {
1336 if (host->pdata->setpower)
1337 host->pdata->setpower(slot->id, 0);
1338 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1339
1340 /*
1341 * Clearing down the FIFO generates a
1342 * block interrupt, so set the scatter-gather
1343 * pointer to NULL first.
1344 */
1345 host->sg = NULL;
1346
1347 ctrl = mci_readl(host, CTRL);
1348 ctrl |= SDMMC_CTRL_FIFO_RESET;
1349 mci_writel(host, CTRL, ctrl);
1350
1351#ifdef CONFIG_MMC_DW_IDMAC
1352 ctrl = mci_readl(host, BMOD);
1353 ctrl |= 0x01; /* Software reset of DMA */
1354 mci_writel(host, BMOD, ctrl);
1355#endif
1356
1357 }
1358
1359 spin_unlock(&host->lock);
1360 present = dw_mci_get_cd(mmc);
1361 }
1362
1363 mmc_detect_change(slot->mmc,
1364 msecs_to_jiffies(host->pdata->detect_delay_ms));
1365 }
1366}
1367
1368static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
1369{
1370 struct mmc_host *mmc;
1371 struct dw_mci_slot *slot;
1372
1373 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), &host->pdev->dev);
1374 if (!mmc)
1375 return -ENOMEM;
1376
1377 slot = mmc_priv(mmc);
1378 slot->id = id;
1379 slot->mmc = mmc;
1380 slot->host = host;
1381
1382 mmc->ops = &dw_mci_ops;
1383 mmc->f_min = DIV_ROUND_UP(host->bus_hz, 510);
1384 mmc->f_max = host->bus_hz;
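	/*
	 * Editorial note: CLKDIV holds an 8-bit divider and the card clock
	 * is bus_hz / (2 * divider), so the slowest reachable clock is
	 * bus_hz / (2 * 255) = bus_hz / 510 - hence the constant above.
	 */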
1385
1386 if (host->pdata->get_ocr)
1387 mmc->ocr_avail = host->pdata->get_ocr(id);
1388 else
1389 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1390
1391 /*
1392 * Start with slot power disabled, it will be enabled when a card
1393 * is detected.
1394 */
1395 if (host->pdata->setpower)
1396 host->pdata->setpower(id, 0);
1397
1398 mmc->caps = 0;
1399 if (host->pdata->get_bus_wd)
1400 if (host->pdata->get_bus_wd(slot->id) >= 4)
1401 mmc->caps |= MMC_CAP_4_BIT_DATA;
1402
1403 if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED)
1404 mmc->caps |= MMC_CAP_SD_HIGHSPEED;
1405
1406#ifdef CONFIG_MMC_DW_IDMAC
1407 mmc->max_segs = host->ring_size;
1408 mmc->max_blk_size = 65536;
1409 mmc->max_blk_count = host->ring_size;
1410 mmc->max_seg_size = 0x1000;
1411 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
1412#else
1413 if (host->pdata->blk_settings) {
1414 mmc->max_segs = host->pdata->blk_settings->max_segs;
1415 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
1416 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
1417 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
1418 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
1419 } else {
1420 /* Useful defaults if platform data is unset. */
1421 mmc->max_segs = 64;
1422 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
1423 mmc->max_blk_count = 512;
1424 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
1425 mmc->max_seg_size = mmc->max_req_size;
1426 }
1427#endif /* CONFIG_MMC_DW_IDMAC */
1428
1429 if (dw_mci_get_cd(mmc))
1430 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1431 else
1432 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1433
1434 host->slot[id] = slot;
1435 mmc_add_host(mmc);
1436
1437#if defined(CONFIG_DEBUG_FS)
1438 dw_mci_init_debugfs(slot);
1439#endif
1440
1441 /* Card initially undetected */
1442 slot->last_detect_state = 0;
1443
1444 return 0;
1445}
1446
1447static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
1448{
1449 /* Shutdown detect IRQ */
1450 if (slot->host->pdata->exit)
1451 slot->host->pdata->exit(id);
1452
1453 /* Debugfs stuff is cleaned up by mmc core */
1454 mmc_remove_host(slot->mmc);
1455 slot->host->slot[id] = NULL;
1456 mmc_free_host(slot->mmc);
1457}
1458
1459static void dw_mci_init_dma(struct dw_mci *host)
1460{
1461 /* Alloc memory for sg translation */
1462 host->sg_cpu = dma_alloc_coherent(&host->pdev->dev, PAGE_SIZE,
1463 &host->sg_dma, GFP_KERNEL);
1464 if (!host->sg_cpu) {
1465 dev_err(&host->pdev->dev, "%s: could not alloc DMA memory\n",
1466 __func__);
1467 goto no_dma;
1468 }
1469
1470 /* Determine which DMA interface to use */
1471#ifdef CONFIG_MMC_DW_IDMAC
1472 host->dma_ops = &dw_mci_idmac_ops;
1473 dev_info(&host->pdev->dev, "Using internal DMA controller.\n");
1474#endif
1475
1476 if (!host->dma_ops)
1477 goto no_dma;
1478
1479 if (host->dma_ops->init) {
1480 if (host->dma_ops->init(host)) {
1481 dev_err(&host->pdev->dev, "%s: Unable to initialize "
1482 "DMA Controller.\n", __func__);
1483 goto no_dma;
1484 }
1485 } else {
1486 dev_err(&host->pdev->dev, "DMA initialization not found.\n");
1487 goto no_dma;
1488 }
1489
1490 host->use_dma = 1;
1491 return;
1492
1493no_dma:
1494 dev_info(&host->pdev->dev, "Using PIO mode.\n");
1495 host->use_dma = 0;
1496 return;
1497}
1498
1499static bool mci_wait_reset(struct device *dev, struct dw_mci *host)
1500{
1501 unsigned long timeout = jiffies + msecs_to_jiffies(500);
1502 unsigned int ctrl;
1503
1504 mci_writel(host, CTRL, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
1505 SDMMC_CTRL_DMA_RESET));
1506
1507 /* wait till resets clear */
1508 do {
1509 ctrl = mci_readl(host, CTRL);
1510 if (!(ctrl & (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
1511 SDMMC_CTRL_DMA_RESET)))
1512 return true;
1513 } while (time_before(jiffies, timeout));
1514
1515 dev_err(dev, "Timeout resetting block (ctrl %#x)\n", ctrl);
1516
1517 return false;
1518}
1519
1520static int dw_mci_probe(struct platform_device *pdev)
1521{
1522 struct dw_mci *host;
1523 struct resource *regs;
1524 struct dw_mci_board *pdata;
1525 int irq, ret, i, width;
1526 u32 fifo_size;
1527
1528 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1529 if (!regs)
1530 return -ENXIO;
1531
1532 irq = platform_get_irq(pdev, 0);
1533 if (irq < 0)
1534 return irq;
1535
1536 host = kzalloc(sizeof(struct dw_mci), GFP_KERNEL);
1537 if (!host)
1538 return -ENOMEM;
1539
1540 host->pdev = pdev;
1541 host->pdata = pdata = pdev->dev.platform_data;
1542 if (!pdata || !pdata->init) {
1543 dev_err(&pdev->dev,
1544 "Platform data must supply init function\n");
1545 ret = -ENODEV;
1546 goto err_freehost;
1547 }
1548
1549 if (!pdata->select_slot && pdata->num_slots > 1) {
1550 dev_err(&pdev->dev,
1551 "Platform data must supply select_slot function\n");
1552 ret = -ENODEV;
1553 goto err_freehost;
1554 }
1555
1556 if (!pdata->bus_hz) {
1557 dev_err(&pdev->dev,
1558 "Platform data must supply bus speed\n");
1559 ret = -ENODEV;
1560 goto err_freehost;
1561 }
1562
1563 host->bus_hz = pdata->bus_hz;
1564 host->quirks = pdata->quirks;
1565
1566 spin_lock_init(&host->lock);
1567 INIT_LIST_HEAD(&host->queue);
1568
1569 ret = -ENOMEM;
1570 host->regs = ioremap(regs->start, regs->end - regs->start + 1);
1571 if (!host->regs)
1572 goto err_freehost;
1573
1574 host->dma_ops = pdata->dma_ops;
1575 dw_mci_init_dma(host);
1576
1577 /*
1578 * Get the host data width - this assumes that HCON has been set with
1579 * the correct values.
1580 */
1581 i = (mci_readl(host, HCON) >> 7) & 0x7;
1582 if (!i) {
1583 host->push_data = dw_mci_push_data16;
1584 host->pull_data = dw_mci_pull_data16;
1585 width = 16;
1586 host->data_shift = 1;
1587 } else if (i == 2) {
1588 host->push_data = dw_mci_push_data64;
1589 host->pull_data = dw_mci_pull_data64;
1590 width = 64;
1591 host->data_shift = 3;
1592 } else {
1593 /* Check for a reserved value, and warn if one is found */
1594 WARN((i != 1),
1595 "HCON reports a reserved host data width!\n"
1596 "Defaulting to 32-bit access.\n");
1597 host->push_data = dw_mci_push_data32;
1598 host->pull_data = dw_mci_pull_data32;
1599 width = 32;
1600 host->data_shift = 2;
1601 }
1602
1603 /* Reset all blocks */
1604 if (!mci_wait_reset(&pdev->dev, host)) {
1605 ret = -ENODEV;
1606 goto err_dmaunmap;
1607 }
1608
1609 /* Clear the interrupts for the host controller */
1610 mci_writel(host, RINTSTS, 0xFFFFFFFF);
1611 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
1612
1613 /* Put in max timeout */
1614 mci_writel(host, TMOUT, 0xFFFFFFFF);
1615
1616 /*
1617 * FIFO threshold settings: RX mark = fifo_size / 2 - 1,
1618 * TX mark = fifo_size / 2, DMA size = 8
1619 */
1620 fifo_size = mci_readl(host, FIFOTH);
1621 fifo_size = (fifo_size >> 16) & 0x7ff;
1622 mci_writel(host, FIFOTH, ((0x2 << 28) | ((fifo_size/2 - 1) << 16) |
1623 ((fifo_size/2) << 0)));
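	/*
	 * Worked example (editorial, assuming the reset value of the RX
	 * watermark field encodes the FIFO depth): for fifo_size = 32 this
	 * writes (0x2 << 28) | (15 << 16) | 16, i.e. a DMA
	 * multiple-transaction size of 8, RX mark 15 and TX mark 16.
	 */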
1624
1625 /* disable clock to CIU */
1626 mci_writel(host, CLKENA, 0);
1627 mci_writel(host, CLKSRC, 0);
1628
1629 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
1630 tasklet_init(&host->card_tasklet,
1631 dw_mci_tasklet_card, (unsigned long)host);
1632
1633 ret = request_irq(irq, dw_mci_interrupt, 0, "dw-mci", host);
1634 if (ret)
1635 goto err_dmaunmap;
1636
1637 platform_set_drvdata(pdev, host);
1638
1639 if (host->pdata->num_slots)
1640 host->num_slots = host->pdata->num_slots;
1641 else
1642 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
1643
1644 /* We need at least one slot to succeed */
1645 for (i = 0; i < host->num_slots; i++) {
1646 ret = dw_mci_init_slot(host, i);
1647 if (ret) {
1648 ret = -ENODEV;
1649 goto err_init_slot;
1650 }
1651 }
1652
1653 /*
1654 * Enable interrupts for command done, data over, data empty, card detect,
1655 * receive ready, and errors such as transmit/receive timeout and CRC error
1656 */
1657 mci_writel(host, RINTSTS, 0xFFFFFFFF);
1658 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
1659 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
1660 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
1661 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
1662
1663 dev_info(&pdev->dev, "DW MMC controller at irq %d, "
1664 "%d bit host data width\n", irq, width);
1665 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
1666 dev_info(&pdev->dev, "Internal DMAC interrupt fix enabled.\n");
1667
1668 return 0;
1669
1670err_init_slot:
1671 /* De-init any initialized slots */
1672 while (i > 0) {
1673 if (host->slot[i])
1674 dw_mci_cleanup_slot(host->slot[i], i);
1675 i--;
1676 }
1677 free_irq(irq, host);
1678
1679err_dmaunmap:
1680 if (host->use_dma && host->dma_ops->exit)
1681 host->dma_ops->exit(host);
1682 dma_free_coherent(&host->pdev->dev, PAGE_SIZE,
1683 host->sg_cpu, host->sg_dma);
1684 iounmap(host->regs);
1685
1686err_freehost:
1687 kfree(host);
1688 return ret;
1689}
1690
1691static int __exit dw_mci_remove(struct platform_device *pdev)
1692{
1693 struct dw_mci *host = platform_get_drvdata(pdev);
1694 int i;
1695
1696 mci_writel(host, RINTSTS, 0xFFFFFFFF);
1697 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
1698
1699 platform_set_drvdata(pdev, NULL);
1700
1701 for (i = 0; i < host->num_slots; i++) {
1702 dev_dbg(&pdev->dev, "remove slot %d\n", i);
1703 if (host->slot[i])
1704 dw_mci_cleanup_slot(host->slot[i], i);
1705 }
1706
1707 /* disable clock to CIU */
1708 mci_writel(host, CLKENA, 0);
1709 mci_writel(host, CLKSRC, 0);
1710
1711 free_irq(platform_get_irq(pdev, 0), host);
1712 dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
1713
1714 if (host->use_dma && host->dma_ops->exit)
1715 host->dma_ops->exit(host);
1716
1717 iounmap(host->regs);
1718
1719 kfree(host);
1720 return 0;
1721}
1722
1723#ifdef CONFIG_PM
1724/*
1725 * TODO: we should probably disable the clock to the card in the suspend path.
1726 */
1727static int dw_mci_suspend(struct platform_device *pdev, pm_message_t mesg)
1728{
1729 int i, ret;
1730 struct dw_mci *host = platform_get_drvdata(pdev);
1731
1732 for (i = 0; i < host->num_slots; i++) {
1733 struct dw_mci_slot *slot = host->slot[i];
1734 if (!slot)
1735 continue;
1736 ret = mmc_suspend_host(slot->mmc);
1737 if (ret < 0) {
1738 while (--i >= 0) {
1739 slot = host->slot[i];
1740 if (slot)
1741 mmc_resume_host(host->slot[i]->mmc);
1742 }
1743 return ret;
1744 }
1745 }
1746
1747 return 0;
1748}
1749
1750static int dw_mci_resume(struct platform_device *pdev)
1751{
1752 int i, ret;
1753 struct dw_mci *host = platform_get_drvdata(pdev);
1754
1755 for (i = 0; i < host->num_slots; i++) {
1756 struct dw_mci_slot *slot = host->slot[i];
1757 if (!slot)
1758 continue;
1759 ret = mmc_resume_host(host->slot[i]->mmc);
1760 if (ret < 0)
1761 return ret;
1762 }
1763
1764 return 0;
1765}
1766#else
1767#define dw_mci_suspend NULL
1768#define dw_mci_resume NULL
1769#endif /* CONFIG_PM */
1770
1771static struct platform_driver dw_mci_driver = {
1772 .remove = __exit_p(dw_mci_remove),
1773 .suspend = dw_mci_suspend,
1774 .resume = dw_mci_resume,
1775 .driver = {
1776 .name = "dw_mmc",
1777 },
1778};
1779
1780static int __init dw_mci_init(void)
1781{
1782 return platform_driver_probe(&dw_mci_driver, dw_mci_probe);
1783}
1784
1785static void __exit dw_mci_exit(void)
1786{
1787 platform_driver_unregister(&dw_mci_driver);
1788}
1789
1790module_init(dw_mci_init);
1791module_exit(dw_mci_exit);
1792
1793MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
1794MODULE_AUTHOR("NXP Semiconductor VietNam");
1795MODULE_AUTHOR("Imagination Technologies Ltd");
1796MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
new file mode 100644
index 000000000000..5dd55a75233d
--- /dev/null
+++ b/drivers/mmc/host/dw_mmc.h
@@ -0,0 +1,168 @@
1/*
2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
4 *
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#ifndef _DW_MMC_H_
15#define _DW_MMC_H_
16
17#define SDMMC_CTRL 0x000
18#define SDMMC_PWREN 0x004
19#define SDMMC_CLKDIV 0x008
20#define SDMMC_CLKSRC 0x00c
21#define SDMMC_CLKENA 0x010
22#define SDMMC_TMOUT 0x014
23#define SDMMC_CTYPE 0x018
24#define SDMMC_BLKSIZ 0x01c
25#define SDMMC_BYTCNT 0x020
26#define SDMMC_INTMASK 0x024
27#define SDMMC_CMDARG 0x028
28#define SDMMC_CMD 0x02c
29#define SDMMC_RESP0 0x030
30#define SDMMC_RESP1 0x034
31#define SDMMC_RESP2 0x038
32#define SDMMC_RESP3 0x03c
33#define SDMMC_MINTSTS 0x040
34#define SDMMC_RINTSTS 0x044
35#define SDMMC_STATUS 0x048
36#define SDMMC_FIFOTH 0x04c
37#define SDMMC_CDETECT 0x050
38#define SDMMC_WRTPRT 0x054
39#define SDMMC_GPIO 0x058
40#define SDMMC_TCBCNT 0x05c
41#define SDMMC_TBBCNT 0x060
42#define SDMMC_DEBNCE 0x064
43#define SDMMC_USRID 0x068
44#define SDMMC_VERID 0x06c
45#define SDMMC_HCON 0x070
46#define SDMMC_BMOD 0x080
47#define SDMMC_PLDMND 0x084
48#define SDMMC_DBADDR 0x088
49#define SDMMC_IDSTS 0x08c
50#define SDMMC_IDINTEN 0x090
51#define SDMMC_DSCADDR 0x094
52#define SDMMC_BUFADDR 0x098
53#define SDMMC_DATA 0x100
54#define SDMMC_DATA_ADR 0x100
55
56/* shift bit field */
57#define _SBF(f, v) ((v) << (f))
58
59/* Control register defines */
60#define SDMMC_CTRL_USE_IDMAC BIT(25)
61#define SDMMC_CTRL_CEATA_INT_EN BIT(11)
62#define SDMMC_CTRL_SEND_AS_CCSD BIT(10)
63#define SDMMC_CTRL_SEND_CCSD BIT(9)
64#define SDMMC_CTRL_ABRT_READ_DATA BIT(8)
65#define SDMMC_CTRL_SEND_IRQ_RESP BIT(7)
66#define SDMMC_CTRL_READ_WAIT BIT(6)
67#define SDMMC_CTRL_DMA_ENABLE BIT(5)
68#define SDMMC_CTRL_INT_ENABLE BIT(4)
69#define SDMMC_CTRL_DMA_RESET BIT(2)
70#define SDMMC_CTRL_FIFO_RESET BIT(1)
71#define SDMMC_CTRL_RESET BIT(0)
72/* Clock Enable register defines */
73#define SDMMC_CLKEN_LOW_PWR BIT(16)
74#define SDMMC_CLKEN_ENABLE BIT(0)
75/* time-out register defines */
76#define SDMMC_TMOUT_DATA(n) _SBF(8, (n))
77#define SDMMC_TMOUT_DATA_MSK 0xFFFFFF00
78#define SDMMC_TMOUT_RESP(n) ((n) & 0xFF)
79#define SDMMC_TMOUT_RESP_MSK 0xFF
80/* card-type register defines */
81#define SDMMC_CTYPE_8BIT BIT(16)
82#define SDMMC_CTYPE_4BIT BIT(0)
83#define SDMMC_CTYPE_1BIT 0
84/* Interrupt status & mask register defines */
85#define SDMMC_INT_SDIO BIT(16)
86#define SDMMC_INT_EBE BIT(15)
87#define SDMMC_INT_ACD BIT(14)
88#define SDMMC_INT_SBE BIT(13)
89#define SDMMC_INT_HLE BIT(12)
90#define SDMMC_INT_FRUN BIT(11)
91#define SDMMC_INT_HTO BIT(10)
92#define SDMMC_INT_DTO BIT(9)
93#define SDMMC_INT_RTO BIT(8)
94#define SDMMC_INT_DCRC BIT(7)
95#define SDMMC_INT_RCRC BIT(6)
96#define SDMMC_INT_RXDR BIT(5)
97#define SDMMC_INT_TXDR BIT(4)
98#define SDMMC_INT_DATA_OVER BIT(3)
99#define SDMMC_INT_CMD_DONE BIT(2)
100#define SDMMC_INT_RESP_ERR BIT(1)
101#define SDMMC_INT_CD BIT(0)
102#define SDMMC_INT_ERROR 0xbfc2
103/* Command register defines */
104#define SDMMC_CMD_START BIT(31)
105#define SDMMC_CMD_CCS_EXP BIT(23)
106#define SDMMC_CMD_CEATA_RD BIT(22)
107#define SDMMC_CMD_UPD_CLK BIT(21)
108#define SDMMC_CMD_INIT BIT(15)
109#define SDMMC_CMD_STOP BIT(14)
110#define SDMMC_CMD_PRV_DAT_WAIT BIT(13)
111#define SDMMC_CMD_SEND_STOP BIT(12)
112#define SDMMC_CMD_STRM_MODE BIT(11)
113#define SDMMC_CMD_DAT_WR BIT(10)
114#define SDMMC_CMD_DAT_EXP BIT(9)
115#define SDMMC_CMD_RESP_CRC BIT(8)
116#define SDMMC_CMD_RESP_LONG BIT(7)
117#define SDMMC_CMD_RESP_EXP BIT(6)
118#define SDMMC_CMD_INDX(n) ((n) & 0x1F)
119/* Status register defines */
120#define SDMMC_GET_FCNT(x) (((x)>>17) & 0x1FF)
121#define SDMMC_FIFO_SZ 32
122/* Internal DMAC interrupt defines */
123#define SDMMC_IDMAC_INT_AI BIT(9)
124#define SDMMC_IDMAC_INT_NI BIT(8)
125#define SDMMC_IDMAC_INT_CES BIT(5)
126#define SDMMC_IDMAC_INT_DU BIT(4)
127#define SDMMC_IDMAC_INT_FBE BIT(2)
128#define SDMMC_IDMAC_INT_RI BIT(1)
129#define SDMMC_IDMAC_INT_TI BIT(0)
130/* Internal DMAC bus mode bits */
131#define SDMMC_IDMAC_ENABLE BIT(7)
132#define SDMMC_IDMAC_FB BIT(1)
133#define SDMMC_IDMAC_SWRESET BIT(0)
134
135/* Register access macros */
136#define mci_readl(dev, reg) \
137 __raw_readl(dev->regs + SDMMC_##reg)
138#define mci_writel(dev, reg, value) \
139 __raw_writel((value), dev->regs + SDMMC_##reg)
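/*
 * Example (editorial): mci_readl(host, STATUS) expands to
 * __raw_readl(host->regs + SDMMC_STATUS), so callers refer to registers
 * by their SDMMC_ suffix instead of passing raw offsets.
 */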
140
141/* 16-bit FIFO access macros */
142#define mci_readw(dev, reg) \
143 __raw_readw(dev->regs + SDMMC_##reg)
144#define mci_writew(dev, reg, value) \
145 __raw_writew((value), dev->regs + SDMMC_##reg)
146
147/* 64-bit FIFO access macros */
148#ifdef readq
149#define mci_readq(dev, reg) \
150 __raw_readq(dev->regs + SDMMC_##reg)
151#define mci_writeq(dev, reg, value) \
152 __raw_writeq((value), dev->regs + SDMMC_##reg)
153#else
154/*
155 * Dummy readq implementation for architectures that don't define it.
156 *
157 * We would assume that none of these architectures would configure
158 * the IP block with a 64bit FIFO width, so this code will never be
159 * executed on those machines. Defining these macros here keeps the
160 * rest of the code free from ifdefs.
161 */
162#define mci_readq(dev, reg) \
163 (*(volatile u64 __force *)(dev->regs + SDMMC_##reg))
164#define mci_writeq(dev, reg, value) \
165 (*(volatile u64 __force *)(dev->regs + SDMMC_##reg) = value)
166#endif
167
168#endif /* _DW_MMC_H_ */
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index bdd2cbb87cba..4428594261c5 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -31,6 +31,7 @@
 #include <linux/clk.h>
 #include <linux/io.h>
 #include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
 
 #include <asm/dma.h>
 #include <asm/irq.h>
@@ -141,10 +142,49 @@ struct mxcmci_host {
 
 	struct work_struct datawork;
 	spinlock_t lock;
+
+	struct regulator *vcc;
 };
 
 static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios);
 
+static inline void mxcmci_init_ocr(struct mxcmci_host *host)
+{
+	host->vcc = regulator_get(mmc_dev(host->mmc), "vmmc");
+
+	if (IS_ERR(host->vcc)) {
+		host->vcc = NULL;
+	} else {
+		host->mmc->ocr_avail = mmc_regulator_get_ocrmask(host->vcc);
+		if (host->pdata && host->pdata->ocr_avail)
+			dev_warn(mmc_dev(host->mmc),
+				"pdata->ocr_avail will not be used\n");
+	}
+
+	if (host->vcc == NULL) {
+		/* fall-back to platform data */
+		if (host->pdata && host->pdata->ocr_avail)
+			host->mmc->ocr_avail = host->pdata->ocr_avail;
+		else
+			host->mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+	}
+}
+
+static inline void mxcmci_set_power(struct mxcmci_host *host,
+				    unsigned char power_mode,
+				    unsigned int vdd)
+{
+	if (host->vcc) {
+		if (power_mode == MMC_POWER_UP)
+			mmc_regulator_set_ocr(host->mmc, host->vcc, vdd);
+		else if (power_mode == MMC_POWER_OFF)
+			mmc_regulator_set_ocr(host->mmc, host->vcc, 0);
+	}
+
+	if (host->pdata && host->pdata->setpower)
+		host->pdata->setpower(mmc_dev(host->mmc), vdd);
+}
+
 static inline int mxcmci_use_dma(struct mxcmci_host *host)
 {
 	return host->do_dma;
@@ -680,9 +720,9 @@ static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 		host->cmdat &= ~CMD_DAT_CONT_BUS_WIDTH_4;
 
 	if (host->power_mode != ios->power_mode) {
-		if (host->pdata && host->pdata->setpower)
-			host->pdata->setpower(mmc_dev(mmc), ios->vdd);
+		mxcmci_set_power(host, ios->power_mode, ios->vdd);
 		host->power_mode = ios->power_mode;
+
 		if (ios->power_mode == MMC_POWER_ON)
 			host->cmdat |= CMD_DAT_CONT_INIT;
 	}
@@ -807,10 +847,7 @@ static int mxcmci_probe(struct platform_device *pdev)
 	host->pdata = pdev->dev.platform_data;
 	spin_lock_init(&host->lock);
 
-	if (host->pdata && host->pdata->ocr_avail)
-		mmc->ocr_avail = host->pdata->ocr_avail;
-	else
-		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+	mxcmci_init_ocr(host);
 
 	if (host->pdata && host->pdata->dat3_card_detect)
 		host->default_irq_mask =
@@ -915,6 +952,9 @@ static int mxcmci_remove(struct platform_device *pdev)
 
 	mmc_remove_host(mmc);
 
+	if (host->vcc)
+		regulator_put(host->vcc);
+
 	if (host->pdata && host->pdata->exit)
 		host->pdata->exit(&pdev->dev, mmc);
 
@@ -927,7 +967,6 @@ static int mxcmci_remove(struct platform_device *pdev)
 	clk_put(host->clk);
 
 	release_mem_region(host->res->start, resource_size(host->res));
-	release_resource(host->res);
 
 	mmc_free_host(mmc);
 
diff --git a/drivers/mmc/host/sdhci-dove.c b/drivers/mmc/host/sdhci-dove.c
new file mode 100644
index 000000000000..2aeef4ffed8c
--- /dev/null
+++ b/drivers/mmc/host/sdhci-dove.c
@@ -0,0 +1,70 @@
1/*
2 * sdhci-dove.c Support for SDHCI on Marvell's Dove SoC
3 *
4 * Author: Saeed Bishara <saeed@marvell.com>
5 * Mike Rapoport <mike@compulab.co.il>
6 * Based on sdhci-cns3xxx.c
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */
21
22#include <linux/io.h>
23#include <linux/mmc/host.h>
24
25#include "sdhci.h"
26#include "sdhci-pltfm.h"
27
28static u16 sdhci_dove_readw(struct sdhci_host *host, int reg)
29{
30 u16 ret;
31
32 switch (reg) {
33 case SDHCI_HOST_VERSION:
34 case SDHCI_SLOT_INT_STATUS:
35 /* those registers don't exist */
36 return 0;
37 default:
38 ret = readw(host->ioaddr + reg);
39 }
40 return ret;
41}
42
43static u32 sdhci_dove_readl(struct sdhci_host *host, int reg)
44{
45 u32 ret;
46
47 switch (reg) {
48 case SDHCI_CAPABILITIES:
49 ret = readl(host->ioaddr + reg);
50 /* Mask the support for 3.0V */
51 ret &= ~SDHCI_CAN_VDD_300;
52 break;
53 default:
54 ret = readl(host->ioaddr + reg);
55 }
56 return ret;
57}
58
59static struct sdhci_ops sdhci_dove_ops = {
60 .read_w = sdhci_dove_readw,
61 .read_l = sdhci_dove_readl,
62};
63
64struct sdhci_pltfm_data sdhci_dove_pdata = {
65 .ops = &sdhci_dove_ops,
66 .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |
67 SDHCI_QUIRK_NO_BUSY_IRQ |
68 SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
69 SDHCI_QUIRK_FORCE_DMA,
70};
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 3d9c2460d437..0dc905b20eee 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -176,6 +176,74 @@ static const struct sdhci_pci_fixes sdhci_intel_mfd_emmc_sdio = {
 	.quirks		= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
 };
 
+/* O2Micro extra registers */
+#define O2_SD_LOCK_WP		0xD3
+#define O2_SD_MULTI_VCC3V	0xEE
+#define O2_SD_CLKREQ		0xEC
+#define O2_SD_CAPS		0xE0
+#define O2_SD_ADMA1		0xE2
+#define O2_SD_ADMA2		0xE7
+#define O2_SD_INF_MOD		0xF1
+
+static int o2_probe(struct sdhci_pci_chip *chip)
+{
+	int ret;
+	u8 scratch;
+
+	switch (chip->pdev->device) {
+	case PCI_DEVICE_ID_O2_8220:
+	case PCI_DEVICE_ID_O2_8221:
+	case PCI_DEVICE_ID_O2_8320:
+	case PCI_DEVICE_ID_O2_8321:
+		/* This extra setup is required due to broken ADMA. */
+		ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
+		if (ret)
+			return ret;
+		scratch &= 0x7f;
+		pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+
+		/* Set Multi 3 to VCC3V# */
+		pci_write_config_byte(chip->pdev, O2_SD_MULTI_VCC3V, 0x08);
+
+		/* Disable CLK_REQ# support after media DET */
+		ret = pci_read_config_byte(chip->pdev, O2_SD_CLKREQ, &scratch);
+		if (ret)
+			return ret;
+		scratch |= 0x20;
+		pci_write_config_byte(chip->pdev, O2_SD_CLKREQ, scratch);
+
+		/*
+		 * Choose capabilities, enable SDMA. We have to write 0x01
+		 * to the capabilities register first to unlock it.
+		 */
+		ret = pci_read_config_byte(chip->pdev, O2_SD_CAPS, &scratch);
+		if (ret)
+			return ret;
+		scratch |= 0x01;
+		pci_write_config_byte(chip->pdev, O2_SD_CAPS, scratch);
+		pci_write_config_byte(chip->pdev, O2_SD_CAPS, 0x73);
+
+		/* Disable ADMA1/2 */
+		pci_write_config_byte(chip->pdev, O2_SD_ADMA1, 0x39);
+		pci_write_config_byte(chip->pdev, O2_SD_ADMA2, 0x08);
+
+		/* Disable the infinite transfer mode */
+		ret = pci_read_config_byte(chip->pdev, O2_SD_INF_MOD, &scratch);
+		if (ret)
+			return ret;
+		scratch |= 0x08;
+		pci_write_config_byte(chip->pdev, O2_SD_INF_MOD, scratch);
+
+		/* Lock WP */
+		ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
+		if (ret)
+			return ret;
+		scratch |= 0x80;
+		pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+	}
+
+	return 0;
+}
+
 static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
 {
 	u8 scratch;
@@ -204,6 +272,7 @@ static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
 static int jmicron_probe(struct sdhci_pci_chip *chip)
 {
 	int ret;
+	u16 mmcdev = 0;
 
 	if (chip->pdev->revision == 0) {
 		chip->quirks |= SDHCI_QUIRK_32BIT_DMA_ADDR |
@@ -225,12 +294,17 @@ static int jmicron_probe(struct sdhci_pci_chip *chip)
 	 *  2. The MMC interface has a lower subfunction number
 	 *     than the SD interface.
 	 */
-	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_SD) {
+	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_SD)
+		mmcdev = PCI_DEVICE_ID_JMICRON_JMB38X_MMC;
+	else if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD)
+		mmcdev = PCI_DEVICE_ID_JMICRON_JMB388_ESD;
+
+	if (mmcdev) {
 		struct pci_dev *sd_dev;
 
 		sd_dev = NULL;
 		while ((sd_dev = pci_get_device(PCI_VENDOR_ID_JMICRON,
-				PCI_DEVICE_ID_JMICRON_JMB38X_MMC, sd_dev)) != NULL) {
+				mmcdev, sd_dev)) != NULL) {
 			if ((PCI_SLOT(chip->pdev->devfn) ==
 				PCI_SLOT(sd_dev->devfn)) &&
 				(chip->pdev->bus == sd_dev->bus))
@@ -290,13 +364,25 @@ static int jmicron_probe_slot(struct sdhci_pci_slot *slot)
 		slot->host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
 	}
 
+	/* JM388 MMC doesn't support 1.8V while SD supports it */
+	if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
+		slot->host->ocr_avail_sd = MMC_VDD_32_33 | MMC_VDD_33_34 |
+			MMC_VDD_29_30 | MMC_VDD_30_31 |
+			MMC_VDD_165_195; /* allow 1.8V */
+		slot->host->ocr_avail_mmc = MMC_VDD_32_33 | MMC_VDD_33_34 |
+			MMC_VDD_29_30 | MMC_VDD_30_31; /* no 1.8V for MMC */
+	}
+
 	/*
 	 * The secondary interface requires a bit set to get the
 	 * interrupts.
 	 */
-	if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC)
+	if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
+	    slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
 		jmicron_enable_mmc(slot->host, 1);
 
+	slot->host->mmc->caps |= MMC_CAP_BUS_WIDTH_TEST;
+
 	return 0;
 }
 
@@ -305,7 +391,8 @@ static void jmicron_remove_slot(struct sdhci_pci_slot *slot, int dead)
 	if (dead)
 		return;
 
-	if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC)
+	if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
+	    slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
 		jmicron_enable_mmc(slot->host, 0);
 }
 
@@ -313,7 +400,8 @@ static int jmicron_suspend(struct sdhci_pci_chip *chip, pm_message_t state)
313{ 400{
314 int i; 401 int i;
315 402
316 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC) { 403 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
404 chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
317 for (i = 0;i < chip->num_slots;i++) 405 for (i = 0;i < chip->num_slots;i++)
318 jmicron_enable_mmc(chip->slots[i]->host, 0); 406 jmicron_enable_mmc(chip->slots[i]->host, 0);
319 } 407 }
@@ -325,7 +413,8 @@ static int jmicron_resume(struct sdhci_pci_chip *chip)
325{ 413{
326 int ret, i; 414 int ret, i;
327 415
328 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC) { 416 if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
417 chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
329 for (i = 0;i < chip->num_slots;i++) 418 for (i = 0;i < chip->num_slots;i++)
330 jmicron_enable_mmc(chip->slots[i]->host, 1); 419 jmicron_enable_mmc(chip->slots[i]->host, 1);
331 } 420 }
@@ -339,6 +428,10 @@ static int jmicron_resume(struct sdhci_pci_chip *chip)
339 return 0; 428 return 0;
340} 429}
341 430
431static const struct sdhci_pci_fixes sdhci_o2 = {
432 .probe = o2_probe,
433};
434
342static const struct sdhci_pci_fixes sdhci_jmicron = { 435static const struct sdhci_pci_fixes sdhci_jmicron = {
343 .probe = jmicron_probe, 436 .probe = jmicron_probe,
344 437
@@ -510,6 +603,22 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
510 }, 603 },
511 604
512 { 605 {
606 .vendor = PCI_VENDOR_ID_JMICRON,
607 .device = PCI_DEVICE_ID_JMICRON_JMB388_SD,
608 .subvendor = PCI_ANY_ID,
609 .subdevice = PCI_ANY_ID,
610 .driver_data = (kernel_ulong_t)&sdhci_jmicron,
611 },
612
613 {
614 .vendor = PCI_VENDOR_ID_JMICRON,
615 .device = PCI_DEVICE_ID_JMICRON_JMB388_ESD,
616 .subvendor = PCI_ANY_ID,
617 .subdevice = PCI_ANY_ID,
618 .driver_data = (kernel_ulong_t)&sdhci_jmicron,
619 },
620
621 {
513 .vendor = PCI_VENDOR_ID_SYSKONNECT, 622 .vendor = PCI_VENDOR_ID_SYSKONNECT,
514 .device = 0x8000, 623 .device = 0x8000,
515 .subvendor = PCI_ANY_ID, 624 .subvendor = PCI_ANY_ID,
@@ -589,6 +698,46 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
589 .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio, 698 .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio,
590 }, 699 },
591 700
701 {
702 .vendor = PCI_VENDOR_ID_O2,
703 .device = PCI_DEVICE_ID_O2_8120,
704 .subvendor = PCI_ANY_ID,
705 .subdevice = PCI_ANY_ID,
706 .driver_data = (kernel_ulong_t)&sdhci_o2,
707 },
708
709 {
710 .vendor = PCI_VENDOR_ID_O2,
711 .device = PCI_DEVICE_ID_O2_8220,
712 .subvendor = PCI_ANY_ID,
713 .subdevice = PCI_ANY_ID,
714 .driver_data = (kernel_ulong_t)&sdhci_o2,
715 },
716
717 {
718 .vendor = PCI_VENDOR_ID_O2,
719 .device = PCI_DEVICE_ID_O2_8221,
720 .subvendor = PCI_ANY_ID,
721 .subdevice = PCI_ANY_ID,
722 .driver_data = (kernel_ulong_t)&sdhci_o2,
723 },
724
725 {
726 .vendor = PCI_VENDOR_ID_O2,
727 .device = PCI_DEVICE_ID_O2_8320,
728 .subvendor = PCI_ANY_ID,
729 .subdevice = PCI_ANY_ID,
730 .driver_data = (kernel_ulong_t)&sdhci_o2,
731 },
732
733 {
734 .vendor = PCI_VENDOR_ID_O2,
735 .device = PCI_DEVICE_ID_O2_8321,
736 .subvendor = PCI_ANY_ID,
737 .subdevice = PCI_ANY_ID,
738 .driver_data = (kernel_ulong_t)&sdhci_o2,
739 },
740
592 { /* Generic SD host controller */ 741 { /* Generic SD host controller */
593 PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00) 742 PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00)
594 }, 743 },
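For context, the driver_data field of each pci_device_id entry above is a pointer to the per-chip fixes structure. A minimal sketch of how a probe routine recovers it (stand-in types; the real sdhci_pci_probe passes an sdhci_pci_chip to the fixup, not a bare pci_dev):

#include <linux/pci.h>

struct my_fixes {			/* stand-in for sdhci_pci_fixes */
	int (*probe)(struct pci_dev *pdev);
};

static int my_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct my_fixes *fixes =
			(const struct my_fixes *)ent->driver_data;

	/* run the chip-specific fixup, if any */
	if (fixes && fixes->probe)
		return fixes->probe(pdev);
	return 0;
}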
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index 0502f89f662b..dbab0407f4b6 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -170,6 +170,12 @@ static const struct platform_device_id sdhci_pltfm_ids[] = {
170#ifdef CONFIG_MMC_SDHCI_ESDHC_IMX 170#ifdef CONFIG_MMC_SDHCI_ESDHC_IMX
171 { "sdhci-esdhc-imx", (kernel_ulong_t)&sdhci_esdhc_imx_pdata }, 171 { "sdhci-esdhc-imx", (kernel_ulong_t)&sdhci_esdhc_imx_pdata },
172#endif 172#endif
173#ifdef CONFIG_MMC_SDHCI_DOVE
174 { "sdhci-dove", (kernel_ulong_t)&sdhci_dove_pdata },
175#endif
176#ifdef CONFIG_MMC_SDHCI_TEGRA
177 { "sdhci-tegra", (kernel_ulong_t)&sdhci_tegra_pdata },
178#endif
173 { }, 179 { },
174}; 180};
175MODULE_DEVICE_TABLE(platform, sdhci_pltfm_ids); 181MODULE_DEVICE_TABLE(platform, sdhci_pltfm_ids);
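The new entries rely on name-based platform bus matching. A hedged sketch of the same hookup under hypothetical names:

#include <linux/module.h>
#include <linux/platform_device.h>

struct my_pdata { unsigned int quirks; };	/* illustrative pdata type */

static const struct my_pdata my_variant_pdata = { .quirks = 0 };

static const struct platform_device_id my_ids[] = {
	/* driver_data selects the per-variant data, as above */
	{ "my-variant", (kernel_ulong_t)&my_variant_pdata },
	{ },
};
MODULE_DEVICE_TABLE(platform, my_ids);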
diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h
index c1bfe48af56a..ea2e44d9be5e 100644
--- a/drivers/mmc/host/sdhci-pltfm.h
+++ b/drivers/mmc/host/sdhci-pltfm.h
@@ -22,5 +22,7 @@ struct sdhci_pltfm_host {
22 22
23extern struct sdhci_pltfm_data sdhci_cns3xxx_pdata; 23extern struct sdhci_pltfm_data sdhci_cns3xxx_pdata;
24extern struct sdhci_pltfm_data sdhci_esdhc_imx_pdata; 24extern struct sdhci_pltfm_data sdhci_esdhc_imx_pdata;
25extern struct sdhci_pltfm_data sdhci_dove_pdata;
26extern struct sdhci_pltfm_data sdhci_tegra_pdata;
25 27
26#endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */ 28#endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index aacb862ecc8a..17203586305c 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -130,6 +130,15 @@ static unsigned int sdhci_s3c_consider_clock(struct sdhci_s3c *ourhost,
130 if (!clksrc) 130 if (!clksrc)
131 return UINT_MAX; 131 return UINT_MAX;
132 132
133 /*
 134	 * The clock divider's step is 1, unlike the host controller's own
 135	 * divider, when 'clk_type' is S3C_SDHCI_CLK_DIV_EXTERNAL.
136 */
137 if (ourhost->pdata->clk_type) {
138 rate = clk_round_rate(clksrc, wanted);
139 return wanted - rate;
140 }
141
133 rate = clk_get_rate(clksrc); 142 rate = clk_get_rate(clksrc);
134 143
135 for (div = 1; div < 256; div *= 2) { 144 for (div = 1; div < 256; div *= 2) {
@@ -232,6 +241,42 @@ static unsigned int sdhci_s3c_get_min_clock(struct sdhci_host *host)
232 return min; 241 return min;
233} 242}
234 243
 244/* sdhci_cmu_get_max_clock - callback to get maximum clock frequency. */
245static unsigned int sdhci_cmu_get_max_clock(struct sdhci_host *host)
246{
247 struct sdhci_s3c *ourhost = to_s3c(host);
248
249 return clk_round_rate(ourhost->clk_bus[ourhost->cur_clk], UINT_MAX);
250}
251
252/* sdhci_cmu_get_min_clock - callback to get minimal supported clock value. */
253static unsigned int sdhci_cmu_get_min_clock(struct sdhci_host *host)
254{
255 struct sdhci_s3c *ourhost = to_s3c(host);
256
257 /*
258 * initial clock can be in the frequency range of
259 * 100KHz-400KHz, so we set it as max value.
260 */
261 return clk_round_rate(ourhost->clk_bus[ourhost->cur_clk], 400000);
262}
263
 264/* sdhci_cmu_set_clock - callback on clock change. */
265static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock)
266{
267 struct sdhci_s3c *ourhost = to_s3c(host);
268
269 /* don't bother if the clock is going off */
270 if (clock == 0)
271 return;
272
273 sdhci_s3c_set_clock(host, clock);
274
275 clk_set_rate(ourhost->clk_bus[ourhost->cur_clk], clock);
276
277 host->clock = clock;
278}
279
235static struct sdhci_ops sdhci_s3c_ops = { 280static struct sdhci_ops sdhci_s3c_ops = {
236 .get_max_clock = sdhci_s3c_get_max_clk, 281 .get_max_clock = sdhci_s3c_get_max_clk,
237 .set_clock = sdhci_s3c_set_clock, 282 .set_clock = sdhci_s3c_set_clock,
@@ -361,6 +406,13 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
361 406
362 clks++; 407 clks++;
363 sc->clk_bus[ptr] = clk; 408 sc->clk_bus[ptr] = clk;
409
410 /*
 411	 * Save the current clock index so the overriding callbacks
 412	 * know which clock bus is in use.
413 */
414 sc->cur_clk = ptr;
415
364 clk_enable(clk); 416 clk_enable(clk);
365 417
366 dev_info(dev, "clock source %d: %s (%ld Hz)\n", 418 dev_info(dev, "clock source %d: %s (%ld Hz)\n",
@@ -427,6 +479,20 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
427 /* HSMMC on Samsung SoCs uses SDCLK as timeout clock */ 479 /* HSMMC on Samsung SoCs uses SDCLK as timeout clock */
428 host->quirks |= SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK; 480 host->quirks |= SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK;
429 481
482 /*
 483	 * If the controller has no internal clock divider,
 484	 * override the default clock callbacks.
485 */
486 if (pdata->clk_type) {
487 sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
488 sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
489 sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
490 }
491
 492	/* Apply any additional host capabilities from platform data */
493 if (pdata->host_caps)
494 host->mmc->caps |= pdata->host_caps;
495
430 ret = sdhci_add_host(host); 496 ret = sdhci_add_host(host);
431 if (ret) { 497 if (ret) {
432 dev_err(dev, "sdhci_add_host() failed\n"); 498 dev_err(dev, "sdhci_add_host() failed\n");
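All three CMU callbacks above reduce to clk_round_rate() because, with S3C_SDHCI_CLK_DIV_EXTERNAL, the SoC clock controller does the dividing rather than the SDHCI block. A sketch of the idea with hypothetical helpers:

#include <linux/clk.h>
#include <linux/kernel.h>

/* With an external divider the achievable rates come from the clock
 * framework: rounding UINT_MAX gives the ceiling, rounding 400 kHz
 * gives the slowest rate usable for card initialisation.
 */
static unsigned int ext_div_max_rate(struct clk *bus_clk)
{
	return clk_round_rate(bus_clk, UINT_MAX);
}

static unsigned int ext_div_min_rate(struct clk *bus_clk)
{
	return clk_round_rate(bus_clk, 400000);
}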
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
new file mode 100644
index 000000000000..4823ee94a63f
--- /dev/null
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -0,0 +1,257 @@
1/*
2 * Copyright (C) 2010 Google, Inc.
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14
15#include <linux/err.h>
16#include <linux/init.h>
17#include <linux/platform_device.h>
18#include <linux/clk.h>
19#include <linux/io.h>
20#include <linux/gpio.h>
21#include <linux/mmc/card.h>
22#include <linux/mmc/host.h>
23
24#include <mach/gpio.h>
25#include <mach/sdhci.h>
26
27#include "sdhci.h"
28#include "sdhci-pltfm.h"
29
30static u32 tegra_sdhci_readl(struct sdhci_host *host, int reg)
31{
32 u32 val;
33
34 if (unlikely(reg == SDHCI_PRESENT_STATE)) {
35 /* Use wp_gpio here instead? */
36 val = readl(host->ioaddr + reg);
37 return val | SDHCI_WRITE_PROTECT;
38 }
39
40 return readl(host->ioaddr + reg);
41}
42
43static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
44{
45 if (unlikely(reg == SDHCI_HOST_VERSION)) {
46 /* Erratum: Version register is invalid in HW. */
47 return SDHCI_SPEC_200;
48 }
49
50 return readw(host->ioaddr + reg);
51}
52
53static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
54{
 55	/* Spurious timeout and CRC errors have been observed, so disable
 56	 * signalling of them; software timers should eventually catch any
 57	 * real errors.
58 */
59 if (unlikely(reg == SDHCI_SIGNAL_ENABLE))
60 val &= ~(SDHCI_INT_TIMEOUT|SDHCI_INT_CRC);
61
62 writel(val, host->ioaddr + reg);
63
64 if (unlikely(reg == SDHCI_INT_ENABLE)) {
65 /* Erratum: Must enable block gap interrupt detection */
66 u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
67 if (val & SDHCI_INT_CARD_INT)
68 gap_ctrl |= 0x8;
69 else
70 gap_ctrl &= ~0x8;
71 writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
72 }
73}
74
75static unsigned int tegra_sdhci_get_ro(struct sdhci_host *sdhci)
76{
77 struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
78 struct tegra_sdhci_platform_data *plat;
79
80 plat = pdev->dev.platform_data;
81
82 if (!gpio_is_valid(plat->wp_gpio))
83 return -1;
84
85 return gpio_get_value(plat->wp_gpio);
86}
87
88static irqreturn_t carddetect_irq(int irq, void *data)
89{
90 struct sdhci_host *sdhost = (struct sdhci_host *)data;
91
92 tasklet_schedule(&sdhost->card_tasklet);
93 return IRQ_HANDLED;
94};
95
96static int tegra_sdhci_8bit(struct sdhci_host *host, int bus_width)
97{
98 struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
99 struct tegra_sdhci_platform_data *plat;
100 u32 ctrl;
101
102 plat = pdev->dev.platform_data;
103
104 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
105 if (plat->is_8bit && bus_width == MMC_BUS_WIDTH_8) {
106 ctrl &= ~SDHCI_CTRL_4BITBUS;
107 ctrl |= SDHCI_CTRL_8BITBUS;
108 } else {
109 ctrl &= ~SDHCI_CTRL_8BITBUS;
110 if (bus_width == MMC_BUS_WIDTH_4)
111 ctrl |= SDHCI_CTRL_4BITBUS;
112 else
113 ctrl &= ~SDHCI_CTRL_4BITBUS;
114 }
115 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
116 return 0;
117}
118
119
120static int tegra_sdhci_pltfm_init(struct sdhci_host *host,
121 struct sdhci_pltfm_data *pdata)
122{
123 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
124 struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
125 struct tegra_sdhci_platform_data *plat;
126 struct clk *clk;
127 int rc;
128
129 plat = pdev->dev.platform_data;
130 if (plat == NULL) {
131 dev_err(mmc_dev(host->mmc), "missing platform data\n");
132 return -ENXIO;
133 }
134
135 if (gpio_is_valid(plat->power_gpio)) {
136 rc = gpio_request(plat->power_gpio, "sdhci_power");
137 if (rc) {
138 dev_err(mmc_dev(host->mmc),
139 "failed to allocate power gpio\n");
140 goto out;
141 }
142 tegra_gpio_enable(plat->power_gpio);
143 gpio_direction_output(plat->power_gpio, 1);
144 }
145
146 if (gpio_is_valid(plat->cd_gpio)) {
147 rc = gpio_request(plat->cd_gpio, "sdhci_cd");
148 if (rc) {
149 dev_err(mmc_dev(host->mmc),
150 "failed to allocate cd gpio\n");
151 goto out_power;
152 }
153 tegra_gpio_enable(plat->cd_gpio);
154 gpio_direction_input(plat->cd_gpio);
155
156 rc = request_irq(gpio_to_irq(plat->cd_gpio), carddetect_irq,
157 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
158 mmc_hostname(host->mmc), host);
159
160 if (rc) {
161 dev_err(mmc_dev(host->mmc), "request irq error\n");
162 goto out_cd;
163 }
164
165 }
166
167 if (gpio_is_valid(plat->wp_gpio)) {
168 rc = gpio_request(plat->wp_gpio, "sdhci_wp");
169 if (rc) {
170 dev_err(mmc_dev(host->mmc),
171 "failed to allocate wp gpio\n");
172 goto out_cd;
173 }
174 tegra_gpio_enable(plat->wp_gpio);
175 gpio_direction_input(plat->wp_gpio);
176 }
177
178 clk = clk_get(mmc_dev(host->mmc), NULL);
179 if (IS_ERR(clk)) {
180 dev_err(mmc_dev(host->mmc), "clk err\n");
181 rc = PTR_ERR(clk);
182 goto out_wp;
183 }
184 clk_enable(clk);
185 pltfm_host->clk = clk;
186
187 if (plat->is_8bit)
188 host->mmc->caps |= MMC_CAP_8_BIT_DATA;
189
190 return 0;
191
192out_wp:
193 if (gpio_is_valid(plat->wp_gpio)) {
194 tegra_gpio_disable(plat->wp_gpio);
195 gpio_free(plat->wp_gpio);
196 }
197
198out_cd:
199 if (gpio_is_valid(plat->cd_gpio)) {
200 tegra_gpio_disable(plat->cd_gpio);
201 gpio_free(plat->cd_gpio);
202 }
203
204out_power:
205 if (gpio_is_valid(plat->power_gpio)) {
206 tegra_gpio_disable(plat->power_gpio);
207 gpio_free(plat->power_gpio);
208 }
209
210out:
211 return rc;
212}
213
214static void tegra_sdhci_pltfm_exit(struct sdhci_host *host)
215{
216 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
217 struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
218 struct tegra_sdhci_platform_data *plat;
219
220 plat = pdev->dev.platform_data;
221
222 if (gpio_is_valid(plat->wp_gpio)) {
223 tegra_gpio_disable(plat->wp_gpio);
224 gpio_free(plat->wp_gpio);
225 }
226
227 if (gpio_is_valid(plat->cd_gpio)) {
228 tegra_gpio_disable(plat->cd_gpio);
229 gpio_free(plat->cd_gpio);
230 }
231
232 if (gpio_is_valid(plat->power_gpio)) {
233 tegra_gpio_disable(plat->power_gpio);
234 gpio_free(plat->power_gpio);
235 }
236
237 clk_disable(pltfm_host->clk);
238 clk_put(pltfm_host->clk);
239}
240
241static struct sdhci_ops tegra_sdhci_ops = {
242 .get_ro = tegra_sdhci_get_ro,
243 .read_l = tegra_sdhci_readl,
244 .read_w = tegra_sdhci_readw,
245 .write_l = tegra_sdhci_writel,
246 .platform_8bit_width = tegra_sdhci_8bit,
247};
248
249struct sdhci_pltfm_data sdhci_tegra_pdata = {
250 .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
251 SDHCI_QUIRK_SINGLE_POWER_WRITE |
252 SDHCI_QUIRK_NO_HISPD_BIT |
253 SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC,
254 .ops = &tegra_sdhci_ops,
255 .init = tegra_sdhci_pltfm_init,
256 .exit = tegra_sdhci_pltfm_exit,
257};
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index a25db426c910..9e15f41f87be 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -23,6 +23,7 @@
23 23
24#include <linux/leds.h> 24#include <linux/leds.h>
25 25
26#include <linux/mmc/mmc.h>
26#include <linux/mmc/host.h> 27#include <linux/mmc/host.h>
27 28
28#include "sdhci.h" 29#include "sdhci.h"
@@ -77,8 +78,11 @@ static void sdhci_dumpregs(struct sdhci_host *host)
77 printk(KERN_DEBUG DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n", 78 printk(KERN_DEBUG DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
78 sdhci_readw(host, SDHCI_ACMD12_ERR), 79 sdhci_readw(host, SDHCI_ACMD12_ERR),
79 sdhci_readw(host, SDHCI_SLOT_INT_STATUS)); 80 sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
80 printk(KERN_DEBUG DRIVER_NAME ": Caps: 0x%08x | Max curr: 0x%08x\n", 81 printk(KERN_DEBUG DRIVER_NAME ": Caps: 0x%08x | Caps_1: 0x%08x\n",
81 sdhci_readl(host, SDHCI_CAPABILITIES), 82 sdhci_readl(host, SDHCI_CAPABILITIES),
83 sdhci_readl(host, SDHCI_CAPABILITIES_1));
84 printk(KERN_DEBUG DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n",
85 sdhci_readw(host, SDHCI_COMMAND),
82 sdhci_readl(host, SDHCI_MAX_CURRENT)); 86 sdhci_readl(host, SDHCI_MAX_CURRENT));
83 87
84 if (host->flags & SDHCI_USE_ADMA) 88 if (host->flags & SDHCI_USE_ADMA)
@@ -1518,7 +1522,11 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
1518 1522
1519 if (intmask & SDHCI_INT_DATA_TIMEOUT) 1523 if (intmask & SDHCI_INT_DATA_TIMEOUT)
1520 host->data->error = -ETIMEDOUT; 1524 host->data->error = -ETIMEDOUT;
1521 else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT)) 1525 else if (intmask & SDHCI_INT_DATA_END_BIT)
1526 host->data->error = -EILSEQ;
1527 else if ((intmask & SDHCI_INT_DATA_CRC) &&
1528 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
1529 != MMC_BUS_TEST_R)
1522 host->data->error = -EILSEQ; 1530 host->data->error = -EILSEQ;
1523 else if (intmask & SDHCI_INT_ADMA_ERROR) { 1531 else if (intmask & SDHCI_INT_ADMA_ERROR) {
1524 printk(KERN_ERR "%s: ADMA error\n", mmc_hostname(host->mmc)); 1532 printk(KERN_ERR "%s: ADMA error\n", mmc_hostname(host->mmc));
@@ -1736,7 +1744,7 @@ EXPORT_SYMBOL_GPL(sdhci_alloc_host);
1736int sdhci_add_host(struct sdhci_host *host) 1744int sdhci_add_host(struct sdhci_host *host)
1737{ 1745{
1738 struct mmc_host *mmc; 1746 struct mmc_host *mmc;
1739 unsigned int caps; 1747 unsigned int caps, ocr_avail;
1740 int ret; 1748 int ret;
1741 1749
1742 WARN_ON(host == NULL); 1750 WARN_ON(host == NULL);
@@ -1890,13 +1898,26 @@ int sdhci_add_host(struct sdhci_host *host)
1890 mmc_card_is_removable(mmc)) 1898 mmc_card_is_removable(mmc))
1891 mmc->caps |= MMC_CAP_NEEDS_POLL; 1899 mmc->caps |= MMC_CAP_NEEDS_POLL;
1892 1900
1893 mmc->ocr_avail = 0; 1901 ocr_avail = 0;
1894 if (caps & SDHCI_CAN_VDD_330) 1902 if (caps & SDHCI_CAN_VDD_330)
1895 mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34; 1903 ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
1896 if (caps & SDHCI_CAN_VDD_300) 1904 if (caps & SDHCI_CAN_VDD_300)
1897 mmc->ocr_avail |= MMC_VDD_29_30|MMC_VDD_30_31; 1905 ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
1898 if (caps & SDHCI_CAN_VDD_180) 1906 if (caps & SDHCI_CAN_VDD_180)
1899 mmc->ocr_avail |= MMC_VDD_165_195; 1907 ocr_avail |= MMC_VDD_165_195;
1908
1909 mmc->ocr_avail = ocr_avail;
1910 mmc->ocr_avail_sdio = ocr_avail;
1911 if (host->ocr_avail_sdio)
1912 mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
1913 mmc->ocr_avail_sd = ocr_avail;
1914 if (host->ocr_avail_sd)
1915 mmc->ocr_avail_sd &= host->ocr_avail_sd;
1916 else /* normal SD controllers don't support 1.8V */
1917 mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
1918 mmc->ocr_avail_mmc = ocr_avail;
1919 if (host->ocr_avail_mmc)
1920 mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
1900 1921
1901 if (mmc->ocr_avail == 0) { 1922 if (mmc->ocr_avail == 0) {
1902 printk(KERN_ERR "%s: Hardware doesn't report any " 1923 printk(KERN_ERR "%s: Hardware doesn't report any "
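The per-bus masks are plain intersections of the controller capabilities with any host-driver limits. A worked example for the JM388 restriction added earlier in this patch:

#include <linux/mmc/host.h>

static void ocr_example(void)
{
	/* controller caps: 3.3 V, 3.0 V and 1.8 V */
	u32 ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34 |
			MMC_VDD_29_30 | MMC_VDD_30_31 | MMC_VDD_165_195;
	/* JM388 host limit for the MMC bus: no 1.8 V */
	u32 host_mmc = MMC_VDD_32_33 | MMC_VDD_33_34 |
		       MMC_VDD_29_30 | MMC_VDD_30_31;
	/* the core sees the intersection: only MMC loses 1.8 V */
	u32 ocr_avail_mmc = ocr_avail & host_mmc;

	(void)ocr_avail_mmc;
}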
@@ -1928,10 +1949,14 @@ int sdhci_add_host(struct sdhci_host *host)
1928 * of bytes. When doing hardware scatter/gather, each entry cannot 1949 * of bytes. When doing hardware scatter/gather, each entry cannot
1929 * be larger than 64 KiB though. 1950 * be larger than 64 KiB though.
1930 */ 1951 */
1931 if (host->flags & SDHCI_USE_ADMA) 1952 if (host->flags & SDHCI_USE_ADMA) {
1932 mmc->max_seg_size = 65536; 1953 if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
1933 else 1954 mmc->max_seg_size = 65535;
1955 else
1956 mmc->max_seg_size = 65536;
1957 } else {
1934 mmc->max_seg_size = mmc->max_req_size; 1958 mmc->max_seg_size = mmc->max_req_size;
1959 }
1935 1960
1936 /* 1961 /*
1937 * Maximum block size. This varies from controller to controller and 1962 * Maximum block size. This varies from controller to controller and
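The 65535 cap follows from the ADMA2 descriptor format, where the length field is 16 bits wide and a value of 0 encodes 65536 bytes on conforming controllers; that zero-length encoding is exactly what the quirk flags as broken. A one-function illustration (assumed encoding, per the SDHCI ADMA2 description):

#include <linux/types.h>

static u16 adma2_len_field(u32 seg_len)
{
	/* 65536 truncates to 0 in the 16-bit field; capping
	 * max_seg_size at 65535 avoids ever emitting it.
	 */
	return seg_len & 0xffff;
}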
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index e42d7f00c060..6e0969e40650 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -52,6 +52,7 @@
52#define SDHCI_CMD_RESP_SHORT_BUSY 0x03 52#define SDHCI_CMD_RESP_SHORT_BUSY 0x03
53 53
54#define SDHCI_MAKE_CMD(c, f) (((c & 0xff) << 8) | (f & 0xff)) 54#define SDHCI_MAKE_CMD(c, f) (((c & 0xff) << 8) | (f & 0xff))
55#define SDHCI_GET_CMD(c) ((c>>8) & 0x3f)
55 56
56#define SDHCI_RESPONSE 0x10 57#define SDHCI_RESPONSE 0x10
57 58
@@ -165,7 +166,7 @@
165#define SDHCI_CAN_VDD_180 0x04000000 166#define SDHCI_CAN_VDD_180 0x04000000
166#define SDHCI_CAN_64BIT 0x10000000 167#define SDHCI_CAN_64BIT 0x10000000
167 168
168/* 44-47 reserved for more caps */ 169#define SDHCI_CAPABILITIES_1 0x44
169 170
170#define SDHCI_MAX_CURRENT 0x48 171#define SDHCI_MAX_CURRENT 0x48
171 172
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index e7765a89593e..e3c6ef208391 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -25,16 +25,261 @@
25 * double buffer support 25 * double buffer support
26 * 26 *
27 */ 27 */
28#include <linux/module.h> 28
29#include <linux/irq.h>
30#include <linux/device.h>
31#include <linux/delay.h> 29#include <linux/delay.h>
30#include <linux/device.h>
32#include <linux/dmaengine.h> 31#include <linux/dmaengine.h>
33#include <linux/mmc/host.h> 32#include <linux/highmem.h>
33#include <linux/interrupt.h>
34#include <linux/io.h>
35#include <linux/irq.h>
34#include <linux/mfd/core.h> 36#include <linux/mfd/core.h>
35#include <linux/mfd/tmio.h> 37#include <linux/mfd/tmio.h>
38#include <linux/mmc/host.h>
39#include <linux/module.h>
40#include <linux/pagemap.h>
41#include <linux/scatterlist.h>
42#include <linux/workqueue.h>
43#include <linux/spinlock.h>
44
45#define CTL_SD_CMD 0x00
46#define CTL_ARG_REG 0x04
47#define CTL_STOP_INTERNAL_ACTION 0x08
48#define CTL_XFER_BLK_COUNT 0xa
49#define CTL_RESPONSE 0x0c
50#define CTL_STATUS 0x1c
51#define CTL_IRQ_MASK 0x20
52#define CTL_SD_CARD_CLK_CTL 0x24
53#define CTL_SD_XFER_LEN 0x26
54#define CTL_SD_MEM_CARD_OPT 0x28
55#define CTL_SD_ERROR_DETAIL_STATUS 0x2c
56#define CTL_SD_DATA_PORT 0x30
57#define CTL_TRANSACTION_CTL 0x34
58#define CTL_SDIO_STATUS 0x36
59#define CTL_SDIO_IRQ_MASK 0x38
60#define CTL_RESET_SD 0xe0
61#define CTL_SDIO_REGS 0x100
62#define CTL_CLK_AND_WAIT_CTL 0x138
63#define CTL_RESET_SDIO 0x1e0
64
65/* Definitions for values the CTRL_STATUS register can take. */
66#define TMIO_STAT_CMDRESPEND 0x00000001
67#define TMIO_STAT_DATAEND 0x00000004
68#define TMIO_STAT_CARD_REMOVE 0x00000008
69#define TMIO_STAT_CARD_INSERT 0x00000010
70#define TMIO_STAT_SIGSTATE 0x00000020
71#define TMIO_STAT_WRPROTECT 0x00000080
72#define TMIO_STAT_CARD_REMOVE_A 0x00000100
73#define TMIO_STAT_CARD_INSERT_A 0x00000200
74#define TMIO_STAT_SIGSTATE_A 0x00000400
75#define TMIO_STAT_CMD_IDX_ERR 0x00010000
76#define TMIO_STAT_CRCFAIL 0x00020000
77#define TMIO_STAT_STOPBIT_ERR 0x00040000
78#define TMIO_STAT_DATATIMEOUT 0x00080000
79#define TMIO_STAT_RXOVERFLOW 0x00100000
80#define TMIO_STAT_TXUNDERRUN 0x00200000
81#define TMIO_STAT_CMDTIMEOUT 0x00400000
82#define TMIO_STAT_RXRDY 0x01000000
83#define TMIO_STAT_TXRQ 0x02000000
84#define TMIO_STAT_ILL_FUNC 0x20000000
85#define TMIO_STAT_CMD_BUSY 0x40000000
86#define TMIO_STAT_ILL_ACCESS 0x80000000
87
88/* Definitions for values the CTRL_SDIO_STATUS register can take. */
89#define TMIO_SDIO_STAT_IOIRQ 0x0001
90#define TMIO_SDIO_STAT_EXPUB52 0x4000
91#define TMIO_SDIO_STAT_EXWT 0x8000
92#define TMIO_SDIO_MASK_ALL 0xc007
93
94/* Define some IRQ masks */
95/* This is the mask used at reset by the chip */
96#define TMIO_MASK_ALL 0x837f031d
97#define TMIO_MASK_READOP (TMIO_STAT_RXRDY | TMIO_STAT_DATAEND)
98#define TMIO_MASK_WRITEOP (TMIO_STAT_TXRQ | TMIO_STAT_DATAEND)
99#define TMIO_MASK_CMD (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT | \
100 TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT)
101#define TMIO_MASK_IRQ (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD)
102
103#define enable_mmc_irqs(host, i) \
104 do { \
105 u32 mask;\
106 mask = sd_ctrl_read32((host), CTL_IRQ_MASK); \
107 mask &= ~((i) & TMIO_MASK_IRQ); \
108 sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
109 } while (0)
110
111#define disable_mmc_irqs(host, i) \
112 do { \
113 u32 mask;\
114 mask = sd_ctrl_read32((host), CTL_IRQ_MASK); \
115 mask |= ((i) & TMIO_MASK_IRQ); \
116 sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
117 } while (0)
118
119#define ack_mmc_irqs(host, i) \
120 do { \
121 sd_ctrl_write32((host), CTL_STATUS, ~(i)); \
122 } while (0)
123
 124/* This is arbitrary; no one has needed a higher alignment yet */
125#define MAX_ALIGN 4
126
127struct tmio_mmc_host {
128 void __iomem *ctl;
129 unsigned long bus_shift;
130 struct mmc_command *cmd;
131 struct mmc_request *mrq;
132 struct mmc_data *data;
133 struct mmc_host *mmc;
134 int irq;
135 unsigned int sdio_irq_enabled;
136
137 /* Callbacks for clock / power control */
138 void (*set_pwr)(struct platform_device *host, int state);
139 void (*set_clk_div)(struct platform_device *host, int state);
140
141 /* pio related stuff */
142 struct scatterlist *sg_ptr;
143 struct scatterlist *sg_orig;
144 unsigned int sg_len;
145 unsigned int sg_off;
146
147 struct platform_device *pdev;
148
149 /* DMA support */
150 struct dma_chan *chan_rx;
151 struct dma_chan *chan_tx;
152 struct tasklet_struct dma_complete;
153 struct tasklet_struct dma_issue;
154#ifdef CONFIG_TMIO_MMC_DMA
155 unsigned int dma_sglen;
156 u8 bounce_buf[PAGE_CACHE_SIZE] __attribute__((aligned(MAX_ALIGN)));
157 struct scatterlist bounce_sg;
158#endif
159
160 /* Track lost interrupts */
161 struct delayed_work delayed_reset_work;
162 spinlock_t lock;
163 unsigned long last_req_ts;
164};
165
166static void tmio_check_bounce_buffer(struct tmio_mmc_host *host);
167
168static u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
169{
170 return readw(host->ctl + (addr << host->bus_shift));
171}
172
173static void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
174 u16 *buf, int count)
175{
176 readsw(host->ctl + (addr << host->bus_shift), buf, count);
177}
36 178
37#include "tmio_mmc.h" 179static u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr)
180{
181 return readw(host->ctl + (addr << host->bus_shift)) |
182 readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
183}
184
185static void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val)
186{
187 writew(val, host->ctl + (addr << host->bus_shift));
188}
189
190static void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
191 u16 *buf, int count)
192{
193 writesw(host->ctl + (addr << host->bus_shift), buf, count);
194}
195
196static void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val)
197{
198 writew(val, host->ctl + (addr << host->bus_shift));
199 writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
200}
201
202static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
203{
204 host->sg_len = data->sg_len;
205 host->sg_ptr = data->sg;
206 host->sg_orig = data->sg;
207 host->sg_off = 0;
208}
209
210static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
211{
212 host->sg_ptr = sg_next(host->sg_ptr);
213 host->sg_off = 0;
214 return --host->sg_len;
215}
216
217static char *tmio_mmc_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
218{
219 local_irq_save(*flags);
220 return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
221}
222
223static void tmio_mmc_kunmap_atomic(void *virt, unsigned long *flags)
224{
225 kunmap_atomic(virt, KM_BIO_SRC_IRQ);
226 local_irq_restore(*flags);
227}
228
229#ifdef CONFIG_MMC_DEBUG
230
231#define STATUS_TO_TEXT(a) \
232 do { \
233 if (status & TMIO_STAT_##a) \
234 printk(#a); \
235 } while (0)
236
237void pr_debug_status(u32 status)
238{
239 printk(KERN_DEBUG "status: %08x = ", status);
240 STATUS_TO_TEXT(CARD_REMOVE);
241 STATUS_TO_TEXT(CARD_INSERT);
242 STATUS_TO_TEXT(SIGSTATE);
243 STATUS_TO_TEXT(WRPROTECT);
244 STATUS_TO_TEXT(CARD_REMOVE_A);
245 STATUS_TO_TEXT(CARD_INSERT_A);
246 STATUS_TO_TEXT(SIGSTATE_A);
247 STATUS_TO_TEXT(CMD_IDX_ERR);
248 STATUS_TO_TEXT(STOPBIT_ERR);
249 STATUS_TO_TEXT(ILL_FUNC);
250 STATUS_TO_TEXT(CMD_BUSY);
251 STATUS_TO_TEXT(CMDRESPEND);
252 STATUS_TO_TEXT(DATAEND);
253 STATUS_TO_TEXT(CRCFAIL);
254 STATUS_TO_TEXT(DATATIMEOUT);
255 STATUS_TO_TEXT(CMDTIMEOUT);
256 STATUS_TO_TEXT(RXOVERFLOW);
257 STATUS_TO_TEXT(TXUNDERRUN);
258 STATUS_TO_TEXT(RXRDY);
259 STATUS_TO_TEXT(TXRQ);
260 STATUS_TO_TEXT(ILL_ACCESS);
261 printk("\n");
262}
263
264#else
265#define pr_debug_status(s) do { } while (0)
266#endif
267
268static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
269{
270 struct tmio_mmc_host *host = mmc_priv(mmc);
271
272 if (enable) {
273 host->sdio_irq_enabled = 1;
274 sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
275 sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK,
276 (TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ));
277 } else {
278 sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, TMIO_SDIO_MASK_ALL);
279 sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);
280 host->sdio_irq_enabled = 0;
281 }
282}
38 283
39static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock) 284static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
40{ 285{
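Note the polarity behind the IRQ macros above: in CTL_IRQ_MASK a set bit masks the event, so enabling interrupts clears bits and disabling sets them. A fragment as a worked example, using the definitions above:

	u32 mask = 0xffffffff;			/* everything masked */
	/* enable_mmc_irqs(host, TMIO_MASK_CMD) computes: */
	mask &= ~(TMIO_MASK_CMD & TMIO_MASK_IRQ);
	/* only the four command-event bits are now unmasked */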
@@ -55,8 +300,23 @@ static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
55 300
56static void tmio_mmc_clk_stop(struct tmio_mmc_host *host) 301static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
57{ 302{
303 struct mfd_cell *cell = host->pdev->dev.platform_data;
304 struct tmio_mmc_data *pdata = cell->driver_data;
305
306 /*
307 * Testing on sh-mobile showed that SDIO IRQs are unmasked when
308 * CTL_CLK_AND_WAIT_CTL gets written, so we have to disable the
309 * device IRQ here and restore the SDIO IRQ mask before
310 * re-enabling the device IRQ.
311 */
312 if (pdata->flags & TMIO_MMC_SDIO_IRQ)
313 disable_irq(host->irq);
58 sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000); 314 sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
59 msleep(10); 315 msleep(10);
316 if (pdata->flags & TMIO_MMC_SDIO_IRQ) {
317 tmio_mmc_enable_sdio_irq(host->mmc, host->sdio_irq_enabled);
318 enable_irq(host->irq);
319 }
60 sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 & 320 sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 &
61 sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); 321 sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
62 msleep(10); 322 msleep(10);
@@ -64,11 +324,21 @@ static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
64 324
65static void tmio_mmc_clk_start(struct tmio_mmc_host *host) 325static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
66{ 326{
327 struct mfd_cell *cell = host->pdev->dev.platform_data;
328 struct tmio_mmc_data *pdata = cell->driver_data;
329
67 sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 | 330 sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
68 sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); 331 sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
69 msleep(10); 332 msleep(10);
333 /* see comment in tmio_mmc_clk_stop above */
334 if (pdata->flags & TMIO_MMC_SDIO_IRQ)
335 disable_irq(host->irq);
70 sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100); 336 sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
71 msleep(10); 337 msleep(10);
338 if (pdata->flags & TMIO_MMC_SDIO_IRQ) {
339 tmio_mmc_enable_sdio_irq(host->mmc, host->sdio_irq_enabled);
340 enable_irq(host->irq);
341 }
72} 342}
73 343
74static void reset(struct tmio_mmc_host *host) 344static void reset(struct tmio_mmc_host *host)
@@ -82,15 +352,60 @@ static void reset(struct tmio_mmc_host *host)
82 msleep(10); 352 msleep(10);
83} 353}
84 354
355static void tmio_mmc_reset_work(struct work_struct *work)
356{
357 struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
358 delayed_reset_work.work);
359 struct mmc_request *mrq;
360 unsigned long flags;
361
362 spin_lock_irqsave(&host->lock, flags);
363 mrq = host->mrq;
364
365 /* request already finished */
366 if (!mrq
367 || time_is_after_jiffies(host->last_req_ts +
368 msecs_to_jiffies(2000))) {
369 spin_unlock_irqrestore(&host->lock, flags);
370 return;
371 }
372
373 dev_warn(&host->pdev->dev,
374 "timeout waiting for hardware interrupt (CMD%u)\n",
375 mrq->cmd->opcode);
376
377 if (host->data)
378 host->data->error = -ETIMEDOUT;
379 else if (host->cmd)
380 host->cmd->error = -ETIMEDOUT;
381 else
382 mrq->cmd->error = -ETIMEDOUT;
383
384 host->cmd = NULL;
385 host->data = NULL;
386 host->mrq = NULL;
387
388 spin_unlock_irqrestore(&host->lock, flags);
389
390 reset(host);
391
392 mmc_request_done(host->mmc, mrq);
393}
394
85static void 395static void
86tmio_mmc_finish_request(struct tmio_mmc_host *host) 396tmio_mmc_finish_request(struct tmio_mmc_host *host)
87{ 397{
88 struct mmc_request *mrq = host->mrq; 398 struct mmc_request *mrq = host->mrq;
89 399
400 if (!mrq)
401 return;
402
90 host->mrq = NULL; 403 host->mrq = NULL;
91 host->cmd = NULL; 404 host->cmd = NULL;
92 host->data = NULL; 405 host->data = NULL;
93 406
407 cancel_delayed_work(&host->delayed_reset_work);
408
94 mmc_request_done(host->mmc, mrq); 409 mmc_request_done(host->mmc, mrq);
95} 410}
96 411
@@ -200,6 +515,7 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
200 return; 515 return;
201} 516}
202 517
518/* needs to be called with host->lock held */
203static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host) 519static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
204{ 520{
205 struct mmc_data *data = host->data; 521 struct mmc_data *data = host->data;
@@ -233,6 +549,8 @@ static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
233 if (data->flags & MMC_DATA_READ) { 549 if (data->flags & MMC_DATA_READ) {
234 if (!host->chan_rx) 550 if (!host->chan_rx)
235 disable_mmc_irqs(host, TMIO_MASK_READOP); 551 disable_mmc_irqs(host, TMIO_MASK_READOP);
552 else
553 tmio_check_bounce_buffer(host);
236 dev_dbg(&host->pdev->dev, "Complete Rx request %p\n", 554 dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
237 host->mrq); 555 host->mrq);
238 } else { 556 } else {
@@ -254,10 +572,12 @@ static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
254 572
255static void tmio_mmc_data_irq(struct tmio_mmc_host *host) 573static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
256{ 574{
257 struct mmc_data *data = host->data; 575 struct mmc_data *data;
576 spin_lock(&host->lock);
577 data = host->data;
258 578
259 if (!data) 579 if (!data)
260 return; 580 goto out;
261 581
262 if (host->chan_tx && (data->flags & MMC_DATA_WRITE)) { 582 if (host->chan_tx && (data->flags & MMC_DATA_WRITE)) {
263 /* 583 /*
@@ -278,6 +598,8 @@ static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
278 } else { 598 } else {
279 tmio_mmc_do_data_irq(host); 599 tmio_mmc_do_data_irq(host);
280 } 600 }
601out:
602 spin_unlock(&host->lock);
281} 603}
282 604
283static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, 605static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
@@ -286,9 +608,11 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
286 struct mmc_command *cmd = host->cmd; 608 struct mmc_command *cmd = host->cmd;
287 int i, addr; 609 int i, addr;
288 610
611 spin_lock(&host->lock);
612
289 if (!host->cmd) { 613 if (!host->cmd) {
290 pr_debug("Spurious CMD irq\n"); 614 pr_debug("Spurious CMD irq\n");
291 return; 615 goto out;
292 } 616 }
293 617
294 host->cmd = NULL; 618 host->cmd = NULL;
@@ -324,8 +648,7 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
324 if (!host->chan_rx) 648 if (!host->chan_rx)
325 enable_mmc_irqs(host, TMIO_MASK_READOP); 649 enable_mmc_irqs(host, TMIO_MASK_READOP);
326 } else { 650 } else {
327 struct dma_chan *chan = host->chan_tx; 651 if (!host->chan_tx)
328 if (!chan)
329 enable_mmc_irqs(host, TMIO_MASK_WRITEOP); 652 enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
330 else 653 else
331 tasklet_schedule(&host->dma_issue); 654 tasklet_schedule(&host->dma_issue);
@@ -334,13 +657,19 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
334 tmio_mmc_finish_request(host); 657 tmio_mmc_finish_request(host);
335 } 658 }
336 659
660out:
661 spin_unlock(&host->lock);
662
337 return; 663 return;
338} 664}
339 665
340static irqreturn_t tmio_mmc_irq(int irq, void *devid) 666static irqreturn_t tmio_mmc_irq(int irq, void *devid)
341{ 667{
342 struct tmio_mmc_host *host = devid; 668 struct tmio_mmc_host *host = devid;
669 struct mfd_cell *cell = host->pdev->dev.platform_data;
670 struct tmio_mmc_data *pdata = cell->driver_data;
343 unsigned int ireg, irq_mask, status; 671 unsigned int ireg, irq_mask, status;
672 unsigned int sdio_ireg, sdio_irq_mask, sdio_status;
344 673
345 pr_debug("MMC IRQ begin\n"); 674 pr_debug("MMC IRQ begin\n");
346 675
@@ -348,6 +677,29 @@ static irqreturn_t tmio_mmc_irq(int irq, void *devid)
348 irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK); 677 irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
349 ireg = status & TMIO_MASK_IRQ & ~irq_mask; 678 ireg = status & TMIO_MASK_IRQ & ~irq_mask;
350 679
680 sdio_ireg = 0;
681 if (!ireg && pdata->flags & TMIO_MMC_SDIO_IRQ) {
682 sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
683 sdio_irq_mask = sd_ctrl_read16(host, CTL_SDIO_IRQ_MASK);
684 sdio_ireg = sdio_status & TMIO_SDIO_MASK_ALL & ~sdio_irq_mask;
685
686 sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status & ~TMIO_SDIO_MASK_ALL);
687
688 if (sdio_ireg && !host->sdio_irq_enabled) {
689 pr_warning("tmio_mmc: Spurious SDIO IRQ, disabling! 0x%04x 0x%04x 0x%04x\n",
690 sdio_status, sdio_irq_mask, sdio_ireg);
691 tmio_mmc_enable_sdio_irq(host->mmc, 0);
692 goto out;
693 }
694
695 if (host->mmc->caps & MMC_CAP_SDIO_IRQ &&
696 sdio_ireg & TMIO_SDIO_STAT_IOIRQ)
697 mmc_signal_sdio_irq(host->mmc);
698
699 if (sdio_ireg)
700 goto out;
701 }
702
351 pr_debug_status(status); 703 pr_debug_status(status);
352 pr_debug_status(ireg); 704 pr_debug_status(ireg);
353 705
@@ -375,8 +727,10 @@ static irqreturn_t tmio_mmc_irq(int irq, void *devid)
375 */ 727 */
376 728
377 /* Command completion */ 729 /* Command completion */
378 if (ireg & TMIO_MASK_CMD) { 730 if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
379 ack_mmc_irqs(host, TMIO_MASK_CMD); 731 ack_mmc_irqs(host,
732 TMIO_STAT_CMDRESPEND |
733 TMIO_STAT_CMDTIMEOUT);
380 tmio_mmc_cmd_irq(host, status); 734 tmio_mmc_cmd_irq(host, status);
381 } 735 }
382 736
@@ -407,6 +761,16 @@ out:
407} 761}
408 762
409#ifdef CONFIG_TMIO_MMC_DMA 763#ifdef CONFIG_TMIO_MMC_DMA
764static void tmio_check_bounce_buffer(struct tmio_mmc_host *host)
765{
766 if (host->sg_ptr == &host->bounce_sg) {
767 unsigned long flags;
768 void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);
769 memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
770 tmio_mmc_kunmap_atomic(sg_vaddr, &flags);
771 }
772}
773
410static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable) 774static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
411{ 775{
412#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE) 776#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE)
@@ -427,12 +791,39 @@ static void tmio_dma_complete(void *arg)
427 enable_mmc_irqs(host, TMIO_STAT_DATAEND); 791 enable_mmc_irqs(host, TMIO_STAT_DATAEND);
428} 792}
429 793
430static int tmio_mmc_start_dma_rx(struct tmio_mmc_host *host) 794static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
431{ 795{
432 struct scatterlist *sg = host->sg_ptr; 796 struct scatterlist *sg = host->sg_ptr, *sg_tmp;
433 struct dma_async_tx_descriptor *desc = NULL; 797 struct dma_async_tx_descriptor *desc = NULL;
434 struct dma_chan *chan = host->chan_rx; 798 struct dma_chan *chan = host->chan_rx;
435 int ret; 799 struct mfd_cell *cell = host->pdev->dev.platform_data;
800 struct tmio_mmc_data *pdata = cell->driver_data;
801 dma_cookie_t cookie;
802 int ret, i;
803 bool aligned = true, multiple = true;
804 unsigned int align = (1 << pdata->dma->alignment_shift) - 1;
805
806 for_each_sg(sg, sg_tmp, host->sg_len, i) {
807 if (sg_tmp->offset & align)
808 aligned = false;
809 if (sg_tmp->length & align) {
810 multiple = false;
811 break;
812 }
813 }
814
815 if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
816 align >= MAX_ALIGN)) || !multiple) {
817 ret = -EINVAL;
818 goto pio;
819 }
820
 821	/* A single sg element may be unaligned; route it through the bounce buffer */
822 if (!aligned) {
823 sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
824 host->sg_ptr = &host->bounce_sg;
825 sg = host->sg_ptr;
826 }
436 827
437 ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_FROM_DEVICE); 828 ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_FROM_DEVICE);
438 if (ret > 0) { 829 if (ret > 0) {
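The aligned/multiple test above is plain mask arithmetic. A fragment as a worked example, assuming alignment_shift = 1 (the real value comes from platform data):

	unsigned int align = (1 << 1) - 1;	/* mask 0x1 */
	bool aligned = !(0x101 & align);	/* odd offset: false, bounce */
	bool multiple = !(0x200 & align);	/* even length: true */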
@@ -442,21 +833,21 @@ static int tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
442 } 833 }
443 834
444 if (desc) { 835 if (desc) {
445 host->desc = desc;
446 desc->callback = tmio_dma_complete; 836 desc->callback = tmio_dma_complete;
447 desc->callback_param = host; 837 desc->callback_param = host;
448 host->cookie = desc->tx_submit(desc); 838 cookie = desc->tx_submit(desc);
449 if (host->cookie < 0) { 839 if (cookie < 0) {
450 host->desc = NULL; 840 desc = NULL;
451 ret = host->cookie; 841 ret = cookie;
452 } else { 842 } else {
453 chan->device->device_issue_pending(chan); 843 chan->device->device_issue_pending(chan);
454 } 844 }
455 } 845 }
456 dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n", 846 dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
457 __func__, host->sg_len, ret, host->cookie, host->mrq); 847 __func__, host->sg_len, ret, cookie, host->mrq);
458 848
459 if (!host->desc) { 849pio:
850 if (!desc) {
460 /* DMA failed, fall back to PIO */ 851 /* DMA failed, fall back to PIO */
461 if (ret >= 0) 852 if (ret >= 0)
462 ret = -EIO; 853 ret = -EIO;
@@ -471,24 +862,49 @@ static int tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
471 dev_warn(&host->pdev->dev, 862 dev_warn(&host->pdev->dev,
472 "DMA failed: %d, falling back to PIO\n", ret); 863 "DMA failed: %d, falling back to PIO\n", ret);
473 tmio_mmc_enable_dma(host, false); 864 tmio_mmc_enable_dma(host, false);
474 reset(host);
475 /* Fail this request, let above layers recover */
476 host->mrq->cmd->error = ret;
477 tmio_mmc_finish_request(host);
478 } 865 }
479 866
480 dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__, 867 dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
481 desc, host->cookie, host->sg_len); 868 desc, cookie, host->sg_len);
482
483 return ret > 0 ? 0 : ret;
484} 869}
485 870
486static int tmio_mmc_start_dma_tx(struct tmio_mmc_host *host) 871static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
487{ 872{
488 struct scatterlist *sg = host->sg_ptr; 873 struct scatterlist *sg = host->sg_ptr, *sg_tmp;
489 struct dma_async_tx_descriptor *desc = NULL; 874 struct dma_async_tx_descriptor *desc = NULL;
490 struct dma_chan *chan = host->chan_tx; 875 struct dma_chan *chan = host->chan_tx;
491 int ret; 876 struct mfd_cell *cell = host->pdev->dev.platform_data;
877 struct tmio_mmc_data *pdata = cell->driver_data;
878 dma_cookie_t cookie;
879 int ret, i;
880 bool aligned = true, multiple = true;
881 unsigned int align = (1 << pdata->dma->alignment_shift) - 1;
882
883 for_each_sg(sg, sg_tmp, host->sg_len, i) {
884 if (sg_tmp->offset & align)
885 aligned = false;
886 if (sg_tmp->length & align) {
887 multiple = false;
888 break;
889 }
890 }
891
892 if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
893 align >= MAX_ALIGN)) || !multiple) {
894 ret = -EINVAL;
895 goto pio;
896 }
897
 898	/* A single sg element may be unaligned; route it through the bounce buffer */
899 if (!aligned) {
900 unsigned long flags;
901 void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);
902 sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
903 memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
904 tmio_mmc_kunmap_atomic(sg_vaddr, &flags);
905 host->sg_ptr = &host->bounce_sg;
906 sg = host->sg_ptr;
907 }
492 908
493 ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_TO_DEVICE); 909 ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_TO_DEVICE);
494 if (ret > 0) { 910 if (ret > 0) {
@@ -498,19 +914,19 @@ static int tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
498 } 914 }
499 915
500 if (desc) { 916 if (desc) {
501 host->desc = desc;
502 desc->callback = tmio_dma_complete; 917 desc->callback = tmio_dma_complete;
503 desc->callback_param = host; 918 desc->callback_param = host;
504 host->cookie = desc->tx_submit(desc); 919 cookie = desc->tx_submit(desc);
505 if (host->cookie < 0) { 920 if (cookie < 0) {
506 host->desc = NULL; 921 desc = NULL;
507 ret = host->cookie; 922 ret = cookie;
508 } 923 }
509 } 924 }
510 dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n", 925 dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
511 __func__, host->sg_len, ret, host->cookie, host->mrq); 926 __func__, host->sg_len, ret, cookie, host->mrq);
512 927
513 if (!host->desc) { 928pio:
929 if (!desc) {
514 /* DMA failed, fall back to PIO */ 930 /* DMA failed, fall back to PIO */
515 if (ret >= 0) 931 if (ret >= 0)
516 ret = -EIO; 932 ret = -EIO;
@@ -525,30 +941,22 @@ static int tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
525 dev_warn(&host->pdev->dev, 941 dev_warn(&host->pdev->dev,
526 "DMA failed: %d, falling back to PIO\n", ret); 942 "DMA failed: %d, falling back to PIO\n", ret);
527 tmio_mmc_enable_dma(host, false); 943 tmio_mmc_enable_dma(host, false);
528 reset(host);
529 /* Fail this request, let above layers recover */
530 host->mrq->cmd->error = ret;
531 tmio_mmc_finish_request(host);
532 } 944 }
533 945
534 dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__, 946 dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
535 desc, host->cookie); 947 desc, cookie);
536
537 return ret > 0 ? 0 : ret;
538} 948}
539 949
540static int tmio_mmc_start_dma(struct tmio_mmc_host *host, 950static void tmio_mmc_start_dma(struct tmio_mmc_host *host,
541 struct mmc_data *data) 951 struct mmc_data *data)
542{ 952{
543 if (data->flags & MMC_DATA_READ) { 953 if (data->flags & MMC_DATA_READ) {
544 if (host->chan_rx) 954 if (host->chan_rx)
545 return tmio_mmc_start_dma_rx(host); 955 tmio_mmc_start_dma_rx(host);
546 } else { 956 } else {
547 if (host->chan_tx) 957 if (host->chan_tx)
548 return tmio_mmc_start_dma_tx(host); 958 tmio_mmc_start_dma_tx(host);
549 } 959 }
550
551 return 0;
552} 960}
553 961
554static void tmio_issue_tasklet_fn(unsigned long priv) 962static void tmio_issue_tasklet_fn(unsigned long priv)
@@ -562,6 +970,12 @@ static void tmio_issue_tasklet_fn(unsigned long priv)
562static void tmio_tasklet_fn(unsigned long arg) 970static void tmio_tasklet_fn(unsigned long arg)
563{ 971{
564 struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg; 972 struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
973 unsigned long flags;
974
975 spin_lock_irqsave(&host->lock, flags);
976
977 if (!host->data)
978 goto out;
565 979
566 if (host->data->flags & MMC_DATA_READ) 980 if (host->data->flags & MMC_DATA_READ)
567 dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->dma_sglen, 981 dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->dma_sglen,
@@ -571,6 +985,8 @@ static void tmio_tasklet_fn(unsigned long arg)
571 DMA_TO_DEVICE); 985 DMA_TO_DEVICE);
572 986
573 tmio_mmc_do_data_irq(host); 987 tmio_mmc_do_data_irq(host);
988out:
989 spin_unlock_irqrestore(&host->lock, flags);
574} 990}
575 991
576/* It might be necessary to make filter MFD specific */ 992/* It might be necessary to make filter MFD specific */
@@ -584,9 +1000,6 @@ static bool tmio_mmc_filter(struct dma_chan *chan, void *arg)
584static void tmio_mmc_request_dma(struct tmio_mmc_host *host, 1000static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
585 struct tmio_mmc_data *pdata) 1001 struct tmio_mmc_data *pdata)
586{ 1002{
587 host->cookie = -EINVAL;
588 host->desc = NULL;
589
590 /* We can only either use DMA for both Tx and Rx or not use it at all */ 1003 /* We can only either use DMA for both Tx and Rx or not use it at all */
591 if (pdata->dma) { 1004 if (pdata->dma) {
592 dma_cap_mask_t mask; 1005 dma_cap_mask_t mask;
@@ -632,15 +1045,15 @@ static void tmio_mmc_release_dma(struct tmio_mmc_host *host)
632 host->chan_rx = NULL; 1045 host->chan_rx = NULL;
633 dma_release_channel(chan); 1046 dma_release_channel(chan);
634 } 1047 }
635
636 host->cookie = -EINVAL;
637 host->desc = NULL;
638} 1048}
639#else 1049#else
640static int tmio_mmc_start_dma(struct tmio_mmc_host *host, 1050static void tmio_check_bounce_buffer(struct tmio_mmc_host *host)
1051{
1052}
1053
1054static void tmio_mmc_start_dma(struct tmio_mmc_host *host,
641 struct mmc_data *data) 1055 struct mmc_data *data)
642{ 1056{
643 return 0;
644} 1057}
645 1058
646static void tmio_mmc_request_dma(struct tmio_mmc_host *host, 1059static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
@@ -682,7 +1095,9 @@ static int tmio_mmc_start_data(struct tmio_mmc_host *host,
682 sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz); 1095 sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
683 sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks); 1096 sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);
684 1097
685 return tmio_mmc_start_dma(host, data); 1098 tmio_mmc_start_dma(host, data);
1099
1100 return 0;
686} 1101}
687 1102
688/* Process requests from the MMC layer */ 1103/* Process requests from the MMC layer */
@@ -694,6 +1109,8 @@ static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
694 if (host->mrq) 1109 if (host->mrq)
695 pr_debug("request not null\n"); 1110 pr_debug("request not null\n");
696 1111
1112 host->last_req_ts = jiffies;
1113 wmb();
697 host->mrq = mrq; 1114 host->mrq = mrq;
698 1115
699 if (mrq->data) { 1116 if (mrq->data) {
@@ -703,10 +1120,14 @@ static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
703 } 1120 }
704 1121
705 ret = tmio_mmc_start_command(host, mrq->cmd); 1122 ret = tmio_mmc_start_command(host, mrq->cmd);
706 if (!ret) 1123 if (!ret) {
1124 schedule_delayed_work(&host->delayed_reset_work,
1125 msecs_to_jiffies(2000));
707 return; 1126 return;
1127 }
708 1128
709fail: 1129fail:
1130 host->mrq = NULL;
710 mrq->cmd->error = ret; 1131 mrq->cmd->error = ret;
711 mmc_request_done(mmc, mrq); 1132 mmc_request_done(mmc, mrq);
712} 1133}
@@ -780,6 +1201,7 @@ static const struct mmc_host_ops tmio_mmc_ops = {
780 .set_ios = tmio_mmc_set_ios, 1201 .set_ios = tmio_mmc_set_ios,
781 .get_ro = tmio_mmc_get_ro, 1202 .get_ro = tmio_mmc_get_ro,
782 .get_cd = tmio_mmc_get_cd, 1203 .get_cd = tmio_mmc_get_cd,
1204 .enable_sdio_irq = tmio_mmc_enable_sdio_irq,
783}; 1205};
784 1206
785#ifdef CONFIG_PM 1207#ifdef CONFIG_PM
@@ -864,10 +1286,15 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
864 goto host_free; 1286 goto host_free;
865 1287
866 mmc->ops = &tmio_mmc_ops; 1288 mmc->ops = &tmio_mmc_ops;
867 mmc->caps = MMC_CAP_4_BIT_DATA; 1289 mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities;
868 mmc->caps |= pdata->capabilities;
869 mmc->f_max = pdata->hclk; 1290 mmc->f_max = pdata->hclk;
870 mmc->f_min = mmc->f_max / 512; 1291 mmc->f_min = mmc->f_max / 512;
1292 mmc->max_segs = 32;
1293 mmc->max_blk_size = 512;
1294 mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) *
1295 mmc->max_segs;
1296 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
1297 mmc->max_seg_size = mmc->max_req_size;
871 if (pdata->ocr_mask) 1298 if (pdata->ocr_mask)
872 mmc->ocr_avail = pdata->ocr_mask; 1299 mmc->ocr_avail = pdata->ocr_mask;
873 else 1300 else
@@ -890,12 +1317,19 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
890 goto cell_disable; 1317 goto cell_disable;
891 1318
892 disable_mmc_irqs(host, TMIO_MASK_ALL); 1319 disable_mmc_irqs(host, TMIO_MASK_ALL);
1320 if (pdata->flags & TMIO_MMC_SDIO_IRQ)
1321 tmio_mmc_enable_sdio_irq(mmc, 0);
893 1322
894 ret = request_irq(host->irq, tmio_mmc_irq, IRQF_DISABLED | 1323 ret = request_irq(host->irq, tmio_mmc_irq, IRQF_DISABLED |
895 IRQF_TRIGGER_FALLING, dev_name(&dev->dev), host); 1324 IRQF_TRIGGER_FALLING, dev_name(&dev->dev), host);
896 if (ret) 1325 if (ret)
897 goto cell_disable; 1326 goto cell_disable;
898 1327
1328 spin_lock_init(&host->lock);
1329
1330 /* Init delayed work for request timeouts */
1331 INIT_DELAYED_WORK(&host->delayed_reset_work, tmio_mmc_reset_work);
1332
899 /* See if we also get DMA */ 1333 /* See if we also get DMA */
900 tmio_mmc_request_dma(host, pdata); 1334 tmio_mmc_request_dma(host, pdata);
901 1335
@@ -934,6 +1368,7 @@ static int __devexit tmio_mmc_remove(struct platform_device *dev)
934 if (mmc) { 1368 if (mmc) {
935 struct tmio_mmc_host *host = mmc_priv(mmc); 1369 struct tmio_mmc_host *host = mmc_priv(mmc);
936 mmc_remove_host(mmc); 1370 mmc_remove_host(mmc);
1371 cancel_delayed_work_sync(&host->delayed_reset_work);
937 tmio_mmc_release_dma(host); 1372 tmio_mmc_release_dma(host);
938 free_irq(host->irq, host); 1373 free_irq(host->irq, host);
939 if (cell->disable) 1374 if (cell->disable)
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
deleted file mode 100644
index 0fedc78e3ea5..000000000000
--- a/drivers/mmc/host/tmio_mmc.h
+++ /dev/null
@@ -1,228 +0,0 @@
 1/* Definitions for use with tmio_mmc.c
2 *
3 * (c) 2004 Ian Molton <spyro@f2s.com>
4 * (c) 2007 Ian Molton <spyro@f2s.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12#include <linux/highmem.h>
13#include <linux/interrupt.h>
14#include <linux/dmaengine.h>
15
16#define CTL_SD_CMD 0x00
17#define CTL_ARG_REG 0x04
18#define CTL_STOP_INTERNAL_ACTION 0x08
19#define CTL_XFER_BLK_COUNT 0xa
20#define CTL_RESPONSE 0x0c
21#define CTL_STATUS 0x1c
22#define CTL_IRQ_MASK 0x20
23#define CTL_SD_CARD_CLK_CTL 0x24
24#define CTL_SD_XFER_LEN 0x26
25#define CTL_SD_MEM_CARD_OPT 0x28
26#define CTL_SD_ERROR_DETAIL_STATUS 0x2c
27#define CTL_SD_DATA_PORT 0x30
28#define CTL_TRANSACTION_CTL 0x34
29#define CTL_RESET_SD 0xe0
30#define CTL_SDIO_REGS 0x100
31#define CTL_CLK_AND_WAIT_CTL 0x138
32#define CTL_RESET_SDIO 0x1e0
33
34/* Definitions for values the CTRL_STATUS register can take. */
35#define TMIO_STAT_CMDRESPEND 0x00000001
36#define TMIO_STAT_DATAEND 0x00000004
37#define TMIO_STAT_CARD_REMOVE 0x00000008
38#define TMIO_STAT_CARD_INSERT 0x00000010
39#define TMIO_STAT_SIGSTATE 0x00000020
40#define TMIO_STAT_WRPROTECT 0x00000080
41#define TMIO_STAT_CARD_REMOVE_A 0x00000100
42#define TMIO_STAT_CARD_INSERT_A 0x00000200
43#define TMIO_STAT_SIGSTATE_A 0x00000400
44#define TMIO_STAT_CMD_IDX_ERR 0x00010000
45#define TMIO_STAT_CRCFAIL 0x00020000
46#define TMIO_STAT_STOPBIT_ERR 0x00040000
47#define TMIO_STAT_DATATIMEOUT 0x00080000
48#define TMIO_STAT_RXOVERFLOW 0x00100000
49#define TMIO_STAT_TXUNDERRUN 0x00200000
50#define TMIO_STAT_CMDTIMEOUT 0x00400000
51#define TMIO_STAT_RXRDY 0x01000000
52#define TMIO_STAT_TXRQ 0x02000000
53#define TMIO_STAT_ILL_FUNC 0x20000000
54#define TMIO_STAT_CMD_BUSY 0x40000000
55#define TMIO_STAT_ILL_ACCESS 0x80000000
56
57/* Define some IRQ masks */
58/* This is the mask used at reset by the chip */
59#define TMIO_MASK_ALL 0x837f031d
60#define TMIO_MASK_READOP (TMIO_STAT_RXRDY | TMIO_STAT_DATAEND)
61#define TMIO_MASK_WRITEOP (TMIO_STAT_TXRQ | TMIO_STAT_DATAEND)
62#define TMIO_MASK_CMD (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT | \
63 TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT)
64#define TMIO_MASK_IRQ (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD)
65
66
67#define enable_mmc_irqs(host, i) \
68 do { \
69 u32 mask;\
70 mask = sd_ctrl_read32((host), CTL_IRQ_MASK); \
71 mask &= ~((i) & TMIO_MASK_IRQ); \
72 sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
73 } while (0)
74
75#define disable_mmc_irqs(host, i) \
76 do { \
77 u32 mask;\
78 mask = sd_ctrl_read32((host), CTL_IRQ_MASK); \
79 mask |= ((i) & TMIO_MASK_IRQ); \
80 sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
81 } while (0)
82
83#define ack_mmc_irqs(host, i) \
84 do { \
85 sd_ctrl_write32((host), CTL_STATUS, ~(i)); \
86 } while (0)
87
88
89struct tmio_mmc_host {
90 void __iomem *ctl;
91 unsigned long bus_shift;
92 struct mmc_command *cmd;
93 struct mmc_request *mrq;
94 struct mmc_data *data;
95 struct mmc_host *mmc;
96 int irq;
97
98 /* Callbacks for clock / power control */
99 void (*set_pwr)(struct platform_device *host, int state);
100 void (*set_clk_div)(struct platform_device *host, int state);
101
102 /* pio related stuff */
103 struct scatterlist *sg_ptr;
104 unsigned int sg_len;
105 unsigned int sg_off;
106
107 struct platform_device *pdev;
108
109 /* DMA support */
110 struct dma_chan *chan_rx;
111 struct dma_chan *chan_tx;
112 struct tasklet_struct dma_complete;
113 struct tasklet_struct dma_issue;
114#ifdef CONFIG_TMIO_MMC_DMA
115 struct dma_async_tx_descriptor *desc;
116 unsigned int dma_sglen;
117 dma_cookie_t cookie;
118#endif
119};
120
121#include <linux/io.h>
122
123static inline u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
124{
125 return readw(host->ctl + (addr << host->bus_shift));
126}
127
128static inline void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
129 u16 *buf, int count)
130{
131 readsw(host->ctl + (addr << host->bus_shift), buf, count);
132}
133
134static inline u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr)
135{
136 return readw(host->ctl + (addr << host->bus_shift)) |
137 readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
138}
139
140static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr,
141 u16 val)
142{
143 writew(val, host->ctl + (addr << host->bus_shift));
144}
145
146static inline void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
147 u16 *buf, int count)
148{
149 writesw(host->ctl + (addr << host->bus_shift), buf, count);
150}
151
152static inline void sd_ctrl_write32(struct tmio_mmc_host *host, int addr,
153 u32 val)
154{
155 writew(val, host->ctl + (addr << host->bus_shift));
156 writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
157}
158
159#include <linux/scatterlist.h>
160#include <linux/blkdev.h>
161
162static inline void tmio_mmc_init_sg(struct tmio_mmc_host *host,
163 struct mmc_data *data)
164{
165 host->sg_len = data->sg_len;
166 host->sg_ptr = data->sg;
167 host->sg_off = 0;
168}
169
170static inline int tmio_mmc_next_sg(struct tmio_mmc_host *host)
171{
172 host->sg_ptr = sg_next(host->sg_ptr);
173 host->sg_off = 0;
174 return --host->sg_len;
175}
176
177static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg,
178 unsigned long *flags)
179{
180 local_irq_save(*flags);
181 return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
182}
183
184static inline void tmio_mmc_kunmap_atomic(void *virt,
185 unsigned long *flags)
186{
187 kunmap_atomic(virt, KM_BIO_SRC_IRQ);
188 local_irq_restore(*flags);
189}
190
191#ifdef CONFIG_MMC_DEBUG
192
193#define STATUS_TO_TEXT(a) \
194 do { \
195 if (status & TMIO_STAT_##a) \
196 printk(#a); \
197 } while (0)
198
199void pr_debug_status(u32 status)
200{
201 printk(KERN_DEBUG "status: %08x = ", status);
202 STATUS_TO_TEXT(CARD_REMOVE);
203 STATUS_TO_TEXT(CARD_INSERT);
204 STATUS_TO_TEXT(SIGSTATE);
205 STATUS_TO_TEXT(WRPROTECT);
206 STATUS_TO_TEXT(CARD_REMOVE_A);
207 STATUS_TO_TEXT(CARD_INSERT_A);
208 STATUS_TO_TEXT(SIGSTATE_A);
209 STATUS_TO_TEXT(CMD_IDX_ERR);
210 STATUS_TO_TEXT(STOPBIT_ERR);
211 STATUS_TO_TEXT(ILL_FUNC);
212 STATUS_TO_TEXT(CMD_BUSY);
213 STATUS_TO_TEXT(CMDRESPEND);
214 STATUS_TO_TEXT(DATAEND);
215 STATUS_TO_TEXT(CRCFAIL);
216 STATUS_TO_TEXT(DATATIMEOUT);
217 STATUS_TO_TEXT(CMDTIMEOUT);
218 STATUS_TO_TEXT(RXOVERFLOW);
219 STATUS_TO_TEXT(TXUNDERRUN);
220 STATUS_TO_TEXT(RXRDY);
221 STATUS_TO_TEXT(TXRQ);
222 STATUS_TO_TEXT(ILL_ACCESS);
223 printk("\n");
224}
225
226#else
227#define pr_debug_status(s) do { } while (0)
228#endif
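[Editor's note: the deleted accessors above encode one idea worth keeping in mind while reading the rest of the series: every register offset is scaled by bus_shift, so one driver serves controllers whose 16-bit registers sit on either 16-bit or 32-bit bus strides, and "32-bit" registers are synthesized from two adjacent 16-bit halves, low word first. A condensed, self-contained sketch of that addressing scheme:

    #include <linux/io.h>
    #include <linux/types.h>

    /* Registers are nominally 2 bytes apart; bus_shift = 1 doubles the
     * stride for buses that map each 16-bit register on a 32-bit slot. */
    static inline u16 reg_read16(void __iomem *base, unsigned long bus_shift,
                                 int addr)
    {
            return readw(base + (addr << bus_shift));
    }

    /* A 32-bit register is two adjacent 16-bit halves, low word first. */
    static inline u32 reg_read32(void __iomem *base, unsigned long bus_shift,
                                 int addr)
    {
            return reg_read16(base, bus_shift, addr) |
                   reg_read16(base, bus_shift, addr + 2) << 16;
    }
]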
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 3fda24a28d2f..ff652c77a0a5 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1944,19 +1944,12 @@ config 68360_ENET
1944config FEC 1944config FEC
1945 bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)" 1945 bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
1946 depends on M523x || M527x || M5272 || M528x || M520x || M532x || \ 1946 depends on M523x || M527x || M5272 || M528x || M520x || M532x || \
1947 MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5 1947 MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5 || SOC_IMX28
1948 select PHYLIB 1948 select PHYLIB
1949 help 1949 help
1950 Say Y here if you want to use the built-in 10/100 Fast ethernet 1950 Say Y here if you want to use the built-in 10/100 Fast ethernet
1951 controller on some Motorola ColdFire and Freescale i.MX processors. 1951 controller on some Motorola ColdFire and Freescale i.MX processors.
1952 1952
1953config FEC2
1954 bool "Second FEC ethernet controller (on some ColdFire CPUs)"
1955 depends on FEC
1956 help
1957 Say Y here if you want to use the second built-in 10/100 Fast
1958 ethernet controller on some Motorola ColdFire processors.
1959
1960config FEC_MPC52xx 1953config FEC_MPC52xx
1961 tristate "MPC52xx FEC driver" 1954 tristate "MPC52xx FEC driver"
1962 depends on PPC_MPC52xx && PPC_BESTCOMM 1955 depends on PPC_MPC52xx && PPC_BESTCOMM
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index ce1e5e9d06f6..0b9fc5173aef 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -8,6 +8,11 @@
8 * Licensed under the GPL-2 or later. 8 * Licensed under the GPL-2 or later.
9 */ 9 */
10 10
11#define DRV_VERSION "1.1"
12#define DRV_DESC "Blackfin on-chip Ethernet MAC driver"
13
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
11#include <linux/init.h> 16#include <linux/init.h>
12#include <linux/module.h> 17#include <linux/module.h>
13#include <linux/kernel.h> 18#include <linux/kernel.h>
@@ -41,12 +46,7 @@
41 46
42#include "bfin_mac.h" 47#include "bfin_mac.h"
43 48
44#define DRV_NAME "bfin_mac" 49MODULE_AUTHOR("Bryan Wu, Luke Yang");
45#define DRV_VERSION "1.1"
46#define DRV_AUTHOR "Bryan Wu, Luke Yang"
47#define DRV_DESC "Blackfin on-chip Ethernet MAC driver"
48
49MODULE_AUTHOR(DRV_AUTHOR);
50MODULE_LICENSE("GPL"); 50MODULE_LICENSE("GPL");
51MODULE_DESCRIPTION(DRV_DESC); 51MODULE_DESCRIPTION(DRV_DESC);
52MODULE_ALIAS("platform:bfin_mac"); 52MODULE_ALIAS("platform:bfin_mac");
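[Editor's note: two hunks above, bfin_mac.c gains a pr_fmt definition before its first include; that is what lets the printk(KERN_* DRV_NAME ": ...") calls throughout the file collapse into bare pr_err()/pr_notice() calls without losing the driver prefix. A self-contained sketch of the mechanism; module name and message are illustrative:

    /* pr_fmt must be defined before the first include that pulls in
     * <linux/printk.h>, or the default (empty) prefix is used instead. */
    #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

    #include <linux/kernel.h>
    #include <linux/module.h>

    static int __init demo_init(void)
    {
            /* Built as "demo.ko", this logs: "demo: low on mem" */
            pr_notice("low on mem\n");
            return 0;
    }
    module_init(demo_init);

    MODULE_LICENSE("GPL");
]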
@@ -189,8 +189,7 @@ static int desc_list_init(void)
189 /* allocate a new skb for next time receive */ 189 /* allocate a new skb for next time receive */
190 new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN); 190 new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
191 if (!new_skb) { 191 if (!new_skb) {
192 printk(KERN_NOTICE DRV_NAME 192 pr_notice("init: low on mem - packet dropped\n");
193 ": init: low on mem - packet dropped\n");
194 goto init_error; 193 goto init_error;
195 } 194 }
196 skb_reserve(new_skb, NET_IP_ALIGN); 195 skb_reserve(new_skb, NET_IP_ALIGN);
@@ -240,7 +239,7 @@ static int desc_list_init(void)
240 239
241init_error: 240init_error:
242 desc_list_free(); 241 desc_list_free();
243 printk(KERN_ERR DRV_NAME ": kmalloc failed\n"); 242 pr_err("kmalloc failed\n");
244 return -ENOMEM; 243 return -ENOMEM;
245} 244}
246 245
@@ -259,8 +258,7 @@ static int bfin_mdio_poll(void)
259 while ((bfin_read_EMAC_STAADD()) & STABUSY) { 258 while ((bfin_read_EMAC_STAADD()) & STABUSY) {
260 udelay(1); 259 udelay(1);
261 if (timeout_cnt-- < 0) { 260 if (timeout_cnt-- < 0) {
262 printk(KERN_ERR DRV_NAME 261 pr_err("wait MDC/MDIO transaction to complete timeout\n");
263 ": wait MDC/MDIO transaction to complete timeout\n");
264 return -ETIMEDOUT; 262 return -ETIMEDOUT;
265 } 263 }
266 } 264 }
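[Editor's note: bfin_mdio_poll() above is the standard bounded busy-wait: spin on a status bit with udelay(1) per iteration and fail with -ETIMEDOUT once the budget is spent. The shape of the pattern, with an illustrative ready() callback standing in for the STABUSY test:

    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    /* Returns 0 once ready() reports true, -ETIMEDOUT after ~max_us us. */
    static int poll_until_ready(bool (*ready)(void), int max_us)
    {
            while (!ready()) {
                    udelay(1);
                    if (max_us-- < 0)
                            return -ETIMEDOUT;
            }
            return 0;
    }
]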
@@ -350,9 +348,9 @@ static void bfin_mac_adjust_link(struct net_device *dev)
350 opmode &= ~RMII_10; 348 opmode &= ~RMII_10;
351 break; 349 break;
352 default: 350 default:
353 printk(KERN_WARNING 351 netdev_warn(dev,
354 "%s: Ack! Speed (%d) is not 10/100!\n", 352 "Ack! Speed (%d) is not 10/100!\n",
355 DRV_NAME, phydev->speed); 353 phydev->speed);
356 break; 354 break;
357 } 355 }
358 bfin_write_EMAC_OPMODE(opmode); 356 bfin_write_EMAC_OPMODE(opmode);
@@ -417,14 +415,13 @@ static int mii_probe(struct net_device *dev, int phy_mode)
417 415
418 /* now we are supposed to have a proper phydev, to attach to... */ 416 /* now we are supposed to have a proper phydev, to attach to... */
419 if (!phydev) { 417 if (!phydev) {
420 printk(KERN_INFO "%s: Don't found any phy device at all\n", 418 netdev_err(dev, "no phy device found\n");
421 dev->name);
422 return -ENODEV; 419 return -ENODEV;
423 } 420 }
424 421
425 if (phy_mode != PHY_INTERFACE_MODE_RMII && 422 if (phy_mode != PHY_INTERFACE_MODE_RMII &&
426 phy_mode != PHY_INTERFACE_MODE_MII) { 423 phy_mode != PHY_INTERFACE_MODE_MII) {
427 printk(KERN_INFO "%s: Invalid phy interface mode\n", dev->name); 424 netdev_err(dev, "invalid phy interface mode\n");
428 return -EINVAL; 425 return -EINVAL;
429 } 426 }
430 427
@@ -432,7 +429,7 @@ static int mii_probe(struct net_device *dev, int phy_mode)
432 0, phy_mode); 429 0, phy_mode);
433 430
434 if (IS_ERR(phydev)) { 431 if (IS_ERR(phydev)) {
435 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); 432 netdev_err(dev, "could not attach PHY\n");
436 return PTR_ERR(phydev); 433 return PTR_ERR(phydev);
437 } 434 }
438 435
@@ -453,11 +450,10 @@ static int mii_probe(struct net_device *dev, int phy_mode)
453 lp->old_duplex = -1; 450 lp->old_duplex = -1;
454 lp->phydev = phydev; 451 lp->phydev = phydev;
455 452
456 printk(KERN_INFO "%s: attached PHY driver [%s] " 453 pr_info("attached PHY driver [%s] "
457 "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)" 454 "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)@sclk=%dMHz)\n",
458 "@sclk=%dMHz)\n", 455 phydev->drv->name, dev_name(&phydev->dev), phydev->irq,
459 DRV_NAME, phydev->drv->name, dev_name(&phydev->dev), phydev->irq, 456 MDC_CLK, mdc_div, sclk/1000000);
460 MDC_CLK, mdc_div, sclk/1000000);
461 457
462 return 0; 458 return 0;
463} 459}
@@ -502,7 +498,7 @@ bfin_mac_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
502static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev, 498static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev,
503 struct ethtool_drvinfo *info) 499 struct ethtool_drvinfo *info)
504{ 500{
505 strcpy(info->driver, DRV_NAME); 501 strcpy(info->driver, KBUILD_MODNAME);
506 strcpy(info->version, DRV_VERSION); 502 strcpy(info->version, DRV_VERSION);
507 strcpy(info->fw_version, "N/A"); 503 strcpy(info->fw_version, "N/A");
508 strcpy(info->bus_info, dev_name(&dev->dev)); 504 strcpy(info->bus_info, dev_name(&dev->dev));
@@ -562,7 +558,7 @@ static const struct ethtool_ops bfin_mac_ethtool_ops = {
562}; 558};
563 559
564/**************************************************************************/ 560/**************************************************************************/
565void setup_system_regs(struct net_device *dev) 561static void setup_system_regs(struct net_device *dev)
566{ 562{
567 struct bfin_mac_local *lp = netdev_priv(dev); 563 struct bfin_mac_local *lp = netdev_priv(dev);
568 int i; 564 int i;
@@ -592,6 +588,10 @@ void setup_system_regs(struct net_device *dev)
592 588
593 bfin_write_EMAC_MMC_CTL(RSTC | CROLL); 589 bfin_write_EMAC_MMC_CTL(RSTC | CROLL);
594 590
591 /* Set vlan regs to let 1522 bytes long packets pass through */
592 bfin_write_EMAC_VLAN1(lp->vlan1_mask);
593 bfin_write_EMAC_VLAN2(lp->vlan2_mask);
594
595 /* Initialize the TX DMA channel registers */ 595 /* Initialize the TX DMA channel registers */
596 bfin_write_DMA2_X_COUNT(0); 596 bfin_write_DMA2_X_COUNT(0);
597 bfin_write_DMA2_X_MODIFY(4); 597 bfin_write_DMA2_X_MODIFY(4);
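[Editor's note: the 1522 figure in the new VLAN comment is the standard maximum for a VLAN-tagged frame on the wire: 1500 bytes of payload, 14 bytes of Ethernet header, 4 bytes of 802.1Q tag, and 4 bytes of FCS. The kernel already carries these constants, so the limit can be written without magic numbers; a reference computation, not code from this driver:

    #include <linux/if_ether.h>  /* ETH_DATA_LEN, ETH_HLEN, ETH_FCS_LEN */
    #include <linux/if_vlan.h>   /* VLAN_HLEN, VLAN_ETH_FRAME_LEN */

    /* 1500 + 14 + 4 = VLAN_ETH_FRAME_LEN (1518); add the 4-byte FCS
     * for what actually crosses the wire: 1522 bytes. */
    #define MAX_TAGGED_WIRE_LEN (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
]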
@@ -827,8 +827,7 @@ static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
827 while ((!(bfin_read_EMAC_PTP_ISTAT() & TXTL)) && (--timeout_cnt)) 827 while ((!(bfin_read_EMAC_PTP_ISTAT() & TXTL)) && (--timeout_cnt))
828 udelay(1); 828 udelay(1);
829 if (timeout_cnt == 0) 829 if (timeout_cnt == 0)
 830 printk(KERN_ERR DRV_NAME 830 netdev_err(netdev, "timestamping the TX packet failed\n");
831 ": fails to timestamp the TX packet\n");
832 else { 831 else {
833 struct skb_shared_hwtstamps shhwtstamps; 832 struct skb_shared_hwtstamps shhwtstamps;
834 u64 ns; 833 u64 ns;
@@ -1083,8 +1082,7 @@ static void bfin_mac_rx(struct net_device *dev)
1083 * in which case we simply drop the packet 1082 * in which case we simply drop the packet
1084 */ 1083 */
1085 if (current_rx_ptr->status.status_word & RX_ERROR_MASK) { 1084 if (current_rx_ptr->status.status_word & RX_ERROR_MASK) {
1086 printk(KERN_NOTICE DRV_NAME 1085 netdev_notice(dev, "rx: receive error - packet dropped\n");
1087 ": rx: receive error - packet dropped\n");
1088 dev->stats.rx_dropped++; 1086 dev->stats.rx_dropped++;
1089 goto out; 1087 goto out;
1090 } 1088 }
@@ -1094,8 +1092,7 @@ static void bfin_mac_rx(struct net_device *dev)
1094 1092
1095 new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN); 1093 new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
1096 if (!new_skb) { 1094 if (!new_skb) {
1097 printk(KERN_NOTICE DRV_NAME 1095 netdev_notice(dev, "rx: low on mem - packet dropped\n");
1098 ": rx: low on mem - packet dropped\n");
1099 dev->stats.rx_dropped++; 1096 dev->stats.rx_dropped++;
1100 goto out; 1097 goto out;
1101 } 1098 }
@@ -1213,7 +1210,7 @@ static int bfin_mac_enable(struct phy_device *phydev)
1213 int ret; 1210 int ret;
1214 u32 opmode; 1211 u32 opmode;
1215 1212
1216 pr_debug("%s: %s\n", DRV_NAME, __func__); 1213 pr_debug("%s\n", __func__);
1217 1214
1218 /* Set RX DMA */ 1215 /* Set RX DMA */
1219 bfin_write_DMA1_NEXT_DESC_PTR(&(rx_list_head->desc_a)); 1216 bfin_write_DMA1_NEXT_DESC_PTR(&(rx_list_head->desc_a));
@@ -1323,7 +1320,7 @@ static void bfin_mac_set_multicast_list(struct net_device *dev)
1323 u32 sysctl; 1320 u32 sysctl;
1324 1321
1325 if (dev->flags & IFF_PROMISC) { 1322 if (dev->flags & IFF_PROMISC) {
1326 printk(KERN_INFO "%s: set to promisc mode\n", dev->name); 1323 netdev_info(dev, "set promisc mode\n");
1327 sysctl = bfin_read_EMAC_OPMODE(); 1324 sysctl = bfin_read_EMAC_OPMODE();
1328 sysctl |= PR; 1325 sysctl |= PR;
1329 bfin_write_EMAC_OPMODE(sysctl); 1326 bfin_write_EMAC_OPMODE(sysctl);
@@ -1393,7 +1390,7 @@ static int bfin_mac_open(struct net_device *dev)
1393 * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx 1390 * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
1394 */ 1391 */
1395 if (!is_valid_ether_addr(dev->dev_addr)) { 1392 if (!is_valid_ether_addr(dev->dev_addr)) {
1396 printk(KERN_WARNING DRV_NAME ": no valid ethernet hw addr\n"); 1393 netdev_warn(dev, "no valid ethernet hw addr\n");
1397 return -EINVAL; 1394 return -EINVAL;
1398 } 1395 }
1399 1396
@@ -1527,6 +1524,9 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
1527 goto out_err_mii_probe; 1524 goto out_err_mii_probe;
1528 } 1525 }
1529 1526
1527 lp->vlan1_mask = ETH_P_8021Q | mii_bus_data->vlan1_mask;
1528 lp->vlan2_mask = ETH_P_8021Q | mii_bus_data->vlan2_mask;
1529
1530 /* Fill in the fields of the device structure with ethernet values. */ 1530 /* Fill in the fields of the device structure with ethernet values. */
1531 ether_setup(ndev); 1531 ether_setup(ndev);
1532 1532
@@ -1558,7 +1558,7 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
1558 bfin_mac_hwtstamp_init(ndev); 1558 bfin_mac_hwtstamp_init(ndev);
1559 1559
1560 /* now, print out the card info, in a short format.. */ 1560 /* now, print out the card info, in a short format.. */
1561 dev_info(&pdev->dev, "%s, Version %s\n", DRV_DESC, DRV_VERSION); 1561 netdev_info(ndev, "%s, Version %s\n", DRV_DESC, DRV_VERSION);
1562 1562
1563 return 0; 1563 return 0;
1564 1564
@@ -1650,7 +1650,7 @@ static int __devinit bfin_mii_bus_probe(struct platform_device *pdev)
1650 * so set the GPIO pins to Ethernet mode 1650 * so set the GPIO pins to Ethernet mode
1651 */ 1651 */
1652 pin_req = mii_bus_pd->mac_peripherals; 1652 pin_req = mii_bus_pd->mac_peripherals;
1653 rc = peripheral_request_list(pin_req, DRV_NAME); 1653 rc = peripheral_request_list(pin_req, KBUILD_MODNAME);
1654 if (rc) { 1654 if (rc) {
1655 dev_err(&pdev->dev, "Requesting peripherals failed!\n"); 1655 dev_err(&pdev->dev, "Requesting peripherals failed!\n");
1656 return rc; 1656 return rc;
@@ -1739,7 +1739,7 @@ static struct platform_driver bfin_mac_driver = {
1739 .resume = bfin_mac_resume, 1739 .resume = bfin_mac_resume,
1740 .suspend = bfin_mac_suspend, 1740 .suspend = bfin_mac_suspend,
1741 .driver = { 1741 .driver = {
1742 .name = DRV_NAME, 1742 .name = KBUILD_MODNAME,
1743 .owner = THIS_MODULE, 1743 .owner = THIS_MODULE,
1744 }, 1744 },
1745}; 1745};
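[Editor's note: taken as a whole, the bfin_mac.c logging hunks apply one rule: messages tied to a specific interface move to netdev_<level>(dev, ...), which derives its prefix from the device itself, while messages without a net_device in scope use pr_<level>() and inherit the pr_fmt prefix. A side-by-side sketch; the strings are illustrative:

    #include <linux/netdevice.h>
    #include <linux/printk.h>

    static void log_styles(struct net_device *dev)
    {
            /* Old: severity and prefix spelled out by hand at every call. */
            printk(KERN_WARNING "bfin_mac" ": no valid ethernet hw addr\n");

            /* New: the helper prefixes driver and device names for us,
             * so renames and multiple instances stay unambiguous. */
            netdev_warn(dev, "no valid ethernet hw addr\n");
    }
]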
diff --git a/drivers/net/bfin_mac.h b/drivers/net/bfin_mac.h
index aed68bed2365..f8559ac9a403 100644
--- a/drivers/net/bfin_mac.h
+++ b/drivers/net/bfin_mac.h
@@ -17,7 +17,14 @@
17#include <linux/etherdevice.h> 17#include <linux/etherdevice.h>
18#include <linux/bfin_mac.h> 18#include <linux/bfin_mac.h>
19 19
20/*
21 * Disable hardware checksum for bug #5600 if writeback cache is
 22 * enabled. Otherwise, corrupted RX packets will be sent up the stack
 23 * without an error mark.
24 */
25#ifndef CONFIG_BFIN_EXTMEM_WRITEBACK
20#define BFIN_MAC_CSUM_OFFLOAD 26#define BFIN_MAC_CSUM_OFFLOAD
27#endif
21 28
22#define TX_RECLAIM_JIFFIES (HZ / 5) 29#define TX_RECLAIM_JIFFIES (HZ / 5)
23 30
@@ -68,7 +75,6 @@ struct bfin_mac_local {
68 */ 75 */
69 struct net_device_stats stats; 76 struct net_device_stats stats;
70 77
71 unsigned char Mac[6]; /* MAC address of the board */
72 spinlock_t lock; 78 spinlock_t lock;
73 79
74 int wol; /* Wake On Lan */ 80 int wol; /* Wake On Lan */
@@ -76,6 +82,9 @@ struct bfin_mac_local {
76 struct timer_list tx_reclaim_timer; 82 struct timer_list tx_reclaim_timer;
77 struct net_device *ndev; 83 struct net_device *ndev;
78 84
85 /* Data for EMAC_VLAN1 regs */
86 u16 vlan1_mask, vlan2_mask;
87
79 /* MII and PHY stuffs */ 88 /* MII and PHY stuffs */
80 int old_link; /* used by bf537_adjust_link */ 89 int old_link; /* used by bf537_adjust_link */
81 int old_speed; 90 int old_speed;
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 77d6c8d6d86b..6a858a29db56 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -636,6 +636,7 @@ struct bnx2x_common {
636 636
637#define CHIP_METAL(bp) (bp->common.chip_id & 0x00000ff0) 637#define CHIP_METAL(bp) (bp->common.chip_id & 0x00000ff0)
638#define CHIP_BOND_ID(bp) (bp->common.chip_id & 0x0000000f) 638#define CHIP_BOND_ID(bp) (bp->common.chip_id & 0x0000000f)
639#define CHIP_PARITY_ENABLED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
639 640
640 int flash_size; 641 int flash_size;
641#define NVRAM_1MB_SIZE 0x20000 /* 1M bit in bytes */ 642#define NVRAM_1MB_SIZE 0x20000 /* 1M bit in bytes */
diff --git a/drivers/net/bnx2x/bnx2x_dump.h b/drivers/net/bnx2x/bnx2x_dump.h
index dc18c25ca9e5..fb3ff7c4d7ca 100644
--- a/drivers/net/bnx2x/bnx2x_dump.h
+++ b/drivers/net/bnx2x/bnx2x_dump.h
@@ -1,10 +1,16 @@
1/* bnx2x_dump.h: Broadcom Everest network driver. 1/* bnx2x_dump.h: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2009 Broadcom Corporation 3 * Copyright (c) 2011 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * Unless you and Broadcom execute a separate written software license
6 * it under the terms of the GNU General Public License as published by 6 * agreement governing use of this software, this software is licensed to you
7 * the Free Software Foundation. 7 * under the terms of the GNU General Public License version 2, available
8 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
9 *
10 * Notwithstanding the above, under no circumstances may you combine this
11 * software in any way with any other Broadcom software provided under a
12 * license other than the GPL, without Broadcom's express prior written
13 * consent.
8 */ 14 */
9 15
10 16
@@ -17,53 +23,53 @@
17#define BNX2X_DUMP_H 23#define BNX2X_DUMP_H
18 24
19 25
20struct dump_sign {
21 u32 time_stamp;
22 u32 diag_ver;
23 u32 grc_dump_ver;
24};
25 26
 26#define TSTORM_WAITP_ADDR 0x1b8a80 27/* definitions */
27#define CSTORM_WAITP_ADDR 0x238a80 28#define XSTORM_WAITP_ADDR 0x2b8a80
28#define XSTORM_WAITP_ADDR 0x2b8a80 29#define TSTORM_WAITP_ADDR 0x1b8a80
29#define USTORM_WAITP_ADDR 0x338a80 30#define USTORM_WAITP_ADDR 0x338a80
30#define TSTORM_CAM_MODE 0x1b1440 31#define CSTORM_WAITP_ADDR 0x238a80
32#define TSTORM_CAM_MODE 0x1B1440
31 33
32#define RI_E1 0x1 34#define MAX_TIMER_PENDING 200
33#define RI_E1H 0x2 35#define TIMER_SCAN_DONT_CARE 0xFF
36#define RI_E1 0x1
37#define RI_E1H 0x2
34#define RI_E2 0x4 38#define RI_E2 0x4
35#define RI_ONLINE 0x100 39#define RI_ONLINE 0x100
36#define RI_PATH0_DUMP 0x200 40#define RI_PATH0_DUMP 0x200
37#define RI_PATH1_DUMP 0x400 41#define RI_PATH1_DUMP 0x400
38#define RI_E1_OFFLINE (RI_E1) 42#define RI_E1_OFFLINE (RI_E1)
39#define RI_E1_ONLINE (RI_E1 | RI_ONLINE) 43#define RI_E1_ONLINE (RI_E1 | RI_ONLINE)
40#define RI_E1H_OFFLINE (RI_E1H) 44#define RI_E1H_OFFLINE (RI_E1H)
41#define RI_E1H_ONLINE (RI_E1H | RI_ONLINE) 45#define RI_E1H_ONLINE (RI_E1H | RI_ONLINE)
42#define RI_E2_OFFLINE (RI_E2) 46#define RI_E2_OFFLINE (RI_E2)
43#define RI_E2_ONLINE (RI_E2 | RI_ONLINE) 47#define RI_E2_ONLINE (RI_E2 | RI_ONLINE)
44#define RI_E1E1H_OFFLINE (RI_E1 | RI_E1H) 48#define RI_E1E1H_OFFLINE (RI_E1 | RI_E1H)
45#define RI_E1E1H_ONLINE (RI_E1 | RI_E1H | RI_ONLINE) 49#define RI_E1E1H_ONLINE (RI_E1 | RI_E1H | RI_ONLINE)
46#define RI_E1HE2_OFFLINE (RI_E2 | RI_E1H) 50#define RI_E1HE2_OFFLINE (RI_E2 | RI_E1H)
47#define RI_E1HE2_ONLINE (RI_E2 | RI_E1H | RI_ONLINE) 51#define RI_E1HE2_ONLINE (RI_E2 | RI_E1H | RI_ONLINE)
48#define RI_E1E2_OFFLINE (RI_E2 | RI_E1) 52#define RI_E1E2_OFFLINE (RI_E2 | RI_E1)
49#define RI_E1E2_ONLINE (RI_E2 | RI_E1 | RI_ONLINE) 53#define RI_E1E2_ONLINE (RI_E2 | RI_E1 | RI_ONLINE)
50#define RI_ALL_OFFLINE (RI_E1 | RI_E1H | RI_E2) 54#define RI_ALL_OFFLINE (RI_E1 | RI_E1H | RI_E2)
51#define RI_ALL_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_ONLINE) 55#define RI_ALL_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_ONLINE)
52
53#define MAX_TIMER_PENDING 200
54#define TIMER_SCAN_DONT_CARE 0xFF
55 56
57struct dump_sign {
58 u32 time_stamp;
59 u32 diag_ver;
60 u32 grc_dump_ver;
61};
56 62
57struct dump_hdr { 63struct dump_hdr {
58 u32 hdr_size; /* in dwords, excluding this field */ 64 u32 hdr_size; /* in dwords, excluding this field */
59 struct dump_sign dump_sign; 65 struct dump_sign dump_sign;
60 u32 xstorm_waitp; 66 u32 xstorm_waitp;
61 u32 tstorm_waitp; 67 u32 tstorm_waitp;
62 u32 ustorm_waitp; 68 u32 ustorm_waitp;
63 u32 cstorm_waitp; 69 u32 cstorm_waitp;
64 u16 info; 70 u16 info;
65 u8 idle_chk; 71 u8 idle_chk;
66 u8 reserved; 72 u8 reserved;
67}; 73};
68 74
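[Editor's note: each entry in the reg_addrs[] table that follows packs a dump range as { byte offset, length in 32-bit words, chip/online flags }; the dump path walks the table and copies only the ranges whose flags match the running chip. A sketch of that filter-and-copy loop, with the struct and the register read abstracted behind illustrative names:

    #include <linux/types.h>

    struct range { u32 addr; u32 size; u16 info; };

    /* 'wanted' is a chip bit plus RI_ONLINE, e.g. (RI_E1H | RI_ONLINE);
     * a range is dumped only if it carries all of those flags. */
    static u32 *dump_ranges(const struct range *r, int n, u16 wanted,
                            u32 (*read_reg)(u32 addr), u32 *out)
    {
            int i;
            u32 off;

            for (i = 0; i < n; i++) {
                    if ((r[i].info & wanted) != wanted)
                            continue;
                    for (off = 0; off < r[i].size; off++)
                            *out++ = read_reg(r[i].addr + off * 4);
            }
            return out;
    }
]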
69struct reg_addr { 75struct reg_addr {
@@ -80,202 +86,185 @@ struct wreg_addr {
80 u16 info; 86 u16 info;
81}; 87};
82 88
83 89#define REGS_COUNT 834
84#define REGS_COUNT 558
85static const struct reg_addr reg_addrs[REGS_COUNT] = { 90static const struct reg_addr reg_addrs[REGS_COUNT] = {
86 { 0x2000, 341, RI_ALL_ONLINE }, { 0x2800, 103, RI_ALL_ONLINE }, 91 { 0x2000, 341, RI_ALL_ONLINE }, { 0x2800, 103, RI_ALL_ONLINE },
87 { 0x3000, 287, RI_ALL_ONLINE }, { 0x3800, 331, RI_ALL_ONLINE }, 92 { 0x3000, 287, RI_ALL_ONLINE }, { 0x3800, 331, RI_ALL_ONLINE },
88 { 0x8800, 6, RI_E1_ONLINE }, { 0xa000, 223, RI_ALL_ONLINE }, 93 { 0x8800, 6, RI_ALL_ONLINE }, { 0x8818, 1, RI_E1HE2_ONLINE },
89 { 0xa388, 1, RI_ALL_ONLINE }, { 0xa398, 1, RI_ALL_ONLINE }, 94 { 0x9000, 164, RI_E2_ONLINE }, { 0x9400, 33, RI_E2_ONLINE },
90 { 0xa39c, 7, RI_E1H_ONLINE }, { 0xa3c0, 3, RI_E1H_ONLINE }, 95 { 0xa000, 27, RI_ALL_ONLINE }, { 0xa06c, 1, RI_E1E1H_ONLINE },
91 { 0xa3d0, 1, RI_E1H_ONLINE }, { 0xa3d8, 1, RI_E1H_ONLINE }, 96 { 0xa070, 71, RI_ALL_ONLINE }, { 0xa18c, 4, RI_E1E1H_ONLINE },
92 { 0xa3e0, 1, RI_E1H_ONLINE }, { 0xa3e8, 1, RI_E1H_ONLINE }, 97 { 0xa19c, 62, RI_ALL_ONLINE }, { 0xa294, 2, RI_E1E1H_ONLINE },
93 { 0xa3f0, 1, RI_E1H_ONLINE }, { 0xa3f8, 1, RI_E1H_ONLINE }, 98 { 0xa29c, 56, RI_ALL_ONLINE }, { 0xa39c, 7, RI_E1HE2_ONLINE },
94 { 0xa400, 69, RI_ALL_ONLINE }, { 0xa518, 1, RI_ALL_ONLINE }, 99 { 0xa3c0, 3, RI_E1HE2_ONLINE }, { 0xa3d0, 1, RI_E1HE2_ONLINE },
95 { 0xa520, 1, RI_ALL_ONLINE }, { 0xa528, 1, RI_ALL_ONLINE }, 100 { 0xa3d8, 1, RI_E1HE2_ONLINE }, { 0xa3e0, 1, RI_E1HE2_ONLINE },
96 { 0xa530, 1, RI_ALL_ONLINE }, { 0xa538, 1, RI_ALL_ONLINE }, 101 { 0xa3e8, 1, RI_E1HE2_ONLINE }, { 0xa3f0, 1, RI_E1HE2_ONLINE },
97 { 0xa540, 1, RI_ALL_ONLINE }, { 0xa548, 1, RI_ALL_ONLINE }, 102 { 0xa3f8, 1, RI_E1HE2_ONLINE }, { 0xa400, 43, RI_ALL_ONLINE },
98 { 0xa550, 1, RI_ALL_ONLINE }, { 0xa558, 1, RI_ALL_ONLINE }, 103 { 0xa4ac, 2, RI_E1E1H_ONLINE }, { 0xa4b4, 1, RI_ALL_ONLINE },
99 { 0xa560, 1, RI_ALL_ONLINE }, { 0xa568, 1, RI_ALL_ONLINE }, 104 { 0xa4b8, 2, RI_E1E1H_ONLINE }, { 0xa4c0, 3, RI_ALL_ONLINE },
100 { 0xa570, 1, RI_ALL_ONLINE }, { 0xa580, 1, RI_ALL_ONLINE }, 105 { 0xa4cc, 5, RI_E1E1H_ONLINE }, { 0xa4e0, 9, RI_ALL_ONLINE },
101 { 0xa590, 1, RI_ALL_ONLINE }, { 0xa5a0, 1, RI_ALL_ONLINE }, 106 { 0xa504, 1, RI_E1E1H_ONLINE }, { 0xa508, 3, RI_ALL_ONLINE },
102 { 0xa5c0, 1, RI_ALL_ONLINE }, { 0xa5e0, 1, RI_E1H_ONLINE }, 107 { 0xa518, 1, RI_ALL_ONLINE }, { 0xa520, 1, RI_ALL_ONLINE },
103 { 0xa5e8, 1, RI_E1H_ONLINE }, { 0xa5f0, 1, RI_E1H_ONLINE }, 108 { 0xa528, 1, RI_ALL_ONLINE }, { 0xa530, 1, RI_ALL_ONLINE },
104 { 0xa5f8, 10, RI_E1H_ONLINE }, { 0x10000, 236, RI_ALL_ONLINE }, 109 { 0xa538, 1, RI_ALL_ONLINE }, { 0xa540, 1, RI_ALL_ONLINE },
105 { 0x103bc, 1, RI_ALL_ONLINE }, { 0x103cc, 1, RI_ALL_ONLINE }, 110 { 0xa548, 1, RI_E1E1H_ONLINE }, { 0xa550, 1, RI_E1E1H_ONLINE },
106 { 0x103dc, 1, RI_ALL_ONLINE }, { 0x10400, 57, RI_ALL_ONLINE }, 111 { 0xa558, 1, RI_E1E1H_ONLINE }, { 0xa560, 1, RI_E1E1H_ONLINE },
107 { 0x104e8, 2, RI_ALL_ONLINE }, { 0x104f4, 2, RI_ALL_ONLINE }, 112 { 0xa568, 1, RI_E1E1H_ONLINE }, { 0xa570, 1, RI_ALL_ONLINE },
108 { 0x10500, 146, RI_ALL_ONLINE }, { 0x10750, 2, RI_ALL_ONLINE }, 113 { 0xa580, 1, RI_ALL_ONLINE }, { 0xa590, 1, RI_ALL_ONLINE },
109 { 0x10760, 2, RI_ALL_ONLINE }, { 0x10770, 2, RI_ALL_ONLINE }, 114 { 0xa5a0, 1, RI_ALL_ONLINE }, { 0xa5c0, 1, RI_ALL_ONLINE },
110 { 0x10780, 2, RI_ALL_ONLINE }, { 0x10790, 2, RI_ALL_ONLINE }, 115 { 0xa5e0, 1, RI_E1HE2_ONLINE }, { 0xa5e8, 1, RI_E1HE2_ONLINE },
111 { 0x107a0, 2, RI_ALL_ONLINE }, { 0x107b0, 2, RI_ALL_ONLINE }, 116 { 0xa5f0, 1, RI_E1HE2_ONLINE }, { 0xa5f8, 10, RI_E1HE2_ONLINE },
112 { 0x107c0, 2, RI_ALL_ONLINE }, { 0x107d0, 2, RI_ALL_ONLINE }, 117 { 0xa620, 111, RI_E2_ONLINE }, { 0xa800, 51, RI_E2_ONLINE },
113 { 0x107e0, 2, RI_ALL_ONLINE }, { 0x10880, 2, RI_ALL_ONLINE }, 118 { 0xa8d4, 4, RI_E2_ONLINE }, { 0xa8e8, 1, RI_E2_ONLINE },
114 { 0x10900, 2, RI_ALL_ONLINE }, { 0x12000, 1, RI_ALL_ONLINE }, 119 { 0xa8f0, 1, RI_E2_ONLINE }, { 0x10000, 236, RI_ALL_ONLINE },
115 { 0x14000, 1, RI_ALL_ONLINE }, { 0x16000, 26, RI_E1H_ONLINE }, 120 { 0x10400, 57, RI_ALL_ONLINE }, { 0x104e8, 2, RI_ALL_ONLINE },
116 { 0x16070, 18, RI_E1H_ONLINE }, { 0x160c0, 27, RI_E1H_ONLINE }, 121 { 0x104f4, 2, RI_ALL_ONLINE }, { 0x10500, 146, RI_ALL_ONLINE },
117 { 0x16140, 1, RI_E1H_ONLINE }, { 0x16160, 1, RI_E1H_ONLINE }, 122 { 0x10750, 2, RI_ALL_ONLINE }, { 0x10760, 2, RI_ALL_ONLINE },
118 { 0x16180, 2, RI_E1H_ONLINE }, { 0x161c0, 2, RI_E1H_ONLINE }, 123 { 0x10770, 2, RI_ALL_ONLINE }, { 0x10780, 2, RI_ALL_ONLINE },
119 { 0x16204, 5, RI_E1H_ONLINE }, { 0x18000, 1, RI_E1H_ONLINE }, 124 { 0x10790, 2, RI_ALL_ONLINE }, { 0x107a0, 2, RI_ALL_ONLINE },
120 { 0x18008, 1, RI_E1H_ONLINE }, { 0x20000, 24, RI_ALL_ONLINE }, 125 { 0x107b0, 2, RI_ALL_ONLINE }, { 0x107c0, 2, RI_ALL_ONLINE },
121 { 0x20060, 8, RI_ALL_ONLINE }, { 0x20080, 138, RI_ALL_ONLINE }, 126 { 0x107d0, 2, RI_ALL_ONLINE }, { 0x107e0, 2, RI_ALL_ONLINE },
122 { 0x202b4, 1, RI_ALL_ONLINE }, { 0x202c4, 1, RI_ALL_ONLINE }, 127 { 0x10880, 2, RI_ALL_ONLINE }, { 0x10900, 2, RI_ALL_ONLINE },
123 { 0x20400, 2, RI_ALL_ONLINE }, { 0x2040c, 8, RI_ALL_ONLINE }, 128 { 0x16000, 26, RI_E1HE2_ONLINE }, { 0x16070, 18, RI_E1HE2_ONLINE },
124 { 0x2042c, 18, RI_E1H_ONLINE }, { 0x20480, 1, RI_ALL_ONLINE }, 129 { 0x160c0, 27, RI_E1HE2_ONLINE }, { 0x16140, 1, RI_E1HE2_ONLINE },
125 { 0x20500, 1, RI_ALL_ONLINE }, { 0x20600, 1, RI_ALL_ONLINE }, 130 { 0x16160, 1, RI_E1HE2_ONLINE }, { 0x16180, 2, RI_E1HE2_ONLINE },
126 { 0x28000, 1, RI_ALL_ONLINE }, { 0x28004, 8191, RI_ALL_OFFLINE }, 131 { 0x161c0, 2, RI_E1HE2_ONLINE }, { 0x16204, 5, RI_E1HE2_ONLINE },
127 { 0x30000, 1, RI_ALL_ONLINE }, { 0x30004, 16383, RI_ALL_OFFLINE }, 132 { 0x18000, 1, RI_E1HE2_ONLINE }, { 0x18008, 1, RI_E1HE2_ONLINE },
128 { 0x40000, 98, RI_ALL_ONLINE }, { 0x40194, 1, RI_ALL_ONLINE }, 133 { 0x18010, 35, RI_E2_ONLINE }, { 0x180a4, 2, RI_E2_ONLINE },
129 { 0x401a4, 1, RI_ALL_ONLINE }, { 0x401a8, 11, RI_E1H_ONLINE }, 134 { 0x180c0, 191, RI_E2_ONLINE }, { 0x18440, 1, RI_E2_ONLINE },
130 { 0x40200, 4, RI_ALL_ONLINE }, { 0x40400, 43, RI_ALL_ONLINE }, 135 { 0x18460, 1, RI_E2_ONLINE }, { 0x18480, 2, RI_E2_ONLINE },
131 { 0x404b8, 1, RI_ALL_ONLINE }, { 0x404c8, 1, RI_ALL_ONLINE }, 136 { 0x184c0, 2, RI_E2_ONLINE }, { 0x18500, 15, RI_E2_ONLINE },
132 { 0x404cc, 3, RI_E1H_ONLINE }, { 0x40500, 2, RI_ALL_ONLINE }, 137 { 0x20000, 24, RI_ALL_ONLINE }, { 0x20060, 8, RI_ALL_ONLINE },
138 { 0x20080, 94, RI_ALL_ONLINE }, { 0x201f8, 1, RI_E1E1H_ONLINE },
139 { 0x201fc, 1, RI_ALL_ONLINE }, { 0x20200, 1, RI_E1E1H_ONLINE },
140 { 0x20204, 1, RI_ALL_ONLINE }, { 0x20208, 1, RI_E1E1H_ONLINE },
141 { 0x2020c, 39, RI_ALL_ONLINE }, { 0x202c8, 1, RI_E2_ONLINE },
142 { 0x202d8, 4, RI_E2_ONLINE }, { 0x20400, 2, RI_ALL_ONLINE },
143 { 0x2040c, 8, RI_ALL_ONLINE }, { 0x2042c, 18, RI_E1HE2_ONLINE },
144 { 0x20480, 1, RI_ALL_ONLINE }, { 0x20500, 1, RI_ALL_ONLINE },
145 { 0x20600, 1, RI_ALL_ONLINE }, { 0x28000, 1, RI_ALL_ONLINE },
146 { 0x28004, 8191, RI_ALL_OFFLINE }, { 0x30000, 1, RI_ALL_ONLINE },
147 { 0x30004, 16383, RI_ALL_OFFLINE }, { 0x40000, 98, RI_ALL_ONLINE },
148 { 0x401a8, 8, RI_E1HE2_ONLINE }, { 0x401c8, 1, RI_E1H_ONLINE },
149 { 0x401cc, 2, RI_E1HE2_ONLINE }, { 0x401d4, 2, RI_E2_ONLINE },
150 { 0x40200, 4, RI_ALL_ONLINE }, { 0x40220, 18, RI_E2_ONLINE },
151 { 0x40400, 43, RI_ALL_ONLINE }, { 0x404cc, 3, RI_E1HE2_ONLINE },
152 { 0x404e0, 1, RI_E2_ONLINE }, { 0x40500, 2, RI_ALL_ONLINE },
133 { 0x40510, 2, RI_ALL_ONLINE }, { 0x40520, 2, RI_ALL_ONLINE }, 153 { 0x40510, 2, RI_ALL_ONLINE }, { 0x40520, 2, RI_ALL_ONLINE },
134 { 0x40530, 2, RI_ALL_ONLINE }, { 0x40540, 2, RI_ALL_ONLINE }, 154 { 0x40530, 2, RI_ALL_ONLINE }, { 0x40540, 2, RI_ALL_ONLINE },
135 { 0x42000, 164, RI_ALL_ONLINE }, { 0x4229c, 1, RI_ALL_ONLINE }, 155 { 0x40550, 10, RI_E2_ONLINE }, { 0x40610, 2, RI_E2_ONLINE },
136 { 0x422ac, 1, RI_ALL_ONLINE }, { 0x422bc, 1, RI_ALL_ONLINE }, 156 { 0x42000, 164, RI_ALL_ONLINE }, { 0x422c0, 4, RI_E2_ONLINE },
137 { 0x422d4, 5, RI_E1H_ONLINE }, { 0x42400, 49, RI_ALL_ONLINE }, 157 { 0x422d4, 5, RI_E1HE2_ONLINE }, { 0x422e8, 1, RI_E2_ONLINE },
138 { 0x424c8, 38, RI_ALL_ONLINE }, { 0x42568, 2, RI_ALL_ONLINE }, 158 { 0x42400, 49, RI_ALL_ONLINE }, { 0x424c8, 38, RI_ALL_ONLINE },
139 { 0x42800, 1, RI_ALL_ONLINE }, { 0x50000, 20, RI_ALL_ONLINE }, 159 { 0x42568, 2, RI_ALL_ONLINE }, { 0x42640, 5, RI_E2_ONLINE },
140 { 0x50050, 8, RI_ALL_ONLINE }, { 0x50070, 88, RI_ALL_ONLINE }, 160 { 0x42800, 1, RI_ALL_ONLINE }, { 0x50000, 1, RI_ALL_ONLINE },
141 { 0x501dc, 1, RI_ALL_ONLINE }, { 0x501ec, 1, RI_ALL_ONLINE }, 161 { 0x50004, 19, RI_ALL_ONLINE }, { 0x50050, 8, RI_ALL_ONLINE },
142 { 0x501f0, 4, RI_E1H_ONLINE }, { 0x50200, 2, RI_ALL_ONLINE }, 162 { 0x50070, 88, RI_ALL_ONLINE }, { 0x501f0, 4, RI_E1HE2_ONLINE },
143 { 0x5020c, 7, RI_ALL_ONLINE }, { 0x50228, 6, RI_E1H_ONLINE }, 163 { 0x50200, 2, RI_ALL_ONLINE }, { 0x5020c, 7, RI_ALL_ONLINE },
144 { 0x50240, 1, RI_ALL_ONLINE }, { 0x50280, 1, RI_ALL_ONLINE }, 164 { 0x50228, 6, RI_E1HE2_ONLINE }, { 0x50240, 1, RI_ALL_ONLINE },
165 { 0x50280, 1, RI_ALL_ONLINE }, { 0x50300, 1, RI_E2_ONLINE },
166 { 0x5030c, 1, RI_E2_ONLINE }, { 0x50318, 1, RI_E2_ONLINE },
167 { 0x5031c, 1, RI_E2_ONLINE }, { 0x50320, 2, RI_E2_ONLINE },
145 { 0x52000, 1, RI_ALL_ONLINE }, { 0x54000, 1, RI_ALL_ONLINE }, 168 { 0x52000, 1, RI_ALL_ONLINE }, { 0x54000, 1, RI_ALL_ONLINE },
146 { 0x54004, 3327, RI_ALL_OFFLINE }, { 0x58000, 1, RI_ALL_ONLINE }, 169 { 0x54004, 3327, RI_ALL_OFFLINE }, { 0x58000, 1, RI_ALL_ONLINE },
147 { 0x58004, 8191, RI_ALL_OFFLINE }, { 0x60000, 71, RI_ALL_ONLINE }, 170 { 0x58004, 8191, RI_E1E1H_OFFLINE }, { 0x60000, 26, RI_ALL_ONLINE },
148 { 0x60128, 1, RI_ALL_ONLINE }, { 0x60138, 1, RI_ALL_ONLINE }, 171 { 0x60068, 8, RI_E1E1H_ONLINE }, { 0x60088, 12, RI_ALL_ONLINE },
149 { 0x6013c, 24, RI_E1H_ONLINE }, { 0x60200, 1, RI_ALL_ONLINE }, 172 { 0x600b8, 9, RI_E1E1H_ONLINE }, { 0x600dc, 1, RI_ALL_ONLINE },
173 { 0x600e0, 5, RI_E1E1H_ONLINE }, { 0x600f4, 1, RI_ALL_ONLINE },
174 { 0x600f8, 1, RI_E1E1H_ONLINE }, { 0x600fc, 8, RI_ALL_ONLINE },
175 { 0x6013c, 24, RI_E1H_ONLINE }, { 0x6019c, 2, RI_E2_ONLINE },
176 { 0x601ac, 18, RI_E2_ONLINE }, { 0x60200, 1, RI_ALL_ONLINE },
177 { 0x60204, 2, RI_ALL_OFFLINE }, { 0x60210, 13, RI_E2_ONLINE },
150 { 0x61000, 1, RI_ALL_ONLINE }, { 0x61004, 511, RI_ALL_OFFLINE }, 178 { 0x61000, 1, RI_ALL_ONLINE }, { 0x61004, 511, RI_ALL_OFFLINE },
151 { 0x70000, 8, RI_ALL_ONLINE }, { 0x70020, 21496, RI_ALL_OFFLINE }, 179 { 0x70000, 8, RI_ALL_ONLINE }, { 0x70020, 8184, RI_ALL_OFFLINE },
152 { 0x85000, 3, RI_ALL_ONLINE }, { 0x8500c, 4, RI_ALL_OFFLINE }, 180 { 0x85000, 3, RI_ALL_ONLINE }, { 0x8501c, 7, RI_ALL_ONLINE },
153 { 0x8501c, 7, RI_ALL_ONLINE }, { 0x85038, 4, RI_ALL_OFFLINE }, 181 { 0x85048, 1, RI_ALL_ONLINE }, { 0x85200, 32, RI_ALL_ONLINE },
154 { 0x85048, 1, RI_ALL_ONLINE }, { 0x8504c, 109, RI_ALL_OFFLINE }, 182 { 0xc1000, 7, RI_ALL_ONLINE }, { 0xc103c, 2, RI_E2_ONLINE },
155 { 0x85200, 32, RI_ALL_ONLINE }, { 0x85280, 11104, RI_ALL_OFFLINE }, 183 { 0xc1800, 2, RI_ALL_ONLINE }, { 0xc2000, 164, RI_ALL_ONLINE },
156 { 0xa0000, 16384, RI_ALL_ONLINE }, { 0xb0000, 16384, RI_E1H_ONLINE }, 184 { 0xc22c0, 5, RI_E2_ONLINE }, { 0xc22d8, 4, RI_E2_ONLINE },
157 { 0xc1000, 7, RI_ALL_ONLINE }, { 0xc1028, 1, RI_ALL_ONLINE },
158 { 0xc1038, 1, RI_ALL_ONLINE }, { 0xc1800, 2, RI_ALL_ONLINE },
159 { 0xc2000, 164, RI_ALL_ONLINE }, { 0xc229c, 1, RI_ALL_ONLINE },
160 { 0xc22ac, 1, RI_ALL_ONLINE }, { 0xc22bc, 1, RI_ALL_ONLINE },
161 { 0xc2400, 49, RI_ALL_ONLINE }, { 0xc24c8, 38, RI_ALL_ONLINE }, 185 { 0xc2400, 49, RI_ALL_ONLINE }, { 0xc24c8, 38, RI_ALL_ONLINE },
162 { 0xc2568, 2, RI_ALL_ONLINE }, { 0xc2600, 1, RI_ALL_ONLINE }, 186 { 0xc2568, 2, RI_ALL_ONLINE }, { 0xc2600, 1, RI_ALL_ONLINE },
163 { 0xc4000, 165, RI_ALL_ONLINE }, { 0xc42a0, 1, RI_ALL_ONLINE }, 187 { 0xc4000, 165, RI_ALL_ONLINE }, { 0xc42d8, 2, RI_E2_ONLINE },
164 { 0xc42b0, 1, RI_ALL_ONLINE }, { 0xc42c0, 1, RI_ALL_ONLINE }, 188 { 0xc42e0, 7, RI_E1HE2_ONLINE }, { 0xc42fc, 1, RI_E2_ONLINE },
165 { 0xc42e0, 7, RI_E1H_ONLINE }, { 0xc4400, 51, RI_ALL_ONLINE }, 189 { 0xc4400, 51, RI_ALL_ONLINE }, { 0xc44d0, 38, RI_ALL_ONLINE },
166 { 0xc44d0, 38, RI_ALL_ONLINE }, { 0xc4570, 2, RI_ALL_ONLINE }, 190 { 0xc4570, 2, RI_ALL_ONLINE }, { 0xc4578, 5, RI_E2_ONLINE },
167 { 0xc4600, 1, RI_ALL_ONLINE }, { 0xd0000, 19, RI_ALL_ONLINE }, 191 { 0xc4600, 1, RI_ALL_ONLINE }, { 0xd0000, 19, RI_ALL_ONLINE },
168 { 0xd004c, 8, RI_ALL_ONLINE }, { 0xd006c, 91, RI_ALL_ONLINE }, 192 { 0xd004c, 8, RI_ALL_ONLINE }, { 0xd006c, 91, RI_ALL_ONLINE },
169 { 0xd01e4, 1, RI_ALL_ONLINE }, { 0xd01f4, 1, RI_ALL_ONLINE }, 193 { 0xd01fc, 1, RI_E2_ONLINE }, { 0xd0200, 2, RI_ALL_ONLINE },
170 { 0xd0200, 2, RI_ALL_ONLINE }, { 0xd020c, 7, RI_ALL_ONLINE }, 194 { 0xd020c, 7, RI_ALL_ONLINE }, { 0xd0228, 18, RI_E1HE2_ONLINE },
171 { 0xd0228, 18, RI_E1H_ONLINE }, { 0xd0280, 1, RI_ALL_ONLINE }, 195 { 0xd0280, 1, RI_ALL_ONLINE }, { 0xd0300, 1, RI_ALL_ONLINE },
172 { 0xd0300, 1, RI_ALL_ONLINE }, { 0xd0400, 1, RI_ALL_ONLINE }, 196 { 0xd0400, 1, RI_ALL_ONLINE }, { 0xd4000, 1, RI_ALL_ONLINE },
173 { 0xd4000, 1, RI_ALL_ONLINE }, { 0xd4004, 2559, RI_ALL_OFFLINE }, 197 { 0xd4004, 2559, RI_ALL_OFFLINE }, { 0xd8000, 1, RI_ALL_ONLINE },
174 { 0xd8000, 1, RI_ALL_ONLINE }, { 0xd8004, 8191, RI_ALL_OFFLINE }, 198 { 0xd8004, 8191, RI_ALL_OFFLINE }, { 0xe0000, 21, RI_ALL_ONLINE },
175 { 0xe0000, 21, RI_ALL_ONLINE }, { 0xe0054, 8, RI_ALL_ONLINE }, 199 { 0xe0054, 8, RI_ALL_ONLINE }, { 0xe0074, 49, RI_ALL_ONLINE },
176 { 0xe0074, 85, RI_ALL_ONLINE }, { 0xe01d4, 1, RI_ALL_ONLINE }, 200 { 0xe0138, 1, RI_E1E1H_ONLINE }, { 0xe013c, 35, RI_ALL_ONLINE },
177 { 0xe01e4, 1, RI_ALL_ONLINE }, { 0xe0200, 2, RI_ALL_ONLINE }, 201 { 0xe01f4, 2, RI_E2_ONLINE }, { 0xe0200, 2, RI_ALL_ONLINE },
178 { 0xe020c, 8, RI_ALL_ONLINE }, { 0xe022c, 18, RI_E1H_ONLINE }, 202 { 0xe020c, 8, RI_ALL_ONLINE }, { 0xe022c, 18, RI_E1HE2_ONLINE },
179 { 0xe0280, 1, RI_ALL_ONLINE }, { 0xe0300, 1, RI_ALL_ONLINE }, 203 { 0xe0280, 1, RI_ALL_ONLINE }, { 0xe0300, 1, RI_ALL_ONLINE },
180 { 0xe1000, 1, RI_ALL_ONLINE }, { 0xe2000, 1, RI_ALL_ONLINE }, 204 { 0xe1000, 1, RI_ALL_ONLINE }, { 0xe2000, 1, RI_ALL_ONLINE },
181 { 0xe2004, 2047, RI_ALL_OFFLINE }, { 0xf0000, 1, RI_ALL_ONLINE }, 205 { 0xe2004, 2047, RI_ALL_OFFLINE }, { 0xf0000, 1, RI_ALL_ONLINE },
182 { 0xf0004, 16383, RI_ALL_OFFLINE }, { 0x101000, 12, RI_ALL_ONLINE }, 206 { 0xf0004, 16383, RI_ALL_OFFLINE }, { 0x101000, 12, RI_ALL_ONLINE },
183 { 0x10103c, 1, RI_ALL_ONLINE }, { 0x10104c, 1, RI_ALL_ONLINE }, 207 { 0x101050, 1, RI_E1HE2_ONLINE }, { 0x101054, 3, RI_E2_ONLINE },
184 { 0x101050, 1, RI_E1H_ONLINE }, { 0x101100, 1, RI_ALL_ONLINE }, 208 { 0x101100, 1, RI_ALL_ONLINE }, { 0x101800, 8, RI_ALL_ONLINE },
185 { 0x101800, 8, RI_ALL_ONLINE }, { 0x102000, 18, RI_ALL_ONLINE }, 209 { 0x102000, 18, RI_ALL_ONLINE }, { 0x102068, 6, RI_E2_ONLINE },
186 { 0x102054, 1, RI_ALL_ONLINE }, { 0x102064, 1, RI_ALL_ONLINE },
187 { 0x102080, 17, RI_ALL_ONLINE }, { 0x1020c8, 8, RI_E1H_ONLINE }, 210 { 0x102080, 17, RI_ALL_ONLINE }, { 0x1020c8, 8, RI_E1H_ONLINE },
188 { 0x102400, 1, RI_ALL_ONLINE }, { 0x103000, 26, RI_ALL_ONLINE }, 211 { 0x1020e8, 9, RI_E2_ONLINE }, { 0x102400, 1, RI_ALL_ONLINE },
189 { 0x103074, 1, RI_ALL_ONLINE }, { 0x103084, 1, RI_ALL_ONLINE }, 212 { 0x103000, 26, RI_ALL_ONLINE }, { 0x103098, 5, RI_E1HE2_ONLINE },
190 { 0x103094, 1, RI_ALL_ONLINE }, { 0x103098, 5, RI_E1H_ONLINE }, 213 { 0x1030ac, 10, RI_E2_ONLINE }, { 0x1030d8, 8, RI_E2_ONLINE },
214 { 0x103400, 1, RI_E2_ONLINE }, { 0x103404, 135, RI_E2_OFFLINE },
191 { 0x103800, 8, RI_ALL_ONLINE }, { 0x104000, 63, RI_ALL_ONLINE }, 215 { 0x103800, 8, RI_ALL_ONLINE }, { 0x104000, 63, RI_ALL_ONLINE },
192 { 0x104108, 1, RI_ALL_ONLINE }, { 0x104118, 1, RI_ALL_ONLINE }, 216 { 0x10411c, 16, RI_E2_ONLINE }, { 0x104200, 17, RI_ALL_ONLINE },
193 { 0x104200, 17, RI_ALL_ONLINE }, { 0x104400, 64, RI_ALL_ONLINE }, 217 { 0x104400, 64, RI_ALL_ONLINE }, { 0x104500, 192, RI_ALL_OFFLINE },
194 { 0x104500, 192, RI_ALL_OFFLINE }, { 0x104800, 64, RI_ALL_ONLINE }, 218 { 0x104800, 64, RI_ALL_ONLINE }, { 0x104900, 192, RI_ALL_OFFLINE },
195 { 0x104900, 192, RI_ALL_OFFLINE }, { 0x105000, 7, RI_ALL_ONLINE }, 219 { 0x105000, 256, RI_ALL_ONLINE }, { 0x105400, 768, RI_ALL_OFFLINE },
196 { 0x10501c, 1, RI_ALL_OFFLINE }, { 0x105020, 3, RI_ALL_ONLINE }, 220 { 0x107000, 7, RI_E2_ONLINE }, { 0x108000, 33, RI_E1E1H_ONLINE },
197 { 0x10502c, 1, RI_ALL_OFFLINE }, { 0x105030, 3, RI_ALL_ONLINE }, 221 { 0x1080ac, 5, RI_E1H_ONLINE }, { 0x108100, 5, RI_E1E1H_ONLINE },
198 { 0x10503c, 1, RI_ALL_OFFLINE }, { 0x105040, 3, RI_ALL_ONLINE }, 222 { 0x108120, 5, RI_E1E1H_ONLINE }, { 0x108200, 74, RI_E1E1H_ONLINE },
199 { 0x10504c, 1, RI_ALL_OFFLINE }, { 0x105050, 3, RI_ALL_ONLINE }, 223 { 0x108400, 74, RI_E1E1H_ONLINE }, { 0x108800, 152, RI_E1E1H_ONLINE },
200 { 0x10505c, 1, RI_ALL_OFFLINE }, { 0x105060, 3, RI_ALL_ONLINE }, 224 { 0x110000, 111, RI_E2_ONLINE }, { 0x110200, 4, RI_E2_ONLINE },
201 { 0x10506c, 1, RI_ALL_OFFLINE }, { 0x105070, 3, RI_ALL_ONLINE }, 225 { 0x120000, 2, RI_ALL_ONLINE }, { 0x120008, 4, RI_ALL_ONLINE },
202 { 0x10507c, 1, RI_ALL_OFFLINE }, { 0x105080, 3, RI_ALL_ONLINE }, 226 { 0x120018, 3, RI_ALL_ONLINE }, { 0x120024, 4, RI_ALL_ONLINE },
203 { 0x10508c, 1, RI_ALL_OFFLINE }, { 0x105090, 3, RI_ALL_ONLINE }, 227 { 0x120034, 3, RI_ALL_ONLINE }, { 0x120040, 4, RI_ALL_ONLINE },
204 { 0x10509c, 1, RI_ALL_OFFLINE }, { 0x1050a0, 3, RI_ALL_ONLINE }, 228 { 0x120050, 3, RI_ALL_ONLINE }, { 0x12005c, 4, RI_ALL_ONLINE },
205 { 0x1050ac, 1, RI_ALL_OFFLINE }, { 0x1050b0, 3, RI_ALL_ONLINE }, 229 { 0x12006c, 3, RI_ALL_ONLINE }, { 0x120078, 4, RI_ALL_ONLINE },
206 { 0x1050bc, 1, RI_ALL_OFFLINE }, { 0x1050c0, 3, RI_ALL_ONLINE }, 230 { 0x120088, 3, RI_ALL_ONLINE }, { 0x120094, 4, RI_ALL_ONLINE },
207 { 0x1050cc, 1, RI_ALL_OFFLINE }, { 0x1050d0, 3, RI_ALL_ONLINE }, 231 { 0x1200a4, 3, RI_ALL_ONLINE }, { 0x1200b0, 4, RI_ALL_ONLINE },
208 { 0x1050dc, 1, RI_ALL_OFFLINE }, { 0x1050e0, 3, RI_ALL_ONLINE }, 232 { 0x1200c0, 3, RI_ALL_ONLINE }, { 0x1200cc, 4, RI_ALL_ONLINE },
209 { 0x1050ec, 1, RI_ALL_OFFLINE }, { 0x1050f0, 3, RI_ALL_ONLINE }, 233 { 0x1200dc, 3, RI_ALL_ONLINE }, { 0x1200e8, 4, RI_ALL_ONLINE },
210 { 0x1050fc, 1, RI_ALL_OFFLINE }, { 0x105100, 3, RI_ALL_ONLINE }, 234 { 0x1200f8, 3, RI_ALL_ONLINE }, { 0x120104, 4, RI_ALL_ONLINE },
211 { 0x10510c, 1, RI_ALL_OFFLINE }, { 0x105110, 3, RI_ALL_ONLINE }, 235 { 0x120114, 1, RI_ALL_ONLINE }, { 0x120118, 22, RI_ALL_ONLINE },
212 { 0x10511c, 1, RI_ALL_OFFLINE }, { 0x105120, 3, RI_ALL_ONLINE }, 236 { 0x120170, 2, RI_E1E1H_ONLINE }, { 0x120178, 243, RI_ALL_ONLINE },
213 { 0x10512c, 1, RI_ALL_OFFLINE }, { 0x105130, 3, RI_ALL_ONLINE }, 237 { 0x120544, 4, RI_E1E1H_ONLINE }, { 0x120554, 7, RI_ALL_ONLINE },
214 { 0x10513c, 1, RI_ALL_OFFLINE }, { 0x105140, 3, RI_ALL_ONLINE }, 238 { 0x12059c, 6, RI_E1HE2_ONLINE }, { 0x1205b4, 1, RI_E1HE2_ONLINE },
215 { 0x10514c, 1, RI_ALL_OFFLINE }, { 0x105150, 3, RI_ALL_ONLINE }, 239 { 0x1205b8, 16, RI_E1HE2_ONLINE }, { 0x1205f8, 4, RI_E2_ONLINE },
216 { 0x10515c, 1, RI_ALL_OFFLINE }, { 0x105160, 3, RI_ALL_ONLINE }, 240 { 0x120618, 1, RI_E2_ONLINE }, { 0x12061c, 20, RI_E1HE2_ONLINE },
217 { 0x10516c, 1, RI_ALL_OFFLINE }, { 0x105170, 3, RI_ALL_ONLINE }, 241 { 0x12066c, 11, RI_E1HE2_ONLINE }, { 0x120698, 5, RI_E2_ONLINE },
218 { 0x10517c, 1, RI_ALL_OFFLINE }, { 0x105180, 3, RI_ALL_ONLINE }, 242 { 0x1206b0, 76, RI_E2_ONLINE }, { 0x1207fc, 1, RI_E2_ONLINE },
219 { 0x10518c, 1, RI_ALL_OFFLINE }, { 0x105190, 3, RI_ALL_ONLINE }, 243 { 0x120808, 66, RI_ALL_ONLINE }, { 0x120910, 7, RI_E2_ONLINE },
220 { 0x10519c, 1, RI_ALL_OFFLINE }, { 0x1051a0, 3, RI_ALL_ONLINE }, 244 { 0x120930, 9, RI_E2_ONLINE }, { 0x120a00, 2, RI_ALL_ONLINE },
221 { 0x1051ac, 1, RI_ALL_OFFLINE }, { 0x1051b0, 3, RI_ALL_ONLINE }, 245 { 0x122000, 2, RI_ALL_ONLINE }, { 0x122008, 2046, RI_E1_OFFLINE },
222 { 0x1051bc, 1, RI_ALL_OFFLINE }, { 0x1051c0, 3, RI_ALL_ONLINE }, 246 { 0x128000, 2, RI_E1HE2_ONLINE }, { 0x128008, 6142, RI_E1HE2_OFFLINE },
223 { 0x1051cc, 1, RI_ALL_OFFLINE }, { 0x1051d0, 3, RI_ALL_ONLINE }, 247 { 0x130000, 35, RI_E2_ONLINE }, { 0x130100, 29, RI_E2_ONLINE },
224 { 0x1051dc, 1, RI_ALL_OFFLINE }, { 0x1051e0, 3, RI_ALL_ONLINE }, 248 { 0x130180, 1, RI_E2_ONLINE }, { 0x130200, 1, RI_E2_ONLINE },
225 { 0x1051ec, 1, RI_ALL_OFFLINE }, { 0x1051f0, 3, RI_ALL_ONLINE }, 249 { 0x130280, 1, RI_E2_ONLINE }, { 0x130300, 5, RI_E2_ONLINE },
226 { 0x1051fc, 1, RI_ALL_OFFLINE }, { 0x105200, 3, RI_ALL_ONLINE }, 250 { 0x130380, 1, RI_E2_ONLINE }, { 0x130400, 1, RI_E2_ONLINE },
227 { 0x10520c, 1, RI_ALL_OFFLINE }, { 0x105210, 3, RI_ALL_ONLINE }, 251 { 0x130480, 5, RI_E2_ONLINE }, { 0x130800, 72, RI_E2_ONLINE },
228 { 0x10521c, 1, RI_ALL_OFFLINE }, { 0x105220, 3, RI_ALL_ONLINE }, 252 { 0x131000, 136, RI_E2_ONLINE }, { 0x132000, 148, RI_E2_ONLINE },
229 { 0x10522c, 1, RI_ALL_OFFLINE }, { 0x105230, 3, RI_ALL_ONLINE }, 253 { 0x134000, 544, RI_E2_ONLINE }, { 0x140000, 64, RI_ALL_ONLINE },
230 { 0x10523c, 1, RI_ALL_OFFLINE }, { 0x105240, 3, RI_ALL_ONLINE }, 254 { 0x140100, 5, RI_E1E1H_ONLINE }, { 0x140114, 45, RI_ALL_ONLINE },
231 { 0x10524c, 1, RI_ALL_OFFLINE }, { 0x105250, 3, RI_ALL_ONLINE }, 255 { 0x140200, 6, RI_ALL_ONLINE }, { 0x140220, 4, RI_E2_ONLINE },
232 { 0x10525c, 1, RI_ALL_OFFLINE }, { 0x105260, 3, RI_ALL_ONLINE }, 256 { 0x140240, 4, RI_E2_ONLINE }, { 0x140260, 4, RI_E2_ONLINE },
233 { 0x10526c, 1, RI_ALL_OFFLINE }, { 0x105270, 3, RI_ALL_ONLINE }, 257 { 0x140280, 4, RI_E2_ONLINE }, { 0x1402a0, 4, RI_E2_ONLINE },
234 { 0x10527c, 1, RI_ALL_OFFLINE }, { 0x105280, 3, RI_ALL_ONLINE }, 258 { 0x1402c0, 4, RI_E2_ONLINE }, { 0x1402e0, 13, RI_E2_ONLINE },
235 { 0x10528c, 1, RI_ALL_OFFLINE }, { 0x105290, 3, RI_ALL_ONLINE }, 259 { 0x144000, 4, RI_E1E1H_ONLINE }, { 0x148000, 4, RI_E1E1H_ONLINE },
236 { 0x10529c, 1, RI_ALL_OFFLINE }, { 0x1052a0, 3, RI_ALL_ONLINE }, 260 { 0x14c000, 4, RI_E1E1H_ONLINE }, { 0x150000, 4, RI_E1E1H_ONLINE },
237 { 0x1052ac, 1, RI_ALL_OFFLINE }, { 0x1052b0, 3, RI_ALL_ONLINE }, 261 { 0x154000, 4, RI_E1E1H_ONLINE }, { 0x158000, 4, RI_E1E1H_ONLINE },
238 { 0x1052bc, 1, RI_ALL_OFFLINE }, { 0x1052c0, 3, RI_ALL_ONLINE }, 262 { 0x15c000, 2, RI_E1HE2_ONLINE }, { 0x15c008, 5, RI_E1H_ONLINE },
239 { 0x1052cc, 1, RI_ALL_OFFLINE }, { 0x1052d0, 3, RI_ALL_ONLINE }, 263 { 0x15c020, 27, RI_E2_ONLINE }, { 0x15c090, 13, RI_E2_ONLINE },
240 { 0x1052dc, 1, RI_ALL_OFFLINE }, { 0x1052e0, 3, RI_ALL_ONLINE }, 264 { 0x15c0c8, 34, RI_E2_ONLINE }, { 0x161000, 7, RI_ALL_ONLINE },
241 { 0x1052ec, 1, RI_ALL_OFFLINE }, { 0x1052f0, 3, RI_ALL_ONLINE }, 265 { 0x16103c, 2, RI_E2_ONLINE }, { 0x161800, 2, RI_ALL_ONLINE },
242 { 0x1052fc, 1, RI_ALL_OFFLINE }, { 0x105300, 3, RI_ALL_ONLINE }, 266 { 0x164000, 60, RI_ALL_ONLINE }, { 0x164110, 2, RI_E1HE2_ONLINE },
243 { 0x10530c, 1, RI_ALL_OFFLINE }, { 0x105310, 3, RI_ALL_ONLINE }, 267 { 0x164118, 15, RI_E2_ONLINE }, { 0x164200, 1, RI_ALL_ONLINE },
244 { 0x10531c, 1, RI_ALL_OFFLINE }, { 0x105320, 3, RI_ALL_ONLINE },
245 { 0x10532c, 1, RI_ALL_OFFLINE }, { 0x105330, 3, RI_ALL_ONLINE },
246 { 0x10533c, 1, RI_ALL_OFFLINE }, { 0x105340, 3, RI_ALL_ONLINE },
247 { 0x10534c, 1, RI_ALL_OFFLINE }, { 0x105350, 3, RI_ALL_ONLINE },
248 { 0x10535c, 1, RI_ALL_OFFLINE }, { 0x105360, 3, RI_ALL_ONLINE },
249 { 0x10536c, 1, RI_ALL_OFFLINE }, { 0x105370, 3, RI_ALL_ONLINE },
250 { 0x10537c, 1, RI_ALL_OFFLINE }, { 0x105380, 3, RI_ALL_ONLINE },
251 { 0x10538c, 1, RI_ALL_OFFLINE }, { 0x105390, 3, RI_ALL_ONLINE },
252 { 0x10539c, 1, RI_ALL_OFFLINE }, { 0x1053a0, 3, RI_ALL_ONLINE },
253 { 0x1053ac, 1, RI_ALL_OFFLINE }, { 0x1053b0, 3, RI_ALL_ONLINE },
254 { 0x1053bc, 1, RI_ALL_OFFLINE }, { 0x1053c0, 3, RI_ALL_ONLINE },
255 { 0x1053cc, 1, RI_ALL_OFFLINE }, { 0x1053d0, 3, RI_ALL_ONLINE },
256 { 0x1053dc, 1, RI_ALL_OFFLINE }, { 0x1053e0, 3, RI_ALL_ONLINE },
257 { 0x1053ec, 1, RI_ALL_OFFLINE }, { 0x1053f0, 3, RI_ALL_ONLINE },
258 { 0x1053fc, 769, RI_ALL_OFFLINE }, { 0x108000, 33, RI_ALL_ONLINE },
259 { 0x108090, 1, RI_ALL_ONLINE }, { 0x1080a0, 1, RI_ALL_ONLINE },
260 { 0x1080ac, 5, RI_E1H_ONLINE }, { 0x108100, 5, RI_ALL_ONLINE },
261 { 0x108120, 5, RI_ALL_ONLINE }, { 0x108200, 74, RI_ALL_ONLINE },
262 { 0x108400, 74, RI_ALL_ONLINE }, { 0x108800, 152, RI_ALL_ONLINE },
263 { 0x109000, 1, RI_ALL_ONLINE }, { 0x120000, 347, RI_ALL_ONLINE },
264 { 0x120578, 1, RI_ALL_ONLINE }, { 0x120588, 1, RI_ALL_ONLINE },
265 { 0x120598, 1, RI_ALL_ONLINE }, { 0x12059c, 23, RI_E1H_ONLINE },
266 { 0x120614, 1, RI_E1H_ONLINE }, { 0x12061c, 30, RI_E1H_ONLINE },
267 { 0x12080c, 65, RI_ALL_ONLINE }, { 0x120a00, 2, RI_ALL_ONLINE },
268 { 0x122000, 2, RI_ALL_ONLINE }, { 0x128000, 2, RI_E1H_ONLINE },
269 { 0x140000, 114, RI_ALL_ONLINE }, { 0x1401d4, 1, RI_ALL_ONLINE },
270 { 0x1401e4, 1, RI_ALL_ONLINE }, { 0x140200, 6, RI_ALL_ONLINE },
271 { 0x144000, 4, RI_ALL_ONLINE }, { 0x148000, 4, RI_ALL_ONLINE },
272 { 0x14c000, 4, RI_ALL_ONLINE }, { 0x150000, 4, RI_ALL_ONLINE },
273 { 0x154000, 4, RI_ALL_ONLINE }, { 0x158000, 4, RI_ALL_ONLINE },
274 { 0x15c000, 7, RI_E1H_ONLINE }, { 0x161000, 7, RI_ALL_ONLINE },
275 { 0x161028, 1, RI_ALL_ONLINE }, { 0x161038, 1, RI_ALL_ONLINE },
276 { 0x161800, 2, RI_ALL_ONLINE }, { 0x164000, 60, RI_ALL_ONLINE },
277 { 0x1640fc, 1, RI_ALL_ONLINE }, { 0x16410c, 1, RI_ALL_ONLINE },
278 { 0x164110, 2, RI_E1H_ONLINE }, { 0x164200, 1, RI_ALL_ONLINE },
279 { 0x164208, 1, RI_ALL_ONLINE }, { 0x164210, 1, RI_ALL_ONLINE }, 268 { 0x164208, 1, RI_ALL_ONLINE }, { 0x164210, 1, RI_ALL_ONLINE },
280 { 0x164218, 1, RI_ALL_ONLINE }, { 0x164220, 1, RI_ALL_ONLINE }, 269 { 0x164218, 1, RI_ALL_ONLINE }, { 0x164220, 1, RI_ALL_ONLINE },
281 { 0x164228, 1, RI_ALL_ONLINE }, { 0x164230, 1, RI_ALL_ONLINE }, 270 { 0x164228, 1, RI_ALL_ONLINE }, { 0x164230, 1, RI_ALL_ONLINE },
@@ -284,169 +273,298 @@ static const struct reg_addr reg_addrs[REGS_COUNT] = {
284 { 0x164258, 1, RI_ALL_ONLINE }, { 0x164260, 1, RI_ALL_ONLINE }, 273 { 0x164258, 1, RI_ALL_ONLINE }, { 0x164260, 1, RI_ALL_ONLINE },
285 { 0x164270, 2, RI_ALL_ONLINE }, { 0x164280, 2, RI_ALL_ONLINE }, 274 { 0x164270, 2, RI_ALL_ONLINE }, { 0x164280, 2, RI_ALL_ONLINE },
286 { 0x164800, 2, RI_ALL_ONLINE }, { 0x165000, 2, RI_ALL_ONLINE }, 275 { 0x164800, 2, RI_ALL_ONLINE }, { 0x165000, 2, RI_ALL_ONLINE },
287 { 0x166000, 164, RI_ALL_ONLINE }, { 0x16629c, 1, RI_ALL_ONLINE }, 276 { 0x166000, 164, RI_ALL_ONLINE }, { 0x1662cc, 7, RI_E2_ONLINE },
288 { 0x1662ac, 1, RI_ALL_ONLINE }, { 0x1662bc, 1, RI_ALL_ONLINE },
289 { 0x166400, 49, RI_ALL_ONLINE }, { 0x1664c8, 38, RI_ALL_ONLINE }, 277 { 0x166400, 49, RI_ALL_ONLINE }, { 0x1664c8, 38, RI_ALL_ONLINE },
290 { 0x166568, 2, RI_ALL_ONLINE }, { 0x166800, 1, RI_ALL_ONLINE }, 278 { 0x166568, 2, RI_ALL_ONLINE }, { 0x166570, 5, RI_E2_ONLINE },
291 { 0x168000, 270, RI_ALL_ONLINE }, { 0x168444, 1, RI_ALL_ONLINE }, 279 { 0x166800, 1, RI_ALL_ONLINE }, { 0x168000, 137, RI_ALL_ONLINE },
292 { 0x168454, 1, RI_ALL_ONLINE }, { 0x168800, 19, RI_ALL_ONLINE }, 280 { 0x168224, 2, RI_E1E1H_ONLINE }, { 0x16822c, 29, RI_ALL_ONLINE },
293 { 0x168900, 1, RI_ALL_ONLINE }, { 0x168a00, 128, RI_ALL_ONLINE }, 281 { 0x1682a0, 12, RI_E1E1H_ONLINE }, { 0x1682d0, 12, RI_ALL_ONLINE },
294 { 0x16a000, 1, RI_ALL_ONLINE }, { 0x16a004, 1535, RI_ALL_OFFLINE }, 282 { 0x168300, 2, RI_E1E1H_ONLINE }, { 0x168308, 68, RI_ALL_ONLINE },
295 { 0x16c000, 1, RI_ALL_ONLINE }, { 0x16c004, 1535, RI_ALL_OFFLINE }, 283 { 0x168418, 2, RI_E1E1H_ONLINE }, { 0x168420, 6, RI_ALL_ONLINE },
296 { 0x16e000, 16, RI_E1H_ONLINE }, { 0x16e100, 1, RI_E1H_ONLINE }, 284 { 0x168800, 19, RI_ALL_ONLINE }, { 0x168900, 1, RI_ALL_ONLINE },
297 { 0x16e200, 2, RI_E1H_ONLINE }, { 0x16e400, 183, RI_E1H_ONLINE }, 285 { 0x168a00, 128, RI_ALL_ONLINE }, { 0x16a000, 1, RI_ALL_ONLINE },
298 { 0x170000, 93, RI_ALL_ONLINE }, { 0x170180, 1, RI_ALL_ONLINE }, 286 { 0x16a004, 1535, RI_ALL_OFFLINE }, { 0x16c000, 1, RI_ALL_ONLINE },
299 { 0x170190, 1, RI_ALL_ONLINE }, { 0x170200, 4, RI_ALL_ONLINE }, 287 { 0x16c004, 1535, RI_ALL_OFFLINE }, { 0x16e000, 16, RI_E1H_ONLINE },
300 { 0x170214, 1, RI_ALL_ONLINE }, { 0x178000, 1, RI_ALL_ONLINE }, 288 { 0x16e040, 8, RI_E2_ONLINE }, { 0x16e100, 1, RI_E1H_ONLINE },
301 { 0x180000, 61, RI_ALL_ONLINE }, { 0x180100, 1, RI_ALL_ONLINE }, 289 { 0x16e200, 2, RI_E1H_ONLINE }, { 0x16e400, 161, RI_E1H_ONLINE },
302 { 0x180110, 1, RI_ALL_ONLINE }, { 0x180120, 1, RI_ALL_ONLINE }, 290 { 0x16e684, 2, RI_E1HE2_ONLINE }, { 0x16e68c, 12, RI_E1H_ONLINE },
303 { 0x180130, 1, RI_ALL_ONLINE }, { 0x18013c, 2, RI_E1H_ONLINE }, 291 { 0x16e6bc, 4, RI_E1HE2_ONLINE }, { 0x16e6cc, 4, RI_E1H_ONLINE },
304 { 0x180200, 58, RI_ALL_ONLINE }, { 0x180340, 4, RI_ALL_ONLINE }, 292 { 0x16e6e0, 12, RI_E2_ONLINE }, { 0x16e768, 17, RI_E2_ONLINE },
305 { 0x180400, 1, RI_ALL_ONLINE }, { 0x180404, 255, RI_ALL_OFFLINE }, 293 { 0x170000, 24, RI_ALL_ONLINE }, { 0x170060, 4, RI_E1E1H_ONLINE },
294 { 0x170070, 65, RI_ALL_ONLINE }, { 0x170194, 11, RI_E2_ONLINE },
295 { 0x1701c4, 1, RI_E2_ONLINE }, { 0x1701cc, 7, RI_E2_ONLINE },
296 { 0x1701ec, 1, RI_E2_ONLINE }, { 0x1701f4, 1, RI_E2_ONLINE },
297 { 0x170200, 4, RI_ALL_ONLINE }, { 0x170214, 1, RI_ALL_ONLINE },
298 { 0x170218, 77, RI_E2_ONLINE }, { 0x170400, 64, RI_E2_ONLINE },
299 { 0x178000, 1, RI_ALL_ONLINE }, { 0x180000, 61, RI_ALL_ONLINE },
300 { 0x18013c, 2, RI_E1HE2_ONLINE }, { 0x180200, 58, RI_ALL_ONLINE },
301 { 0x180340, 4, RI_ALL_ONLINE }, { 0x180380, 1, RI_E2_ONLINE },
302 { 0x180388, 1, RI_E2_ONLINE }, { 0x180390, 1, RI_E2_ONLINE },
303 { 0x180398, 1, RI_E2_ONLINE }, { 0x1803a0, 5, RI_E2_ONLINE },
304 { 0x180400, 1, RI_ALL_ONLINE }, { 0x180404, 255, RI_E1E1H_OFFLINE },
306 { 0x181000, 4, RI_ALL_ONLINE }, { 0x181010, 1020, RI_ALL_OFFLINE }, 305 { 0x181000, 4, RI_ALL_ONLINE }, { 0x181010, 1020, RI_ALL_OFFLINE },
307 { 0x1a0000, 1, RI_ALL_ONLINE }, { 0x1a0004, 1023, RI_ALL_OFFLINE }, 306 { 0x1a0000, 1, RI_ALL_ONLINE }, { 0x1a0004, 5631, RI_ALL_OFFLINE },
308 { 0x1a1000, 1, RI_ALL_ONLINE }, { 0x1a1004, 4607, RI_ALL_OFFLINE }, 307 { 0x1a5800, 2560, RI_E1HE2_OFFLINE }, { 0x1a8000, 1, RI_ALL_ONLINE },
309 { 0x1a5800, 2560, RI_E1H_OFFLINE }, { 0x1a8000, 64, RI_ALL_OFFLINE }, 308 { 0x1a8004, 8191, RI_E1HE2_OFFLINE }, { 0x1b0000, 1, RI_ALL_ONLINE },
310 { 0x1a8100, 1984, RI_E1H_OFFLINE }, { 0x1aa000, 1, RI_E1H_ONLINE }, 309 { 0x1b0004, 15, RI_E1H_OFFLINE }, { 0x1b0040, 1, RI_E1HE2_ONLINE },
311 { 0x1aa004, 6655, RI_E1H_OFFLINE }, { 0x1b1800, 128, RI_ALL_OFFLINE }, 310 { 0x1b0044, 239, RI_E1H_OFFLINE }, { 0x1b0400, 1, RI_ALL_ONLINE },
312 { 0x1b1c00, 128, RI_ALL_OFFLINE }, { 0x1b2000, 1, RI_ALL_OFFLINE }, 311 { 0x1b0404, 255, RI_E1H_OFFLINE }, { 0x1b0800, 1, RI_ALL_ONLINE },
313 { 0x1b2400, 64, RI_E1H_OFFLINE }, { 0x1b8200, 1, RI_ALL_ONLINE }, 312 { 0x1b0840, 1, RI_E1HE2_ONLINE }, { 0x1b0c00, 1, RI_ALL_ONLINE },
313 { 0x1b1000, 1, RI_ALL_ONLINE }, { 0x1b1040, 1, RI_E1HE2_ONLINE },
314 { 0x1b1400, 1, RI_ALL_ONLINE }, { 0x1b1440, 1, RI_E1HE2_ONLINE },
315 { 0x1b1480, 1, RI_E1HE2_ONLINE }, { 0x1b14c0, 1, RI_E1HE2_ONLINE },
316 { 0x1b1800, 128, RI_ALL_OFFLINE }, { 0x1b1c00, 128, RI_ALL_OFFLINE },
317 { 0x1b2000, 1, RI_ALL_ONLINE }, { 0x1b2400, 1, RI_E1HE2_ONLINE },
318 { 0x1b2404, 5631, RI_E2_OFFLINE }, { 0x1b8000, 1, RI_ALL_ONLINE },
319 { 0x1b8040, 1, RI_ALL_ONLINE }, { 0x1b8080, 1, RI_ALL_ONLINE },
320 { 0x1b80c0, 1, RI_ALL_ONLINE }, { 0x1b8100, 1, RI_ALL_ONLINE },
321 { 0x1b8140, 1, RI_ALL_ONLINE }, { 0x1b8180, 1, RI_ALL_ONLINE },
322 { 0x1b81c0, 1, RI_ALL_ONLINE }, { 0x1b8200, 1, RI_ALL_ONLINE },
314 { 0x1b8240, 1, RI_ALL_ONLINE }, { 0x1b8280, 1, RI_ALL_ONLINE }, 323 { 0x1b8240, 1, RI_ALL_ONLINE }, { 0x1b8280, 1, RI_ALL_ONLINE },
315 { 0x1b82c0, 1, RI_ALL_ONLINE }, { 0x1b8a00, 1, RI_ALL_ONLINE }, 324 { 0x1b82c0, 1, RI_ALL_ONLINE }, { 0x1b8300, 1, RI_ALL_ONLINE },
316 { 0x1b8a80, 1, RI_ALL_ONLINE }, { 0x1c0000, 2, RI_ALL_ONLINE }, 325 { 0x1b8340, 1, RI_ALL_ONLINE }, { 0x1b8380, 1, RI_ALL_ONLINE },
317 { 0x200000, 65, RI_ALL_ONLINE }, { 0x200110, 1, RI_ALL_ONLINE }, 326 { 0x1b83c0, 1, RI_ALL_ONLINE }, { 0x1b8400, 1, RI_ALL_ONLINE },
318 { 0x200120, 1, RI_ALL_ONLINE }, { 0x200130, 1, RI_ALL_ONLINE }, 327 { 0x1b8440, 1, RI_ALL_ONLINE }, { 0x1b8480, 1, RI_ALL_ONLINE },
319 { 0x200140, 1, RI_ALL_ONLINE }, { 0x20014c, 2, RI_E1H_ONLINE }, 328 { 0x1b84c0, 1, RI_ALL_ONLINE }, { 0x1b8500, 1, RI_ALL_ONLINE },
320 { 0x200200, 58, RI_ALL_ONLINE }, { 0x200340, 4, RI_ALL_ONLINE }, 329 { 0x1b8540, 1, RI_ALL_ONLINE }, { 0x1b8580, 1, RI_ALL_ONLINE },
321 { 0x200400, 1, RI_ALL_ONLINE }, { 0x200404, 255, RI_ALL_OFFLINE }, 330 { 0x1b85c0, 19, RI_E2_ONLINE }, { 0x1b8800, 1, RI_ALL_ONLINE },
322 { 0x202000, 4, RI_ALL_ONLINE }, { 0x202010, 2044, RI_ALL_OFFLINE }, 331 { 0x1b8840, 1, RI_ALL_ONLINE }, { 0x1b8880, 1, RI_ALL_ONLINE },
323 { 0x220000, 1, RI_ALL_ONLINE }, { 0x220004, 1023, RI_ALL_OFFLINE }, 332 { 0x1b88c0, 1, RI_ALL_ONLINE }, { 0x1b8900, 1, RI_ALL_ONLINE },
324 { 0x221000, 1, RI_ALL_ONLINE }, { 0x221004, 4607, RI_ALL_OFFLINE }, 333 { 0x1b8940, 1, RI_ALL_ONLINE }, { 0x1b8980, 1, RI_ALL_ONLINE },
325 { 0x225800, 1536, RI_E1H_OFFLINE }, { 0x227000, 1, RI_E1H_ONLINE }, 334 { 0x1b89c0, 1, RI_ALL_ONLINE }, { 0x1b8a00, 1, RI_ALL_ONLINE },
326 { 0x227004, 1023, RI_E1H_OFFLINE }, { 0x228000, 64, RI_ALL_OFFLINE }, 335 { 0x1b8a40, 1, RI_ALL_ONLINE }, { 0x1b8a80, 1, RI_ALL_ONLINE },
327 { 0x228100, 8640, RI_E1H_OFFLINE }, { 0x231800, 128, RI_ALL_OFFLINE }, 336 { 0x1b8ac0, 1, RI_ALL_ONLINE }, { 0x1b8b00, 1, RI_ALL_ONLINE },
328 { 0x231c00, 128, RI_ALL_OFFLINE }, { 0x232000, 1, RI_ALL_OFFLINE }, 337 { 0x1b8b40, 1, RI_ALL_ONLINE }, { 0x1b8b80, 1, RI_ALL_ONLINE },
329 { 0x232400, 64, RI_E1H_OFFLINE }, { 0x238200, 1, RI_ALL_ONLINE }, 338 { 0x1b8bc0, 1, RI_ALL_ONLINE }, { 0x1b8c00, 1, RI_ALL_ONLINE },
330 { 0x238240, 1, RI_ALL_ONLINE }, { 0x238280, 1, RI_ALL_ONLINE }, 339 { 0x1b8c40, 1, RI_ALL_ONLINE }, { 0x1b8c80, 1, RI_ALL_ONLINE },
331 { 0x2382c0, 1, RI_ALL_ONLINE }, { 0x238a00, 1, RI_ALL_ONLINE }, 340 { 0x1b8cc0, 1, RI_ALL_ONLINE }, { 0x1b8cc4, 1, RI_E2_ONLINE },
332 { 0x238a80, 1, RI_ALL_ONLINE }, { 0x240000, 2, RI_ALL_ONLINE }, 341 { 0x1b8d00, 1, RI_ALL_ONLINE }, { 0x1b8d40, 1, RI_ALL_ONLINE },
333 { 0x280000, 65, RI_ALL_ONLINE }, { 0x280110, 1, RI_ALL_ONLINE }, 342 { 0x1b8d80, 1, RI_ALL_ONLINE }, { 0x1b8dc0, 1, RI_ALL_ONLINE },
334 { 0x280120, 1, RI_ALL_ONLINE }, { 0x280130, 1, RI_ALL_ONLINE }, 343 { 0x1b8e00, 1, RI_ALL_ONLINE }, { 0x1b8e40, 1, RI_ALL_ONLINE },
335 { 0x280140, 1, RI_ALL_ONLINE }, { 0x28014c, 2, RI_E1H_ONLINE }, 344 { 0x1b8e80, 1, RI_ALL_ONLINE }, { 0x1b8e84, 1, RI_E2_ONLINE },
336 { 0x280200, 58, RI_ALL_ONLINE }, { 0x280340, 4, RI_ALL_ONLINE }, 345 { 0x1b8ec0, 1, RI_E1HE2_ONLINE }, { 0x1b8f00, 1, RI_E1HE2_ONLINE },
337 { 0x280400, 1, RI_ALL_ONLINE }, { 0x280404, 255, RI_ALL_OFFLINE }, 346 { 0x1b8f40, 1, RI_E1HE2_ONLINE }, { 0x1b8f80, 1, RI_E1HE2_ONLINE },
338 { 0x282000, 4, RI_ALL_ONLINE }, { 0x282010, 2044, RI_ALL_OFFLINE }, 347 { 0x1b8fc0, 1, RI_E1HE2_ONLINE }, { 0x1b8fc4, 2, RI_E2_ONLINE },
339 { 0x2a0000, 1, RI_ALL_ONLINE }, { 0x2a0004, 1023, RI_ALL_OFFLINE }, 348 { 0x1b8fd0, 6, RI_E2_ONLINE }, { 0x1b9000, 1, RI_E2_ONLINE },
340 { 0x2a1000, 1, RI_ALL_ONLINE }, { 0x2a1004, 4607, RI_ALL_OFFLINE }, 349 { 0x1b9040, 3, RI_E2_ONLINE }, { 0x1b9400, 14, RI_E2_ONLINE },
341 { 0x2a5800, 2560, RI_E1H_OFFLINE }, { 0x2a8000, 64, RI_ALL_OFFLINE }, 350 { 0x1b943c, 19, RI_E2_ONLINE }, { 0x1b9490, 10, RI_E2_ONLINE },
342 { 0x2a8100, 960, RI_E1H_OFFLINE }, { 0x2a9000, 1, RI_E1H_ONLINE }, 351 { 0x1c0000, 2, RI_ALL_ONLINE }, { 0x200000, 65, RI_ALL_ONLINE },
343 { 0x2a9004, 7679, RI_E1H_OFFLINE }, { 0x2b1800, 128, RI_ALL_OFFLINE }, 352 { 0x20014c, 2, RI_E1HE2_ONLINE }, { 0x200200, 58, RI_ALL_ONLINE },
344 { 0x2b1c00, 128, RI_ALL_OFFLINE }, { 0x2b2000, 1, RI_ALL_OFFLINE }, 353 { 0x200340, 4, RI_ALL_ONLINE }, { 0x200380, 1, RI_E2_ONLINE },
345 { 0x2b2400, 64, RI_E1H_OFFLINE }, { 0x2b8200, 1, RI_ALL_ONLINE }, 354 { 0x200388, 1, RI_E2_ONLINE }, { 0x200390, 1, RI_E2_ONLINE },
346 { 0x2b8240, 1, RI_ALL_ONLINE }, { 0x2b8280, 1, RI_ALL_ONLINE }, 355 { 0x200398, 1, RI_E2_ONLINE }, { 0x2003a0, 1, RI_E2_ONLINE },
347 { 0x2b82c0, 1, RI_ALL_ONLINE }, { 0x2b8a00, 1, RI_ALL_ONLINE }, 356 { 0x2003a8, 2, RI_E2_ONLINE }, { 0x200400, 1, RI_ALL_ONLINE },
348 { 0x2b8a80, 1, RI_ALL_ONLINE }, { 0x2c0000, 2, RI_ALL_ONLINE }, 357 { 0x200404, 255, RI_E1E1H_OFFLINE }, { 0x202000, 4, RI_ALL_ONLINE },
349 { 0x300000, 65, RI_ALL_ONLINE }, { 0x300110, 1, RI_ALL_ONLINE }, 358 { 0x202010, 2044, RI_ALL_OFFLINE }, { 0x220000, 1, RI_ALL_ONLINE },
350 { 0x300120, 1, RI_ALL_ONLINE }, { 0x300130, 1, RI_ALL_ONLINE }, 359 { 0x220004, 5631, RI_ALL_OFFLINE }, { 0x225800, 2560, RI_E1HE2_OFFLINE},
351 { 0x300140, 1, RI_ALL_ONLINE }, { 0x30014c, 2, RI_E1H_ONLINE }, 360 { 0x228000, 1, RI_ALL_ONLINE }, { 0x228004, 8191, RI_E1HE2_OFFLINE },
361 { 0x230000, 1, RI_ALL_ONLINE }, { 0x230004, 15, RI_E1H_OFFLINE },
362 { 0x230040, 1, RI_E1HE2_ONLINE }, { 0x230044, 239, RI_E1H_OFFLINE },
363 { 0x230400, 1, RI_ALL_ONLINE }, { 0x230404, 255, RI_E1H_OFFLINE },
364 { 0x230800, 1, RI_ALL_ONLINE }, { 0x230840, 1, RI_E1HE2_ONLINE },
365 { 0x230c00, 1, RI_ALL_ONLINE }, { 0x231000, 1, RI_ALL_ONLINE },
366 { 0x231040, 1, RI_E1HE2_ONLINE }, { 0x231400, 1, RI_ALL_ONLINE },
367 { 0x231440, 1, RI_E1HE2_ONLINE }, { 0x231480, 1, RI_E1HE2_ONLINE },
368 { 0x2314c0, 1, RI_E1HE2_ONLINE }, { 0x231800, 128, RI_ALL_OFFLINE },
369 { 0x231c00, 128, RI_ALL_OFFLINE }, { 0x232000, 1, RI_ALL_ONLINE },
370 { 0x232400, 1, RI_E1HE2_ONLINE }, { 0x232404, 5631, RI_E2_OFFLINE },
371 { 0x238000, 1, RI_ALL_ONLINE }, { 0x238040, 1, RI_ALL_ONLINE },
372 { 0x238080, 1, RI_ALL_ONLINE }, { 0x2380c0, 1, RI_ALL_ONLINE },
373 { 0x238100, 1, RI_ALL_ONLINE }, { 0x238140, 1, RI_ALL_ONLINE },
374 { 0x238180, 1, RI_ALL_ONLINE }, { 0x2381c0, 1, RI_ALL_ONLINE },
375 { 0x238200, 1, RI_ALL_ONLINE }, { 0x238240, 1, RI_ALL_ONLINE },
376 { 0x238280, 1, RI_ALL_ONLINE }, { 0x2382c0, 1, RI_ALL_ONLINE },
377 { 0x238300, 1, RI_ALL_ONLINE }, { 0x238340, 1, RI_ALL_ONLINE },
378 { 0x238380, 1, RI_ALL_ONLINE }, { 0x2383c0, 1, RI_ALL_ONLINE },
379 { 0x238400, 1, RI_ALL_ONLINE }, { 0x238440, 1, RI_ALL_ONLINE },
380 { 0x238480, 1, RI_ALL_ONLINE }, { 0x2384c0, 1, RI_ALL_ONLINE },
381 { 0x238500, 1, RI_ALL_ONLINE }, { 0x238540, 1, RI_ALL_ONLINE },
382 { 0x238580, 1, RI_ALL_ONLINE }, { 0x2385c0, 19, RI_E2_ONLINE },
383 { 0x238800, 1, RI_ALL_ONLINE }, { 0x238840, 1, RI_ALL_ONLINE },
384 { 0x238880, 1, RI_ALL_ONLINE }, { 0x2388c0, 1, RI_ALL_ONLINE },
385 { 0x238900, 1, RI_ALL_ONLINE }, { 0x238940, 1, RI_ALL_ONLINE },
386 { 0x238980, 1, RI_ALL_ONLINE }, { 0x2389c0, 1, RI_ALL_ONLINE },
387 { 0x238a00, 1, RI_ALL_ONLINE }, { 0x238a40, 1, RI_ALL_ONLINE },
388 { 0x238a80, 1, RI_ALL_ONLINE }, { 0x238ac0, 1, RI_ALL_ONLINE },
389 { 0x238b00, 1, RI_ALL_ONLINE }, { 0x238b40, 1, RI_ALL_ONLINE },
390 { 0x238b80, 1, RI_ALL_ONLINE }, { 0x238bc0, 1, RI_ALL_ONLINE },
391 { 0x238c00, 1, RI_ALL_ONLINE }, { 0x238c40, 1, RI_ALL_ONLINE },
392 { 0x238c80, 1, RI_ALL_ONLINE }, { 0x238cc0, 1, RI_ALL_ONLINE },
393 { 0x238cc4, 1, RI_E2_ONLINE }, { 0x238d00, 1, RI_ALL_ONLINE },
394 { 0x238d40, 1, RI_ALL_ONLINE }, { 0x238d80, 1, RI_ALL_ONLINE },
395 { 0x238dc0, 1, RI_ALL_ONLINE }, { 0x238e00, 1, RI_ALL_ONLINE },
396 { 0x238e40, 1, RI_ALL_ONLINE }, { 0x238e80, 1, RI_ALL_ONLINE },
397 { 0x238e84, 1, RI_E2_ONLINE }, { 0x238ec0, 1, RI_E1HE2_ONLINE },
398 { 0x238f00, 1, RI_E1HE2_ONLINE }, { 0x238f40, 1, RI_E1HE2_ONLINE },
399 { 0x238f80, 1, RI_E1HE2_ONLINE }, { 0x238fc0, 1, RI_E1HE2_ONLINE },
400 { 0x238fc4, 2, RI_E2_ONLINE }, { 0x238fd0, 6, RI_E2_ONLINE },
401 { 0x239000, 1, RI_E2_ONLINE }, { 0x239040, 3, RI_E2_ONLINE },
402 { 0x240000, 2, RI_ALL_ONLINE }, { 0x280000, 65, RI_ALL_ONLINE },
403 { 0x28014c, 2, RI_E1HE2_ONLINE }, { 0x280200, 58, RI_ALL_ONLINE },
404 { 0x280340, 4, RI_ALL_ONLINE }, { 0x280380, 1, RI_E2_ONLINE },
405 { 0x280388, 1, RI_E2_ONLINE }, { 0x280390, 1, RI_E2_ONLINE },
406 { 0x280398, 1, RI_E2_ONLINE }, { 0x2803a0, 1, RI_E2_ONLINE },
407 { 0x2803a8, 2, RI_E2_ONLINE }, { 0x280400, 1, RI_ALL_ONLINE },
408 { 0x280404, 255, RI_E1E1H_OFFLINE }, { 0x282000, 4, RI_ALL_ONLINE },
409 { 0x282010, 2044, RI_ALL_OFFLINE }, { 0x2a0000, 1, RI_ALL_ONLINE },
410 { 0x2a0004, 5631, RI_ALL_OFFLINE }, { 0x2a5800, 2560, RI_E1HE2_OFFLINE},
411 { 0x2a8000, 1, RI_ALL_ONLINE }, { 0x2a8004, 8191, RI_E1HE2_OFFLINE },
412 { 0x2b0000, 1, RI_ALL_ONLINE }, { 0x2b0004, 15, RI_E1H_OFFLINE },
413 { 0x2b0040, 1, RI_E1HE2_ONLINE }, { 0x2b0044, 239, RI_E1H_OFFLINE },
414 { 0x2b0400, 1, RI_ALL_ONLINE }, { 0x2b0404, 255, RI_E1H_OFFLINE },
415 { 0x2b0800, 1, RI_ALL_ONLINE }, { 0x2b0840, 1, RI_E1HE2_ONLINE },
416 { 0x2b0c00, 1, RI_ALL_ONLINE }, { 0x2b1000, 1, RI_ALL_ONLINE },
417 { 0x2b1040, 1, RI_E1HE2_ONLINE }, { 0x2b1400, 1, RI_ALL_ONLINE },
418 { 0x2b1440, 1, RI_E1HE2_ONLINE }, { 0x2b1480, 1, RI_E1HE2_ONLINE },
419 { 0x2b14c0, 1, RI_E1HE2_ONLINE }, { 0x2b1800, 128, RI_ALL_OFFLINE },
420 { 0x2b1c00, 128, RI_ALL_OFFLINE }, { 0x2b2000, 1, RI_ALL_ONLINE },
421 { 0x2b2400, 1, RI_E1HE2_ONLINE }, { 0x2b2404, 5631, RI_E2_OFFLINE },
422 { 0x2b8000, 1, RI_ALL_ONLINE }, { 0x2b8040, 1, RI_ALL_ONLINE },
423 { 0x2b8080, 1, RI_ALL_ONLINE }, { 0x2b80c0, 1, RI_ALL_ONLINE },
424 { 0x2b8100, 1, RI_ALL_ONLINE }, { 0x2b8140, 1, RI_ALL_ONLINE },
425 { 0x2b8180, 1, RI_ALL_ONLINE }, { 0x2b81c0, 1, RI_ALL_ONLINE },
426 { 0x2b8200, 1, RI_ALL_ONLINE }, { 0x2b8240, 1, RI_ALL_ONLINE },
427 { 0x2b8280, 1, RI_ALL_ONLINE }, { 0x2b82c0, 1, RI_ALL_ONLINE },
428 { 0x2b8300, 1, RI_ALL_ONLINE }, { 0x2b8340, 1, RI_ALL_ONLINE },
429 { 0x2b8380, 1, RI_ALL_ONLINE }, { 0x2b83c0, 1, RI_ALL_ONLINE },
430 { 0x2b8400, 1, RI_ALL_ONLINE }, { 0x2b8440, 1, RI_ALL_ONLINE },
431 { 0x2b8480, 1, RI_ALL_ONLINE }, { 0x2b84c0, 1, RI_ALL_ONLINE },
432 { 0x2b8500, 1, RI_ALL_ONLINE }, { 0x2b8540, 1, RI_ALL_ONLINE },
433 { 0x2b8580, 1, RI_ALL_ONLINE }, { 0x2b85c0, 19, RI_E2_ONLINE },
434 { 0x2b8800, 1, RI_ALL_ONLINE }, { 0x2b8840, 1, RI_ALL_ONLINE },
435 { 0x2b8880, 1, RI_ALL_ONLINE }, { 0x2b88c0, 1, RI_ALL_ONLINE },
436 { 0x2b8900, 1, RI_ALL_ONLINE }, { 0x2b8940, 1, RI_ALL_ONLINE },
437 { 0x2b8980, 1, RI_ALL_ONLINE }, { 0x2b89c0, 1, RI_ALL_ONLINE },
438 { 0x2b8a00, 1, RI_ALL_ONLINE }, { 0x2b8a40, 1, RI_ALL_ONLINE },
439 { 0x2b8a80, 1, RI_ALL_ONLINE }, { 0x2b8ac0, 1, RI_ALL_ONLINE },
440 { 0x2b8b00, 1, RI_ALL_ONLINE }, { 0x2b8b40, 1, RI_ALL_ONLINE },
441 { 0x2b8b80, 1, RI_ALL_ONLINE }, { 0x2b8bc0, 1, RI_ALL_ONLINE },
442 { 0x2b8c00, 1, RI_ALL_ONLINE }, { 0x2b8c40, 1, RI_ALL_ONLINE },
443 { 0x2b8c80, 1, RI_ALL_ONLINE }, { 0x2b8cc0, 1, RI_ALL_ONLINE },
444 { 0x2b8cc4, 1, RI_E2_ONLINE }, { 0x2b8d00, 1, RI_ALL_ONLINE },
445 { 0x2b8d40, 1, RI_ALL_ONLINE }, { 0x2b8d80, 1, RI_ALL_ONLINE },
446 { 0x2b8dc0, 1, RI_ALL_ONLINE }, { 0x2b8e00, 1, RI_ALL_ONLINE },
447 { 0x2b8e40, 1, RI_ALL_ONLINE }, { 0x2b8e80, 1, RI_ALL_ONLINE },
448 { 0x2b8e84, 1, RI_E2_ONLINE }, { 0x2b8ec0, 1, RI_E1HE2_ONLINE },
449 { 0x2b8f00, 1, RI_E1HE2_ONLINE }, { 0x2b8f40, 1, RI_E1HE2_ONLINE },
450 { 0x2b8f80, 1, RI_E1HE2_ONLINE }, { 0x2b8fc0, 1, RI_E1HE2_ONLINE },
451 { 0x2b8fc4, 2, RI_E2_ONLINE }, { 0x2b8fd0, 6, RI_E2_ONLINE },
452 { 0x2b9000, 1, RI_E2_ONLINE }, { 0x2b9040, 3, RI_E2_ONLINE },
453 { 0x2b9400, 14, RI_E2_ONLINE }, { 0x2b943c, 19, RI_E2_ONLINE },
454 { 0x2b9490, 10, RI_E2_ONLINE }, { 0x2c0000, 2, RI_ALL_ONLINE },
455 { 0x300000, 65, RI_ALL_ONLINE }, { 0x30014c, 2, RI_E1HE2_ONLINE },
352 { 0x300200, 58, RI_ALL_ONLINE }, { 0x300340, 4, RI_ALL_ONLINE }, 456 { 0x300200, 58, RI_ALL_ONLINE }, { 0x300340, 4, RI_ALL_ONLINE },
353 { 0x300400, 1, RI_ALL_ONLINE }, { 0x300404, 255, RI_ALL_OFFLINE }, 457 { 0x300380, 1, RI_E2_ONLINE }, { 0x300388, 1, RI_E2_ONLINE },
458 { 0x300390, 1, RI_E2_ONLINE }, { 0x300398, 1, RI_E2_ONLINE },
459 { 0x3003a0, 1, RI_E2_ONLINE }, { 0x3003a8, 2, RI_E2_ONLINE },
460 { 0x300400, 1, RI_ALL_ONLINE }, { 0x300404, 255, RI_E1E1H_OFFLINE },
354 { 0x302000, 4, RI_ALL_ONLINE }, { 0x302010, 2044, RI_ALL_OFFLINE }, 461 { 0x302000, 4, RI_ALL_ONLINE }, { 0x302010, 2044, RI_ALL_OFFLINE },
355 { 0x320000, 1, RI_ALL_ONLINE }, { 0x320004, 1023, RI_ALL_OFFLINE }, 462 { 0x320000, 1, RI_ALL_ONLINE }, { 0x320004, 5631, RI_ALL_OFFLINE },
356 { 0x321000, 1, RI_ALL_ONLINE }, { 0x321004, 4607, RI_ALL_OFFLINE }, 463 { 0x325800, 2560, RI_E1HE2_OFFLINE }, { 0x328000, 1, RI_ALL_ONLINE },
357 { 0x325800, 2560, RI_E1H_OFFLINE }, { 0x328000, 64, RI_ALL_OFFLINE }, 464 { 0x328004, 8191, RI_E1HE2_OFFLINE }, { 0x330000, 1, RI_ALL_ONLINE },
358 { 0x328100, 536, RI_E1H_OFFLINE }, { 0x328960, 1, RI_E1H_ONLINE }, 465 { 0x330004, 15, RI_E1H_OFFLINE }, { 0x330040, 1, RI_E1HE2_ONLINE },
359 { 0x328964, 8103, RI_E1H_OFFLINE }, { 0x331800, 128, RI_ALL_OFFLINE }, 466 { 0x330044, 239, RI_E1H_OFFLINE }, { 0x330400, 1, RI_ALL_ONLINE },
360 { 0x331c00, 128, RI_ALL_OFFLINE }, { 0x332000, 1, RI_ALL_OFFLINE }, 467 { 0x330404, 255, RI_E1H_OFFLINE }, { 0x330800, 1, RI_ALL_ONLINE },
361 { 0x332400, 64, RI_E1H_OFFLINE }, { 0x338200, 1, RI_ALL_ONLINE }, 468 { 0x330840, 1, RI_E1HE2_ONLINE }, { 0x330c00, 1, RI_ALL_ONLINE },
469 { 0x331000, 1, RI_ALL_ONLINE }, { 0x331040, 1, RI_E1HE2_ONLINE },
470 { 0x331400, 1, RI_ALL_ONLINE }, { 0x331440, 1, RI_E1HE2_ONLINE },
471 { 0x331480, 1, RI_E1HE2_ONLINE }, { 0x3314c0, 1, RI_E1HE2_ONLINE },
472 { 0x331800, 128, RI_ALL_OFFLINE }, { 0x331c00, 128, RI_ALL_OFFLINE },
473 { 0x332000, 1, RI_ALL_ONLINE }, { 0x332400, 1, RI_E1HE2_ONLINE },
474 { 0x332404, 5631, RI_E2_OFFLINE }, { 0x338000, 1, RI_ALL_ONLINE },
475 { 0x338040, 1, RI_ALL_ONLINE }, { 0x338080, 1, RI_ALL_ONLINE },
476 { 0x3380c0, 1, RI_ALL_ONLINE }, { 0x338100, 1, RI_ALL_ONLINE },
477 { 0x338140, 1, RI_ALL_ONLINE }, { 0x338180, 1, RI_ALL_ONLINE },
478 { 0x3381c0, 1, RI_ALL_ONLINE }, { 0x338200, 1, RI_ALL_ONLINE },
362 { 0x338240, 1, RI_ALL_ONLINE }, { 0x338280, 1, RI_ALL_ONLINE }, 479 { 0x338240, 1, RI_ALL_ONLINE }, { 0x338280, 1, RI_ALL_ONLINE },
363 { 0x3382c0, 1, RI_ALL_ONLINE }, { 0x338a00, 1, RI_ALL_ONLINE }, 480 { 0x3382c0, 1, RI_ALL_ONLINE }, { 0x338300, 1, RI_ALL_ONLINE },
364 { 0x338a80, 1, RI_ALL_ONLINE }, { 0x340000, 2, RI_ALL_ONLINE } 481 { 0x338340, 1, RI_ALL_ONLINE }, { 0x338380, 1, RI_ALL_ONLINE },
482 { 0x3383c0, 1, RI_ALL_ONLINE }, { 0x338400, 1, RI_ALL_ONLINE },
483 { 0x338440, 1, RI_ALL_ONLINE }, { 0x338480, 1, RI_ALL_ONLINE },
484 { 0x3384c0, 1, RI_ALL_ONLINE }, { 0x338500, 1, RI_ALL_ONLINE },
485 { 0x338540, 1, RI_ALL_ONLINE }, { 0x338580, 1, RI_ALL_ONLINE },
486 { 0x3385c0, 19, RI_E2_ONLINE }, { 0x338800, 1, RI_ALL_ONLINE },
487 { 0x338840, 1, RI_ALL_ONLINE }, { 0x338880, 1, RI_ALL_ONLINE },
488 { 0x3388c0, 1, RI_ALL_ONLINE }, { 0x338900, 1, RI_ALL_ONLINE },
489 { 0x338940, 1, RI_ALL_ONLINE }, { 0x338980, 1, RI_ALL_ONLINE },
490 { 0x3389c0, 1, RI_ALL_ONLINE }, { 0x338a00, 1, RI_ALL_ONLINE },
491 { 0x338a40, 1, RI_ALL_ONLINE }, { 0x338a80, 1, RI_ALL_ONLINE },
492 { 0x338ac0, 1, RI_ALL_ONLINE }, { 0x338b00, 1, RI_ALL_ONLINE },
493 { 0x338b40, 1, RI_ALL_ONLINE }, { 0x338b80, 1, RI_ALL_ONLINE },
494 { 0x338bc0, 1, RI_ALL_ONLINE }, { 0x338c00, 1, RI_ALL_ONLINE },
495 { 0x338c40, 1, RI_ALL_ONLINE }, { 0x338c80, 1, RI_ALL_ONLINE },
496 { 0x338cc0, 1, RI_ALL_ONLINE }, { 0x338cc4, 1, RI_E2_ONLINE },
497 { 0x338d00, 1, RI_ALL_ONLINE }, { 0x338d40, 1, RI_ALL_ONLINE },
498 { 0x338d80, 1, RI_ALL_ONLINE }, { 0x338dc0, 1, RI_ALL_ONLINE },
499 { 0x338e00, 1, RI_ALL_ONLINE }, { 0x338e40, 1, RI_ALL_ONLINE },
500 { 0x338e80, 1, RI_ALL_ONLINE }, { 0x338e84, 1, RI_E2_ONLINE },
501 { 0x338ec0, 1, RI_E1HE2_ONLINE }, { 0x338f00, 1, RI_E1HE2_ONLINE },
502 { 0x338f40, 1, RI_E1HE2_ONLINE }, { 0x338f80, 1, RI_E1HE2_ONLINE },
503 { 0x338fc0, 1, RI_E1HE2_ONLINE }, { 0x338fc4, 2, RI_E2_ONLINE },
504 { 0x338fd0, 6, RI_E2_ONLINE }, { 0x339000, 1, RI_E2_ONLINE },
505 { 0x339040, 3, RI_E2_ONLINE }, { 0x340000, 2, RI_ALL_ONLINE },
365}; 506};
366 507
367 508#define IDLE_REGS_COUNT 237
368#define IDLE_REGS_COUNT 277
369static const struct reg_addr idle_addrs[IDLE_REGS_COUNT] = { 509static const struct reg_addr idle_addrs[IDLE_REGS_COUNT] = {
370 { 0x2114, 1, RI_ALL_ONLINE }, { 0x2120, 1, RI_ALL_ONLINE }, 510 { 0x2104, 1, RI_ALL_ONLINE }, { 0x2110, 2, RI_ALL_ONLINE },
371 { 0x212c, 4, RI_ALL_ONLINE }, { 0x2814, 1, RI_ALL_ONLINE }, 511 { 0x211c, 8, RI_ALL_ONLINE }, { 0x2814, 1, RI_ALL_ONLINE },
372 { 0x281c, 2, RI_ALL_ONLINE }, { 0xa38c, 1, RI_ALL_ONLINE }, 512 { 0x281c, 2, RI_ALL_ONLINE }, { 0x2854, 1, RI_ALL_ONLINE },
513 { 0x285c, 1, RI_ALL_ONLINE }, { 0x9010, 7, RI_E2_ONLINE },
514 { 0x9030, 1, RI_E2_ONLINE }, { 0x9068, 16, RI_E2_ONLINE },
515 { 0x9230, 2, RI_E2_ONLINE }, { 0x9244, 1, RI_E2_ONLINE },
516 { 0x9298, 1, RI_E2_ONLINE }, { 0x92a8, 1, RI_E2_ONLINE },
517 { 0xa38c, 1, RI_ALL_ONLINE }, { 0xa3c4, 1, RI_E1HE2_ONLINE },
373 { 0xa408, 1, RI_ALL_ONLINE }, { 0xa42c, 12, RI_ALL_ONLINE }, 518 { 0xa408, 1, RI_ALL_ONLINE }, { 0xa42c, 12, RI_ALL_ONLINE },
374 { 0xa600, 5, RI_E1H_ONLINE }, { 0xa618, 1, RI_E1H_ONLINE }, 519 { 0xa600, 5, RI_E1HE2_ONLINE }, { 0xa618, 1, RI_E1HE2_ONLINE },
375 { 0xc09c, 1, RI_ALL_ONLINE }, { 0x103b0, 1, RI_ALL_ONLINE }, 520 { 0xa714, 1, RI_E2_ONLINE }, { 0xa720, 1, RI_E2_ONLINE },
376 { 0x103c0, 1, RI_ALL_ONLINE }, { 0x103d0, 1, RI_E1H_ONLINE }, 521 { 0xa750, 1, RI_E2_ONLINE }, { 0xc09c, 1, RI_E1E1H_ONLINE },
377 { 0x2021c, 11, RI_ALL_ONLINE }, { 0x202a8, 1, RI_ALL_ONLINE }, 522 { 0x103b0, 1, RI_ALL_ONLINE }, { 0x103c0, 1, RI_ALL_ONLINE },
378 { 0x202b8, 1, RI_ALL_ONLINE }, { 0x20404, 1, RI_ALL_ONLINE }, 523 { 0x103d0, 1, RI_E1H_ONLINE }, { 0x183bc, 1, RI_E2_ONLINE },
379 { 0x2040c, 2, RI_ALL_ONLINE }, { 0x2041c, 2, RI_ALL_ONLINE }, 524 { 0x183cc, 1, RI_E2_ONLINE }, { 0x2021c, 11, RI_ALL_ONLINE },
380 { 0x40154, 14, RI_ALL_ONLINE }, { 0x40198, 1, RI_ALL_ONLINE }, 525 { 0x202a8, 1, RI_ALL_ONLINE }, { 0x202b8, 1, RI_ALL_ONLINE },
381 { 0x404ac, 1, RI_ALL_ONLINE }, { 0x404bc, 1, RI_ALL_ONLINE }, 526 { 0x20404, 1, RI_ALL_ONLINE }, { 0x2040c, 2, RI_ALL_ONLINE },
382 { 0x42290, 1, RI_ALL_ONLINE }, { 0x422a0, 1, RI_ALL_ONLINE }, 527 { 0x2041c, 2, RI_ALL_ONLINE }, { 0x40154, 14, RI_ALL_ONLINE },
383 { 0x422b0, 1, RI_ALL_ONLINE }, { 0x42548, 1, RI_ALL_ONLINE }, 528 { 0x40198, 1, RI_ALL_ONLINE }, { 0x404ac, 1, RI_ALL_ONLINE },
384 { 0x42550, 1, RI_ALL_ONLINE }, { 0x42558, 1, RI_ALL_ONLINE }, 529 { 0x404bc, 1, RI_ALL_ONLINE }, { 0x42290, 1, RI_ALL_ONLINE },
385 { 0x50160, 8, RI_ALL_ONLINE }, { 0x501d0, 1, RI_ALL_ONLINE }, 530 { 0x422a0, 1, RI_ALL_ONLINE }, { 0x422b0, 1, RI_ALL_ONLINE },
386 { 0x501e0, 1, RI_ALL_ONLINE }, { 0x50204, 1, RI_ALL_ONLINE }, 531 { 0x42548, 1, RI_ALL_ONLINE }, { 0x42550, 1, RI_ALL_ONLINE },
387 { 0x5020c, 2, RI_ALL_ONLINE }, { 0x5021c, 1, RI_ALL_ONLINE }, 532 { 0x42558, 1, RI_ALL_ONLINE }, { 0x50160, 8, RI_ALL_ONLINE },
388 { 0x60090, 1, RI_ALL_ONLINE }, { 0x6011c, 1, RI_ALL_ONLINE }, 533 { 0x501d0, 1, RI_ALL_ONLINE }, { 0x501e0, 1, RI_ALL_ONLINE },
389 { 0x6012c, 1, RI_ALL_ONLINE }, { 0xc101c, 1, RI_ALL_ONLINE }, 534 { 0x50204, 1, RI_ALL_ONLINE }, { 0x5020c, 2, RI_ALL_ONLINE },
390 { 0xc102c, 1, RI_ALL_ONLINE }, { 0xc2290, 1, RI_ALL_ONLINE }, 535 { 0x5021c, 1, RI_ALL_ONLINE }, { 0x60090, 1, RI_ALL_ONLINE },
391 { 0xc22a0, 1, RI_ALL_ONLINE }, { 0xc22b0, 1, RI_ALL_ONLINE }, 536 { 0x6011c, 1, RI_ALL_ONLINE }, { 0x6012c, 1, RI_ALL_ONLINE },
392 { 0xc2548, 1, RI_ALL_ONLINE }, { 0xc2550, 1, RI_ALL_ONLINE }, 537 { 0xc101c, 1, RI_ALL_ONLINE }, { 0xc102c, 1, RI_ALL_ONLINE },
393 { 0xc2558, 1, RI_ALL_ONLINE }, { 0xc4294, 1, RI_ALL_ONLINE }, 538 { 0xc2290, 1, RI_ALL_ONLINE }, { 0xc22a0, 1, RI_ALL_ONLINE },
394 { 0xc42a4, 1, RI_ALL_ONLINE }, { 0xc42b4, 1, RI_ALL_ONLINE }, 539 { 0xc22b0, 1, RI_ALL_ONLINE }, { 0xc2548, 1, RI_ALL_ONLINE },
395 { 0xc4550, 1, RI_ALL_ONLINE }, { 0xc4558, 1, RI_ALL_ONLINE }, 540 { 0xc2550, 1, RI_ALL_ONLINE }, { 0xc2558, 1, RI_ALL_ONLINE },
396 { 0xc4560, 1, RI_ALL_ONLINE }, { 0xd016c, 8, RI_ALL_ONLINE }, 541 { 0xc4294, 1, RI_ALL_ONLINE }, { 0xc42a4, 1, RI_ALL_ONLINE },
397 { 0xd01d8, 1, RI_ALL_ONLINE }, { 0xd01e8, 1, RI_ALL_ONLINE }, 542 { 0xc42b4, 1, RI_ALL_ONLINE }, { 0xc4550, 1, RI_ALL_ONLINE },
398 { 0xd0204, 1, RI_ALL_ONLINE }, { 0xd020c, 3, RI_ALL_ONLINE }, 543 { 0xc4558, 1, RI_ALL_ONLINE }, { 0xc4560, 1, RI_ALL_ONLINE },
399 { 0xe0154, 8, RI_ALL_ONLINE }, { 0xe01c8, 1, RI_ALL_ONLINE }, 544 { 0xd016c, 8, RI_ALL_ONLINE }, { 0xd01d8, 1, RI_ALL_ONLINE },
400 { 0xe01d8, 1, RI_ALL_ONLINE }, { 0xe0204, 1, RI_ALL_ONLINE }, 545 { 0xd01e8, 1, RI_ALL_ONLINE }, { 0xd0204, 1, RI_ALL_ONLINE },
401 { 0xe020c, 2, RI_ALL_ONLINE }, { 0xe021c, 2, RI_ALL_ONLINE }, 546 { 0xd020c, 3, RI_ALL_ONLINE }, { 0xe0154, 8, RI_ALL_ONLINE },
402 { 0x101014, 1, RI_ALL_ONLINE }, { 0x101030, 1, RI_ALL_ONLINE }, 547 { 0xe01c8, 1, RI_ALL_ONLINE }, { 0xe01d8, 1, RI_ALL_ONLINE },
403 { 0x101040, 1, RI_ALL_ONLINE }, { 0x102058, 1, RI_ALL_ONLINE }, 548 { 0xe0204, 1, RI_ALL_ONLINE }, { 0xe020c, 2, RI_ALL_ONLINE },
404 { 0x102080, 16, RI_ALL_ONLINE }, { 0x103004, 2, RI_ALL_ONLINE }, 549 { 0xe021c, 2, RI_ALL_ONLINE }, { 0x101014, 1, RI_ALL_ONLINE },
405 { 0x103068, 1, RI_ALL_ONLINE }, { 0x103078, 1, RI_ALL_ONLINE }, 550 { 0x101030, 1, RI_ALL_ONLINE }, { 0x101040, 1, RI_ALL_ONLINE },
406 { 0x103088, 1, RI_ALL_ONLINE }, { 0x10309c, 2, RI_E1H_ONLINE }, 551 { 0x102058, 1, RI_ALL_ONLINE }, { 0x102080, 16, RI_ALL_ONLINE },
552 { 0x103004, 2, RI_ALL_ONLINE }, { 0x103068, 1, RI_ALL_ONLINE },
553 { 0x103078, 1, RI_ALL_ONLINE }, { 0x103088, 1, RI_ALL_ONLINE },
554 { 0x10309c, 2, RI_E1HE2_ONLINE }, { 0x1030b8, 2, RI_E2_ONLINE },
555 { 0x1030cc, 1, RI_E2_ONLINE }, { 0x1030e0, 1, RI_E2_ONLINE },
407 { 0x104004, 1, RI_ALL_ONLINE }, { 0x104018, 1, RI_ALL_ONLINE }, 556 { 0x104004, 1, RI_ALL_ONLINE }, { 0x104018, 1, RI_ALL_ONLINE },
408 { 0x104020, 1, RI_ALL_ONLINE }, { 0x10403c, 1, RI_ALL_ONLINE }, 557 { 0x104020, 1, RI_ALL_ONLINE }, { 0x10403c, 1, RI_ALL_ONLINE },
409 { 0x1040fc, 1, RI_ALL_ONLINE }, { 0x10410c, 1, RI_ALL_ONLINE }, 558 { 0x1040fc, 1, RI_ALL_ONLINE }, { 0x10410c, 1, RI_ALL_ONLINE },
410 { 0x104400, 64, RI_ALL_ONLINE }, { 0x104800, 64, RI_ALL_ONLINE }, 559 { 0x104400, 64, RI_ALL_ONLINE }, { 0x104800, 64, RI_ALL_ONLINE },
411 { 0x105000, 3, RI_ALL_ONLINE }, { 0x105010, 3, RI_ALL_ONLINE }, 560 { 0x105000, 256, RI_ALL_ONLINE }, { 0x108094, 1, RI_E1E1H_ONLINE },
412 { 0x105020, 3, RI_ALL_ONLINE }, { 0x105030, 3, RI_ALL_ONLINE }, 561 { 0x1201b0, 2, RI_ALL_ONLINE }, { 0x12032c, 1, RI_ALL_ONLINE },
413 { 0x105040, 3, RI_ALL_ONLINE }, { 0x105050, 3, RI_ALL_ONLINE }, 562 { 0x12036c, 3, RI_ALL_ONLINE }, { 0x120408, 2, RI_ALL_ONLINE },
414 { 0x105060, 3, RI_ALL_ONLINE }, { 0x105070, 3, RI_ALL_ONLINE }, 563 { 0x120414, 15, RI_ALL_ONLINE }, { 0x120478, 2, RI_ALL_ONLINE },
415 { 0x105080, 3, RI_ALL_ONLINE }, { 0x105090, 3, RI_ALL_ONLINE }, 564 { 0x12052c, 1, RI_ALL_ONLINE }, { 0x120564, 3, RI_ALL_ONLINE },
416 { 0x1050a0, 3, RI_ALL_ONLINE }, { 0x1050b0, 3, RI_ALL_ONLINE }, 565 { 0x12057c, 1, RI_ALL_ONLINE }, { 0x12058c, 1, RI_ALL_ONLINE },
417 { 0x1050c0, 3, RI_ALL_ONLINE }, { 0x1050d0, 3, RI_ALL_ONLINE }, 566 { 0x120608, 1, RI_E1HE2_ONLINE }, { 0x120738, 1, RI_E2_ONLINE },
418 { 0x1050e0, 3, RI_ALL_ONLINE }, { 0x1050f0, 3, RI_ALL_ONLINE }, 567 { 0x120778, 2, RI_E2_ONLINE }, { 0x120808, 3, RI_ALL_ONLINE },
419 { 0x105100, 3, RI_ALL_ONLINE }, { 0x105110, 3, RI_ALL_ONLINE },
420 { 0x105120, 3, RI_ALL_ONLINE }, { 0x105130, 3, RI_ALL_ONLINE },
421 { 0x105140, 3, RI_ALL_ONLINE }, { 0x105150, 3, RI_ALL_ONLINE },
422 { 0x105160, 3, RI_ALL_ONLINE }, { 0x105170, 3, RI_ALL_ONLINE },
423 { 0x105180, 3, RI_ALL_ONLINE }, { 0x105190, 3, RI_ALL_ONLINE },
424 { 0x1051a0, 3, RI_ALL_ONLINE }, { 0x1051b0, 3, RI_ALL_ONLINE },
425 { 0x1051c0, 3, RI_ALL_ONLINE }, { 0x1051d0, 3, RI_ALL_ONLINE },
426 { 0x1051e0, 3, RI_ALL_ONLINE }, { 0x1051f0, 3, RI_ALL_ONLINE },
427 { 0x105200, 3, RI_ALL_ONLINE }, { 0x105210, 3, RI_ALL_ONLINE },
428 { 0x105220, 3, RI_ALL_ONLINE }, { 0x105230, 3, RI_ALL_ONLINE },
429 { 0x105240, 3, RI_ALL_ONLINE }, { 0x105250, 3, RI_ALL_ONLINE },
430 { 0x105260, 3, RI_ALL_ONLINE }, { 0x105270, 3, RI_ALL_ONLINE },
431 { 0x105280, 3, RI_ALL_ONLINE }, { 0x105290, 3, RI_ALL_ONLINE },
432 { 0x1052a0, 3, RI_ALL_ONLINE }, { 0x1052b0, 3, RI_ALL_ONLINE },
433 { 0x1052c0, 3, RI_ALL_ONLINE }, { 0x1052d0, 3, RI_ALL_ONLINE },
434 { 0x1052e0, 3, RI_ALL_ONLINE }, { 0x1052f0, 3, RI_ALL_ONLINE },
435 { 0x105300, 3, RI_ALL_ONLINE }, { 0x105310, 3, RI_ALL_ONLINE },
436 { 0x105320, 3, RI_ALL_ONLINE }, { 0x105330, 3, RI_ALL_ONLINE },
437 { 0x105340, 3, RI_ALL_ONLINE }, { 0x105350, 3, RI_ALL_ONLINE },
438 { 0x105360, 3, RI_ALL_ONLINE }, { 0x105370, 3, RI_ALL_ONLINE },
439 { 0x105380, 3, RI_ALL_ONLINE }, { 0x105390, 3, RI_ALL_ONLINE },
440 { 0x1053a0, 3, RI_ALL_ONLINE }, { 0x1053b0, 3, RI_ALL_ONLINE },
441 { 0x1053c0, 3, RI_ALL_ONLINE }, { 0x1053d0, 3, RI_ALL_ONLINE },
442 { 0x1053e0, 3, RI_ALL_ONLINE }, { 0x1053f0, 3, RI_ALL_ONLINE },
443 { 0x108094, 1, RI_ALL_ONLINE }, { 0x1201b0, 2, RI_ALL_ONLINE },
444 { 0x12032c, 1, RI_ALL_ONLINE }, { 0x12036c, 3, RI_ALL_ONLINE },
445 { 0x120408, 2, RI_ALL_ONLINE }, { 0x120414, 15, RI_ALL_ONLINE },
446 { 0x120478, 2, RI_ALL_ONLINE }, { 0x12052c, 1, RI_ALL_ONLINE },
447 { 0x120564, 3, RI_ALL_ONLINE }, { 0x12057c, 1, RI_ALL_ONLINE },
448 { 0x12058c, 1, RI_ALL_ONLINE }, { 0x120608, 1, RI_E1H_ONLINE },
449 { 0x120808, 1, RI_E1_ONLINE }, { 0x12080c, 2, RI_ALL_ONLINE },
450 { 0x120818, 1, RI_ALL_ONLINE }, { 0x120820, 1, RI_ALL_ONLINE }, 568 { 0x120818, 1, RI_ALL_ONLINE }, { 0x120820, 1, RI_ALL_ONLINE },
451 { 0x120828, 1, RI_ALL_ONLINE }, { 0x120830, 1, RI_ALL_ONLINE }, 569 { 0x120828, 1, RI_ALL_ONLINE }, { 0x120830, 1, RI_ALL_ONLINE },
452 { 0x120838, 1, RI_ALL_ONLINE }, { 0x120840, 1, RI_ALL_ONLINE }, 570 { 0x120838, 1, RI_ALL_ONLINE }, { 0x120840, 1, RI_ALL_ONLINE },
@@ -462,48 +580,50 @@ static const struct reg_addr idle_addrs[IDLE_REGS_COUNT] = {
462 { 0x1208d8, 1, RI_ALL_ONLINE }, { 0x1208e0, 1, RI_ALL_ONLINE }, 580 { 0x1208d8, 1, RI_ALL_ONLINE }, { 0x1208e0, 1, RI_ALL_ONLINE },
463 { 0x1208e8, 1, RI_ALL_ONLINE }, { 0x1208f0, 1, RI_ALL_ONLINE }, 581 { 0x1208e8, 1, RI_ALL_ONLINE }, { 0x1208f0, 1, RI_ALL_ONLINE },
464 { 0x1208f8, 1, RI_ALL_ONLINE }, { 0x120900, 1, RI_ALL_ONLINE }, 582 { 0x1208f8, 1, RI_ALL_ONLINE }, { 0x120900, 1, RI_ALL_ONLINE },
465 { 0x120908, 1, RI_ALL_ONLINE }, { 0x14005c, 2, RI_ALL_ONLINE }, 583 { 0x120908, 1, RI_ALL_ONLINE }, { 0x120940, 5, RI_E2_ONLINE },
466 { 0x1400d0, 2, RI_ALL_ONLINE }, { 0x1400e0, 1, RI_ALL_ONLINE }, 584 { 0x130030, 1, RI_E2_ONLINE }, { 0x13004c, 3, RI_E2_ONLINE },
467 { 0x1401c8, 1, RI_ALL_ONLINE }, { 0x140200, 6, RI_ALL_ONLINE }, 585 { 0x130064, 2, RI_E2_ONLINE }, { 0x13009c, 1, RI_E2_ONLINE },
468 { 0x16101c, 1, RI_ALL_ONLINE }, { 0x16102c, 1, RI_ALL_ONLINE }, 586 { 0x130130, 1, RI_E2_ONLINE }, { 0x13016c, 1, RI_E2_ONLINE },
469 { 0x164014, 2, RI_ALL_ONLINE }, { 0x1640f0, 1, RI_ALL_ONLINE }, 587 { 0x130300, 1, RI_E2_ONLINE }, { 0x130480, 1, RI_E2_ONLINE },
470 { 0x166290, 1, RI_ALL_ONLINE }, { 0x1662a0, 1, RI_ALL_ONLINE }, 588 { 0x14005c, 2, RI_ALL_ONLINE }, { 0x1400d0, 2, RI_ALL_ONLINE },
471 { 0x1662b0, 1, RI_ALL_ONLINE }, { 0x166548, 1, RI_ALL_ONLINE }, 589 { 0x1400e0, 1, RI_ALL_ONLINE }, { 0x1401c8, 1, RI_ALL_ONLINE },
472 { 0x166550, 1, RI_ALL_ONLINE }, { 0x166558, 1, RI_ALL_ONLINE }, 590 { 0x140200, 6, RI_ALL_ONLINE }, { 0x16101c, 1, RI_ALL_ONLINE },
473 { 0x168000, 1, RI_ALL_ONLINE }, { 0x168008, 1, RI_ALL_ONLINE }, 591 { 0x16102c, 1, RI_ALL_ONLINE }, { 0x164014, 2, RI_ALL_ONLINE },
474 { 0x168010, 1, RI_ALL_ONLINE }, { 0x168018, 1, RI_ALL_ONLINE }, 592 { 0x1640f0, 1, RI_ALL_ONLINE }, { 0x166290, 1, RI_ALL_ONLINE },
475 { 0x168028, 2, RI_ALL_ONLINE }, { 0x168058, 4, RI_ALL_ONLINE }, 593 { 0x1662a0, 1, RI_ALL_ONLINE }, { 0x1662b0, 1, RI_ALL_ONLINE },
476 { 0x168070, 1, RI_ALL_ONLINE }, { 0x168238, 1, RI_ALL_ONLINE }, 594 { 0x166548, 1, RI_ALL_ONLINE }, { 0x166550, 1, RI_ALL_ONLINE },
477 { 0x1682d0, 2, RI_ALL_ONLINE }, { 0x1682e0, 1, RI_ALL_ONLINE }, 595 { 0x166558, 1, RI_ALL_ONLINE }, { 0x168000, 1, RI_ALL_ONLINE },
478 { 0x168300, 67, RI_ALL_ONLINE }, { 0x168410, 2, RI_ALL_ONLINE }, 596 { 0x168008, 1, RI_ALL_ONLINE }, { 0x168010, 1, RI_ALL_ONLINE },
597 { 0x168018, 1, RI_ALL_ONLINE }, { 0x168028, 2, RI_ALL_ONLINE },
598 { 0x168058, 4, RI_ALL_ONLINE }, { 0x168070, 1, RI_ALL_ONLINE },
599 { 0x168238, 1, RI_ALL_ONLINE }, { 0x1682d0, 2, RI_ALL_ONLINE },
600 { 0x1682e0, 1, RI_ALL_ONLINE }, { 0x168300, 2, RI_E1E1H_ONLINE },
601 { 0x168308, 65, RI_ALL_ONLINE }, { 0x168410, 2, RI_ALL_ONLINE },
479 { 0x168438, 1, RI_ALL_ONLINE }, { 0x168448, 1, RI_ALL_ONLINE }, 602 { 0x168438, 1, RI_ALL_ONLINE }, { 0x168448, 1, RI_ALL_ONLINE },
480 { 0x168a00, 128, RI_ALL_ONLINE }, { 0x16e200, 128, RI_E1H_ONLINE }, 603 { 0x168a00, 128, RI_ALL_ONLINE }, { 0x16e200, 128, RI_E1H_ONLINE },
481 { 0x16e404, 2, RI_E1H_ONLINE }, { 0x16e584, 70, RI_E1H_ONLINE }, 604 { 0x16e404, 2, RI_E1H_ONLINE }, { 0x16e584, 64, RI_E1H_ONLINE },
482 { 0x1700a4, 1, RI_ALL_ONLINE }, { 0x1700ac, 2, RI_ALL_ONLINE }, 605 { 0x16e684, 2, RI_E1HE2_ONLINE }, { 0x16e68c, 4, RI_E1H_ONLINE },
483 { 0x1700c0, 1, RI_ALL_ONLINE }, { 0x170174, 1, RI_ALL_ONLINE }, 606 { 0x16e6fc, 4, RI_E2_ONLINE }, { 0x1700a4, 1, RI_ALL_ONLINE },
484 { 0x170184, 1, RI_ALL_ONLINE }, { 0x1800f4, 1, RI_ALL_ONLINE }, 607 { 0x1700ac, 2, RI_ALL_ONLINE }, { 0x1700c0, 1, RI_ALL_ONLINE },
485 { 0x180104, 1, RI_ALL_ONLINE }, { 0x180114, 1, RI_ALL_ONLINE }, 608 { 0x170174, 1, RI_ALL_ONLINE }, { 0x170184, 1, RI_ALL_ONLINE },
486 { 0x180124, 1, RI_ALL_ONLINE }, { 0x18026c, 1, RI_ALL_ONLINE }, 609 { 0x1800f4, 1, RI_ALL_ONLINE }, { 0x180104, 1, RI_ALL_ONLINE },
487 { 0x1802a0, 1, RI_ALL_ONLINE }, { 0x1a1000, 1, RI_ALL_ONLINE }, 610 { 0x180114, 1, RI_ALL_ONLINE }, { 0x180124, 1, RI_ALL_ONLINE },
488 { 0x1aa000, 1, RI_E1H_ONLINE }, { 0x1b8000, 1, RI_ALL_ONLINE }, 611 { 0x18026c, 1, RI_ALL_ONLINE }, { 0x1802a0, 1, RI_ALL_ONLINE },
489 { 0x1b8040, 1, RI_ALL_ONLINE }, { 0x1b8080, 1, RI_ALL_ONLINE }, 612 { 0x1b8000, 1, RI_ALL_ONLINE }, { 0x1b8040, 1, RI_ALL_ONLINE },
490 { 0x1b80c0, 1, RI_ALL_ONLINE }, { 0x200104, 1, RI_ALL_ONLINE }, 613 { 0x1b8080, 1, RI_ALL_ONLINE }, { 0x1b80c0, 1, RI_ALL_ONLINE },
491 { 0x200114, 1, RI_ALL_ONLINE }, { 0x200124, 1, RI_ALL_ONLINE }, 614 { 0x200104, 1, RI_ALL_ONLINE }, { 0x200114, 1, RI_ALL_ONLINE },
492 { 0x200134, 1, RI_ALL_ONLINE }, { 0x20026c, 1, RI_ALL_ONLINE }, 615 { 0x200124, 1, RI_ALL_ONLINE }, { 0x200134, 1, RI_ALL_ONLINE },
493 { 0x2002a0, 1, RI_ALL_ONLINE }, { 0x221000, 1, RI_ALL_ONLINE }, 616 { 0x20026c, 1, RI_ALL_ONLINE }, { 0x2002a0, 1, RI_ALL_ONLINE },
494 { 0x227000, 1, RI_E1H_ONLINE }, { 0x238000, 1, RI_ALL_ONLINE }, 617 { 0x238000, 1, RI_ALL_ONLINE }, { 0x238040, 1, RI_ALL_ONLINE },
495 { 0x238040, 1, RI_ALL_ONLINE }, { 0x238080, 1, RI_ALL_ONLINE }, 618 { 0x238080, 1, RI_ALL_ONLINE }, { 0x2380c0, 1, RI_ALL_ONLINE },
496 { 0x2380c0, 1, RI_ALL_ONLINE }, { 0x280104, 1, RI_ALL_ONLINE }, 619 { 0x280104, 1, RI_ALL_ONLINE }, { 0x280114, 1, RI_ALL_ONLINE },
497 { 0x280114, 1, RI_ALL_ONLINE }, { 0x280124, 1, RI_ALL_ONLINE }, 620 { 0x280124, 1, RI_ALL_ONLINE }, { 0x280134, 1, RI_ALL_ONLINE },
498 { 0x280134, 1, RI_ALL_ONLINE }, { 0x28026c, 1, RI_ALL_ONLINE }, 621 { 0x28026c, 1, RI_ALL_ONLINE }, { 0x2802a0, 1, RI_ALL_ONLINE },
499 { 0x2802a0, 1, RI_ALL_ONLINE }, { 0x2a1000, 1, RI_ALL_ONLINE }, 622 { 0x2b8000, 1, RI_ALL_ONLINE }, { 0x2b8040, 1, RI_ALL_ONLINE },
500 { 0x2a9000, 1, RI_E1H_ONLINE }, { 0x2b8000, 1, RI_ALL_ONLINE }, 623 { 0x2b8080, 1, RI_ALL_ONLINE }, { 0x300104, 1, RI_ALL_ONLINE },
501 { 0x2b8040, 1, RI_ALL_ONLINE }, { 0x2b8080, 1, RI_ALL_ONLINE },
502 { 0x2b80c0, 1, RI_ALL_ONLINE }, { 0x300104, 1, RI_ALL_ONLINE },
503 { 0x300114, 1, RI_ALL_ONLINE }, { 0x300124, 1, RI_ALL_ONLINE }, 624 { 0x300114, 1, RI_ALL_ONLINE }, { 0x300124, 1, RI_ALL_ONLINE },
504 { 0x300134, 1, RI_ALL_ONLINE }, { 0x30026c, 1, RI_ALL_ONLINE }, 625 { 0x300134, 1, RI_ALL_ONLINE }, { 0x30026c, 1, RI_ALL_ONLINE },
505 { 0x3002a0, 1, RI_ALL_ONLINE }, { 0x321000, 1, RI_ALL_ONLINE }, 626 { 0x3002a0, 1, RI_ALL_ONLINE }, { 0x338000, 1, RI_ALL_ONLINE },
506 { 0x328960, 1, RI_E1H_ONLINE }, { 0x338000, 1, RI_ALL_ONLINE },
507 { 0x338040, 1, RI_ALL_ONLINE }, { 0x338080, 1, RI_ALL_ONLINE }, 627 { 0x338040, 1, RI_ALL_ONLINE }, { 0x338080, 1, RI_ALL_ONLINE },
508 { 0x3380c0, 1, RI_ALL_ONLINE } 628 { 0x3380c0, 1, RI_ALL_ONLINE }
509}; 629};
@@ -515,7 +635,6 @@ static const struct wreg_addr wreg_addrs_e1[WREGS_COUNT_E1] = {
515 { 0x1b0c00, 192, 1, read_reg_e1_0, RI_E1_OFFLINE } 635 { 0x1b0c00, 192, 1, read_reg_e1_0, RI_E1_OFFLINE }
516}; 636};
517 637
518
519#define WREGS_COUNT_E1H 1 638#define WREGS_COUNT_E1H 1
520static const u32 read_reg_e1h_0[] = { 0x1b1040, 0x1b1000 }; 639static const u32 read_reg_e1h_0[] = { 0x1b1040, 0x1b1000 };
521 640
@@ -530,22 +649,53 @@ static const struct wreg_addr wreg_addrs_e2[WREGS_COUNT_E2] = {
530 { 0x1b0c00, 128, 2, read_reg_e2_0, RI_E2_OFFLINE } 649 { 0x1b0c00, 128, 2, read_reg_e2_0, RI_E2_OFFLINE }
531}; 650};
532 651
533static const struct dump_sign dump_sign_all = { 0x49aa93ee, 0x40835, 0x22 }; 652static const struct dump_sign dump_sign_all = { 0x4d18b0a4, 0x60010, 0x3a };
534
535 653
536#define TIMER_REGS_COUNT_E1 2 654#define TIMER_REGS_COUNT_E1 2
537static const u32 timer_status_regs_e1[TIMER_REGS_COUNT_E1] =
538 { 0x164014, 0x164018 };
539static const u32 timer_scan_regs_e1[TIMER_REGS_COUNT_E1] =
540 { 0x1640d0, 0x1640d4 };
541 655
656static const u32 timer_status_regs_e1[TIMER_REGS_COUNT_E1] = {
657 0x164014, 0x164018 };
658static const u32 timer_scan_regs_e1[TIMER_REGS_COUNT_E1] = {
659 0x1640d0, 0x1640d4 };
542 660
543#define TIMER_REGS_COUNT_E1H 2 661#define TIMER_REGS_COUNT_E1H 2
544static const u32 timer_status_regs_e1h[TIMER_REGS_COUNT_E1H] =
545 { 0x164014, 0x164018 };
546static const u32 timer_scan_regs_e1h[TIMER_REGS_COUNT_E1H] =
547 { 0x1640d0, 0x1640d4 };
548 662
663static const u32 timer_status_regs_e1h[TIMER_REGS_COUNT_E1H] = {
664 0x164014, 0x164018 };
665static const u32 timer_scan_regs_e1h[TIMER_REGS_COUNT_E1H] = {
666 0x1640d0, 0x1640d4 };
667
668#define TIMER_REGS_COUNT_E2 2
669
670static const u32 timer_status_regs_e2[TIMER_REGS_COUNT_E2] = {
671 0x164014, 0x164018 };
672static const u32 timer_scan_regs_e2[TIMER_REGS_COUNT_E2] = {
673 0x1640d0, 0x1640d4 };
674
675#define PAGE_MODE_VALUES_E1 0
676
677#define PAGE_READ_REGS_E1 0
678
679#define PAGE_WRITE_REGS_E1 0
680
681static const u32 page_vals_e1[] = { 0 };
682
683static const u32 page_write_regs_e1[] = { 0 };
684
685static const struct reg_addr page_read_regs_e1[] = { { 0x0, 0, RI_E1_ONLINE } };
686
687#define PAGE_MODE_VALUES_E1H 0
688
689#define PAGE_READ_REGS_E1H 0
690
691#define PAGE_WRITE_REGS_E1H 0
692
693static const u32 page_vals_e1h[] = { 0 };
694
695static const u32 page_write_regs_e1h[] = { 0 };
696
697static const struct reg_addr page_read_regs_e1h[] = {
698 { 0x0, 0, RI_E1H_ONLINE } };
549 699
550#define PAGE_MODE_VALUES_E2 2 700#define PAGE_MODE_VALUES_E2 2
551 701
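Each entry in the reg_addr tables above is a { GRC offset, length in
32-bit words, RI_* flags } triple; the flags record which chips
(E1/57710, E1H/57711, E2/57712) implement the range and whether it may
be read while the device is running ("online") or only from a quiesced
device ("offline"). A minimal sketch of how such a table is consumed
for the online E2 case -- REG_RD() and IS_E2_ONLINE() are the driver's
own helpers, while the table name reg_addrs and its REGS_COUNT bound
are assumed from the length computation in bnx2x_get_regs_len() below:

	/* Sketch only: copy every online E2 range into the dump buffer */
	static u32 *bnx2x_dump_online_e2(struct bnx2x *bp, u32 *p)
	{
		int i;
		u32 j;

		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E2_ONLINE(reg_addrs[i].info))
				/* entry = { GRC offset, length in u32s, flags } */
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp, reg_addrs[i].addr + j * 4);
		return p;
	}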
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index 99c672d894ca..5b44a8b48509 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -24,6 +24,7 @@
24#include "bnx2x.h" 24#include "bnx2x.h"
25#include "bnx2x_cmn.h" 25#include "bnx2x_cmn.h"
26#include "bnx2x_dump.h" 26#include "bnx2x_dump.h"
27#include "bnx2x_init.h"
27 28
28/* Note: in the format strings below %s is replaced by the queue-name which is 29/* Note: in the format strings below %s is replaced by the queue-name which is
29 * either its index or 'fcoe' for the fcoe queue. Make sure the format string 30 * either its index or 'fcoe' for the fcoe queue. Make sure the format string
@@ -472,7 +473,7 @@ static int bnx2x_get_regs_len(struct net_device *dev)
472{ 473{
473 struct bnx2x *bp = netdev_priv(dev); 474 struct bnx2x *bp = netdev_priv(dev);
474 int regdump_len = 0; 475 int regdump_len = 0;
475 int i; 476 int i, j, k;
476 477
477 if (CHIP_IS_E1(bp)) { 478 if (CHIP_IS_E1(bp)) {
478 for (i = 0; i < REGS_COUNT; i++) 479 for (i = 0; i < REGS_COUNT; i++)
@@ -502,6 +503,15 @@ static int bnx2x_get_regs_len(struct net_device *dev)
502 if (IS_E2_ONLINE(wreg_addrs_e2[i].info)) 503 if (IS_E2_ONLINE(wreg_addrs_e2[i].info))
503 regdump_len += wreg_addrs_e2[i].size * 504 regdump_len += wreg_addrs_e2[i].size *
504 (1 + wreg_addrs_e2[i].read_regs_count); 505 (1 + wreg_addrs_e2[i].read_regs_count);
506
507 for (i = 0; i < PAGE_MODE_VALUES_E2; i++)
508 for (j = 0; j < PAGE_WRITE_REGS_E2; j++) {
509 for (k = 0; k < PAGE_READ_REGS_E2; k++)
510 if (IS_E2_ONLINE(page_read_regs_e2[k].
511 info))
512 regdump_len +=
513 page_read_regs_e2[k].size;
514 }
505 } 515 }
506 regdump_len *= 4; 516 regdump_len *= 4;
507 regdump_len += sizeof(struct dump_hdr); 517 regdump_len += sizeof(struct dump_hdr);
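The new nested loops account for E2's paged registers: each of the
PAGE_MODE_VALUES_E2 page values is programmed through every one of the
PAGE_WRITE_REGS_E2 window registers, and each online page_read_regs_e2
range is then read out, so the dump length grows by the product of those
counts. A rough sketch of the matching read side (the driver's real
routine is bnx2x_read_pages_regs_e2(), called from bnx2x_get_regs()
below; the E2 table names here are assumed to mirror the E1/E1H ones
defined in bnx2x_dump.h):

	/* Sketch only: select each page, then read every online range */
	static u32 *read_pages_regs_e2_sketch(struct bnx2x *bp, u32 *p)
	{
		u32 i, j, k, n;

		for (i = 0; i < PAGE_MODE_VALUES_E2; i++)
			for (j = 0; j < PAGE_WRITE_REGS_E2; j++) {
				/* program this page value into the window reg */
				REG_WR(bp, page_write_regs_e2[j], page_vals_e2[i]);
				for (k = 0; k < PAGE_READ_REGS_E2; k++) {
					if (!IS_E2_ONLINE(page_read_regs_e2[k].info))
						continue;
					for (n = 0; n < page_read_regs_e2[k].size; n++)
						*p++ = REG_RD(bp,
						       page_read_regs_e2[k].addr + n * 4);
				}
			}
		return p;
	}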
@@ -539,6 +549,12 @@ static void bnx2x_get_regs(struct net_device *dev,
539 if (!netif_running(bp->dev)) 549 if (!netif_running(bp->dev))
540 return; 550 return;
541 551
552	/* Disable parity attentions while the following dump runs, since it
553	 * may cause false alarms by reading registers that were never written.
554	 * We will re-enable parity attentions right after the dump.
555 */
556 bnx2x_disable_blocks_parity(bp);
557
542 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1; 558 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
543 dump_hdr.dump_sign = dump_sign_all; 559 dump_hdr.dump_sign = dump_sign_all;
544 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR); 560 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
@@ -580,6 +596,10 @@ static void bnx2x_get_regs(struct net_device *dev,
580 596
581 bnx2x_read_pages_regs_e2(bp, p); 597 bnx2x_read_pages_regs_e2(bp, p);
582 } 598 }
599 /* Re-enable parity attentions */
600 bnx2x_clear_blocks_parity(bp);
601 if (CHIP_PARITY_ENABLED(bp))
602 bnx2x_enable_blocks_parity(bp);
583} 603}
584 604
585#define PHY_FW_VER_LEN 20 605#define PHY_FW_VER_LEN 20
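Condensed, the bnx2x_get_regs() changes above bracket the dump as
follows (the three parity helpers and the CHIP_PARITY_ENABLED() check
are exactly as used in the hunks above):

	bnx2x_disable_blocks_parity(bp);   /* mask all parity attentions */
	/* ... fill dump_hdr and read the online/offline/paged ranges ... */
	bnx2x_clear_blocks_parity(bp);     /* clear statuses the reads raised */
	if (CHIP_PARITY_ENABLED(bp))       /* re-arm only where enabled */
		bnx2x_enable_blocks_parity(bp);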
diff --git a/drivers/net/bnx2x/bnx2x_init.h b/drivers/net/bnx2x/bnx2x_init.h
index a9d54874a559..5a268e9a0895 100644
--- a/drivers/net/bnx2x/bnx2x_init.h
+++ b/drivers/net/bnx2x/bnx2x_init.h
@@ -192,5 +192,225 @@ struct src_ent {
192 u64 next; 192 u64 next;
193}; 193};
194 194
195/****************************************************************************
196* Parity configuration
197****************************************************************************/
198#define BLOCK_PRTY_INFO(block, en_mask, m1, m1h, m2) \
199{ \
200 block##_REG_##block##_PRTY_MASK, \
201 block##_REG_##block##_PRTY_STS_CLR, \
202 en_mask, {m1, m1h, m2}, #block \
203}
204
205#define BLOCK_PRTY_INFO_0(block, en_mask, m1, m1h, m2) \
206{ \
207 block##_REG_##block##_PRTY_MASK_0, \
208 block##_REG_##block##_PRTY_STS_CLR_0, \
209 en_mask, {m1, m1h, m2}, #block"_0" \
210}
211
212#define BLOCK_PRTY_INFO_1(block, en_mask, m1, m1h, m2) \
213{ \
214 block##_REG_##block##_PRTY_MASK_1, \
215 block##_REG_##block##_PRTY_STS_CLR_1, \
216 en_mask, {m1, m1h, m2}, #block"_1" \
217}
218
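/* Expansion sketch for reference: with the register names from
 * bnx2x_reg.h, BLOCK_PRTY_INFO(PXP, 0x3ffffff, 0x3ffffff, 0x3ffffff,
 * 0x3ffffff) pastes to the initializer
 *	{ PXP_REG_PXP_PRTY_MASK, PXP_REG_PXP_PRTY_STS_CLR,
 *	  0x3ffffff, {0x3ffffff, 0x3ffffff, 0x3ffffff}, "PXP" }
 * and the _0/_1 variants append the matching suffix to both register
 * names and to the stringized block name.
 */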
219static const struct {
220 u32 mask_addr;
221 u32 sts_clr_addr;
222 u32 en_mask; /* Mask to enable parity attentions */
223 struct {
224 u32 e1; /* 57710 */
225 u32 e1h; /* 57711 */
226 u32 e2; /* 57712 */
227 } reg_mask; /* Register mask (all valid bits) */
228 char name[7]; /* Block's longest name is 6 characters long
229 * (name + suffix)
230 */
231} bnx2x_blocks_parity_data[] = {
232 /* bit 19 masked */
233 /* REG_WR(bp, PXP_REG_PXP_PRTY_MASK, 0x80000); */
234 /* bit 5,18,20-31 */
235 /* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_0, 0xfff40020); */
236 /* bit 5 */
237 /* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_1, 0x20); */
238 /* REG_WR(bp, HC_REG_HC_PRTY_MASK, 0x0); */
239 /* REG_WR(bp, MISC_REG_MISC_PRTY_MASK, 0x0); */
240
241	/* Block IGU, MISC, PXP and PXP2 parity errors because we don't
242	 * want to handle the "system kill" flow at the moment.
243 */
244 BLOCK_PRTY_INFO(PXP, 0x3ffffff, 0x3ffffff, 0x3ffffff, 0x3ffffff),
245 BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff),
246 BLOCK_PRTY_INFO_1(PXP2, 0x7ff, 0x7f, 0x7f, 0x7ff),
247 BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0),
248 BLOCK_PRTY_INFO(IGU, 0x7ff, 0, 0, 0x7ff),
249 BLOCK_PRTY_INFO(MISC, 0x1, 0x1, 0x1, 0x1),
250 BLOCK_PRTY_INFO(QM, 0, 0x1ff, 0xfff, 0xfff),
251 BLOCK_PRTY_INFO(DORQ, 0, 0x3, 0x3, 0x3),
252 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK,
253 GRCBASE_UPB + PB_REG_PB_PRTY_STS_CLR, 0,
254 {0xf, 0xf, 0xf}, "UPB"},
255 {GRCBASE_XPB + PB_REG_PB_PRTY_MASK,
256 GRCBASE_XPB + PB_REG_PB_PRTY_STS_CLR, 0,
257 {0xf, 0xf, 0xf}, "XPB"},
258 BLOCK_PRTY_INFO(SRC, 0x4, 0x7, 0x7, 0x7),
259 BLOCK_PRTY_INFO(CDU, 0, 0x1f, 0x1f, 0x1f),
260 BLOCK_PRTY_INFO(CFC, 0, 0xf, 0xf, 0xf),
261 BLOCK_PRTY_INFO(DBG, 0, 0x1, 0x1, 0x1),
262 BLOCK_PRTY_INFO(DMAE, 0, 0xf, 0xf, 0xf),
263 BLOCK_PRTY_INFO(BRB1, 0, 0xf, 0xf, 0xf),
264 BLOCK_PRTY_INFO(PRS, (1<<6), 0xff, 0xff, 0xff),
265 BLOCK_PRTY_INFO(TSDM, 0x18, 0x7ff, 0x7ff, 0x7ff),
266 BLOCK_PRTY_INFO(CSDM, 0x8, 0x7ff, 0x7ff, 0x7ff),
267 BLOCK_PRTY_INFO(USDM, 0x38, 0x7ff, 0x7ff, 0x7ff),
268 BLOCK_PRTY_INFO(XSDM, 0x8, 0x7ff, 0x7ff, 0x7ff),
269 BLOCK_PRTY_INFO_0(TSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
270 BLOCK_PRTY_INFO_1(TSEM, 0, 0x3, 0x1f, 0x3f),
271 BLOCK_PRTY_INFO_0(USEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
272 BLOCK_PRTY_INFO_1(USEM, 0, 0x3, 0x1f, 0x1f),
273 BLOCK_PRTY_INFO_0(CSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
274 BLOCK_PRTY_INFO_1(CSEM, 0, 0x3, 0x1f, 0x1f),
275 BLOCK_PRTY_INFO_0(XSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
276 BLOCK_PRTY_INFO_1(XSEM, 0, 0x3, 0x1f, 0x3f),
277};
278
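/* Note how the two masks are used below: a set bit in a PRTY_MASK
 * register silences that attention, so bnx2x_disable_blocks_parity()
 * writes the chip's full reg_mask, while bnx2x_enable_blocks_parity()
 * writes en_mask & reg_mask -- bits set in en_mask therefore stay
 * silenced even when parity is re-armed (all of PXP/PXP2/IGU/MISC,
 * bit 6 of PRS, bits 3-4 of TSDM, and so on).
 */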
279
280/* [28] MCP Latched rom_parity
281 * [29] MCP Latched ump_rx_parity
282 * [30] MCP Latched ump_tx_parity
283 * [31] MCP Latched scpad_parity
284 */
285#define MISC_AEU_ENABLE_MCP_PRTY_BITS \
286 (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
287 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
288 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
289 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
290
291/* Below registers control the MCP parity attention output. When
292 * MISC_AEU_ENABLE_MCP_PRTY_BITS are set - attentions are
293 * enabled, when cleared - disabled.
294 */
295static const u32 mcp_attn_ctl_regs[] = {
296 MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0,
297 MISC_REG_AEU_ENABLE4_NIG_0,
298 MISC_REG_AEU_ENABLE4_PXP_0,
299 MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0,
300 MISC_REG_AEU_ENABLE4_NIG_1,
301 MISC_REG_AEU_ENABLE4_PXP_1
302};
303
304static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable)
305{
306 int i;
307 u32 reg_val;
308
309 for (i = 0; i < ARRAY_SIZE(mcp_attn_ctl_regs); i++) {
310 reg_val = REG_RD(bp, mcp_attn_ctl_regs[i]);
311
312 if (enable)
313 reg_val |= MISC_AEU_ENABLE_MCP_PRTY_BITS;
314 else
315 reg_val &= ~MISC_AEU_ENABLE_MCP_PRTY_BITS;
316
317 REG_WR(bp, mcp_attn_ctl_regs[i], reg_val);
318 }
319}
320
321static inline u32 bnx2x_parity_reg_mask(struct bnx2x *bp, int idx)
322{
323 if (CHIP_IS_E1(bp))
324 return bnx2x_blocks_parity_data[idx].reg_mask.e1;
325 else if (CHIP_IS_E1H(bp))
326 return bnx2x_blocks_parity_data[idx].reg_mask.e1h;
327 else
328 return bnx2x_blocks_parity_data[idx].reg_mask.e2;
329}
330
331static inline void bnx2x_disable_blocks_parity(struct bnx2x *bp)
332{
333 int i;
334
335 for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
336 u32 dis_mask = bnx2x_parity_reg_mask(bp, i);
337
338 if (dis_mask) {
339 REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr,
340 dis_mask);
341 DP(NETIF_MSG_HW, "Setting parity mask "
342 "for %s to\t\t0x%x\n",
343 bnx2x_blocks_parity_data[i].name, dis_mask);
344 }
345 }
346
347 /* Disable MCP parity attentions */
348 bnx2x_set_mcp_parity(bp, false);
349}
350
351/**
352 * Clear the parity error status registers.
353 */
354static inline void bnx2x_clear_blocks_parity(struct bnx2x *bp)
355{
356 int i;
357 u32 reg_val, mcp_aeu_bits =
358 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY |
359 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY |
360 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY |
361 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY;
362
363 /* Clear SEM_FAST parities */
364 REG_WR(bp, XSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
365 REG_WR(bp, TSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
366 REG_WR(bp, USEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
367 REG_WR(bp, CSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
368
369 for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
370 u32 reg_mask = bnx2x_parity_reg_mask(bp, i);
371
372 if (reg_mask) {
373 reg_val = REG_RD(bp, bnx2x_blocks_parity_data[i].
374 sts_clr_addr);
375 if (reg_val & reg_mask)
376 DP(NETIF_MSG_HW,
377 "Parity errors in %s: 0x%x\n",
378 bnx2x_blocks_parity_data[i].name,
379 reg_val & reg_mask);
380 }
381 }
382
383 /* Check if there were parity attentions in MCP */
384 reg_val = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_MCP);
385 if (reg_val & mcp_aeu_bits)
386 DP(NETIF_MSG_HW, "Parity error in MCP: 0x%x\n",
387 reg_val & mcp_aeu_bits);
388
389 /* Clear parity attentions in MCP:
390 * [7] clears Latched rom_parity
391 * [8] clears Latched ump_rx_parity
392 * [9] clears Latched ump_tx_parity
393 * [10] clears Latched scpad_parity (both ports)
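	 * (i.e. bits [10:7] == 0xf << 7 == the 0x780 written below)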
394 */
395 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x780);
396}
397
398static inline void bnx2x_enable_blocks_parity(struct bnx2x *bp)
399{
400 int i;
401
402 for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
403 u32 reg_mask = bnx2x_parity_reg_mask(bp, i);
404
405 if (reg_mask)
406 REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr,
407 bnx2x_blocks_parity_data[i].en_mask & reg_mask);
408 }
409
410 /* Enable MCP parity attentions */
411 bnx2x_set_mcp_parity(bp, true);
412}
413
414
195#endif /* BNX2X_INIT_H */ 415#endif /* BNX2X_INIT_H */
196 416
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index 489a5512a04d..84e1af4d65e1 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -3152,7 +3152,6 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3152#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1) 3152#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3153#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK) 3153#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
3154#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS 3154#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
3155#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
3156 3155
3157/* 3156/*
3158 * should be run under rtnl lock 3157 * should be run under rtnl lock
@@ -3527,7 +3526,7 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3527 try to handle this event */ 3526 try to handle this event */
3528 bnx2x_acquire_alr(bp); 3527 bnx2x_acquire_alr(bp);
3529 3528
3530 if (bnx2x_chk_parity_attn(bp)) { 3529 if (CHIP_PARITY_ENABLED(bp) && bnx2x_chk_parity_attn(bp)) {
3531 bp->recovery_state = BNX2X_RECOVERY_INIT; 3530 bp->recovery_state = BNX2X_RECOVERY_INIT;
3532 bnx2x_set_reset_in_progress(bp); 3531 bnx2x_set_reset_in_progress(bp);
3533 schedule_delayed_work(&bp->reset_task, 0); 3532 schedule_delayed_work(&bp->reset_task, 0);
@@ -4754,7 +4753,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
4754 return 0; /* OK */ 4753 return 0; /* OK */
4755} 4754}
4756 4755
4757static void enable_blocks_attention(struct bnx2x *bp) 4756static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
4758{ 4757{
4759 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); 4758 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
4760 if (CHIP_IS_E2(bp)) 4759 if (CHIP_IS_E2(bp))
@@ -4808,53 +4807,9 @@ static void enable_blocks_attention(struct bnx2x *bp)
4808 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0); 4807 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
4809 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0); 4808 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
4810/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */ 4809/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
4811 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */ 4810 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */
4812} 4811}
4813 4812
4814static const struct {
4815 u32 addr;
4816 u32 mask;
4817} bnx2x_parity_mask[] = {
4818 {PXP_REG_PXP_PRTY_MASK, 0x3ffffff},
4819 {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
4820 {PXP2_REG_PXP2_PRTY_MASK_1, 0x7f},
4821 {HC_REG_HC_PRTY_MASK, 0x7},
4822 {MISC_REG_MISC_PRTY_MASK, 0x1},
4823 {QM_REG_QM_PRTY_MASK, 0x0},
4824 {DORQ_REG_DORQ_PRTY_MASK, 0x0},
4825 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
4826 {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
4827 {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
4828 {CDU_REG_CDU_PRTY_MASK, 0x0},
4829 {CFC_REG_CFC_PRTY_MASK, 0x0},
4830 {DBG_REG_DBG_PRTY_MASK, 0x0},
4831 {DMAE_REG_DMAE_PRTY_MASK, 0x0},
4832 {BRB1_REG_BRB1_PRTY_MASK, 0x0},
4833 {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
4834 {TSDM_REG_TSDM_PRTY_MASK, 0x18}, /* bit 3,4 */
4835 {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
4836 {USDM_REG_USDM_PRTY_MASK, 0x38}, /* bit 3,4,5 */
4837 {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
4838 {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
4839 {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
4840 {USEM_REG_USEM_PRTY_MASK_0, 0x0},
4841 {USEM_REG_USEM_PRTY_MASK_1, 0x0},
4842 {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
4843 {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
4844 {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
4845 {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
4846};
4847
4848static void enable_blocks_parity(struct bnx2x *bp)
4849{
4850 int i;
4851
4852 for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
4853 REG_WR(bp, bnx2x_parity_mask[i].addr,
4854 bnx2x_parity_mask[i].mask);
4855}
4856
4857
4858static void bnx2x_reset_common(struct bnx2x *bp) 4813static void bnx2x_reset_common(struct bnx2x *bp)
4859{ 4814{
4860 /* reset_common */ 4815 /* reset_common */
@@ -5350,9 +5305,9 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
5350 /* clear PXP2 attentions */ 5305 /* clear PXP2 attentions */
5351 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0); 5306 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5352 5307
5353 enable_blocks_attention(bp); 5308 bnx2x_enable_blocks_attention(bp);
5354 if (CHIP_PARITY_SUPPORTED(bp)) 5309 if (CHIP_PARITY_ENABLED(bp))
5355 enable_blocks_parity(bp); 5310 bnx2x_enable_blocks_parity(bp);
5356 5311
5357 if (!BP_NOMCP(bp)) { 5312 if (!BP_NOMCP(bp)) {
5358 /* In E2 2-PORT mode, same ext phy is used for the two paths */ 5313 /* In E2 2-PORT mode, same ext phy is used for the two paths */
@@ -8751,13 +8706,6 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8751 dev_err(&bp->pdev->dev, "MCP disabled, " 8706 dev_err(&bp->pdev->dev, "MCP disabled, "
8752 "must load devices in order!\n"); 8707 "must load devices in order!\n");
8753 8708
8754 /* Set multi queue mode */
8755 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8756 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8757 dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
8758 "requested is not MSI-X\n");
8759 multi_mode = ETH_RSS_MODE_DISABLED;
8760 }
8761 bp->multi_mode = multi_mode; 8709 bp->multi_mode = multi_mode;
8762 bp->int_mode = int_mode; 8710 bp->int_mode = int_mode;
8763 8711
@@ -9560,9 +9508,15 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
9560 /* Delete all NAPI objects */ 9508 /* Delete all NAPI objects */
9561 bnx2x_del_all_napi(bp); 9509 bnx2x_del_all_napi(bp);
9562 9510
9511 /* Power on: we can't let PCI layer write to us while we are in D3 */
9512 bnx2x_set_power_state(bp, PCI_D0);
9513
9563 /* Disable MSI/MSI-X */ 9514 /* Disable MSI/MSI-X */
9564 bnx2x_disable_msi(bp); 9515 bnx2x_disable_msi(bp);
9565 9516
9517 /* Power off */
9518 bnx2x_set_power_state(bp, PCI_D3hot);
9519
9566 /* Make sure RESET task is not scheduled before continuing */ 9520 /* Make sure RESET task is not scheduled before continuing */
9567 cancel_delayed_work_sync(&bp->reset_task); 9521 cancel_delayed_work_sync(&bp->reset_task);
9568 9522
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h
index bfd875b72906..38ef7ca9f21d 100644
--- a/drivers/net/bnx2x/bnx2x_reg.h
+++ b/drivers/net/bnx2x/bnx2x_reg.h
@@ -18,6 +18,8 @@
18 * WR - Write Clear (write 1 to clear the bit) 18 * WR - Write Clear (write 1 to clear the bit)
19 * 19 *
20 */ 20 */
21#ifndef BNX2X_REG_H
22#define BNX2X_REG_H
21 23
22#define ATC_ATC_INT_STS_REG_ADDRESS_ERROR (0x1<<0) 24#define ATC_ATC_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
23#define ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS (0x1<<2) 25#define ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS (0x1<<2)
@@ -39,6 +41,8 @@
39#define BRB1_REG_BRB1_PRTY_MASK 0x60138 41#define BRB1_REG_BRB1_PRTY_MASK 0x60138
40/* [R 4] Parity register #0 read */ 42/* [R 4] Parity register #0 read */
41#define BRB1_REG_BRB1_PRTY_STS 0x6012c 43#define BRB1_REG_BRB1_PRTY_STS 0x6012c
44/* [RC 4] Parity register #0 read clear */
45#define BRB1_REG_BRB1_PRTY_STS_CLR 0x60130
42/* [RW 10] At address BRB1_IND_FREE_LIST_PRS_CRDT initialize free head. At 46/* [RW 10] At address BRB1_IND_FREE_LIST_PRS_CRDT initialize free head. At
43 * address BRB1_IND_FREE_LIST_PRS_CRDT+1 initialize free tail. At address 47 * address BRB1_IND_FREE_LIST_PRS_CRDT+1 initialize free tail. At address
44 * BRB1_IND_FREE_LIST_PRS_CRDT+2 initialize parser initial credit. Warning - 48 * BRB1_IND_FREE_LIST_PRS_CRDT+2 initialize parser initial credit. Warning -
@@ -132,8 +136,12 @@
132#define CCM_REG_CCM_INT_MASK 0xd01e4 136#define CCM_REG_CCM_INT_MASK 0xd01e4
133/* [R 11] Interrupt register #0 read */ 137/* [R 11] Interrupt register #0 read */
134#define CCM_REG_CCM_INT_STS 0xd01d8 138#define CCM_REG_CCM_INT_STS 0xd01d8
139/* [RW 27] Parity mask register #0 read/write */
140#define CCM_REG_CCM_PRTY_MASK 0xd01f4
135/* [R 27] Parity register #0 read */ 141/* [R 27] Parity register #0 read */
136#define CCM_REG_CCM_PRTY_STS 0xd01e8 142#define CCM_REG_CCM_PRTY_STS 0xd01e8
143/* [RC 27] Parity register #0 read clear */
144#define CCM_REG_CCM_PRTY_STS_CLR 0xd01ec
137/* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS 145/* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS
138 REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5). 146 REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
139 Is used to determine the number of the AG context REG-pairs written back; 147 Is used to determine the number of the AG context REG-pairs written back;
@@ -350,6 +358,8 @@
350#define CDU_REG_CDU_PRTY_MASK 0x10104c 358#define CDU_REG_CDU_PRTY_MASK 0x10104c
351/* [R 5] Parity register #0 read */ 359/* [R 5] Parity register #0 read */
352#define CDU_REG_CDU_PRTY_STS 0x101040 360#define CDU_REG_CDU_PRTY_STS 0x101040
361/* [RC 5] Parity register #0 read clear */
362#define CDU_REG_CDU_PRTY_STS_CLR 0x101044
353/* [RC 32] logging of error data in case of a CDU load error: 363/* [RC 32] logging of error data in case of a CDU load error:
354   {expected_cid[15:0]; expected_type[2:0]; expected_region[2:0]; active_error; 364
355   type_error; actual_active; actual_compressed_context}; */ 365
@@ -381,6 +391,8 @@
381#define CFC_REG_CFC_PRTY_MASK 0x104118 391#define CFC_REG_CFC_PRTY_MASK 0x104118
382/* [R 4] Parity register #0 read */ 392/* [R 4] Parity register #0 read */
383#define CFC_REG_CFC_PRTY_STS 0x10410c 393#define CFC_REG_CFC_PRTY_STS 0x10410c
394/* [RC 4] Parity register #0 read clear */
395#define CFC_REG_CFC_PRTY_STS_CLR 0x104110
384/* [RW 21] CID cam access (21:1 - Data; valid - 0) */ 396
385#define CFC_REG_CID_CAM 0x104800 397#define CFC_REG_CID_CAM 0x104800
386#define CFC_REG_CONTROL0 0x104028 398#define CFC_REG_CONTROL0 0x104028
@@ -466,6 +478,8 @@
466#define CSDM_REG_CSDM_PRTY_MASK 0xc22bc 478#define CSDM_REG_CSDM_PRTY_MASK 0xc22bc
467/* [R 11] Parity register #0 read */ 479/* [R 11] Parity register #0 read */
468#define CSDM_REG_CSDM_PRTY_STS 0xc22b0 480#define CSDM_REG_CSDM_PRTY_STS 0xc22b0
481/* [RC 11] Parity register #0 read clear */
482#define CSDM_REG_CSDM_PRTY_STS_CLR 0xc22b4
469#define CSDM_REG_ENABLE_IN1 0xc2238 483#define CSDM_REG_ENABLE_IN1 0xc2238
470#define CSDM_REG_ENABLE_IN2 0xc223c 484#define CSDM_REG_ENABLE_IN2 0xc223c
471#define CSDM_REG_ENABLE_OUT1 0xc2240 485#define CSDM_REG_ENABLE_OUT1 0xc2240
@@ -556,6 +570,9 @@
556/* [R 32] Parity register #0 read */ 570/* [R 32] Parity register #0 read */
557#define CSEM_REG_CSEM_PRTY_STS_0 0x200124 571#define CSEM_REG_CSEM_PRTY_STS_0 0x200124
558#define CSEM_REG_CSEM_PRTY_STS_1 0x200134 572#define CSEM_REG_CSEM_PRTY_STS_1 0x200134
573/* [RC 32] Parity register #0 read clear */
574#define CSEM_REG_CSEM_PRTY_STS_CLR_0 0x200128
575#define CSEM_REG_CSEM_PRTY_STS_CLR_1 0x200138
559#define CSEM_REG_ENABLE_IN 0x2000a4 576#define CSEM_REG_ENABLE_IN 0x2000a4
560#define CSEM_REG_ENABLE_OUT 0x2000a8 577#define CSEM_REG_ENABLE_OUT 0x2000a8
561/* [RW 32] This address space contains all registers and memories that are 578/* [RW 32] This address space contains all registers and memories that are
@@ -648,6 +665,8 @@
648#define DBG_REG_DBG_PRTY_MASK 0xc0a8 665#define DBG_REG_DBG_PRTY_MASK 0xc0a8
649/* [R 1] Parity register #0 read */ 666/* [R 1] Parity register #0 read */
650#define DBG_REG_DBG_PRTY_STS 0xc09c 667#define DBG_REG_DBG_PRTY_STS 0xc09c
668/* [RC 1] Parity register #0 read clear */
669#define DBG_REG_DBG_PRTY_STS_CLR 0xc0a0
651/* [RW 1] When set the DMAE will process the commands as in E1.5. 1.The 670/* [RW 1] When set the DMAE will process the commands as in E1.5. 1.The
652 * function that is used is always SRC-PCI; 2.VF_Valid = 0; 3.VFID=0; 671 * function that is used is always SRC-PCI; 2.VF_Valid = 0; 3.VFID=0;
653 * 4.Completion function=0; 5.Error handling=0 */ 672 * 4.Completion function=0; 5.Error handling=0 */
@@ -668,6 +687,8 @@
668#define DMAE_REG_DMAE_PRTY_MASK 0x102064 687#define DMAE_REG_DMAE_PRTY_MASK 0x102064
669/* [R 4] Parity register #0 read */ 688/* [R 4] Parity register #0 read */
670#define DMAE_REG_DMAE_PRTY_STS 0x102058 689#define DMAE_REG_DMAE_PRTY_STS 0x102058
690/* [RC 4] Parity register #0 read clear */
691#define DMAE_REG_DMAE_PRTY_STS_CLR 0x10205c
671/* [RW 1] Command 0 go. */ 692/* [RW 1] Command 0 go. */
672#define DMAE_REG_GO_C0 0x102080 693#define DMAE_REG_GO_C0 0x102080
673/* [RW 1] Command 1 go. */ 694/* [RW 1] Command 1 go. */
@@ -734,6 +755,8 @@
734#define DORQ_REG_DORQ_PRTY_MASK 0x170190 755#define DORQ_REG_DORQ_PRTY_MASK 0x170190
735/* [R 2] Parity register #0 read */ 756/* [R 2] Parity register #0 read */
736#define DORQ_REG_DORQ_PRTY_STS 0x170184 757#define DORQ_REG_DORQ_PRTY_STS 0x170184
758/* [RC 2] Parity register #0 read clear */
759#define DORQ_REG_DORQ_PRTY_STS_CLR 0x170188
737/* [RW 8] The address to write the DPM CID to STORM. */ 760/* [RW 8] The address to write the DPM CID to STORM. */
738#define DORQ_REG_DPM_CID_ADDR 0x170044 761#define DORQ_REG_DPM_CID_ADDR 0x170044
739/* [RW 5] The DPM mode CID extraction offset. */ 762/* [RW 5] The DPM mode CID extraction offset. */
@@ -842,8 +865,12 @@
842/* [R 1] data available for error memory. If this bit is clear do not read 865
843 * from error_handling_memory. */ 866
844#define IGU_REG_ERROR_HANDLING_DATA_VALID 0x130130 867#define IGU_REG_ERROR_HANDLING_DATA_VALID 0x130130
868/* [RW 11] Parity mask register #0 read/write */
869#define IGU_REG_IGU_PRTY_MASK 0x1300a8
845/* [R 11] Parity register #0 read */ 870/* [R 11] Parity register #0 read */
846#define IGU_REG_IGU_PRTY_STS 0x13009c 871#define IGU_REG_IGU_PRTY_STS 0x13009c
872/* [RC 11] Parity register #0 read clear */
873#define IGU_REG_IGU_PRTY_STS_CLR 0x1300a0
847/* [R 4] Debug: int_handle_fsm */ 874/* [R 4] Debug: int_handle_fsm */
848#define IGU_REG_INT_HANDLE_FSM 0x130050 875#define IGU_REG_INT_HANDLE_FSM 0x130050
849#define IGU_REG_LEADING_EDGE_LATCH 0x130134 876#define IGU_REG_LEADING_EDGE_LATCH 0x130134
@@ -1501,6 +1528,8 @@
1501#define MISC_REG_MISC_PRTY_MASK 0xa398 1528#define MISC_REG_MISC_PRTY_MASK 0xa398
1502/* [R 1] Parity register #0 read */ 1529/* [R 1] Parity register #0 read */
1503#define MISC_REG_MISC_PRTY_STS 0xa38c 1530#define MISC_REG_MISC_PRTY_STS 0xa38c
1531/* [RC 1] Parity register #0 read clear */
1532#define MISC_REG_MISC_PRTY_STS_CLR 0xa390
1504#define MISC_REG_NIG_WOL_P0 0xa270 1533#define MISC_REG_NIG_WOL_P0 0xa270
1505#define MISC_REG_NIG_WOL_P1 0xa274 1534#define MISC_REG_NIG_WOL_P1 0xa274
1506/* [R 1] If set indicates that the pcie_rst_b was asserted without perst 1535/* [R 1] If set indicates that the pcie_rst_b was asserted without perst
@@ -2082,6 +2111,10 @@
2082#define PBF_REG_PBF_INT_MASK 0x1401d4 2111#define PBF_REG_PBF_INT_MASK 0x1401d4
2083/* [R 5] Interrupt register #0 read */ 2112/* [R 5] Interrupt register #0 read */
2084#define PBF_REG_PBF_INT_STS 0x1401c8 2113#define PBF_REG_PBF_INT_STS 0x1401c8
2114/* [RW 20] Parity mask register #0 read/write */
2115#define PBF_REG_PBF_PRTY_MASK 0x1401e4
2116/* [RC 20] Parity register #0 read clear */
2117#define PBF_REG_PBF_PRTY_STS_CLR 0x1401dc
2085#define PB_REG_CONTROL 0 2118#define PB_REG_CONTROL 0
2086/* [RW 2] Interrupt mask register #0 read/write */ 2119/* [RW 2] Interrupt mask register #0 read/write */
2087#define PB_REG_PB_INT_MASK 0x28 2120#define PB_REG_PB_INT_MASK 0x28
@@ -2091,6 +2124,8 @@
2091#define PB_REG_PB_PRTY_MASK 0x38 2124#define PB_REG_PB_PRTY_MASK 0x38
2092/* [R 4] Parity register #0 read */ 2125/* [R 4] Parity register #0 read */
2093#define PB_REG_PB_PRTY_STS 0x2c 2126#define PB_REG_PB_PRTY_STS 0x2c
2127/* [RC 4] Parity register #0 read clear */
2128#define PB_REG_PB_PRTY_STS_CLR 0x30
2094#define PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR (0x1<<0) 2129#define PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
2095#define PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW (0x1<<8) 2130#define PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW (0x1<<8)
2096#define PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR (0x1<<1) 2131#define PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR (0x1<<1)
@@ -2446,6 +2481,8 @@
2446#define PRS_REG_PRS_PRTY_MASK 0x401a4 2481#define PRS_REG_PRS_PRTY_MASK 0x401a4
2447/* [R 8] Parity register #0 read */ 2482/* [R 8] Parity register #0 read */
2448#define PRS_REG_PRS_PRTY_STS 0x40198 2483#define PRS_REG_PRS_PRTY_STS 0x40198
2484/* [RC 8] Parity register #0 read clear */
2485#define PRS_REG_PRS_PRTY_STS_CLR 0x4019c
2449/* [RW 8] Context region for pure acknowledge packets. Used in CFC load 2486/* [RW 8] Context region for pure acknowledge packets. Used in CFC load
2450 request message */ 2487 request message */
2451#define PRS_REG_PURE_REGIONS 0x40024 2488#define PRS_REG_PURE_REGIONS 0x40024
@@ -2599,6 +2636,9 @@
2599/* [R 32] Parity register #0 read */ 2636/* [R 32] Parity register #0 read */
2600#define PXP2_REG_PXP2_PRTY_STS_0 0x12057c 2637#define PXP2_REG_PXP2_PRTY_STS_0 0x12057c
2601#define PXP2_REG_PXP2_PRTY_STS_1 0x12058c 2638#define PXP2_REG_PXP2_PRTY_STS_1 0x12058c
2639/* [RC 32] Parity register #0 read clear */
2640#define PXP2_REG_PXP2_PRTY_STS_CLR_0 0x120580
2641#define PXP2_REG_PXP2_PRTY_STS_CLR_1 0x120590
2602/* [R 1] Debug only: The 'almost full' indication from each fifo (gives 2642/* [R 1] Debug only: The 'almost full' indication from each fifo (gives
2603 indication about backpressure) */ 2643 indication about backpressure) */
2604#define PXP2_REG_RD_ALMOST_FULL_0 0x120424 2644#define PXP2_REG_RD_ALMOST_FULL_0 0x120424
@@ -3001,6 +3041,8 @@
3001#define PXP_REG_PXP_PRTY_MASK 0x103094 3041#define PXP_REG_PXP_PRTY_MASK 0x103094
3002/* [R 26] Parity register #0 read */ 3042/* [R 26] Parity register #0 read */
3003#define PXP_REG_PXP_PRTY_STS 0x103088 3043#define PXP_REG_PXP_PRTY_STS 0x103088
3044/* [RC 27] Parity register #0 read clear */
3045#define PXP_REG_PXP_PRTY_STS_CLR 0x10308c
3004/* [RW 4] The activity counter initial increment value sent in the load 3046/* [RW 4] The activity counter initial increment value sent in the load
3005 request */ 3047 request */
3006#define QM_REG_ACTCTRINITVAL_0 0x168040 3048#define QM_REG_ACTCTRINITVAL_0 0x168040
@@ -3157,6 +3199,8 @@
3157#define QM_REG_QM_PRTY_MASK 0x168454 3199#define QM_REG_QM_PRTY_MASK 0x168454
3158/* [R 12] Parity register #0 read */ 3200/* [R 12] Parity register #0 read */
3159#define QM_REG_QM_PRTY_STS 0x168448 3201#define QM_REG_QM_PRTY_STS 0x168448
3202/* [RC 12] Parity register #0 read clear */
3203#define QM_REG_QM_PRTY_STS_CLR 0x16844c
3160/* [R 32] Current queues in pipeline: Queues from 32 to 63 */ 3204/* [R 32] Current queues in pipeline: Queues from 32 to 63 */
3161#define QM_REG_QSTATUS_HIGH 0x16802c 3205#define QM_REG_QSTATUS_HIGH 0x16802c
3162/* [R 32] Current queues in pipeline: Queues from 96 to 127 */ 3206/* [R 32] Current queues in pipeline: Queues from 96 to 127 */
@@ -3442,6 +3486,8 @@
3442#define QM_REG_WRRWEIGHTS_9 0x168848 3486#define QM_REG_WRRWEIGHTS_9 0x168848
3443/* [R 6] Keep the fill level of the fifo from write client 1 */ 3487/* [R 6] Keep the fill level of the fifo from write client 1 */
3444#define QM_REG_XQM_WRC_FIFOLVL 0x168000 3488#define QM_REG_XQM_WRC_FIFOLVL 0x168000
3489/* [W 1] resets the parity interrupt */
3490#define SEM_FAST_REG_PARITY_RST 0x18840
3445#define SRC_REG_COUNTFREE0 0x40500 3491#define SRC_REG_COUNTFREE0 0x40500
3446/* [RW 1] If clr the searcher is compatible to E1 A0 - support only two 3492/* [RW 1] If clr the searcher is compatible to E1 A0 - support only two
3447 ports. If set the searcher supports 8 functions. */ 3493 ports. If set the searcher supports 8 functions. */
@@ -3470,6 +3516,8 @@
3470#define SRC_REG_SRC_PRTY_MASK 0x404c8 3516#define SRC_REG_SRC_PRTY_MASK 0x404c8
3471/* [R 3] Parity register #0 read */ 3517/* [R 3] Parity register #0 read */
3472#define SRC_REG_SRC_PRTY_STS 0x404bc 3518#define SRC_REG_SRC_PRTY_STS 0x404bc
3519/* [RC 3] Parity register #0 read clear */
3520#define SRC_REG_SRC_PRTY_STS_CLR 0x404c0
3473/* [R 4] Used to read the value of the XX protection CAM occupancy counter. */ 3521/* [R 4] Used to read the value of the XX protection CAM occupancy counter. */
3474#define TCM_REG_CAM_OCCUP 0x5017c 3522#define TCM_REG_CAM_OCCUP 0x5017c
3475/* [RW 1] CDU AG read Interface enable. If 0 - the request input is 3523/* [RW 1] CDU AG read Interface enable. If 0 - the request input is
@@ -3596,8 +3644,12 @@
3596#define TCM_REG_TCM_INT_MASK 0x501dc 3644#define TCM_REG_TCM_INT_MASK 0x501dc
3597/* [R 11] Interrupt register #0 read */ 3645/* [R 11] Interrupt register #0 read */
3598#define TCM_REG_TCM_INT_STS 0x501d0 3646#define TCM_REG_TCM_INT_STS 0x501d0
3647/* [RW 27] Parity mask register #0 read/write */
3648#define TCM_REG_TCM_PRTY_MASK 0x501ec
3599/* [R 27] Parity register #0 read */ 3649/* [R 27] Parity register #0 read */
3600#define TCM_REG_TCM_PRTY_STS 0x501e0 3650#define TCM_REG_TCM_PRTY_STS 0x501e0
3651/* [RC 27] Parity register #0 read clear */
3652#define TCM_REG_TCM_PRTY_STS_CLR 0x501e4
3601/* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS 3653/* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS
3602 REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5). 3654 REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
3603 Is used to determine the number of the AG context REG-pairs written back; 3655 Is used to determine the number of the AG context REG-pairs written back;
@@ -3755,6 +3807,10 @@
3755#define TM_REG_TM_INT_MASK 0x1640fc 3807#define TM_REG_TM_INT_MASK 0x1640fc
3756/* [R 1] Interrupt register #0 read */ 3808/* [R 1] Interrupt register #0 read */
3757#define TM_REG_TM_INT_STS 0x1640f0 3809#define TM_REG_TM_INT_STS 0x1640f0
3810/* [RW 7] Parity mask register #0 read/write */
3811#define TM_REG_TM_PRTY_MASK 0x16410c
3812/* [RC 7] Parity register #0 read clear */
3813#define TM_REG_TM_PRTY_STS_CLR 0x164104
3758/* [RW 8] The event id for aggregated interrupt 0 */ 3814/* [RW 8] The event id for aggregated interrupt 0 */
3759#define TSDM_REG_AGG_INT_EVENT_0 0x42038 3815#define TSDM_REG_AGG_INT_EVENT_0 0x42038
3760#define TSDM_REG_AGG_INT_EVENT_1 0x4203c 3816#define TSDM_REG_AGG_INT_EVENT_1 0x4203c
@@ -3835,6 +3891,8 @@
3835#define TSDM_REG_TSDM_PRTY_MASK 0x422bc 3891#define TSDM_REG_TSDM_PRTY_MASK 0x422bc
3836/* [R 11] Parity register #0 read */ 3892/* [R 11] Parity register #0 read */
3837#define TSDM_REG_TSDM_PRTY_STS 0x422b0 3893#define TSDM_REG_TSDM_PRTY_STS 0x422b0
3894/* [RC 11] Parity register #0 read clear */
3895#define TSDM_REG_TSDM_PRTY_STS_CLR 0x422b4
3838/* [RW 5] The number of time_slots in the arbitration cycle */ 3896/* [RW 5] The number of time_slots in the arbitration cycle */
3839#define TSEM_REG_ARB_CYCLE_SIZE 0x180034 3897#define TSEM_REG_ARB_CYCLE_SIZE 0x180034
3840/* [RW 3] The source that is associated with arbitration element 0. Source 3898/* [RW 3] The source that is associated with arbitration element 0. Source
@@ -3914,6 +3972,9 @@
3914#define TSEM_REG_SLOW_EXT_STORE_EMPTY 0x1802a0 3972#define TSEM_REG_SLOW_EXT_STORE_EMPTY 0x1802a0
3915/* [RW 8] List of free threads. There is a bit per thread. */ 3973/* [RW 8] List of free threads. There is a bit per thread. */
3916#define TSEM_REG_THREADS_LIST 0x1802e4 3974#define TSEM_REG_THREADS_LIST 0x1802e4
3975/* [RC 32] Parity register #0 read clear */
3976#define TSEM_REG_TSEM_PRTY_STS_CLR_0 0x180118
3977#define TSEM_REG_TSEM_PRTY_STS_CLR_1 0x180128
3917/* [RW 3] The arbitration scheme of time_slot 0 */ 3978/* [RW 3] The arbitration scheme of time_slot 0 */
3918#define TSEM_REG_TS_0_AS 0x180038 3979#define TSEM_REG_TS_0_AS 0x180038
3919/* [RW 3] The arbitration scheme of time_slot 10 */ 3980/* [RW 3] The arbitration scheme of time_slot 10 */
@@ -4116,6 +4177,8 @@
4116#define UCM_REG_UCM_INT_STS 0xe01c8 4177#define UCM_REG_UCM_INT_STS 0xe01c8
4117/* [R 27] Parity register #0 read */ 4178/* [R 27] Parity register #0 read */
4118#define UCM_REG_UCM_PRTY_STS 0xe01d8 4179#define UCM_REG_UCM_PRTY_STS 0xe01d8
4180/* [RC 27] Parity register #0 read clear */
4181#define UCM_REG_UCM_PRTY_STS_CLR 0xe01dc
4119/* [RW 2] The size of AG context region 0 in REG-pairs. Designates the MS 4182/* [RW 2] The size of AG context region 0 in REG-pairs. Designates the MS
4120 REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5). 4183 REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
4121 Is used to determine the number of the AG context REG-pairs written back; 4184 Is used to determine the number of the AG context REG-pairs written back;
@@ -4292,6 +4355,8 @@
4292#define USDM_REG_USDM_PRTY_MASK 0xc42c0 4355#define USDM_REG_USDM_PRTY_MASK 0xc42c0
4293/* [R 11] Parity register #0 read */ 4356/* [R 11] Parity register #0 read */
4294#define USDM_REG_USDM_PRTY_STS 0xc42b4 4357#define USDM_REG_USDM_PRTY_STS 0xc42b4
4358/* [RC 11] Parity register #0 read clear */
4359#define USDM_REG_USDM_PRTY_STS_CLR 0xc42b8
4295/* [RW 5] The number of time_slots in the arbitration cycle */ 4360/* [RW 5] The number of time_slots in the arbitration cycle */
4296#define USEM_REG_ARB_CYCLE_SIZE 0x300034 4361#define USEM_REG_ARB_CYCLE_SIZE 0x300034
4297/* [RW 3] The source that is associated with arbitration element 0. Source 4362/* [RW 3] The source that is associated with arbitration element 0. Source
@@ -4421,6 +4486,9 @@
4421/* [R 32] Parity register #0 read */ 4486/* [R 32] Parity register #0 read */
4422#define USEM_REG_USEM_PRTY_STS_0 0x300124 4487#define USEM_REG_USEM_PRTY_STS_0 0x300124
4423#define USEM_REG_USEM_PRTY_STS_1 0x300134 4488#define USEM_REG_USEM_PRTY_STS_1 0x300134
4489/* [RC 32] Parity register #0 read clear */
4490#define USEM_REG_USEM_PRTY_STS_CLR_0 0x300128
4491#define USEM_REG_USEM_PRTY_STS_CLR_1 0x300138
4424/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64 4492/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64
4425 * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */ 4493 * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */
4426#define USEM_REG_VFPF_ERR_NUM 0x300380 4494#define USEM_REG_VFPF_ERR_NUM 0x300380
@@ -4797,6 +4865,8 @@
4797#define XSDM_REG_XSDM_PRTY_MASK 0x1662bc 4865#define XSDM_REG_XSDM_PRTY_MASK 0x1662bc
4798/* [R 11] Parity register #0 read */ 4866/* [R 11] Parity register #0 read */
4799#define XSDM_REG_XSDM_PRTY_STS 0x1662b0 4867#define XSDM_REG_XSDM_PRTY_STS 0x1662b0
4868/* [RC 11] Parity register #0 read clear */
4869#define XSDM_REG_XSDM_PRTY_STS_CLR 0x1662b4
4800/* [RW 5] The number of time_slots in the arbitration cycle */ 4870/* [RW 5] The number of time_slots in the arbitration cycle */
4801#define XSEM_REG_ARB_CYCLE_SIZE 0x280034 4871#define XSEM_REG_ARB_CYCLE_SIZE 0x280034
4802/* [RW 3] The source that is associated with arbitration element 0. Source 4872/* [RW 3] The source that is associated with arbitration element 0. Source
@@ -4929,6 +4999,9 @@
4929/* [R 32] Parity register #0 read */ 4999/* [R 32] Parity register #0 read */
4930#define XSEM_REG_XSEM_PRTY_STS_0 0x280124 5000#define XSEM_REG_XSEM_PRTY_STS_0 0x280124
4931#define XSEM_REG_XSEM_PRTY_STS_1 0x280134 5001#define XSEM_REG_XSEM_PRTY_STS_1 0x280134
5002/* [RC 32] Parity register #0 read clear */
5003#define XSEM_REG_XSEM_PRTY_STS_CLR_0 0x280128
5004#define XSEM_REG_XSEM_PRTY_STS_CLR_1 0x280138
4932#define MCPR_NVM_ACCESS_ENABLE_EN (1L<<0) 5005#define MCPR_NVM_ACCESS_ENABLE_EN (1L<<0)
4933#define MCPR_NVM_ACCESS_ENABLE_WR_EN (1L<<1) 5006#define MCPR_NVM_ACCESS_ENABLE_WR_EN (1L<<1)
4934#define MCPR_NVM_ADDR_NVM_ADDR_VALUE (0xffffffL<<0) 5007#define MCPR_NVM_ADDR_NVM_ADDR_VALUE (0xffffffL<<0)
@@ -6316,3 +6389,4 @@ static inline u8 calc_crc8(u32 data, u8 crc)
6316} 6389}
6317 6390
6318 6391
6392#endif /* BNX2X_REG_H */
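A note on the pattern above: for each block's read-only *_PRTY_STS register, the patch adds a *_PRTY_STS_CLR companion marked [RC], read-clear. Reading it returns the latched parity bits and clears them in hardware, so no write-back is needed to acknowledge them. A minimal, hedged sketch of a consumer, assuming the driver's usual REG_RD() accessor and BNX2X_ERR() logging (the attention-decoding context around a real handler is omitted):

        static void bnx2x_ack_dmae_parity(struct bnx2x *bp)
        {
                /* An RC register clears its latched bits as a side effect
                 * of the read, so no write-back is needed to ack them.
                 */
                u32 prty = REG_RD(bp, DMAE_REG_DMAE_PRTY_STS_CLR);

                if (prty)
                        BNX2X_ERR("DMAE parity status 0x%x (cleared on read)\n",
                                  prty);
        }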
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
index 6e4d9b144cc4..bda60d590fa8 100644
--- a/drivers/net/bnx2x/bnx2x_stats.c
+++ b/drivers/net/bnx2x/bnx2x_stats.c
@@ -158,6 +158,11 @@ static void bnx2x_storm_stats_post(struct bnx2x *bp)
158 158
159 spin_lock_bh(&bp->stats_lock); 159 spin_lock_bh(&bp->stats_lock);
160 160
161 if (bp->stats_pending) {
162 spin_unlock_bh(&bp->stats_lock);
163 return;
164 }
165
161 ramrod_data.drv_counter = bp->stats_counter++; 166 ramrod_data.drv_counter = bp->stats_counter++;
162 ramrod_data.collect_port = bp->port.pmf ? 1 : 0; 167 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
163 for_each_eth_queue(bp, i) 168 for_each_eth_queue(bp, i)
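The stats_pending check added here closes a race: posting a new statistics ramrod while a previous one is still outstanding would hand firmware a buffer it already owns. The test must sit under stats_lock, the same lock the completion path takes before clearing the flag, so the test stays atomic with the post. A hedged sketch of the shape; the helper name and the elided ramrod build are invented for illustration:

        static void bnx2x_stats_post_once(struct bnx2x *bp)
        {
                spin_lock_bh(&bp->stats_lock);

                if (bp->stats_pending) {        /* previous ramrod still out */
                        spin_unlock_bh(&bp->stats_lock);
                        return;
                }

                /* ... build ramrod_data and issue it here, still under the
                 * lock, so the test above stays atomic with the post ...
                 */
                bp->stats_pending = 1;

                spin_unlock_bh(&bp->stats_lock);
        }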
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c
index 3c403f895750..56166ae2059f 100644
--- a/drivers/net/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/cxgb4vf/cxgb4vf_main.c
@@ -749,13 +749,19 @@ static int cxgb4vf_open(struct net_device *dev)
749 netif_set_real_num_tx_queues(dev, pi->nqsets); 749 netif_set_real_num_tx_queues(dev, pi->nqsets);
750 err = netif_set_real_num_rx_queues(dev, pi->nqsets); 750 err = netif_set_real_num_rx_queues(dev, pi->nqsets);
751 if (err) 751 if (err)
752 return err; 752 goto err_unwind;
753 set_bit(pi->port_id, &adapter->open_device_map);
754 err = link_start(dev); 753 err = link_start(dev);
755 if (err) 754 if (err)
756 return err; 755 goto err_unwind;
756
757 netif_tx_start_all_queues(dev); 757 netif_tx_start_all_queues(dev);
758 set_bit(pi->port_id, &adapter->open_device_map);
758 return 0; 759 return 0;
760
761err_unwind:
762 if (adapter->open_device_map == 0)
763 adapter_down(adapter);
764 return err;
759} 765}
760 766
761/* 767/*
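Two fixes land in cxgb4vf_open() above: failures now funnel through a single err_unwind label instead of returning with the adapter half up, and the port's bit in open_device_map is set only after link_start() succeeds, so the map never advertises a dead port. The idiom reduced to a hedged sketch; struct port, setup_queues() and start_link() are illustrative placeholders:

        static int port_open(struct port *p)
        {
                int err;

                err = setup_queues(p);          /* any step that can fail... */
                if (err)
                        goto err_unwind;

                err = start_link(p);            /* ...jumps to the unwind */
                if (err)
                        goto err_unwind;

                /* Claim shared state only once nothing can fail anymore. */
                set_bit(p->id, &p->adapter->open_device_map);
                return 0;

        err_unwind:
                /* Tear the adapter down only if no other port holds it. */
                if (p->adapter->open_device_map == 0)
                        adapter_down(p->adapter);
                return err;
        }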
@@ -764,13 +770,12 @@ static int cxgb4vf_open(struct net_device *dev)
764 */ 770 */
765static int cxgb4vf_stop(struct net_device *dev) 771static int cxgb4vf_stop(struct net_device *dev)
766{ 772{
767 int ret;
768 struct port_info *pi = netdev_priv(dev); 773 struct port_info *pi = netdev_priv(dev);
769 struct adapter *adapter = pi->adapter; 774 struct adapter *adapter = pi->adapter;
770 775
771 netif_tx_stop_all_queues(dev); 776 netif_tx_stop_all_queues(dev);
772 netif_carrier_off(dev); 777 netif_carrier_off(dev);
773 ret = t4vf_enable_vi(adapter, pi->viid, false, false); 778 t4vf_enable_vi(adapter, pi->viid, false, false);
774 pi->link_cfg.link_ok = 0; 779 pi->link_cfg.link_ok = 0;
775 780
776 clear_bit(pi->port_id, &adapter->open_device_map); 781 clear_bit(pi->port_id, &adapter->open_device_map);
diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c
index e4bec78c8e3f..0f51c80475ce 100644
--- a/drivers/net/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/cxgb4vf/t4vf_hw.c
@@ -147,9 +147,20 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
147 /* 147 /*
148 * Write the command array into the Mailbox Data register array and 148 * Write the command array into the Mailbox Data register array and
149 * transfer ownership of the mailbox to the firmware. 149 * transfer ownership of the mailbox to the firmware.
150 *
151 * For the VFs, the Mailbox Data "registers" are actually backed by
152 * T4's "MA" interface rather than PL Registers (as is the case for
153 * the PFs). Because these are in different coherency domains, the
154 * write to the VF's PL-register-backed Mailbox Control can race in
155 * front of the writes to the MA-backed VF Mailbox Data "registers".
156 * So we need to do a read-back on at least one byte of the VF Mailbox
157 * Data registers before doing the write to the VF Mailbox Control
158 * register.
150 */ 159 */
151 for (i = 0, p = cmd; i < size; i += 8) 160 for (i = 0, p = cmd; i < size; i += 8)
152 t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++)); 161 t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++));
162 t4_read_reg(adapter, mbox_data); /* flush write */
163
153 t4_write_reg(adapter, mbox_ctl, 164 t4_write_reg(adapter, mbox_ctl,
154 MBMSGVALID | MBOWNER(MBOX_OWNER_FW)); 165 MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
155 t4_read_reg(adapter, mbox_ctl); /* flush write */ 166 t4_read_reg(adapter, mbox_ctl); /* flush write */
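The long comment is the heart of this fix: the VF Mailbox Data registers are MA-backed while Mailbox Control is a PL register, and posted writes into different coherency domains are not ordered against each other, so the doorbell could overtake the payload. Reading back any part of the data region forces the data writes to complete first. The same pattern as a generic MMIO sketch, assuming a 64-bit platform for writeq()/readq(); names are illustrative:

        #include <linux/io.h>

        static void post_mbox(void __iomem *data, const u64 *cmd, int words,
                              void __iomem *ctl, u32 doorbell)
        {
                int i;

                for (i = 0; i < words; i++)
                        writeq(cmd[i], data + i * 8);

                readq(data);            /* order payload ahead of doorbell */
                writel(doorbell, ctl);  /* hand the mailbox to the firmware */
                readl(ctl);             /* flush the posted doorbell write */
        }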
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 77d08e697b74..aed223b1b897 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -130,10 +130,15 @@ static s32 e1000_set_phy_type(struct e1000_hw *hw)
130 if (hw->mac_type == e1000_82541 || 130 if (hw->mac_type == e1000_82541 ||
131 hw->mac_type == e1000_82541_rev_2 || 131 hw->mac_type == e1000_82541_rev_2 ||
132 hw->mac_type == e1000_82547 || 132 hw->mac_type == e1000_82547 ||
133 hw->mac_type == e1000_82547_rev_2) { 133 hw->mac_type == e1000_82547_rev_2)
134 hw->phy_type = e1000_phy_igp; 134 hw->phy_type = e1000_phy_igp;
135 break; 135 break;
136 } 136 case RTL8211B_PHY_ID:
137 hw->phy_type = e1000_phy_8211;
138 break;
139 case RTL8201N_PHY_ID:
140 hw->phy_type = e1000_phy_8201;
141 break;
137 default: 142 default:
138 /* Should never have loaded on this device */ 143 /* Should never have loaded on this device */
139 hw->phy_type = e1000_phy_undefined; 144 hw->phy_type = e1000_phy_undefined;
@@ -318,6 +323,9 @@ s32 e1000_set_mac_type(struct e1000_hw *hw)
318 case E1000_DEV_ID_82547GI: 323 case E1000_DEV_ID_82547GI:
319 hw->mac_type = e1000_82547_rev_2; 324 hw->mac_type = e1000_82547_rev_2;
320 break; 325 break;
326 case E1000_DEV_ID_INTEL_CE4100_GBE:
327 hw->mac_type = e1000_ce4100;
328 break;
321 default: 329 default:
322 /* Should never have loaded on this device */ 330 /* Should never have loaded on this device */
323 return -E1000_ERR_MAC_TYPE; 331 return -E1000_ERR_MAC_TYPE;
@@ -372,6 +380,9 @@ void e1000_set_media_type(struct e1000_hw *hw)
372 case e1000_82542_rev2_1: 380 case e1000_82542_rev2_1:
373 hw->media_type = e1000_media_type_fiber; 381 hw->media_type = e1000_media_type_fiber;
374 break; 382 break;
383 case e1000_ce4100:
384 hw->media_type = e1000_media_type_copper;
385 break;
375 default: 386 default:
376 status = er32(STATUS); 387 status = er32(STATUS);
377 if (status & E1000_STATUS_TBIMODE) { 388 if (status & E1000_STATUS_TBIMODE) {
@@ -460,6 +471,7 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
460 /* Reset is performed on a shadow of the control register */ 471 /* Reset is performed on a shadow of the control register */
461 ew32(CTRL_DUP, (ctrl | E1000_CTRL_RST)); 472 ew32(CTRL_DUP, (ctrl | E1000_CTRL_RST));
462 break; 473 break;
474 case e1000_ce4100:
463 default: 475 default:
464 ew32(CTRL, (ctrl | E1000_CTRL_RST)); 476 ew32(CTRL, (ctrl | E1000_CTRL_RST));
465 break; 477 break;
@@ -952,6 +964,67 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
952} 964}
953 965
954/** 966/**
967 * e1000_copper_link_rtl_setup - Copper link setup for e1000_phy_rtl series.
968 * @hw: Struct containing variables accessed by shared code
969 *
970 * Commits changes to PHY configuration by calling e1000_phy_reset().
971 */
972static s32 e1000_copper_link_rtl_setup(struct e1000_hw *hw)
973{
974 s32 ret_val;
975
976 /* SW reset the PHY so all changes take effect */
977 ret_val = e1000_phy_reset(hw);
978 if (ret_val) {
979 e_dbg("Error Resetting the PHY\n");
980 return ret_val;
981 }
982
983 return E1000_SUCCESS;
984}
985
986static s32 gbe_dhg_phy_setup(struct e1000_hw *hw)
987{
988 s32 ret_val;
989 u32 ctrl_aux;
990
991 switch (hw->phy_type) {
992 case e1000_phy_8211:
993 ret_val = e1000_copper_link_rtl_setup(hw);
994 if (ret_val) {
995 e_dbg("e1000_copper_link_rtl_setup failed!\n");
996 return ret_val;
997 }
998 break;
999 case e1000_phy_8201:
1000 /* Set RMII mode */
1001 ctrl_aux = er32(CTL_AUX);
1002 ctrl_aux |= E1000_CTL_AUX_RMII;
1003 ew32(CTL_AUX, ctrl_aux);
1004 E1000_WRITE_FLUSH();
1005
1006 /* Disable the J/K bits required for receive */
1007 ctrl_aux = er32(CTL_AUX);
1008 ctrl_aux |= 0x4;
1009 ctrl_aux &= ~0x2;
1010 ew32(CTL_AUX, ctrl_aux);
1011 E1000_WRITE_FLUSH();
1012 ret_val = e1000_copper_link_rtl_setup(hw);
1013
1014 if (ret_val) {
1015 e_dbg("e1000_copper_link_rtl_setup failed!\n");
1016 return ret_val;
1017 }
1018 break;
1019 default:
1020 e_dbg("Error Resetting the PHY\n");
1021 return E1000_ERR_PHY_TYPE;
1022 }
1023
1024 return E1000_SUCCESS;
1025}
1026
1027/**
955 * e1000_copper_link_preconfig - early configuration for copper 1028 * e1000_copper_link_preconfig - early configuration for copper
956 * @hw: Struct containing variables accessed by shared code 1029 * @hw: Struct containing variables accessed by shared code
957 * 1030 *
@@ -1286,6 +1359,10 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
1286 if (hw->autoneg_advertised == 0) 1359 if (hw->autoneg_advertised == 0)
1287 hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; 1360 hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
1288 1361
1362 /* IFE/RTL8201N PHY only supports 10/100 */
1363 if (hw->phy_type == e1000_phy_8201)
1364 hw->autoneg_advertised &= AUTONEG_ADVERTISE_10_100_ALL;
1365
1289 e_dbg("Reconfiguring auto-neg advertisement params\n"); 1366 e_dbg("Reconfiguring auto-neg advertisement params\n");
1290 ret_val = e1000_phy_setup_autoneg(hw); 1367 ret_val = e1000_phy_setup_autoneg(hw);
1291 if (ret_val) { 1368 if (ret_val) {
@@ -1341,7 +1418,7 @@ static s32 e1000_copper_link_postconfig(struct e1000_hw *hw)
1341 s32 ret_val; 1418 s32 ret_val;
1342 e_dbg("e1000_copper_link_postconfig"); 1419 e_dbg("e1000_copper_link_postconfig");
1343 1420
1344 if (hw->mac_type >= e1000_82544) { 1421 if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) {
1345 e1000_config_collision_dist(hw); 1422 e1000_config_collision_dist(hw);
1346 } else { 1423 } else {
1347 ret_val = e1000_config_mac_to_phy(hw); 1424 ret_val = e1000_config_mac_to_phy(hw);
@@ -1395,6 +1472,12 @@ static s32 e1000_setup_copper_link(struct e1000_hw *hw)
1395 ret_val = e1000_copper_link_mgp_setup(hw); 1472 ret_val = e1000_copper_link_mgp_setup(hw);
1396 if (ret_val) 1473 if (ret_val)
1397 return ret_val; 1474 return ret_val;
1475 } else {
1476 ret_val = gbe_dhg_phy_setup(hw);
1477 if (ret_val) {
1478 e_dbg("gbe_dhg_phy_setup failed!\n");
1479 return ret_val;
1480 }
1398 } 1481 }
1399 1482
1400 if (hw->autoneg) { 1483 if (hw->autoneg) {
@@ -1461,10 +1544,11 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
1461 return ret_val; 1544 return ret_val;
1462 1545
1463 /* Read the MII 1000Base-T Control Register (Address 9). */ 1546 /* Read the MII 1000Base-T Control Register (Address 9). */
1464 ret_val = 1547 ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
1465 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
1466 if (ret_val) 1548 if (ret_val)
1467 return ret_val; 1549 return ret_val;
1550 else if (hw->phy_type == e1000_phy_8201)
1551 mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK;
1468 1552
1469 /* Need to parse both autoneg_advertised and fc and set up 1553 /* Need to parse both autoneg_advertised and fc and set up
1470 * the appropriate PHY registers. First we will parse for 1554 * the appropriate PHY registers. First we will parse for
@@ -1577,9 +1661,14 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
1577 1661
1578 e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); 1662 e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
1579 1663
1580 ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg); 1664 if (hw->phy_type == e1000_phy_8201) {
1581 if (ret_val) 1665 mii_1000t_ctrl_reg = 0;
1582 return ret_val; 1666 } else {
1667 ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL,
1668 mii_1000t_ctrl_reg);
1669 if (ret_val)
1670 return ret_val;
1671 }
1583 1672
1584 return E1000_SUCCESS; 1673 return E1000_SUCCESS;
1585} 1674}
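Taken together, the e1000_phy_setup_autoneg() hunks handle a gigabit-less PHY: the advertisement is pre-filtered to 10/100, the 1000BASE-T control bits read back from register 9 are cleared, and the final write to PHY_1000T_CTRL is skipped because the RTL8201N has no such register. Consolidated into one hedged sketch using the driver's own constants (in the diff the masking actually happens earlier, in e1000_copper_link_autoneg()):

        /* RTL8201N is a 10/100 PHY: never advertise gigabit... */
        if (hw->phy_type == e1000_phy_8201) {
                hw->autoneg_advertised &= AUTONEG_ADVERTISE_10_100_ALL;
                mii_1000t_ctrl_reg = 0;         /* nothing to program */
        } else {
                /* ...otherwise program 1000BASE-T advertisement as usual. */
                ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL,
                                              mii_1000t_ctrl_reg);
                if (ret_val)
                        return ret_val;
        }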
@@ -1860,7 +1949,7 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
1860 1949
1861 /* 82544 or newer MAC, Auto Speed Detection takes care of 1950 /* 82544 or newer MAC, Auto Speed Detection takes care of
1862 * MAC speed/duplex configuration.*/ 1951 * MAC speed/duplex configuration.*/
1863 if (hw->mac_type >= e1000_82544) 1952 if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100))
1864 return E1000_SUCCESS; 1953 return E1000_SUCCESS;
1865 1954
1866 /* Read the Device Control Register and set the bits to Force Speed 1955 /* Read the Device Control Register and set the bits to Force Speed
@@ -1870,27 +1959,49 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
1870 ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); 1959 ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
1871 ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS); 1960 ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS);
1872 1961
1873 /* Set up duplex in the Device Control and Transmit Control 1962 switch (hw->phy_type) {
1874 * registers depending on negotiated values. 1963 case e1000_phy_8201:
1875 */ 1964 ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
1876 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); 1965 if (ret_val)
1877 if (ret_val) 1966 return ret_val;
1878 return ret_val;
1879 1967
1880 if (phy_data & M88E1000_PSSR_DPLX) 1968 if (phy_data & RTL_PHY_CTRL_FD)
1881 ctrl |= E1000_CTRL_FD; 1969 ctrl |= E1000_CTRL_FD;
1882 else 1970 else
1883 ctrl &= ~E1000_CTRL_FD; 1971 ctrl &= ~E1000_CTRL_FD;
1884 1972
1885 e1000_config_collision_dist(hw); 1973 if (phy_data & RTL_PHY_CTRL_SPD_100)
1974 ctrl |= E1000_CTRL_SPD_100;
1975 else
1976 ctrl |= E1000_CTRL_SPD_10;
1886 1977
1887 /* Set up speed in the Device Control register depending on 1978 e1000_config_collision_dist(hw);
1888 * negotiated values. 1979 break;
1889 */ 1980 default:
1890 if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) 1981 /* Set up duplex in the Device Control and Transmit Control
1891 ctrl |= E1000_CTRL_SPD_1000; 1982 * registers depending on negotiated values.
1892 else if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS) 1983 */
1893 ctrl |= E1000_CTRL_SPD_100; 1984 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
1985 &phy_data);
1986 if (ret_val)
1987 return ret_val;
1988
1989 if (phy_data & M88E1000_PSSR_DPLX)
1990 ctrl |= E1000_CTRL_FD;
1991 else
1992 ctrl &= ~E1000_CTRL_FD;
1993
1994 e1000_config_collision_dist(hw);
1995
1996 /* Set up speed in the Device Control register depending on
1997 * negotiated values.
1998 */
1999 if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
2000 ctrl |= E1000_CTRL_SPD_1000;
2001 else if ((phy_data & M88E1000_PSSR_SPEED) ==
2002 M88E1000_PSSR_100MBS)
2003 ctrl |= E1000_CTRL_SPD_100;
2004 }
1894 2005
1895 /* Write the configured values back to the Device Control Reg. */ 2006 /* Write the configured values back to the Device Control Reg. */
1896 ew32(CTRL, ctrl); 2007 ew32(CTRL, ctrl);
@@ -2401,7 +2512,8 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
2401 * speed/duplex on the MAC to the current PHY speed/duplex 2512 * speed/duplex on the MAC to the current PHY speed/duplex
2402 * settings. 2513 * settings.
2403 */ 2514 */
2404 if (hw->mac_type >= e1000_82544) 2515 if ((hw->mac_type >= e1000_82544) &&
2516 (hw->mac_type != e1000_ce4100))
2405 e1000_config_collision_dist(hw); 2517 e1000_config_collision_dist(hw);
2406 else { 2518 else {
2407 ret_val = e1000_config_mac_to_phy(hw); 2519 ret_val = e1000_config_mac_to_phy(hw);
@@ -2738,7 +2850,7 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
2738{ 2850{
2739 u32 i; 2851 u32 i;
2740 u32 mdic = 0; 2852 u32 mdic = 0;
2741 const u32 phy_addr = 1; 2853 const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1;
2742 2854
2743 e_dbg("e1000_read_phy_reg_ex"); 2855 e_dbg("e1000_read_phy_reg_ex");
2744 2856
@@ -2752,28 +2864,61 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
2752 * Control register. The MAC will take care of interfacing with the 2864 * Control register. The MAC will take care of interfacing with the
2753 * PHY to retrieve the desired data. 2865 * PHY to retrieve the desired data.
2754 */ 2866 */
2755 mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) | 2867 if (hw->mac_type == e1000_ce4100) {
2756 (phy_addr << E1000_MDIC_PHY_SHIFT) | 2868 mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
2757 (E1000_MDIC_OP_READ)); 2869 (phy_addr << E1000_MDIC_PHY_SHIFT) |
2870 (INTEL_CE_GBE_MDIC_OP_READ) |
2871 (INTEL_CE_GBE_MDIC_GO));
2758 2872
2759 ew32(MDIC, mdic); 2873 writel(mdic, E1000_MDIO_CMD);
2760 2874
2761 /* Poll the ready bit to see if the MDI read completed */ 2875 /* Poll the ready bit to see if the MDI read
2762 for (i = 0; i < 64; i++) { 2876 * completed
2763 udelay(50); 2877 */
2764 mdic = er32(MDIC); 2878 for (i = 0; i < 64; i++) {
2765 if (mdic & E1000_MDIC_READY) 2879 udelay(50);
2766 break; 2880 mdic = readl(E1000_MDIO_CMD);
2767 } 2881 if (!(mdic & INTEL_CE_GBE_MDIC_GO))
2768 if (!(mdic & E1000_MDIC_READY)) { 2882 break;
2769 e_dbg("MDI Read did not complete\n"); 2883 }
2770 return -E1000_ERR_PHY; 2884
2771 } 2885 if (mdic & INTEL_CE_GBE_MDIC_GO) {
2772 if (mdic & E1000_MDIC_ERROR) { 2886 e_dbg("MDI Read did not complete\n");
2773 e_dbg("MDI Error\n"); 2887 return -E1000_ERR_PHY;
2774 return -E1000_ERR_PHY; 2888 }
2889
2890 mdic = readl(E1000_MDIO_STS);
2891 if (mdic & INTEL_CE_GBE_MDIC_READ_ERROR) {
2892 e_dbg("MDI Read Error\n");
2893 return -E1000_ERR_PHY;
2894 }
2895 *phy_data = (u16) mdic;
2896 } else {
2897 mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
2898 (phy_addr << E1000_MDIC_PHY_SHIFT) |
2899 (E1000_MDIC_OP_READ));
2900
2901 ew32(MDIC, mdic);
2902
2903 /* Poll the ready bit to see if the MDI read
2904 * completed
2905 */
2906 for (i = 0; i < 64; i++) {
2907 udelay(50);
2908 mdic = er32(MDIC);
2909 if (mdic & E1000_MDIC_READY)
2910 break;
2911 }
2912 if (!(mdic & E1000_MDIC_READY)) {
2913 e_dbg("MDI Read did not complete\n");
2914 return -E1000_ERR_PHY;
2915 }
2916 if (mdic & E1000_MDIC_ERROR) {
2917 e_dbg("MDI Error\n");
2918 return -E1000_ERR_PHY;
2919 }
2920 *phy_data = (u16) mdic;
2775 } 2921 }
2776 *phy_data = (u16) mdic;
2777 } else { 2922 } else {
2778 /* We must first send a preamble through the MDIO pin to signal the 2923 /* We must first send a preamble through the MDIO pin to signal the
2779 * beginning of an MII instruction. This is done by sending 32 2924 * beginning of an MII instruction. This is done by sending 32
@@ -2840,7 +2985,7 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
2840{ 2985{
2841 u32 i; 2986 u32 i;
2842 u32 mdic = 0; 2987 u32 mdic = 0;
2843 const u32 phy_addr = 1; 2988 const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1;
2844 2989
2845 e_dbg("e1000_write_phy_reg_ex"); 2990 e_dbg("e1000_write_phy_reg_ex");
2846 2991
@@ -2850,27 +2995,54 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
2850 } 2995 }
2851 2996
2852 if (hw->mac_type > e1000_82543) { 2997 if (hw->mac_type > e1000_82543) {
2853 /* Set up Op-code, Phy Address, register address, and data intended 2998 /* Set up Op-code, Phy Address, register address, and data
2854 * for the PHY register in the MDI Control register. The MAC will take 2999 * intended for the PHY register in the MDI Control register.
2855 * care of interfacing with the PHY to send the desired data. 3000 * The MAC will take care of interfacing with the PHY to send
3001 * the desired data.
2856 */ 3002 */
2857 mdic = (((u32) phy_data) | 3003 if (hw->mac_type == e1000_ce4100) {
2858 (reg_addr << E1000_MDIC_REG_SHIFT) | 3004 mdic = (((u32) phy_data) |
2859 (phy_addr << E1000_MDIC_PHY_SHIFT) | 3005 (reg_addr << E1000_MDIC_REG_SHIFT) |
2860 (E1000_MDIC_OP_WRITE)); 3006 (phy_addr << E1000_MDIC_PHY_SHIFT) |
3007 (INTEL_CE_GBE_MDIC_OP_WRITE) |
3008 (INTEL_CE_GBE_MDIC_GO));
2861 3009
2862 ew32(MDIC, mdic); 3010 writel(mdic, E1000_MDIO_CMD);
2863 3011
2864 /* Poll the ready bit to see if the MDI read completed */ 3012 /* Poll the ready bit to see if the MDI read
2865 for (i = 0; i < 641; i++) { 3013 * completed
2866 udelay(5); 3014 */
2867 mdic = er32(MDIC); 3015 for (i = 0; i < 640; i++) {
2868 if (mdic & E1000_MDIC_READY) 3016 udelay(5);
2869 break; 3017 mdic = readl(E1000_MDIO_CMD);
2870 } 3018 if (!(mdic & INTEL_CE_GBE_MDIC_GO))
2871 if (!(mdic & E1000_MDIC_READY)) { 3019 break;
2872 e_dbg("MDI Write did not complete\n"); 3020 }
2873 return -E1000_ERR_PHY; 3021 if (mdic & INTEL_CE_GBE_MDIC_GO) {
3022 e_dbg("MDI Write did not complete\n");
3023 return -E1000_ERR_PHY;
3024 }
3025 } else {
3026 mdic = (((u32) phy_data) |
3027 (reg_addr << E1000_MDIC_REG_SHIFT) |
3028 (phy_addr << E1000_MDIC_PHY_SHIFT) |
3029 (E1000_MDIC_OP_WRITE));
3030
3031 ew32(MDIC, mdic);
3032
3033 /* Poll the ready bit to see if the MDI read
3034 * completed
3035 */
3036 for (i = 0; i < 641; i++) {
3037 udelay(5);
3038 mdic = er32(MDIC);
3039 if (mdic & E1000_MDIC_READY)
3040 break;
3041 }
3042 if (!(mdic & E1000_MDIC_READY)) {
3043 e_dbg("MDI Write did not complete\n");
3044 return -E1000_ERR_PHY;
3045 }
2874 } 3046 }
2875 } else { 3047 } else {
2876 /* We'll need to use the SW defined pins to shift the write command 3048 /* We'll need to use the SW defined pins to shift the write command
@@ -3048,6 +3220,11 @@ static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
3048 if (hw->phy_id == M88E1011_I_PHY_ID) 3220 if (hw->phy_id == M88E1011_I_PHY_ID)
3049 match = true; 3221 match = true;
3050 break; 3222 break;
3223 case e1000_ce4100:
3224 if ((hw->phy_id == RTL8211B_PHY_ID) ||
3225 (hw->phy_id == RTL8201N_PHY_ID))
3226 match = true;
3227 break;
3051 case e1000_82541: 3228 case e1000_82541:
3052 case e1000_82541_rev_2: 3229 case e1000_82541_rev_2:
3053 case e1000_82547: 3230 case e1000_82547:
@@ -3291,6 +3468,9 @@ s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info)
3291 3468
3292 if (hw->phy_type == e1000_phy_igp) 3469 if (hw->phy_type == e1000_phy_igp)
3293 return e1000_phy_igp_get_info(hw, phy_info); 3470 return e1000_phy_igp_get_info(hw, phy_info);
3471 else if ((hw->phy_type == e1000_phy_8211) ||
3472 (hw->phy_type == e1000_phy_8201))
3473 return E1000_SUCCESS;
3294 else 3474 else
3295 return e1000_phy_m88_get_info(hw, phy_info); 3475 return e1000_phy_m88_get_info(hw, phy_info);
3296} 3476}
@@ -3742,6 +3922,12 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
3742 3922
3743 e_dbg("e1000_read_eeprom"); 3923 e_dbg("e1000_read_eeprom");
3744 3924
3925 if (hw->mac_type == e1000_ce4100) {
3926 GBE_CONFIG_FLASH_READ(GBE_CONFIG_BASE_VIRT, offset, words,
3927 data);
3928 return E1000_SUCCESS;
3929 }
3930
3745 /* If eeprom is not yet detected, do so now */ 3931 /* If eeprom is not yet detected, do so now */
3746 if (eeprom->word_size == 0) 3932 if (eeprom->word_size == 0)
3747 e1000_init_eeprom_params(hw); 3933 e1000_init_eeprom_params(hw);
@@ -3904,6 +4090,12 @@ static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
3904 4090
3905 e_dbg("e1000_write_eeprom"); 4091 e_dbg("e1000_write_eeprom");
3906 4092
4093 if (hw->mac_type == e1000_ce4100) {
4094 GBE_CONFIG_FLASH_WRITE(GBE_CONFIG_BASE_VIRT, offset, words,
4095 data);
4096 return E1000_SUCCESS;
4097 }
4098
3907 /* If eeprom is not yet detected, do so now */ 4099 /* If eeprom is not yet detected, do so now */
3908 if (eeprom->word_size == 0) 4100 if (eeprom->word_size == 0)
3909 e1000_init_eeprom_params(hw); 4101 e1000_init_eeprom_params(hw);
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index ecd9f6c6bcd5..f5514a0d5be6 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -52,6 +52,7 @@ typedef enum {
52 e1000_82545, 52 e1000_82545,
53 e1000_82545_rev_3, 53 e1000_82545_rev_3,
54 e1000_82546, 54 e1000_82546,
55 e1000_ce4100,
55 e1000_82546_rev_3, 56 e1000_82546_rev_3,
56 e1000_82541, 57 e1000_82541,
57 e1000_82541_rev_2, 58 e1000_82541_rev_2,
@@ -209,9 +210,11 @@ typedef enum {
209} e1000_1000t_rx_status; 210} e1000_1000t_rx_status;
210 211
211typedef enum { 212typedef enum {
212 e1000_phy_m88 = 0, 213 e1000_phy_m88 = 0,
213 e1000_phy_igp, 214 e1000_phy_igp,
214 e1000_phy_undefined = 0xFF 215 e1000_phy_8211,
216 e1000_phy_8201,
217 e1000_phy_undefined = 0xFF
215} e1000_phy_type; 218} e1000_phy_type;
216 219
217typedef enum { 220typedef enum {
@@ -442,6 +445,7 @@ void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value);
442#define E1000_DEV_ID_82547EI 0x1019 445#define E1000_DEV_ID_82547EI 0x1019
443#define E1000_DEV_ID_82547EI_MOBILE 0x101A 446#define E1000_DEV_ID_82547EI_MOBILE 0x101A
444#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5 447#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5
448#define E1000_DEV_ID_INTEL_CE4100_GBE 0x2E6E
445 449
446#define NODE_ADDRESS_SIZE 6 450#define NODE_ADDRESS_SIZE 6
447#define ETH_LENGTH_OF_ADDRESS 6 451#define ETH_LENGTH_OF_ADDRESS 6
@@ -808,6 +812,16 @@ struct e1000_ffvt_entry {
808#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ 812#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */
809#define E1000_FLA 0x0001C /* Flash Access - RW */ 813#define E1000_FLA 0x0001C /* Flash Access - RW */
810#define E1000_MDIC 0x00020 /* MDI Control - RW */ 814#define E1000_MDIC 0x00020 /* MDI Control - RW */
815
816extern void __iomem *ce4100_gbe_mdio_base_virt;
817#define INTEL_CE_GBE_MDIO_RCOMP_BASE (ce4100_gbe_mdio_base_virt)
818#define E1000_MDIO_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0)
819#define E1000_MDIO_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 4)
820#define E1000_MDIO_DRV (INTEL_CE_GBE_MDIO_RCOMP_BASE + 8)
821#define E1000_MDC_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0xC)
822#define E1000_RCOMP_CTL (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x20)
823#define E1000_RCOMP_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x24)
824
811#define E1000_SCTL 0x00024 /* SerDes Control - RW */ 825#define E1000_SCTL 0x00024 /* SerDes Control - RW */
812#define E1000_FEXTNVM 0x00028 /* Future Extended NVM register */ 826#define E1000_FEXTNVM 0x00028 /* Future Extended NVM register */
813#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ 827#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
@@ -820,6 +834,34 @@ struct e1000_ffvt_entry {
820#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ 834#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */
821#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ 835#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */
822#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ 836#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */
837
838/* Auxiliary Control Register. This register is CE4100 specific,
839 * RMII/RGMII function is switched by this register - RW
840 * Following are bits definitions of the Auxiliary Control Register
841 */
842#define E1000_CTL_AUX 0x000E0
843#define E1000_CTL_AUX_END_SEL_SHIFT 10
844#define E1000_CTL_AUX_ENDIANESS_SHIFT 8
845#define E1000_CTL_AUX_RGMII_RMII_SHIFT 0
846
847/* descriptor and packet transfer use CTL_AUX.ENDIANESS */
848#define E1000_CTL_AUX_DES_PKT (0x0 << E1000_CTL_AUX_END_SEL_SHIFT)
849/* descriptor use CTL_AUX.ENDIANESS, packet use default */
850#define E1000_CTL_AUX_DES (0x1 << E1000_CTL_AUX_END_SEL_SHIFT)
851/* descriptor use default, packet use CTL_AUX.ENDIANESS */
852#define E1000_CTL_AUX_PKT (0x2 << E1000_CTL_AUX_END_SEL_SHIFT)
853/* all use CTL_AUX.ENDIANESS */
854#define E1000_CTL_AUX_ALL (0x3 << E1000_CTL_AUX_END_SEL_SHIFT)
855
856#define E1000_CTL_AUX_RGMII (0x0 << E1000_CTL_AUX_RGMII_RMII_SHIFT)
857#define E1000_CTL_AUX_RMII (0x1 << E1000_CTL_AUX_RGMII_RMII_SHIFT)
858
859/* LW little endian, Byte big endian */
860#define E1000_CTL_AUX_LWLE_BBE (0x0 << E1000_CTL_AUX_ENDIANESS_SHIFT)
861#define E1000_CTL_AUX_LWLE_BLE (0x1 << E1000_CTL_AUX_ENDIANESS_SHIFT)
862#define E1000_CTL_AUX_LWBE_BBE (0x2 << E1000_CTL_AUX_ENDIANESS_SHIFT)
863#define E1000_CTL_AUX_LWBE_BLE (0x3 << E1000_CTL_AUX_ENDIANESS_SHIFT)
864
823#define E1000_RCTL 0x00100 /* RX Control - RW */ 865#define E1000_RCTL 0x00100 /* RX Control - RW */
824#define E1000_RDTR1 0x02820 /* RX Delay Timer (1) - RW */ 866#define E1000_RDTR1 0x02820 /* RX Delay Timer (1) - RW */
825#define E1000_RDBAL1 0x02900 /* RX Descriptor Base Address Low (1) - RW */ 867#define E1000_RDBAL1 0x02900 /* RX Descriptor Base Address Low (1) - RW */
@@ -1011,6 +1053,7 @@ struct e1000_ffvt_entry {
1011 * in more current versions of the 8254x. Despite the difference in location, 1053 * in more current versions of the 8254x. Despite the difference in location,
1012 * the registers function in the same manner. 1054 * the registers function in the same manner.
1013 */ 1055 */
1056#define E1000_82542_CTL_AUX E1000_CTL_AUX
1014#define E1000_82542_CTRL E1000_CTRL 1057#define E1000_82542_CTRL E1000_CTRL
1015#define E1000_82542_CTRL_DUP E1000_CTRL_DUP 1058#define E1000_82542_CTRL_DUP E1000_CTRL_DUP
1016#define E1000_82542_STATUS E1000_STATUS 1059#define E1000_82542_STATUS E1000_STATUS
@@ -1571,6 +1614,11 @@ struct e1000_hw {
1571#define E1000_MDIC_INT_EN 0x20000000 1614#define E1000_MDIC_INT_EN 0x20000000
1572#define E1000_MDIC_ERROR 0x40000000 1615#define E1000_MDIC_ERROR 0x40000000
1573 1616
1617#define INTEL_CE_GBE_MDIC_OP_WRITE 0x04000000
1618#define INTEL_CE_GBE_MDIC_OP_READ 0x00000000
1619#define INTEL_CE_GBE_MDIC_GO 0x80000000
1620#define INTEL_CE_GBE_MDIC_READ_ERROR 0x80000000
1621
1574#define E1000_KUMCTRLSTA_MASK 0x0000FFFF 1622#define E1000_KUMCTRLSTA_MASK 0x0000FFFF
1575#define E1000_KUMCTRLSTA_OFFSET 0x001F0000 1623#define E1000_KUMCTRLSTA_OFFSET 0x001F0000
1576#define E1000_KUMCTRLSTA_OFFSET_SHIFT 16 1624#define E1000_KUMCTRLSTA_OFFSET_SHIFT 16
@@ -2871,6 +2919,11 @@ struct e1000_host_command_info {
2871#define M88E1111_I_PHY_ID 0x01410CC0 2919#define M88E1111_I_PHY_ID 0x01410CC0
2872#define L1LXT971A_PHY_ID 0x001378E0 2920#define L1LXT971A_PHY_ID 0x001378E0
2873 2921
2922#define RTL8211B_PHY_ID 0x001CC910
2923#define RTL8201N_PHY_ID 0x8200
2924#define RTL_PHY_CTRL_FD 0x0100 /* Full duplex: 0=half; 1=full */
2925#define RTL_PHY_CTRL_SPD_100 0x200000 /* Force 100Mb */
2926
2874/* Bits... 2927/* Bits...
2875 * 15-5: page 2928 * 15-5: page
2876 * 4-0: register offset 2929 * 4-0: register offset
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 340e12d2e4a9..4ff88a683f61 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -28,6 +28,12 @@
28 28
29#include "e1000.h" 29#include "e1000.h"
30#include <net/ip6_checksum.h> 30#include <net/ip6_checksum.h>
31#include <linux/io.h>
32
33/* Intel Media SOC GbE MDIO physical base address */
34static unsigned long ce4100_gbe_mdio_base_phy;
35/* Intel Media SOC GbE MDIO virtual base address */
36void __iomem *ce4100_gbe_mdio_base_virt;
31 37
32char e1000_driver_name[] = "e1000"; 38char e1000_driver_name[] = "e1000";
33static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; 39static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
@@ -79,6 +85,7 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
79 INTEL_E1000_ETHERNET_DEVICE(0x108A), 85 INTEL_E1000_ETHERNET_DEVICE(0x108A),
80 INTEL_E1000_ETHERNET_DEVICE(0x1099), 86 INTEL_E1000_ETHERNET_DEVICE(0x1099),
81 INTEL_E1000_ETHERNET_DEVICE(0x10B5), 87 INTEL_E1000_ETHERNET_DEVICE(0x10B5),
88 INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
82 /* required last entry */ 89 /* required last entry */
83 {0,} 90 {0,}
84}; 91};
@@ -459,6 +466,7 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter)
459 case e1000_82545: 466 case e1000_82545:
460 case e1000_82545_rev_3: 467 case e1000_82545_rev_3:
461 case e1000_82546: 468 case e1000_82546:
469 case e1000_ce4100:
462 case e1000_82546_rev_3: 470 case e1000_82546_rev_3:
463 case e1000_82541: 471 case e1000_82541:
464 case e1000_82541_rev_2: 472 case e1000_82541_rev_2:
@@ -573,6 +581,7 @@ void e1000_reset(struct e1000_adapter *adapter)
573 case e1000_82545: 581 case e1000_82545:
574 case e1000_82545_rev_3: 582 case e1000_82545_rev_3:
575 case e1000_82546: 583 case e1000_82546:
584 case e1000_ce4100:
576 case e1000_82546_rev_3: 585 case e1000_82546_rev_3:
577 pba = E1000_PBA_48K; 586 pba = E1000_PBA_48K;
578 break; 587 break;
@@ -894,6 +903,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
894 static int global_quad_port_a = 0; /* global ksp3 port a indication */ 903 static int global_quad_port_a = 0; /* global ksp3 port a indication */
895 int i, err, pci_using_dac; 904 int i, err, pci_using_dac;
896 u16 eeprom_data = 0; 905 u16 eeprom_data = 0;
906 u16 tmp = 0;
897 u16 eeprom_apme_mask = E1000_EEPROM_APME; 907 u16 eeprom_apme_mask = E1000_EEPROM_APME;
898 int bars, need_ioport; 908 int bars, need_ioport;
899 909
@@ -996,6 +1006,14 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
996 goto err_sw_init; 1006 goto err_sw_init;
997 1007
998 err = -EIO; 1008 err = -EIO;
1009 if (hw->mac_type == e1000_ce4100) {
1010 ce4100_gbe_mdio_base_phy = pci_resource_start(pdev, BAR_1);
1011 ce4100_gbe_mdio_base_virt = ioremap(ce4100_gbe_mdio_base_phy,
1012 pci_resource_len(pdev, BAR_1));
1013
1014 if (!ce4100_gbe_mdio_base_virt)
1015 goto err_mdio_ioremap;
1016 }
999 1017
1000 if (hw->mac_type >= e1000_82543) { 1018 if (hw->mac_type >= e1000_82543) {
1001 netdev->features = NETIF_F_SG | 1019 netdev->features = NETIF_F_SG |
@@ -1135,6 +1153,20 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
1135 adapter->wol = adapter->eeprom_wol; 1153 adapter->wol = adapter->eeprom_wol;
1136 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); 1154 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1137 1155
1156 /* Auto detect PHY address */
1157 if (hw->mac_type == e1000_ce4100) {
1158 for (i = 0; i < 32; i++) {
1159 hw->phy_addr = i;
1160 e1000_read_phy_reg(hw, PHY_ID2, &tmp);
1161 if (tmp == 0 || tmp == 0xFF) {
1162 if (i == 31)
1163 goto err_eeprom;
1164 continue;
1165 } else
1166 break;
1167 }
1168 }
1169
1138 /* reset the hardware with the new settings */ 1170 /* reset the hardware with the new settings */
1139 e1000_reset(adapter); 1171 e1000_reset(adapter);
1140 1172
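The auto-detect loop relies on an MDIO convention: an address with nothing behind it tends to read ID registers as all-zeros or all-ones, so the probe walks all 32 addresses until PHY_ID2 returns something plausible and gives up only after address 31. The same logic pulled out as a hedged helper (in the diff it is inline in e1000_probe(); the function below is illustrative):

        static int e1000_scan_phy_addr(struct e1000_hw *hw)
        {
                u16 id2;
                u32 addr;

                for (addr = 0; addr < 32; addr++) {
                        hw->phy_addr = addr;
                        e1000_read_phy_reg(hw, PHY_ID2, &id2);
                        if (id2 != 0 && id2 != 0xFF)
                                return 0;       /* a PHY answered here */
                }
                return -E1000_ERR_PHY;          /* nothing on the MDIO bus */
        }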
@@ -1171,6 +1203,8 @@ err_eeprom:
1171 kfree(adapter->rx_ring); 1203 kfree(adapter->rx_ring);
1172err_dma: 1204err_dma:
1173err_sw_init: 1205err_sw_init:
1206err_mdio_ioremap:
1207 iounmap(ce4100_gbe_mdio_base_virt);
1174 iounmap(hw->hw_addr); 1208 iounmap(hw->hw_addr);
1175err_ioremap: 1209err_ioremap:
1176 free_netdev(netdev); 1210 free_netdev(netdev);
@@ -1409,6 +1443,7 @@ static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
1409 /* First rev 82545 and 82546 need to not allow any memory 1443 /* First rev 82545 and 82546 need to not allow any memory
1410 * write location to cross 64k boundary due to errata 23 */ 1444 * write location to cross 64k boundary due to errata 23 */
1411 if (hw->mac_type == e1000_82545 || 1445 if (hw->mac_type == e1000_82545 ||
1446 hw->mac_type == e1000_ce4100 ||
1412 hw->mac_type == e1000_82546) { 1447 hw->mac_type == e1000_82546) {
1413 return ((begin ^ (end - 1)) >> 16) != 0 ? false : true; 1448 return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
1414 } 1449 }
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h
index edd1c75aa895..55c1711f1688 100644
--- a/drivers/net/e1000/e1000_osdep.h
+++ b/drivers/net/e1000/e1000_osdep.h
@@ -34,12 +34,21 @@
34#ifndef _E1000_OSDEP_H_ 34#ifndef _E1000_OSDEP_H_
35#define _E1000_OSDEP_H_ 35#define _E1000_OSDEP_H_
36 36
37#include <linux/types.h>
38#include <linux/pci.h>
39#include <linux/delay.h>
40#include <asm/io.h> 37#include <asm/io.h>
41#include <linux/interrupt.h> 38
42#include <linux/sched.h> 39#define CONFIG_RAM_BASE 0x60000
40#define GBE_CONFIG_OFFSET 0x0
41
42#define GBE_CONFIG_RAM_BASE \
43 ((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET))
44
45#define GBE_CONFIG_BASE_VIRT phys_to_virt(GBE_CONFIG_RAM_BASE)
46
47#define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \
48 (iowrite16_rep(base + offset, data, count))
49
50#define GBE_CONFIG_FLASH_READ(base, offset, count, data) \
51 (ioread16_rep(base + (offset << 1), data, count))
43 52
44#define er32(reg) \ 53#define er32(reg) \
45 (readl(hw->hw_addr + ((hw->mac_type >= e1000_82543) \ 54 (readl(hw->hw_addr + ((hw->mac_type >= e1000_82543) \
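On the CE4100 the "EEPROM" is really a flash-backed config region behind MMIO, so e1000_do_read_eeprom()/e1000_do_write_eeprom() short-circuit into these macros, which stream 16-bit words via ioread16_rep()/iowrite16_rep(). One detail worth noting: the read macro converts the word offset to a byte offset (offset << 1) while the write macro passes it through unshifted, so as written the two do not address the region symmetrically. A hedged usage sketch of the read path:

        u16 buf[8];

        /* Stream 8 words from word offset 0x10 of the config region into
         * buf; the macro expands to ioread16_rep() on the window mapped
         * by phys_to_virt(GBE_CONFIG_RAM_BASE).
         */
        GBE_CONFIG_FLASH_READ(GBE_CONFIG_BASE_VIRT, 0x10, 8, buf);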
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index e57e4097ef1b..cb6c7b1c1fb8 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -78,6 +78,8 @@ static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw);
78static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw); 78static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw);
79static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw); 79static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw);
80static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw); 80static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw);
81static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active);
82static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active);
81 83
82/** 84/**
83 * e1000_init_phy_params_82571 - Init PHY func ptrs. 85 * e1000_init_phy_params_82571 - Init PHY func ptrs.
@@ -113,6 +115,8 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
113 phy->type = e1000_phy_bm; 115 phy->type = e1000_phy_bm;
114 phy->ops.acquire = e1000_get_hw_semaphore_82574; 116 phy->ops.acquire = e1000_get_hw_semaphore_82574;
115 phy->ops.release = e1000_put_hw_semaphore_82574; 117 phy->ops.release = e1000_put_hw_semaphore_82574;
118 phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82574;
119 phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82574;
116 break; 120 break;
117 default: 121 default:
118 return -E1000_ERR_PHY; 122 return -E1000_ERR_PHY;
@@ -121,29 +125,36 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
121 125
122 /* This can only be done after all function pointers are setup. */ 126 /* This can only be done after all function pointers are setup. */
123 ret_val = e1000_get_phy_id_82571(hw); 127 ret_val = e1000_get_phy_id_82571(hw);
128 if (ret_val) {
129 e_dbg("Error getting PHY ID\n");
130 return ret_val;
131 }
124 132
125 /* Verify phy id */ 133 /* Verify phy id */
126 switch (hw->mac.type) { 134 switch (hw->mac.type) {
127 case e1000_82571: 135 case e1000_82571:
128 case e1000_82572: 136 case e1000_82572:
129 if (phy->id != IGP01E1000_I_PHY_ID) 137 if (phy->id != IGP01E1000_I_PHY_ID)
130 return -E1000_ERR_PHY; 138 ret_val = -E1000_ERR_PHY;
131 break; 139 break;
132 case e1000_82573: 140 case e1000_82573:
133 if (phy->id != M88E1111_I_PHY_ID) 141 if (phy->id != M88E1111_I_PHY_ID)
134 return -E1000_ERR_PHY; 142 ret_val = -E1000_ERR_PHY;
135 break; 143 break;
136 case e1000_82574: 144 case e1000_82574:
137 case e1000_82583: 145 case e1000_82583:
138 if (phy->id != BME1000_E_PHY_ID_R2) 146 if (phy->id != BME1000_E_PHY_ID_R2)
139 return -E1000_ERR_PHY; 147 ret_val = -E1000_ERR_PHY;
140 break; 148 break;
141 default: 149 default:
142 return -E1000_ERR_PHY; 150 ret_val = -E1000_ERR_PHY;
143 break; 151 break;
144 } 152 }
145 153
146 return 0; 154 if (ret_val)
155 e_dbg("PHY ID unknown: type = 0x%08x\n", phy->id);
156
157 return ret_val;
147} 158}
148 159
149/** 160/**
@@ -649,6 +660,58 @@ static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw)
649} 660}
650 661
651/** 662/**
663 * e1000_set_d0_lplu_state_82574 - Set Low Power Linkup D0 state
664 * @hw: pointer to the HW structure
665 * @active: true to enable LPLU, false to disable
666 *
667 * Sets the LPLU D0 state according to the active flag.
668 * LPLU will not be activated unless the device's autonegotiation
669 * advertisement is limited to 10, 10/100, or 10/100/1000 at all
670 * duplexes.
671 * This is a function pointer entry point only called by
672 * PHY setup routines.
673 **/
674static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active)
675{
676 u16 data = er32(POEMB);
677
678 if (active)
679 data |= E1000_PHY_CTRL_D0A_LPLU;
680 else
681 data &= ~E1000_PHY_CTRL_D0A_LPLU;
682
683 ew32(POEMB, data);
684 return 0;
685}
686
687/**
688 * e1000_set_d3_lplu_state_82574 - Sets low power link up state for D3
689 * @hw: pointer to the HW structure
690 * @active: boolean used to enable/disable lplu
691 *
692 * The low power link up (lplu) state is set to the power management level D3
693 * when active is true, else clear lplu for D3. LPLU
694 * is used during Dx states where the power conservation is most important.
695 * During driver activity, SmartSpeed should be enabled so performance is
696 * maintained.
697 **/
698static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active)
699{
700 u16 data = er32(POEMB);
701
702 if (!active) {
703 data &= ~E1000_PHY_CTRL_NOND0A_LPLU;
704 } else if ((hw->phy.autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
705 (hw->phy.autoneg_advertised == E1000_ALL_NOT_GIG) ||
706 (hw->phy.autoneg_advertised == E1000_ALL_10_SPEED)) {
707 data |= E1000_PHY_CTRL_NOND0A_LPLU;
708 }
709
710 ew32(POEMB, data);
711 return 0;
712}
713
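With these callbacks wired up, the 82574/82583 gain LPLU (low power link up) control through the same phy.ops indirection other MACs use; the hardware difference is only that the knob lives in the MAC's POEMB register instead of a PHY register. A hedged sketch of a consumer, with the call site simplified and the NULL check standing in for MACs that lack the op:

        static s32 phy_prepare_for_d3(struct e1000_hw *hw)
        {
                /* MACs without LPLU support simply leave the op NULL. */
                if (!hw->phy.ops.set_d3_lplu_state)
                        return 0;

                /* Trade link speed for power while the device sleeps. */
                return hw->phy.ops.set_d3_lplu_state(hw, true);
        }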
714/**
652 * e1000_acquire_nvm_82571 - Request for access to the EEPROM 715 * e1000_acquire_nvm_82571 - Request for access to the EEPROM
653 * @hw: pointer to the HW structure 716 * @hw: pointer to the HW structure
654 * 717 *
@@ -956,7 +1019,7 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
956 **/ 1019 **/
957static s32 e1000_reset_hw_82571(struct e1000_hw *hw) 1020static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
958{ 1021{
959 u32 ctrl, ctrl_ext, icr; 1022 u32 ctrl, ctrl_ext;
960 s32 ret_val; 1023 s32 ret_val;
961 1024
962 /* 1025 /*
@@ -1040,7 +1103,7 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
1040 1103
1041 /* Clear any pending interrupt events. */ 1104 /* Clear any pending interrupt events. */
1042 ew32(IMC, 0xffffffff); 1105 ew32(IMC, 0xffffffff);
1043 icr = er32(ICR); 1106 er32(ICR);
1044 1107
1045 if (hw->mac.type == e1000_82571) { 1108 if (hw->mac.type == e1000_82571) {
1046 /* Install any alternate MAC address into RAR0 */ 1109 /* Install any alternate MAC address into RAR0 */
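Dropping the icr local is not just warning hygiene: ICR is read-to-clear, so the read itself is the operative side effect and the value was never needed. The sequence keeps the read and discards the result:

        ew32(IMC, 0xffffffff);  /* mask every interrupt source */
        er32(ICR);              /* read-to-clear any latched causes */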
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 2c913b8e9116..5255be753746 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -38,6 +38,7 @@
 #include <linux/netdevice.h>
 #include <linux/pci.h>
 #include <linux/pci-aspm.h>
+#include <linux/crc32.h>
 
 #include "hw.h"
 
@@ -496,6 +497,8 @@ extern void e1000e_free_tx_resources(struct e1000_adapter *adapter);
 extern void e1000e_update_stats(struct e1000_adapter *adapter);
 extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
 extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
+extern void e1000e_get_hw_control(struct e1000_adapter *adapter);
+extern void e1000e_release_hw_control(struct e1000_adapter *adapter);
 extern void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);
 
 extern unsigned int copybreak;
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index b18c644e13d1..e45a61c8930a 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -784,7 +784,7 @@ static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed,
 **/
 static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
 {
-	u32 ctrl, icr;
+	u32 ctrl;
 	s32 ret_val;
 
 	/*
@@ -818,7 +818,7 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
 
 	/* Clear any pending interrupt events. */
 	ew32(IMC, 0xffffffff);
-	icr = er32(ICR);
+	er32(ICR);
 
 	ret_val = e1000_check_alt_mac_addr_generic(hw);
 
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index affcacf6f5a9..f8ed03dab9b1 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -624,20 +624,24 @@ static void e1000_get_drvinfo(struct net_device *netdev,
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	char firmware_version[32];
 
-	strncpy(drvinfo->driver, e1000e_driver_name, 32);
-	strncpy(drvinfo->version, e1000e_driver_version, 32);
+	strncpy(drvinfo->driver, e1000e_driver_name,
+		sizeof(drvinfo->driver) - 1);
+	strncpy(drvinfo->version, e1000e_driver_version,
+		sizeof(drvinfo->version) - 1);
 
 	/*
 	 * EEPROM image version # is reported as firmware version # for
 	 * PCI-E controllers
 	 */
-	sprintf(firmware_version, "%d.%d-%d",
+	snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d",
 		(adapter->eeprom_vers & 0xF000) >> 12,
 		(adapter->eeprom_vers & 0x0FF0) >> 4,
 		(adapter->eeprom_vers & 0x000F));
 
-	strncpy(drvinfo->fw_version, firmware_version, 32);
-	strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+	strncpy(drvinfo->fw_version, firmware_version,
+		sizeof(drvinfo->fw_version) - 1);
+	strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
+		sizeof(drvinfo->bus_info) - 1);
 	drvinfo->regdump_len = e1000_get_regs_len(netdev);
 	drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
 }
@@ -1704,6 +1708,19 @@ static void e1000_diag_test(struct net_device *netdev,
 	bool if_running = netif_running(netdev);
 
 	set_bit(__E1000_TESTING, &adapter->state);
+
+	if (!if_running) {
+		/* Get control of and reset hardware */
+		if (adapter->flags & FLAG_HAS_AMT)
+			e1000e_get_hw_control(adapter);
+
+		e1000e_power_up_phy(adapter);
+
+		adapter->hw.phy.autoneg_wait_to_complete = 1;
+		e1000e_reset(adapter);
+		adapter->hw.phy.autoneg_wait_to_complete = 0;
+	}
+
 	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
 		/* Offline tests */
 
@@ -1717,8 +1734,6 @@ static void e1000_diag_test(struct net_device *netdev,
 		if (if_running)
 			/* indicate we're in test mode */
 			dev_close(netdev);
-		else
-			e1000e_reset(adapter);
 
 		if (e1000_reg_test(adapter, &data[0]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1732,8 +1747,6 @@ static void e1000_diag_test(struct net_device *netdev,
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
 		e1000e_reset(adapter);
-		/* make sure the phy is powered up */
-		e1000e_power_up_phy(adapter);
 		if (e1000_loopback_test(adapter, &data[3]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
@@ -1755,28 +1768,29 @@ static void e1000_diag_test(struct net_device *netdev,
 		if (if_running)
 			dev_open(netdev);
 	} else {
-		if (!if_running && (adapter->flags & FLAG_HAS_AMT)) {
-			clear_bit(__E1000_TESTING, &adapter->state);
-			dev_open(netdev);
-			set_bit(__E1000_TESTING, &adapter->state);
-		}
+		/* Online tests */
 
 		e_info("online testing starting\n");
-		/* Online tests */
-		if (e1000_link_test(adapter, &data[4]))
-			eth_test->flags |= ETH_TEST_FL_FAILED;
 
-		/* Online tests aren't run; pass by default */
+		/* register, eeprom, intr and loopback tests not run online */
 		data[0] = 0;
 		data[1] = 0;
 		data[2] = 0;
 		data[3] = 0;
 
-		if (!if_running && (adapter->flags & FLAG_HAS_AMT))
-			dev_close(netdev);
+		if (e1000_link_test(adapter, &data[4]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
 
 		clear_bit(__E1000_TESTING, &adapter->state);
 	}
+
+	if (!if_running) {
+		e1000e_reset(adapter);
+
+		if (adapter->flags & FLAG_HAS_AMT)
+			e1000e_release_hw_control(adapter);
+	}
+
 	msleep_interruptible(4 * 1000);
 }
 
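[Editor's note] The get_drvinfo hunk above replaces hard-coded "32" lengths with sizeof(dst) - 1 bounds. Worth noting: strncpy() does not NUL-terminate when the source fills the buffer; copying at most sizeof(dst) - 1 bytes works here because the destination struct arrives zeroed, so the final byte stays '\0'. A minimal standalone sketch of the pattern (struct and strings are illustrative, not from the driver):

	#include <stdio.h>
	#include <string.h>

	struct drvinfo {
		char driver[32];	/* stands in for ethtool_drvinfo fields */
		char version[32];
	};

	int main(void)
	{
		struct drvinfo info;

		/* caller zeroes the struct, as the ethtool core does */
		memset(&info, 0, sizeof(info));

		/* sizeof(dst) - 1 leaves the last byte as the terminating
		 * NUL even when the source is longer than the buffer */
		strncpy(info.driver, "e1000e", sizeof(info.driver) - 1);
		strncpy(info.version, "a-deliberately-overlong-version-string-here",
			sizeof(info.version) - 1);

		printf("%s %s\n", info.driver, info.version);
		return 0;
	}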
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index ba302a5c2c30..e774380c7cec 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -83,6 +83,7 @@ enum e1e_registers {
 	E1000_EXTCNF_CTRL = 0x00F00, /* Extended Configuration Control */
 	E1000_EXTCNF_SIZE = 0x00F08, /* Extended Configuration Size */
 	E1000_PHY_CTRL = 0x00F10, /* PHY Control Register in CSR */
+#define E1000_POEMB	E1000_PHY_CTRL	/* PHY OEM Bits */
 	E1000_PBA      = 0x01000, /* Packet Buffer Allocation - RW */
 	E1000_PBS      = 0x01008, /* Packet Buffer Size */
 	E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index d86cc0832720..5328a2927731 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -1395,22 +1395,6 @@ void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
 	}
 }
 
-static u32 e1000_calc_rx_da_crc(u8 mac[])
-{
-	u32 poly = 0xEDB88320;	/* Polynomial for 802.3 CRC calculation */
-	u32 i, j, mask, crc;
-
-	crc = 0xffffffff;
-	for (i = 0; i < 6; i++) {
-		crc = crc ^ mac[i];
-		for (j = 8; j > 0; j--) {
-			mask = (crc & 1) * (-1);
-			crc = (crc >> 1) ^ (poly & mask);
-		}
-	}
-	return ~crc;
-}
-
 /**
  * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
  *  with 82579 PHY
@@ -1453,8 +1437,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
 		mac_addr[4] = (addr_high & 0xFF);
 		mac_addr[5] = ((addr_high >> 8) & 0xFF);
 
-		ew32(PCH_RAICC(i),
-		     e1000_calc_rx_da_crc(mac_addr));
+		ew32(PCH_RAICC(i), ~ether_crc_le(ETH_ALEN, mac_addr));
 	}
 
 	/* Write Rx addresses to the PHY */
@@ -2977,7 +2960,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
 {
 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
 	u16 reg;
-	u32 ctrl, icr, kab;
+	u32 ctrl, kab;
 	s32 ret_val;
 
 	/*
@@ -3067,7 +3050,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
 		ew32(CRC_OFFSET, 0x65656565);
 
 	ew32(IMC, 0xffffffff);
-	icr = er32(ICR);
+	er32(ICR);
 
 	kab = er32(KABGTXD);
 	kab |= E1000_KABGTXD_BGSQLBIAS;
@@ -3118,7 +3101,7 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
 	 * Reset the phy after disabling host wakeup to reset the Rx buffer.
 	 */
 	if (hw->phy.type == e1000_phy_82578) {
-		hw->phy.ops.read_reg(hw, BM_WUC, &i);
+		e1e_rphy(hw, BM_WUC, &i);
 		ret_val = e1000_phy_hw_reset_ich8lan(hw);
 		if (ret_val)
 			return ret_val;
@@ -3276,9 +3259,8 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
 	    (hw->phy.type == e1000_phy_82577)) {
 		ew32(FCRTV_PCH, hw->fc.refresh_time);
 
-		ret_val = hw->phy.ops.write_reg(hw,
-					PHY_REG(BM_PORT_CTRL_PAGE, 27),
-					hw->fc.pause_time);
+		ret_val = e1e_wphy(hw, PHY_REG(BM_PORT_CTRL_PAGE, 27),
+				   hw->fc.pause_time);
 		if (ret_val)
 			return ret_val;
 	}
@@ -3342,8 +3324,7 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
 			return ret_val;
 		break;
 	case e1000_phy_ife:
-		ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
-					       &reg_data);
+		ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &reg_data);
 		if (ret_val)
 			return ret_val;
 
@@ -3361,8 +3342,7 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
 			reg_data |= IFE_PMC_AUTO_MDIX;
 			break;
 		}
-		ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
-						reg_data);
+		ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data);
 		if (ret_val)
 			return ret_val;
 		break;
@@ -3646,7 +3626,8 @@ static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
 {
 	if (hw->phy.type == e1000_phy_ife)
 		return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
-				(IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
+				(IFE_PSCL_PROBE_MODE |
+				 IFE_PSCL_PROBE_LEDS_OFF));
 
 	ew32(LEDCTL, hw->mac.ledctl_mode1);
 	return 0;
@@ -3660,8 +3641,7 @@ static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
 **/
 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
 {
-	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
-					(u16)hw->mac.ledctl_mode1);
+	return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_mode1);
 }
 
 /**
@@ -3672,8 +3652,7 @@ static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
 
 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
 {
-	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
-					(u16)hw->mac.ledctl_default);
+	return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_default);
 }
 
 /**
@@ -3704,7 +3683,7 @@ static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
 		}
 	}
 
-	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
+	return e1e_wphy(hw, HV_LED_CONFIG, data);
 }
 
 /**
@@ -3735,7 +3714,7 @@ static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
 		}
 	}
 
-	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
+	return e1e_wphy(hw, HV_LED_CONFIG, data);
 }
 
 /**
@@ -3844,20 +3823,20 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
 	if ((hw->phy.type == e1000_phy_82578) ||
 	    (hw->phy.type == e1000_phy_82579) ||
 	    (hw->phy.type == e1000_phy_82577)) {
-		hw->phy.ops.read_reg(hw, HV_SCC_UPPER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_SCC_LOWER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_ECOL_UPPER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_ECOL_LOWER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_MCC_UPPER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_MCC_LOWER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_LATECOL_UPPER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_LATECOL_LOWER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_COLC_UPPER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_COLC_LOWER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_DC_UPPER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_DC_LOWER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_TNCRS_UPPER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_TNCRS_LOWER, &phy_data);
+		e1e_rphy(hw, HV_SCC_UPPER, &phy_data);
+		e1e_rphy(hw, HV_SCC_LOWER, &phy_data);
+		e1e_rphy(hw, HV_ECOL_UPPER, &phy_data);
+		e1e_rphy(hw, HV_ECOL_LOWER, &phy_data);
+		e1e_rphy(hw, HV_MCC_UPPER, &phy_data);
+		e1e_rphy(hw, HV_MCC_LOWER, &phy_data);
+		e1e_rphy(hw, HV_LATECOL_UPPER, &phy_data);
+		e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data);
+		e1e_rphy(hw, HV_COLC_UPPER, &phy_data);
+		e1e_rphy(hw, HV_COLC_LOWER, &phy_data);
+		e1e_rphy(hw, HV_DC_UPPER, &phy_data);
+		e1e_rphy(hw, HV_DC_LOWER, &phy_data);
+		e1e_rphy(hw, HV_TNCRS_UPPER, &phy_data);
+		e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data);
 	}
 }
 
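[Editor's note] The removed e1000_calc_rx_da_crc() was a bit-reflected CRC-32 over the 6-byte MAC with polynomial 0xEDB88320, returning the inverted remainder. The replacement keeps the same value: the kernel's shared ether_crc_le() omits the final inversion, so the call site applies ~ itself. A standalone sketch reproducing the deleted loop for comparison (the sample MAC is made up):

	#include <stdio.h>
	#include <stdint.h>

	/* the loop deleted from ich8lan.c: reflected CRC-32, poly 0xEDB88320 */
	static uint32_t calc_rx_da_crc(const uint8_t mac[6])
	{
		uint32_t poly = 0xEDB88320, crc = 0xffffffff;
		int i, j;

		for (i = 0; i < 6; i++) {
			crc ^= mac[i];
			for (j = 8; j > 0; j--) {
				uint32_t mask = -(crc & 1);
				crc = (crc >> 1) ^ (poly & mask);
			}
		}
		return ~crc;	/* ether_crc_le() skips this inversion, hence
				 * the ~ at the ew32(PCH_RAICC...) call site */
	}

	int main(void)
	{
		const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0x12, 0x34, 0x56 };

		printf("rx da crc = 0x%08x\n", calc_rx_da_crc(mac));
		return 0;
	}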
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index 7e55170a601e..ff2872153b21 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -1135,7 +1135,8 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
 		ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_nway_adv_reg);
 		if (ret_val)
 			return ret_val;
-		ret_val = e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg);
+		ret_val =
+		    e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg);
 		if (ret_val)
 			return ret_val;
 
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index fe50242aa9e6..fa5b60452547 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -1980,15 +1980,15 @@ static void e1000_irq_enable(struct e1000_adapter *adapter)
 }
 
 /**
- * e1000_get_hw_control - get control of the h/w from f/w
+ * e1000e_get_hw_control - get control of the h/w from f/w
  * @adapter: address of board private structure
  *
- * e1000_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
+ * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
  * For ASF and Pass Through versions of f/w this means that
  * the driver is loaded. For AMT version (only with 82573)
  * of the f/w this means that the network i/f is open.
 **/
-static void e1000_get_hw_control(struct e1000_adapter *adapter)
+void e1000e_get_hw_control(struct e1000_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
 	u32 ctrl_ext;
@@ -2005,16 +2005,16 @@ static void e1000_get_hw_control(struct e1000_adapter *adapter)
 }
 
 /**
- * e1000_release_hw_control - release control of the h/w to f/w
+ * e1000e_release_hw_control - release control of the h/w to f/w
  * @adapter: address of board private structure
  *
- * e1000_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
+ * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
  * For ASF and Pass Through versions of f/w this means that the
  * driver is no longer loaded. For AMT version (only with 82573) i
  * of the f/w this means that the network i/f is closed.
 *
 **/
-static void e1000_release_hw_control(struct e1000_adapter *adapter)
+void e1000e_release_hw_control(struct e1000_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
 	u32 ctrl_ext;
@@ -2445,7 +2445,7 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
 	    (vid == adapter->mng_vlan_id)) {
 		/* release control to f/w */
-		e1000_release_hw_control(adapter);
+		e1000e_release_hw_control(adapter);
 		return;
 	}
 
@@ -2734,6 +2734,9 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
 			ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
 		else
 			ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
+
+		if (ret_val)
+			e_dbg("failed to enable jumbo frame workaround mode\n");
 	}
 
 	/* Program MC offset vector base */
@@ -3184,7 +3187,6 @@ void e1000e_reset(struct e1000_adapter *adapter)
 		ew32(PBA, pba);
 	}
 
-
 	/*
 	 * flow control settings
 	 *
@@ -3272,7 +3274,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
 	 * that the network interface is in control
 	 */
 	if (adapter->flags & FLAG_HAS_AMT)
-		e1000_get_hw_control(adapter);
+		e1000e_get_hw_control(adapter);
 
 	ew32(WUC, 0);
 
@@ -3285,6 +3287,13 @@ void e1000e_reset(struct e1000_adapter *adapter)
 	ew32(VET, ETH_P_8021Q);
 
 	e1000e_reset_adaptive(hw);
+
+	if (!netif_running(adapter->netdev) &&
+	    !test_bit(__E1000_TESTING, &adapter->state)) {
+		e1000_power_down_phy(adapter);
+		return;
+	}
+
 	e1000_get_phy_info(hw);
 
 	if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) &&
@@ -3570,7 +3579,7 @@ static int e1000_open(struct net_device *netdev)
 	 * interface is now open and reset the part to a known state.
 	 */
 	if (adapter->flags & FLAG_HAS_AMT) {
-		e1000_get_hw_control(adapter);
+		e1000e_get_hw_control(adapter);
 		e1000e_reset(adapter);
 	}
 
@@ -3634,7 +3643,7 @@ static int e1000_open(struct net_device *netdev)
 	return 0;
 
 err_req_irq:
-	e1000_release_hw_control(adapter);
+	e1000e_release_hw_control(adapter);
 	e1000_power_down_phy(adapter);
 	e1000e_free_rx_resources(adapter);
 err_setup_rx:
@@ -3689,8 +3698,9 @@ static int e1000_close(struct net_device *netdev)
 	 * If AMT is enabled, let the firmware know that the network
 	 * interface is now closed
 	 */
-	if (adapter->flags & FLAG_HAS_AMT)
-		e1000_release_hw_control(adapter);
+	if ((adapter->flags & FLAG_HAS_AMT) &&
+	    !test_bit(__E1000_TESTING, &adapter->state))
+		e1000e_release_hw_control(adapter);
 
 	if ((adapter->flags & FLAG_HAS_ERT) ||
 	    (adapter->hw.mac.type == e1000_pch2lan))
@@ -5209,7 +5219,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
 	 * Release control of h/w to f/w.  If f/w is AMT enabled, this
 	 * would have already happened in close and is redundant.
 	 */
-	e1000_release_hw_control(adapter);
+	e1000e_release_hw_control(adapter);
 
 	pci_disable_device(pdev);
 
@@ -5366,7 +5376,7 @@ static int __e1000_resume(struct pci_dev *pdev)
 	 * under the control of the driver.
 	 */
 	if (!(adapter->flags & FLAG_HAS_AMT))
-		e1000_get_hw_control(adapter);
+		e1000e_get_hw_control(adapter);
 
 	return 0;
 }
@@ -5613,7 +5623,7 @@ static void e1000_io_resume(struct pci_dev *pdev)
 	 * under the control of the driver.
 	 */
 	if (!(adapter->flags & FLAG_HAS_AMT))
-		e1000_get_hw_control(adapter);
+		e1000e_get_hw_control(adapter);
 
 }
 
@@ -5636,7 +5646,7 @@ static void e1000_print_device_info(struct e1000_adapter *adapter)
 	ret_val = e1000_read_pba_string_generic(hw, pba_str,
 						E1000_PBANUM_LENGTH);
 	if (ret_val)
-		strcpy(pba_str, "Unknown");
+		strncpy((char *)pba_str, "Unknown", sizeof(pba_str) - 1);
 	e_info("MAC: %d, PHY: %d, PBA No: %s\n",
 	       hw->mac.type, hw->phy.type, pba_str);
 }
@@ -5963,9 +5973,9 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 	 * under the control of the driver.
 	 */
 	if (!(adapter->flags & FLAG_HAS_AMT))
-		e1000_get_hw_control(adapter);
+		e1000e_get_hw_control(adapter);
 
-	strcpy(netdev->name, "eth%d");
+	strncpy(netdev->name, "eth%d", sizeof(netdev->name) - 1);
 	err = register_netdev(netdev);
 	if (err)
 		goto err_register;
@@ -5982,12 +5992,11 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 
 err_register:
 	if (!(adapter->flags & FLAG_HAS_AMT))
-		e1000_release_hw_control(adapter);
+		e1000e_release_hw_control(adapter);
err_eeprom:
 	if (!e1000_check_reset_block(&adapter->hw))
 		e1000_phy_hw_reset(&adapter->hw);
err_hw_init:
-
 	kfree(adapter->tx_ring);
 	kfree(adapter->rx_ring);
err_sw_init:
@@ -6053,7 +6062,7 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
 	 * Release control of h/w to f/w.  If f/w is AMT enabled, this
 	 * would have already happened in close and is redundant.
 	 */
-	e1000_release_hw_control(adapter);
+	e1000e_release_hw_control(adapter);
 
 	e1000e_reset_interrupt_capability(adapter);
 	kfree(adapter->tx_ring);
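[Editor's note] Renaming e1000_get_hw_control()/e1000_release_hw_control() to e1000e_* and dropping static is what lets ethtool.c take and return hardware ownership around offline diagnostics. The bodies themselves are untouched by this patch; in substance each is a read-modify-write of the DRV_LOAD firmware-handshake bit. A hedged sketch of the CTRL_EXT leg of that handshake (the SWSM variant used on some parts is omitted; treat this as an illustration, not a quote of the driver):

	/* sketch only: the DRV_LOAD handshake the doc comments above describe */
	void e1000e_get_hw_control(struct e1000_adapter *adapter)
	{
		struct e1000_hw *hw = &adapter->hw;
		u32 ctrl_ext;

		/* tell ASF/AMT firmware the driver now owns the hardware */
		ctrl_ext = er32(CTRL_EXT);
		ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
	}

	void e1000e_release_hw_control(struct e1000_adapter *adapter)
	{
		struct e1000_hw *hw = &adapter->hw;
		u32 ctrl_ext;

		/* clearing DRV_LOAD hands control back to firmware */
		ctrl_ext = er32(CTRL_EXT);
		ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
	}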
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 1781efeb55e3..a640f1c369ae 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -637,12 +637,11 @@ s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data)
 **/
 s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
 {
-	struct e1000_phy_info *phy = &hw->phy;
 	s32 ret_val;
 	u16 phy_data;
 
 	/* Enable CRS on TX. This must be set for half-duplex operation. */
-	ret_val = phy->ops.read_reg(hw, I82577_CFG_REG, &phy_data);
+	ret_val = e1e_rphy(hw, I82577_CFG_REG, &phy_data);
 	if (ret_val)
 		goto out;
 
@@ -651,7 +650,7 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
 	/* Enable downshift */
 	phy_data |= I82577_CFG_ENABLE_DOWNSHIFT;
 
-	ret_val = phy->ops.write_reg(hw, I82577_CFG_REG, phy_data);
+	ret_val = e1e_wphy(hw, I82577_CFG_REG, phy_data);
 
 out:
 	return ret_val;
@@ -774,16 +773,14 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
 	}
 
 	if (phy->type == e1000_phy_82578) {
-		ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
-					    &phy_data);
+		ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
 		if (ret_val)
 			return ret_val;
 
 		/* 82578 PHY - set the downshift count to 1x. */
 		phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE;
 		phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK;
-		ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
-					     phy_data);
+		ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
 		if (ret_val)
 			return ret_val;
 	}
@@ -1319,9 +1316,8 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
 			 * We didn't get link.
 			 * Reset the DSP and cross our fingers.
 			 */
-			ret_val = e1e_wphy(hw,
-					   M88E1000_PHY_PAGE_SELECT,
-					   0x001d);
+			ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT,
+					   0x001d);
 			if (ret_val)
 				return ret_val;
 			ret_val = e1000e_phy_reset_dsp(hw);
@@ -3071,12 +3067,12 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
 		goto out;
 
 	/* Do not apply workaround if in PHY loopback bit 14 set */
-	hw->phy.ops.read_reg(hw, PHY_CONTROL, &data);
+	e1e_rphy(hw, PHY_CONTROL, &data);
 	if (data & PHY_CONTROL_LB)
 		goto out;
 
 	/* check if link is up and at 1Gbps */
-	ret_val = hw->phy.ops.read_reg(hw, BM_CS_STATUS, &data);
+	ret_val = e1e_rphy(hw, BM_CS_STATUS, &data);
 	if (ret_val)
 		goto out;
 
@@ -3092,14 +3088,12 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
 	mdelay(200);
 
 	/* flush the packets in the fifo buffer */
-	ret_val = hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL,
-					HV_MUX_DATA_CTRL_GEN_TO_MAC |
-					HV_MUX_DATA_CTRL_FORCE_SPEED);
+	ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC |
+			   HV_MUX_DATA_CTRL_FORCE_SPEED);
 	if (ret_val)
 		goto out;
 
-	ret_val = hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL,
-					HV_MUX_DATA_CTRL_GEN_TO_MAC);
+	ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC);
 
 out:
 	return ret_val;
@@ -3119,7 +3113,7 @@ s32 e1000_check_polarity_82577(struct e1000_hw *hw)
 	s32 ret_val;
 	u16 data;
 
-	ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data);
+	ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data);
 
 	if (!ret_val)
 		phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY)
@@ -3142,13 +3136,13 @@ s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw)
 	u16 phy_data;
 	bool link;
 
-	ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
+	ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
 	if (ret_val)
 		goto out;
 
 	e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
 
-	ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
+	ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
 	if (ret_val)
 		goto out;
 
@@ -3212,7 +3206,7 @@ s32 e1000_get_phy_info_82577(struct e1000_hw *hw)
 	if (ret_val)
 		goto out;
 
-	ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data);
+	ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data);
 	if (ret_val)
 		goto out;
 
@@ -3224,7 +3218,7 @@ s32 e1000_get_phy_info_82577(struct e1000_hw *hw)
 		if (ret_val)
 			goto out;
 
-		ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
+		ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &data);
 		if (ret_val)
 			goto out;
 
@@ -3258,7 +3252,7 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
 	s32 ret_val;
 	u16 phy_data, length;
 
-	ret_val = phy->ops.read_reg(hw, I82577_PHY_DIAG_STATUS, &phy_data);
+	ret_val = e1e_rphy(hw, I82577_PHY_DIAG_STATUS, &phy_data);
 	if (ret_val)
 		goto out;
 
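[Editor's note] The phy.c and ich8lan.c hunks above are mechanical: every open-coded hw->phy.ops.read_reg()/write_reg() call becomes e1e_rphy()/e1e_wphy(). Those wrappers already exist in the driver and, as far as this cleanup is concerned, are plain shorthand for the same ops indirection, approximately (shape assumed from usage, not quoted from the patch):

	/* approximate shape of the wrappers this patch standardizes on */
	#define e1e_rphy(hw, reg, data) ((hw)->phy.ops.read_reg((hw), (reg), (data)))
	#define e1e_wphy(hw, reg, data) ((hw)->phy.ops.write_reg((hw), (reg), (data)))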
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index a724a2d14506..6c7257bd73fc 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,7 +40,7 @@
 #include <asm/io.h>
 
 #define DRV_NAME	"ehea"
-#define DRV_VERSION	"EHEA_0106"
+#define DRV_VERSION	"EHEA_0107"
 
 /* eHEA capability flags */
 #define DLPAR_PORT_ADD_REM 1
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 1032b5bbe238..f75d3144b8a5 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -437,7 +437,7 @@ static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
 		}
 	}
 	/* Ring doorbell */
-	ehea_update_rq1a(pr->qp, i);
+	ehea_update_rq1a(pr->qp, i - 1);
 }
 
 static int ehea_refill_rq_def(struct ehea_port_res *pr,
@@ -1329,9 +1329,7 @@ static int ehea_fill_port_res(struct ehea_port_res *pr)
 	int ret;
 	struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;
 
-	ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1
-			       - init_attr->act_nr_rwqes_rq2
-			       - init_attr->act_nr_rwqes_rq3 - 1);
+	ehea_init_fill_rq1(pr, pr->rq1_skba.len);
 
 	ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);
 
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index cce32d43175f..2a71373719ae 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -17,6 +17,8 @@
 *
 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
 * Copyright (c) 2004-2006 Macq Electronique SA.
+ *
+ * Copyright (C) 2010 Freescale Semiconductor, Inc.
 */
 
 #include <linux/module.h>
@@ -45,29 +47,41 @@
 
 #include <asm/cacheflush.h>
 
-#ifndef CONFIG_ARCH_MXC
+#ifndef CONFIG_ARM
 #include <asm/coldfire.h>
 #include <asm/mcfsim.h>
 #endif
 
 #include "fec.h"
 
-#ifdef CONFIG_ARCH_MXC
-#include <mach/hardware.h>
+#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
 #define FEC_ALIGNMENT	0xf
 #else
 #define FEC_ALIGNMENT	0x3
 #endif
 
-/*
- * Define the fixed address of the FEC hardware.
- */
-#if defined(CONFIG_M5272)
+#define DRIVER_NAME	"fec"
+
+/* Controller is ENET-MAC */
+#define FEC_QUIRK_ENET_MAC		(1 << 0)
+/* Controller needs driver to swap frame */
+#define FEC_QUIRK_SWAP_FRAME		(1 << 1)
 
-static unsigned char	fec_mac_default[] = {
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+static struct platform_device_id fec_devtype[] = {
+	{
+		.name = DRIVER_NAME,
+		.driver_data = 0,
+	}, {
+		.name = "imx28-fec",
+		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
+	}
 };
 
+static unsigned char macaddr[ETH_ALEN];
+module_param_array(macaddr, byte, NULL, 0);
+MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
+
+#if defined(CONFIG_M5272)
 /*
 * Some hardware gets it MAC address out of local flash memory.
 * if this is non-zero then assume it is the address to get MAC from.
@@ -133,7 +147,8 @@ static unsigned char fec_mac_default[] = {
 * account when setting it.
 */
 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
-    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARCH_MXC)
+    defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
+    defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
 #define OPT_FRAME_SIZE	(PKT_MAXBUF_SIZE << 16)
 #else
 #define OPT_FRAME_SIZE	0
@@ -186,7 +201,6 @@ struct fec_enet_private {
 	int	mii_timeout;
 	uint	phy_speed;
 	phy_interface_t	phy_interface;
-	int	index;
 	int	link;
 	int	full_duplex;
 	struct	completion mdio_done;
@@ -213,10 +227,23 @@ static void fec_stop(struct net_device *dev);
 /* Transmitter timeout */
 #define TX_TIMEOUT (2 * HZ)
 
+static void *swap_buffer(void *bufaddr, int len)
+{
+	int i;
+	unsigned int *buf = bufaddr;
+
+	for (i = 0; i < (len + 3) / 4; i++, buf++)
+		*buf = cpu_to_be32(*buf);
+
+	return bufaddr;
+}
+
 static netdev_tx_t
 fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct fec_enet_private *fep = netdev_priv(dev);
+	const struct platform_device_id *id_entry =
+				platform_get_device_id(fep->pdev);
 	struct bufdesc *bdp;
 	void *bufaddr;
 	unsigned short	status;
@@ -261,6 +288,14 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		bufaddr = fep->tx_bounce[index];
 	}
 
+	/*
+	 * Some design made an incorrect assumption on endian mode of
+	 * the system that it's running on. As the result, driver has to
+	 * swap every frame going to and coming from the controller.
+	 */
+	if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+		swap_buffer(bufaddr, skb->len);
+
 	/* Save skb pointer */
 	fep->tx_skbuff[fep->skb_cur] = skb;
 
@@ -429,6 +464,8 @@ static void
 fec_enet_rx(struct net_device *dev)
 {
 	struct fec_enet_private *fep = netdev_priv(dev);
+	const struct platform_device_id *id_entry =
+				platform_get_device_id(fep->pdev);
 	struct bufdesc *bdp;
 	unsigned short status;
 	struct	sk_buff	*skb;
@@ -492,6 +529,9 @@ fec_enet_rx(struct net_device *dev)
 		dma_unmap_single(NULL, bdp->cbd_bufaddr, bdp->cbd_datlen,
 				DMA_FROM_DEVICE);
 
+		if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+			swap_buffer(data, pkt_len);
+
 		/* This does 16 byte alignment, exactly what we need.
 		 * The packet length includes FCS, but we don't want to
 		 * include that when passing upstream as it messes up
@@ -538,37 +578,50 @@ rx_processing_done:
 }
 
 /* ------------------------------------------------------------------------- */
-#ifdef CONFIG_M5272
 static void __inline__ fec_get_mac(struct net_device *dev)
 {
 	struct fec_enet_private *fep = netdev_priv(dev);
+	struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
 	unsigned char *iap, tmpaddr[ETH_ALEN];
 
-	if (FEC_FLASHMAC) {
-		/*
-		 * Get MAC address from FLASH.
-		 * If it is all 1's or 0's, use the default.
-		 */
-		iap = (unsigned char *)FEC_FLASHMAC;
-		if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) &&
-		    (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0))
-			iap = fec_mac_default;
-		if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) &&
-		    (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff))
-			iap = fec_mac_default;
-	} else {
-		*((unsigned long *) &tmpaddr[0]) = readl(fep->hwp + FEC_ADDR_LOW);
-		*((unsigned short *) &tmpaddr[4]) = (readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
+	/*
+	 * try to get mac address in following order:
+	 *
+	 * 1) module parameter via kernel command line in form
+	 *    fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0
+	 */
+	iap = macaddr;
+
+	/*
+	 * 2) from flash or fuse (via platform data)
+	 */
+	if (!is_valid_ether_addr(iap)) {
+#ifdef CONFIG_M5272
+		if (FEC_FLASHMAC)
+			iap = (unsigned char *)FEC_FLASHMAC;
+#else
+		if (pdata)
+			memcpy(iap, pdata->mac, ETH_ALEN);
+#endif
+	}
+
+	/*
+	 * 3) FEC mac registers set by bootloader
+	 */
+	if (!is_valid_ether_addr(iap)) {
+		*((unsigned long *) &tmpaddr[0]) =
+			be32_to_cpu(readl(fep->hwp + FEC_ADDR_LOW));
+		*((unsigned short *) &tmpaddr[4]) =
+			be16_to_cpu(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
 		iap = &tmpaddr[0];
 	}
 
 	memcpy(dev->dev_addr, iap, ETH_ALEN);
 
-	/* Adjust MAC if using default MAC address */
-	if (iap == fec_mac_default)
-		dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index;
+	/* Adjust MAC if using macaddr */
+	if (iap == macaddr)
+		dev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id;
 }
-#endif
 
 /* ------------------------------------------------------------------------- */
 
@@ -651,8 +704,8 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
 	fep->mii_timeout = 0;
 	init_completion(&fep->mdio_done);
 
-	/* start a read op */
-	writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
+	/* start a write op */
+	writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
 		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
 		FEC_MMFR_TA | FEC_MMFR_DATA(value),
 		fep->hwp + FEC_MII_DATA);
@@ -681,6 +734,7 @@ static int fec_enet_mii_probe(struct net_device *dev)
 	char mdio_bus_id[MII_BUS_ID_SIZE];
 	char phy_name[MII_BUS_ID_SIZE + 3];
 	int phy_id;
+	int dev_id = fep->pdev->id;
 
 	fep->phy_dev = NULL;
 
@@ -692,6 +746,8 @@ static int fec_enet_mii_probe(struct net_device *dev)
 			continue;
 		if (fep->mii_bus->phy_map[phy_id]->phy_id == 0)
 			continue;
+		if (dev_id--)
+			continue;
 		strncpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
 		break;
 	}
@@ -729,10 +785,35 @@ static int fec_enet_mii_probe(struct net_device *dev)
 
 static int fec_enet_mii_init(struct platform_device *pdev)
 {
+	static struct mii_bus *fec0_mii_bus;
 	struct net_device *dev = platform_get_drvdata(pdev);
 	struct fec_enet_private *fep = netdev_priv(dev);
+	const struct platform_device_id *id_entry =
+				platform_get_device_id(fep->pdev);
 	int err = -ENXIO, i;
 
+	/*
+	 * The dual fec interfaces are not equivalent with enet-mac.
+	 * Here are the differences:
+	 *
+	 *  - fec0 supports MII & RMII modes while fec1 only supports RMII
+	 *  - fec0 acts as the 1588 time master while fec1 is slave
+	 *  - external phys can only be configured by fec0
+	 *
+	 * That is to say fec1 can not work independently. It only works
+	 * when fec0 is working. The reason behind this design is that the
+	 * second interface is added primarily for Switch mode.
+	 *
+	 * Because of the last point above, both phys are attached on fec0
+	 * mdio interface in board design, and need to be configured by
+	 * fec0 mii_bus.
+	 */
+	if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && pdev->id) {
+		/* fec1 uses fec0 mii_bus */
+		fep->mii_bus = fec0_mii_bus;
+		return 0;
+	}
+
 	fep->mii_timeout = 0;
 
 	/*
@@ -769,6 +850,10 @@ static int fec_enet_mii_init(struct platform_device *pdev)
 	if (mdiobus_register(fep->mii_bus))
 		goto err_out_free_mdio_irq;
 
+	/* save fec0 mii_bus */
+	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
+		fec0_mii_bus = fep->mii_bus;
+
 	return 0;
 
 err_out_free_mdio_irq:
@@ -1067,9 +1152,8 @@ static const struct net_device_ops fec_netdev_ops = {
 /*
 * XXX:  We need to clean up on failure exits here.
 *
- * index is only used in legacy code
 */
-static int fec_enet_init(struct net_device *dev, int index)
+static int fec_enet_init(struct net_device *dev)
 {
 	struct fec_enet_private *fep = netdev_priv(dev);
 	struct bufdesc *cbd_base;
@@ -1086,26 +1170,11 @@ static int fec_enet_init(struct net_device *dev, int index)
 
 	spin_lock_init(&fep->hw_lock);
 
-	fep->index = index;
 	fep->hwp = (void __iomem *)dev->base_addr;
 	fep->netdev = dev;
 
-	/* Set the Ethernet address */
-#ifdef CONFIG_M5272
+	/* Get the Ethernet address */
 	fec_get_mac(dev);
-#else
-	{
-		unsigned long l;
-		l = readl(fep->hwp + FEC_ADDR_LOW);
-		dev->dev_addr[0] = (unsigned char)((l & 0xFF000000) >> 24);
-		dev->dev_addr[1] = (unsigned char)((l & 0x00FF0000) >> 16);
-		dev->dev_addr[2] = (unsigned char)((l & 0x0000FF00) >> 8);
-		dev->dev_addr[3] = (unsigned char)((l & 0x000000FF) >> 0);
-		l = readl(fep->hwp + FEC_ADDR_HIGH);
-		dev->dev_addr[4] = (unsigned char)((l & 0xFF000000) >> 24);
-		dev->dev_addr[5] = (unsigned char)((l & 0x00FF0000) >> 16);
-	}
-#endif
 
 	/* Set receive and transmit descriptor base. */
 	fep->rx_bd_base = cbd_base;
@@ -1156,12 +1225,25 @@ static void
 fec_restart(struct net_device *dev, int duplex)
 {
 	struct fec_enet_private *fep = netdev_priv(dev);
+	const struct platform_device_id *id_entry =
+				platform_get_device_id(fep->pdev);
 	int i;
+	u32 val, temp_mac[2];
 
 	/* Whack a reset.  We should wait for this. */
 	writel(1, fep->hwp + FEC_ECNTRL);
 	udelay(10);
 
+	/*
+	 * enet-mac reset will reset mac address registers too,
+	 * so need to reconfigure it.
+	 */
+	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
+		memcpy(&temp_mac, dev->dev_addr, ETH_ALEN);
+		writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
+		writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
+	}
+
 	/* Clear any outstanding interrupt. */
 	writel(0xffc00000, fep->hwp + FEC_IEVENT);
 
@@ -1208,20 +1290,45 @@ fec_restart(struct net_device *dev, int duplex)
 	/* Set MII speed */
 	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
 
-#ifdef FEC_MIIGSK_ENR
-	if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) {
-		/* disable the gasket and wait */
-		writel(0, fep->hwp + FEC_MIIGSK_ENR);
-		while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
-			udelay(1);
+	/*
+	 * The phy interface and speed need to get configured
+	 * differently on enet-mac.
+	 */
+	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
+		val = readl(fep->hwp + FEC_R_CNTRL);
 
-		/* configure the gasket: RMII, 50 MHz, no loopback, no echo */
-		writel(1, fep->hwp + FEC_MIIGSK_CFGR);
+		/* MII or RMII */
+		if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
+			val |= (1 << 8);
+		else
+			val &= ~(1 << 8);
 
-		/* re-enable the gasket */
-		writel(2, fep->hwp + FEC_MIIGSK_ENR);
-	}
+		/* 10M or 100M */
+		if (fep->phy_dev && fep->phy_dev->speed == SPEED_100)
+			val &= ~(1 << 9);
+		else
+			val |= (1 << 9);
+
+		writel(val, fep->hwp + FEC_R_CNTRL);
+	} else {
+#ifdef FEC_MIIGSK_ENR
+		if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) {
+			/* disable the gasket and wait */
+			writel(0, fep->hwp + FEC_MIIGSK_ENR);
+			while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
+				udelay(1);
+
+			/*
+			 * configure the gasket:
+			 * RMII, 50 MHz, no loopback, no echo
+			 */
+			writel(1, fep->hwp + FEC_MIIGSK_CFGR);
+
+			/* re-enable the gasket */
+			writel(2, fep->hwp + FEC_MIIGSK_ENR);
+		}
 #endif
+	}
 
 	/* And last, enable the transmit and receive processing */
 	writel(2, fep->hwp + FEC_ECNTRL);
@@ -1316,7 +1423,7 @@ fec_probe(struct platform_device *pdev)
 	}
 	clk_enable(fep->clk);
 
-	ret = fec_enet_init(ndev, 0);
+	ret = fec_enet_init(ndev);
 	if (ret)
 		goto failed_init;
 
@@ -1380,8 +1487,10 @@ fec_suspend(struct device *dev)
 
 	if (ndev) {
 		fep = netdev_priv(ndev);
-		if (netif_running(ndev))
-			fec_enet_close(ndev);
+		if (netif_running(ndev)) {
+			fec_stop(ndev);
+			netif_device_detach(ndev);
+		}
 		clk_disable(fep->clk);
 	}
 	return 0;
@@ -1396,8 +1505,10 @@ fec_resume(struct device *dev)
 	if (ndev) {
 		fep = netdev_priv(ndev);
 		clk_enable(fep->clk);
-		if (netif_running(ndev))
-			fec_enet_open(ndev);
+		if (netif_running(ndev)) {
+			fec_restart(ndev, fep->full_duplex);
+			netif_device_attach(ndev);
+		}
 	}
 	return 0;
 }
@@ -1414,12 +1525,13 @@ static const struct dev_pm_ops fec_pm_ops = {
 
 static struct platform_driver fec_driver = {
 	.driver	= {
-		.name    = "fec",
+		.name	= DRIVER_NAME,
 		.owner	= THIS_MODULE,
 #ifdef CONFIG_PM
 		.pm	= &fec_pm_ops,
 #endif
 	},
+	.id_table = fec_devtype,
 	.probe	= fec_probe,
 	.remove	= __devexit_p(fec_drv_remove),
 };
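[Editor's note] FEC_QUIRK_SWAP_FRAME exists because, per the patch's own comment, the i.MX28 ENET-MAC presents frame data in the opposite endianness from what the driver assumes, so swap_buffer() byte-swaps the frame one 32-bit word at a time on both transmit and receive. A standalone demonstration of the same word swap (userspace stand-in for cpu_to_be32 on a little-endian host; the sample frame is made up):

	#include <stdio.h>
	#include <stdint.h>

	/* mirrors fec.c's swap_buffer(): swap each 32-bit word of a frame,
	 * rounding the length up to a whole word */
	static void *swap_buffer(void *bufaddr, int len)
	{
		int i;
		uint32_t *buf = bufaddr;

		for (i = 0; i < (len + 3) / 4; i++, buf++)
			*buf = __builtin_bswap32(*buf);	/* cpu_to_be32 on LE */

		return bufaddr;
	}

	int main(void)
	{
		/* union keeps the byte view aligned for 32-bit access */
		union {
			uint8_t  b[8];
			uint32_t w[2];
		} frame = { .b = { 0x11, 0x22, 0x33, 0x44,
				   0x55, 0x66, 0x77, 0x88 } };
		int i;

		swap_buffer(frame.w, sizeof(frame.b));
		for (i = 0; i < 8; i++)
			printf("%02x ", frame.b[i]);	/* 44 33 22 11 88 77 66 55 */
		printf("\n");
		return 0;
	}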
diff --git a/drivers/net/fec.h b/drivers/net/fec.h
index 2c48b25668d5..ace318df4c8d 100644
--- a/drivers/net/fec.h
+++ b/drivers/net/fec.h
@@ -14,7 +14,8 @@
 /****************************************************************************/
 
 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
-    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARCH_MXC)
+    defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
+    defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
 /*
 * Just figures, Motorola would have to change the offsets for
 * registers in the same peripheral device on different models
@@ -78,7 +79,7 @@
 /*
 * Define the buffer descriptor structure.
 */
-#ifdef CONFIG_ARCH_MXC
+#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
 struct bufdesc {
 	unsigned short cbd_datlen;	/* Data length */
 	unsigned short cbd_sc;	/* Control and status info */
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index cd2d72d825df..af09296ef0dd 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -3949,6 +3949,7 @@ static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
 		writel(flags, base + NvRegWakeUpFlags);
 		spin_unlock_irq(&np->lock);
 	}
+	device_set_wakeup_enable(&np->pci_dev->dev, np->wolenabled);
 	return 0;
 }
 
@@ -5488,14 +5489,10 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 	/* set mac address */
 	nv_copy_mac_to_hw(dev);
 
-	/* Workaround current PCI init glitch:  wakeup bits aren't
-	 * being set from PCI PM capability.
-	 */
-	device_init_wakeup(&pci_dev->dev, 1);
-
 	/* disable WOL */
 	writel(0, base + NvRegWakeUpFlags);
 	np->wolenabled = 0;
+	device_set_wakeup_enable(&pci_dev->dev, false);
 
 	if (id->driver_data & DEV_HAS_POWER_CNTRL) {
 
@@ -5746,8 +5743,9 @@ static void __devexit nv_remove(struct pci_dev *pci_dev)
 }
 
 #ifdef CONFIG_PM
-static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
+static int nv_suspend(struct device *device)
 {
+	struct pci_dev *pdev = to_pci_dev(device);
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
@@ -5763,25 +5761,17 @@ static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
 	for (i = 0; i <= np->register_size/sizeof(u32); i++)
 		np->saved_config_space[i] = readl(base + i*sizeof(u32));
 
-	pci_save_state(pdev);
-	pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled);
-	pci_disable_device(pdev);
-	pci_set_power_state(pdev, pci_choose_state(pdev, state));
 	return 0;
 }
 
-static int nv_resume(struct pci_dev *pdev)
+static int nv_resume(struct device *device)
 {
+	struct pci_dev *pdev = to_pci_dev(device);
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
 	int i, rc = 0;
 
-	pci_set_power_state(pdev, PCI_D0);
-	pci_restore_state(pdev);
-	/* ack any pending wake events, disable PME */
-	pci_enable_wake(pdev, PCI_D0, 0);
-
 	/* restore non-pci configuration space */
 	for (i = 0; i <= np->register_size/sizeof(u32); i++)
 		writel(np->saved_config_space[i], base+i*sizeof(u32));
@@ -5800,6 +5790,9 @@ static int nv_resume(struct pci_dev *pdev)
 	return rc;
 }
 
+static SIMPLE_DEV_PM_OPS(nv_pm_ops, nv_suspend, nv_resume);
+#define NV_PM_OPS (&nv_pm_ops)
+
 static void nv_shutdown(struct pci_dev *pdev)
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
@@ -5822,15 +5815,13 @@ static void nv_shutdown(struct pci_dev *pdev)
 	 * only put the device into D3 if we really go for poweroff.
 	 */
 	if (system_state == SYSTEM_POWER_OFF) {
-		if (pci_enable_wake(pdev, PCI_D3cold, np->wolenabled))
-			pci_enable_wake(pdev, PCI_D3hot, np->wolenabled);
+		pci_wake_from_d3(pdev, np->wolenabled);
 		pci_set_power_state(pdev, PCI_D3hot);
 	}
 }
 #else
-#define nv_suspend NULL
+#define NV_PM_OPS NULL
 #define nv_shutdown NULL
-#define nv_resume NULL
 #endif /* CONFIG_PM */
 
 static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = {
@@ -6002,9 +5993,8 @@ static struct pci_driver driver = {
 	.id_table = pci_tbl,
 	.probe = nv_probe,
 	.remove = __devexit_p(nv_remove),
-	.suspend = nv_suspend,
-	.resume = nv_resume,
 	.shutdown = nv_shutdown,
+	.driver.pm = NV_PM_OPS,
 };
 
 static int __init init_nic(void)
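
The forcedeth hunks above are a textbook conversion from the legacy pci_driver .suspend/.resume callbacks to dev_pm_ops: the PCI core now saves and restores config space and selects power states around the callbacks, so the driver shrinks to device-specific work and wake-on-LAN state is tracked with device_set_wakeup_enable(). A minimal sketch of the same pattern for a hypothetical driver "foo" (the foo_* names are illustrative, not from the patch):

/* Sketch of the dev_pm_ops conversion pattern applied above; "foo" is
 * a hypothetical driver. Note the absence of pci_save_state() and
 * pci_set_power_state(): the PCI core performs them around these
 * callbacks in the dev_pm_ops model.
 */
#include <linux/pci.h>
#include <linux/pm.h>

static int foo_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);

	dev_dbg(&pdev->dev, "quiescing device-specific state only\n");
	return 0;
}

static int foo_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);

	dev_dbg(&pdev->dev, "restoring device-specific state only\n");
	return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct pci_driver foo_driver = {
	.name = "foo",
	/* the legacy .suspend/.resume hooks are dropped entirely */
	.driver.pm = &foo_pm_ops,
};
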
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 4e7d1d0a2340..7d9ced0738c5 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -396,7 +396,7 @@ static unsigned char *add_mcs(unsigned char *bits, int bitrate,
 	while (p) {
 		if (p->bitrate == bitrate) {
 			memcpy(p->bits, bits, YAM_FPGA_SIZE);
-			return p->bits;
+			goto out;
 		}
 		p = p->next;
 	}
@@ -411,7 +411,7 @@ static unsigned char *add_mcs(unsigned char *bits, int bitrate,
 	p->bitrate = bitrate;
 	p->next = yam_data;
 	yam_data = p;
-
+ out:
 	release_firmware(fw);
 	return p->bits;
 }
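
The add_mcs() change above is a resource-leak fix, not a cleanup: the old early return skipped the trailing release_firmware(fw), leaking the firmware image every time an existing bitrate entry was updated. Routing both exits through one out: label is the usual kernel idiom; a standalone sketch of the same shape, assuming illustrative names and a fixed-size entry:

#include <stdlib.h>
#include <string.h>

#define FPGA_SIZE 64	/* stand-in for YAM_FPGA_SIZE */

/* Sketch of the single-exit cleanup idiom the fix adopts. "handle"
 * plays the role of the firmware reference that must be dropped on
 * every path out, found-and-updated or newly-allocated alike.
 */
static char *update_or_add(char **slot, const char *bits, void *handle)
{
	char *entry = *slot;

	if (entry) {			/* found an existing entry */
		memcpy(entry, bits, FPGA_SIZE);
		goto out;		/* no early return past the cleanup */
	}

	entry = malloc(FPGA_SIZE);	/* allocate a new entry */
	if (entry) {
		memcpy(entry, bits, FPGA_SIZE);
		*slot = entry;
	}
out:
	free(handle);			/* stands in for release_firmware(fw) */
	return entry;
}
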
diff --git a/drivers/net/irda/bfin_sir.h b/drivers/net/irda/bfin_sir.h
index b54a6f08db45..e3b285a67734 100644
--- a/drivers/net/irda/bfin_sir.h
+++ b/drivers/net/irda/bfin_sir.h
@@ -26,6 +26,8 @@
 #include <asm/cacheflush.h>
 #include <asm/dma.h>
 #include <asm/portmux.h>
+#include <mach/bfin_serial_5xx.h>
+#undef DRIVER_NAME
 
 #ifdef CONFIG_SIR_BFIN_DMA
 struct dma_rx_buf {
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 3ae30b8cb7d6..3b8c92463617 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -508,6 +508,8 @@ extern void ixgbe_free_rx_resources(struct ixgbe_ring *);
 extern void ixgbe_free_tx_resources(struct ixgbe_ring *);
 extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
 extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
+extern void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
+				   struct ixgbe_ring *);
 extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
 extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
 extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
@@ -524,26 +526,13 @@ extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
 extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc);
 extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc);
 extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
-						 struct ixgbe_atr_input *input,
+						 union ixgbe_atr_hash_dword input,
+						 union ixgbe_atr_hash_dword common,
 						 u8 queue);
 extern s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
-					       struct ixgbe_atr_input *input,
+					       union ixgbe_atr_input *input,
 					       struct ixgbe_atr_input_masks *input_masks,
 					       u16 soft_id, u8 queue);
-extern s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input,
-				       u16 vlan_id);
-extern s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input,
-					u32 src_addr);
-extern s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input,
-					u32 dst_addr);
-extern s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input,
-					u16 src_port);
-extern s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input,
-					u16 dst_port);
-extern s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input,
-					 u16 flex_byte);
-extern s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input,
-				      u8 l4type);
 extern void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
				    struct ixgbe_ring *ring);
 extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
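
The new signature-filter prototype replaces the 44-byte struct ixgbe_atr_input with two 32-bit dwords. Their definitions live in ixgbe_type.h, which is not shown in this hunk; judging from the fields the new code uses (formatted.flow_type, formatted.vlan_id, port.src/dst, ip, dword), the compressed type is plausibly shaped like this sketch:

/* Sketch of the assumed layout of the compressed hash input; the
 * authoritative definition is in drivers/net/ixgbe/ixgbe_type.h.
 * Every view overlays the same 32 bits, so one dword carries either
 * the formatted fields or one XOR-folded slice of the flow key.
 * u8/__be16/__be32 are the usual kernel typedefs.
 */
union ixgbe_atr_hash_dword {
	struct {
		u8     vm_pool;		/* VM pool index */
		u8     flow_type;	/* IXGBE_ATR_FLOW_TYPE_* */
		__be16 vlan_id;		/* VLAN tag, CFI bit cleared */
	} formatted;
	__be32 ip;			/* XOR-folded IPv4/IPv6 addresses */
	struct {
		__be16 src;		/* src port XOR flex bytes */
		__be16 dst;
	} port;
	__be32 dword;			/* raw access for hashing */
};
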
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index bfd3c227cd4a..8d316d9cd29d 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -1003,7 +1003,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
 		udelay(10);
 	}
 	if (i >= IXGBE_FDIRCMD_CMD_POLL) {
-		hw_dbg(hw ,"Flow Director previous command isn't complete, "
+		hw_dbg(hw, "Flow Director previous command isn't complete, "
 		       "aborting table re-initialization.\n");
 		return IXGBE_ERR_FDIR_REINIT_FAILED;
 	}
@@ -1113,13 +1113,10 @@ s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc)
 	/* Move the flexible bytes to use the ethertype - shift 6 words */
 	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
 
-	fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
 
 	/* Prime the keys for hashing */
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY,
-			htonl(IXGBE_ATR_BUCKET_HASH_KEY));
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
-			htonl(IXGBE_ATR_SIGNATURE_HASH_KEY));
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
 
 	/*
 	 * Poll init-done after we write the register.  Estimated times:
@@ -1209,10 +1206,8 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
 	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
 
 	/* Prime the keys for hashing */
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY,
-			htonl(IXGBE_ATR_BUCKET_HASH_KEY));
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
-			htonl(IXGBE_ATR_SIGNATURE_HASH_KEY));
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
 
 	/*
 	 * Poll init-done after we write the register.  Estimated times:
@@ -1251,8 +1246,8 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
  * @stream: input bitstream to compute the hash on
  * @key: 32-bit hash key
  **/
-static u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input,
-					u32 key)
+static u32 ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
+					u32 key)
 {
 	/*
 	 * The algorithm is as follows:
@@ -1272,410 +1267,250 @@ static u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input,
  * To simplify for programming, the algorithm is implemented
  * in software this way:
  *
- * Key[31:0], Stream[335:0]
+ * key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0]
+ *
+ * for (i = 0; i < 352; i+=32)
+ *	hi_hash_dword[31:0] ^= Stream[(i+31):i];
+ *
+ * lo_hash_dword[15:0]  ^= Stream[15:0];
+ * lo_hash_dword[15:0]  ^= hi_hash_dword[31:16];
+ * lo_hash_dword[31:16] ^= hi_hash_dword[15:0];
  *
- * tmp_key[11 * 32 - 1:0] = 11{Key[31:0] = key concatenated 11 times
- * int_key[350:0] = tmp_key[351:1]
- * int_stream[365:0] = Stream[14:0] | Stream[335:0] | Stream[335:321]
+ * hi_hash_dword[31:0] ^= Stream[351:320];
  *
- * hash[15:0] = 0;
- * for (i = 0; i < 351; i++) {
- *    if (int_key[i])
- *        hash ^= int_stream[(i + 15):i];
+ * if(key[0])
+ *	hash[15:0] ^= Stream[15:0];
+ *
+ * for (i = 0; i < 16; i++) {
+ *	if (key[i])
+ *		hash[15:0] ^= lo_hash_dword[(i+15):i];
+ *	if (key[i + 16])
+ *		hash[15:0] ^= hi_hash_dword[(i+15):i];
  * }
+ *
  */
+	__be32 common_hash_dword = 0;
+	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
+	u32 hash_result = 0;
+	u8 i;
 
-	union {
-		u64    fill[6];
-		u32    key[11];
-		u8     key_stream[44];
-	} tmp_key;
+	/* record the flow_vm_vlan bits as they are a key part to the hash */
+	flow_vm_vlan = ntohl(atr_input->dword_stream[0]);
 
-	u8   *stream = (u8 *)atr_input;
-	u8   int_key[44];      /* upper-most bit unused */
-	u8   hash_str[46];     /* upper-most 2 bits unused */
-	u16  hash_result = 0;
-	int  i, j, k, h;
+	/* generate common hash dword */
+	for (i = 10; i; i -= 2)
+		common_hash_dword ^= atr_input->dword_stream[i] ^
+				     atr_input->dword_stream[i - 1];
 
-	/*
-	 * Initialize the fill member to prevent warnings
-	 * on some compilers
-	 */
-	tmp_key.fill[0] = 0;
+	hi_hash_dword = ntohl(common_hash_dword);
 
-	/* First load the temporary key stream */
-	for (i = 0; i < 6; i++) {
-		u64 fillkey = ((u64)key << 32) | key;
-		tmp_key.fill[i] = fillkey;
-	}
+	/* low dword is word swapped version of common */
+	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
 
-	/*
-	 * Set the interim key for the hashing.  Bit 352 is unused, so we must
-	 * shift and compensate when building the key.
-	 */
+	/* apply flow ID/VM pool/VLAN ID bits to hash words */
+	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
 
-	int_key[0] = tmp_key.key_stream[0] >> 1;
-	for (i = 1, j = 0; i < 44; i++) {
-		unsigned int this_key = tmp_key.key_stream[j] << 7;
-		j++;
-		int_key[i] = (u8)(this_key | (tmp_key.key_stream[j] >> 1));
-	}
-
-	/*
-	 * Set the interim bit string for the hashing.  Bits 368 and 367 are
-	 * unused, so shift and compensate when building the string.
-	 */
-	hash_str[0] = (stream[40] & 0x7f) >> 1;
-	for (i = 1, j = 40; i < 46; i++) {
-		unsigned int this_str = stream[j] << 7;
-		j++;
-		if (j > 41)
-			j = 0;
-		hash_str[i] = (u8)(this_str | (stream[j] >> 1));
-	}
+	/* Process bits 0 and 16 */
+	if (key & 0x0001) hash_result ^= lo_hash_dword;
+	if (key & 0x00010000) hash_result ^= hi_hash_dword;
 
 	/*
-	 * Now compute the hash.  i is the index into hash_str, j is into our
-	 * key stream, k is counting the number of bits, and h interates within
-	 * each byte.
+	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
+	 * delay this because bit 0 of the stream should not be processed
+	 * so we do not add the vlan until after bit 0 was processed
 	 */
-	for (i = 45, j = 43, k = 0; k < 351 && i >= 2 && j >= 0; i--, j--) {
-		for (h = 0; h < 8 && k < 351; h++, k++) {
-			if (int_key[j] & (1 << h)) {
-				/*
-				 * Key bit is set, XOR in the current 16-bit
-				 * string.  Example of processing:
-				 *    h = 0,
-				 *      tmp = (hash_str[i - 2] & 0 << 16) |
-				 *            (hash_str[i - 1] & 0xff << 8) |
-				 *            (hash_str[i] & 0xff >> 0)
-				 *      So tmp = hash_str[15 + k:k], since the
-				 *      i + 2 clause rolls off the 16-bit value
-				 *    h = 7,
-				 *      tmp = (hash_str[i - 2] & 0x7f << 9) |
-				 *            (hash_str[i - 1] & 0xff << 1) |
-				 *            (hash_str[i] & 0x80 >> 7)
-				 */
-				int tmp = (hash_str[i] >> h);
-				tmp |= (hash_str[i - 1] << (8 - h));
-				tmp |= (int)(hash_str[i - 2] & ((1 << h) - 1))
-					     << (16 - h);
-				hash_result ^= (u16)tmp;
-			}
-		}
-	}
-
-	return hash_result;
-}
-
-/**
- * ixgbe_atr_set_vlan_id_82599 - Sets the VLAN id in the ATR input stream
- * @input: input stream to modify
- * @vlan: the VLAN id to load
- **/
-s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input, u16 vlan)
-{
-	input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] = vlan >> 8;
-	input->byte_stream[IXGBE_ATR_VLAN_OFFSET] = vlan & 0xff;
-
-	return 0;
-}
-
-/**
- * ixgbe_atr_set_src_ipv4_82599 - Sets the source IPv4 address
- * @input: input stream to modify
- * @src_addr: the IP address to load
- **/
-s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input, u32 src_addr)
-{
-	input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] = src_addr >> 24;
-	input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] =
-	    (src_addr >> 16) & 0xff;
-	input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] =
-	    (src_addr >> 8) & 0xff;
-	input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET] = src_addr & 0xff;
-
-	return 0;
-}
-
-/**
- * ixgbe_atr_set_dst_ipv4_82599 - Sets the destination IPv4 address
- * @input: input stream to modify
- * @dst_addr: the IP address to load
- **/
-s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 dst_addr)
-{
-	input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] = dst_addr >> 24;
-	input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] =
-	    (dst_addr >> 16) & 0xff;
-	input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] =
-	    (dst_addr >> 8) & 0xff;
-	input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET] = dst_addr & 0xff;
-
-	return 0;
-}
+	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
 
-/**
- * ixgbe_atr_set_src_port_82599 - Sets the source port
- * @input: input stream to modify
- * @src_port: the source port to load
- **/
-s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input, u16 src_port)
-{
-	input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1] = src_port >> 8;
-	input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] = src_port & 0xff;
-
-	return 0;
-}
-
-/**
- * ixgbe_atr_set_dst_port_82599 - Sets the destination port
- * @input: input stream to modify
- * @dst_port: the destination port to load
- **/
-s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input, u16 dst_port)
-{
-	input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1] = dst_port >> 8;
-	input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] = dst_port & 0xff;
-
-	return 0;
-}
-
-/**
- * ixgbe_atr_set_flex_byte_82599 - Sets the flexible bytes
- * @input: input stream to modify
- * @flex_bytes: the flexible bytes to load
- **/
-s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, u16 flex_byte)
-{
-	input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] = flex_byte >> 8;
-	input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET] = flex_byte & 0xff;
-
-	return 0;
-}
-
-/**
- * ixgbe_atr_set_l4type_82599 - Sets the layer 4 packet type
- * @input: input stream to modify
- * @l4type: the layer 4 type value to load
- **/
-s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, u8 l4type)
-{
-	input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET] = l4type;
-
-	return 0;
-}
-
-/**
- * ixgbe_atr_get_vlan_id_82599 - Gets the VLAN id from the ATR input stream
- * @input: input stream to search
- * @vlan: the VLAN id to load
- **/
-static s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input, u16 *vlan)
-{
-	*vlan = input->byte_stream[IXGBE_ATR_VLAN_OFFSET];
-	*vlan |= input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] << 8;
-
-	return 0;
-}
-
-/**
- * ixgbe_atr_get_src_ipv4_82599 - Gets the source IPv4 address
- * @input: input stream to search
- * @src_addr: the IP address to load
- **/
-static s32 ixgbe_atr_get_src_ipv4_82599(struct ixgbe_atr_input *input,
-					u32 *src_addr)
-{
-	*src_addr = input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET];
-	*src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] << 8;
-	*src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] << 16;
-	*src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] << 24;
-
-	return 0;
-}
 
-/**
- * ixgbe_atr_get_dst_ipv4_82599 - Gets the destination IPv4 address
- * @input: input stream to search
- * @dst_addr: the IP address to load
- **/
-static s32 ixgbe_atr_get_dst_ipv4_82599(struct ixgbe_atr_input *input,
-					u32 *dst_addr)
-{
-	*dst_addr = input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET];
-	*dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] << 8;
-	*dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] << 16;
-	*dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] << 24;
+	/* process the remaining 30 bits in the key 2 bits at a time */
+	for (i = 15; i; i-- ) {
+		if (key & (0x0001 << i)) hash_result ^= lo_hash_dword >> i;
+		if (key & (0x00010000 << i)) hash_result ^= hi_hash_dword >> i;
+	}
 
-	return 0;
+	return hash_result & IXGBE_ATR_HASH_MASK;
 }
 
-/**
- * ixgbe_atr_get_src_ipv6_82599 - Gets the source IPv6 address
- * @input: input stream to search
- * @src_addr_1: the first 4 bytes of the IP address to load
- * @src_addr_2: the second 4 bytes of the IP address to load
- * @src_addr_3: the third 4 bytes of the IP address to load
- * @src_addr_4: the fourth 4 bytes of the IP address to load
- **/
-static s32 ixgbe_atr_get_src_ipv6_82599(struct ixgbe_atr_input *input,
-					u32 *src_addr_1, u32 *src_addr_2,
-					u32 *src_addr_3, u32 *src_addr_4)
-{
-	*src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12];
-	*src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] << 8;
-	*src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] << 16;
-	*src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] << 24;
-
-	*src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8];
-	*src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] << 8;
-	*src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] << 16;
-	*src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] << 24;
-
-	*src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4];
-	*src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] << 8;
-	*src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] << 16;
-	*src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] << 24;
-
-	*src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET];
-	*src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] << 8;
-	*src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] << 16;
-	*src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] << 24;
-
-	return 0;
-}
+/*
+ * These defines allow us to quickly generate all of the necessary instructions
+ * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
+ * for values 0 through 15
+ */
+#define IXGBE_ATR_COMMON_HASH_KEY \
+	(IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
+#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
+do { \
+	u32 n = (_n); \
+	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
+		common_hash ^= lo_hash_dword >> n; \
+	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+		bucket_hash ^= lo_hash_dword >> n; \
+	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
+		sig_hash ^= lo_hash_dword << (16 - n); \
+	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
+		common_hash ^= hi_hash_dword >> n; \
+	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+		bucket_hash ^= hi_hash_dword >> n; \
+	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
+		sig_hash ^= hi_hash_dword << (16 - n); \
+} while (0);
 
 /**
- * ixgbe_atr_get_src_port_82599 - Gets the source port
- * @input: input stream to modify
- * @src_port: the source port to load
+ * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
+ * @stream: input bitstream to compute the hash on
  *
- * Even though the input is given in big-endian, the FDIRPORT registers
- * expect the ports to be programmed in little-endian. Hence the need to swap
- * endianness when retrieving the data. This can be confusing since the
- * internal hash engine expects it to be big-endian.
+ * This function is almost identical to the function above but contains
+ * several optomizations such as unwinding all of the loops, letting the
+ * compiler work out all of the conditional ifs since the keys are static
+ * defines, and computing two keys at once since the hashed dword stream
+ * will be the same for both keys.
  **/
-static s32 ixgbe_atr_get_src_port_82599(struct ixgbe_atr_input *input,
-					u16 *src_port)
+static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
+					    union ixgbe_atr_hash_dword common)
 {
-	*src_port = input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] << 8;
-	*src_port |= input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1];
+	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
+	u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
 
-	return 0;
-}
+	/* record the flow_vm_vlan bits as they are a key part to the hash */
+	flow_vm_vlan = ntohl(input.dword);
 
-/**
- * ixgbe_atr_get_dst_port_82599 - Gets the destination port
- * @input: input stream to modify
- * @dst_port: the destination port to load
- *
- * Even though the input is given in big-endian, the FDIRPORT registers
- * expect the ports to be programmed in little-endian. Hence the need to swap
- * endianness when retrieving the data. This can be confusing since the
- * internal hash engine expects it to be big-endian.
- **/
-static s32 ixgbe_atr_get_dst_port_82599(struct ixgbe_atr_input *input,
-					u16 *dst_port)
-{
-	*dst_port = input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] << 8;
-	*dst_port |= input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1];
+	/* generate common hash dword */
+	hi_hash_dword = ntohl(common.dword);
 
-	return 0;
-}
+	/* low dword is word swapped version of common */
+	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
 
-/**
- * ixgbe_atr_get_flex_byte_82599 - Gets the flexible bytes
- * @input: input stream to modify
- * @flex_bytes: the flexible bytes to load
- **/
-static s32 ixgbe_atr_get_flex_byte_82599(struct ixgbe_atr_input *input,
-					 u16 *flex_byte)
-{
-	*flex_byte = input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET];
-	*flex_byte |= input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] << 8;
+	/* apply flow ID/VM pool/VLAN ID bits to hash words */
+	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
 
-	return 0;
-}
+	/* Process bits 0 and 16 */
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(0);
 
-/**
- * ixgbe_atr_get_l4type_82599 - Gets the layer 4 packet type
- * @input: input stream to modify
- * @l4type: the layer 4 type value to load
- **/
-static s32 ixgbe_atr_get_l4type_82599(struct ixgbe_atr_input *input,
-				      u8 *l4type)
-{
-	*l4type = input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET];
+	/*
+	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
+	 * delay this because bit 0 of the stream should not be processed
+	 * so we do not add the vlan until after bit 0 was processed
+	 */
+	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
+
+	/* Process remaining 30 bit of the key */
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(15);
+
+	/* combine common_hash result with signature and bucket hashes */
+	bucket_hash ^= common_hash;
+	bucket_hash &= IXGBE_ATR_HASH_MASK;
 
-	return 0;
+	sig_hash ^= common_hash << 16;
+	sig_hash &= IXGBE_ATR_HASH_MASK << 16;
+
+	/* return completed signature hash */
+	return sig_hash ^ bucket_hash;
 }
 
 /**
  * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter
  * @hw: pointer to hardware structure
- * @stream: input bitstream
+ * @input: unique input dword
+ * @common: compressed common input dword
  * @queue: queue index to direct traffic to
  **/
 s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
-					  struct ixgbe_atr_input *input,
+					  union ixgbe_atr_hash_dword input,
+					  union ixgbe_atr_hash_dword common,
 					  u8 queue)
 {
 	u64  fdirhashcmd;
-	u64  fdircmd;
-	u32  fdirhash;
-	u16  bucket_hash, sig_hash;
-	u8   l4type;
-
-	bucket_hash = ixgbe_atr_compute_hash_82599(input,
-						   IXGBE_ATR_BUCKET_HASH_KEY);
-
-	/* bucket_hash is only 15 bits */
-	bucket_hash &= IXGBE_ATR_HASH_MASK;
-
-	sig_hash = ixgbe_atr_compute_hash_82599(input,
-						IXGBE_ATR_SIGNATURE_HASH_KEY);
-
-	/* Get the l4type in order to program FDIRCMD properly */
-	/* lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 */
-	ixgbe_atr_get_l4type_82599(input, &l4type);
+	u32  fdircmd;
 
 	/*
-	 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
-	 * is for FDIRCMD.  Then do a 64-bit register write from FDIRHASH.
+	 * Get the flow_type in order to program FDIRCMD properly
+	 * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
 	 */
-	fdirhash = sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;
-
-	fdircmd = (IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
-		   IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN);
-
-	switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
-	case IXGBE_ATR_L4TYPE_TCP:
-		fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP;
-		break;
-	case IXGBE_ATR_L4TYPE_UDP:
-		fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP;
-		break;
-	case IXGBE_ATR_L4TYPE_SCTP:
-		fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP;
+	switch (input.formatted.flow_type) {
+	case IXGBE_ATR_FLOW_TYPE_TCPV4:
+	case IXGBE_ATR_FLOW_TYPE_UDPV4:
+	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
+	case IXGBE_ATR_FLOW_TYPE_TCPV6:
+	case IXGBE_ATR_FLOW_TYPE_UDPV6:
+	case IXGBE_ATR_FLOW_TYPE_SCTPV6:
 		break;
 	default:
-		hw_dbg(hw, "Error on l4type input\n");
+		hw_dbg(hw, " Error on flow type input\n");
 		return IXGBE_ERR_CONFIG;
 	}
 
-	if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK)
-		fdircmd |= IXGBE_FDIRCMD_IPV6;
+	/* configure FDIRCMD register */
+	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
+		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+	fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
+	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
 
-	fdircmd |= ((u64)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT);
-	fdirhashcmd = ((fdircmd << 32) | fdirhash);
+	/*
+	 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
+	 * is for FDIRCMD.  Then do a 64-bit register write from FDIRHASH.
+	 */
+	fdirhashcmd = (u64)fdircmd << 32;
+	fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
 
 	IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
 
+	hw_dbg(hw, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
+
 	return 0;
 }
 
 /**
+ * ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks
+ * @input_mask: mask to be bit swapped
+ *
+ * The source and destination port masks for flow director are bit swapped
+ * in that bit 15 effects bit 0, 14 effects 1, 13, 2 etc.  In order to
+ * generate a correctly swapped value we need to bit swap the mask and that
+ * is what is accomplished by this function.
+ **/
+static u32 ixgbe_get_fdirtcpm_82599(struct ixgbe_atr_input_masks *input_masks)
+{
+	u32 mask = ntohs(input_masks->dst_port_mask);
+	mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
+	mask |= ntohs(input_masks->src_port_mask);
+	mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
+	mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
+	mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
+	return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
+}
+
+/*
+ * These two macros are meant to address the fact that we have registers
+ * that are either all or in part big-endian.  As a result on big-endian
+ * systems we will end up byte swapping the value to little-endian before
+ * it is byte swapped again and written to the hardware in the original
+ * big-endian format.
+ */
+#define IXGBE_STORE_AS_BE32(_value) \
+	(((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
+	 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))
+
+#define IXGBE_WRITE_REG_BE32(a, reg, value) \
+	IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(ntohl(value)))
+
+#define IXGBE_STORE_AS_BE16(_value) \
+	(((u16)(_value) >> 8) | ((u16)(_value) << 8))
+
+/**
  * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
  * @hw: pointer to hardware structure
  * @input: input bitstream
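
The rewritten comment block in ixgbe_atr_compute_hash_82599() is the heart of this patch: instead of bit-serially shifting a 352-bit stream against a replicated key, the new code XOR-folds the stream into two 32-bit dwords and then XORs shifted copies of those dwords under key-bit control, which is the same 16-bit hash computed far more cheaply. The IXGBE_COMPUTE_SIG_HASH_ITERATION macro then unrolls that loop and evaluates the bucket and signature keys in one pass, letting the compiler fold the constant key tests away. A standalone, hedged model of the folding (plain C99, IXGBE-independent; the stream values and key below are made up for the demo):

#include <stdint.h>
#include <stdio.h>

/* Standalone model of the folded ATR hash described in the comment
 * above. "stream" is the 11-dword flow key (dword 0 = flow/VM/VLAN
 * bits), kept in host byte order for simplicity; the real code works
 * on big-endian wire data. Sketch only, not the driver function.
 */
static uint32_t atr_hash_model(const uint32_t stream[11], uint32_t key)
{
	uint32_t hi = 0, lo, hash = 0;
	int i;

	/* fold dwords 1..10 into one common dword */
	for (i = 10; i; i -= 2)
		hi ^= stream[i] ^ stream[i - 1];

	/* low dword is the word-swapped common dword */
	lo = (hi >> 16) | (hi << 16);

	/* mix the flow/VM/VLAN dword into the high half first; bit 0
	 * of the stream must not see these bits, hence the split */
	hi ^= stream[0] ^ (stream[0] >> 16);

	if (key & 0x0001)
		hash ^= lo;
	if (key & 0x00010000)
		hash ^= hi;

	lo ^= stream[0] ^ (stream[0] << 16);

	/* remaining 30 key bits, two per iteration */
	for (i = 15; i; i--) {
		if (key & (0x0001u << i))
			hash ^= lo >> i;
		if (key & (0x00010000u << i))
			hash ^= hi >> i;
	}
	return hash & 0x7FFF;	/* the bucket hash keeps only 15 bits */
}

int main(void)
{
	uint32_t stream[11] = { 0x00010001, 0xc0a80001, 0xc0a80002,
				0, 0, 0, 0, 0, 0, 0x1f900050, 0 };
	/* 0x174d3614 is an arbitrary demo key, not an Intel constant */
	printf("hash = 0x%04x\n", atr_hash_model(stream, 0x174d3614));
	return 0;
}
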
@@ -1687,135 +1522,139 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
  * hardware writes must be protected from one another.
  **/
 s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
-					struct ixgbe_atr_input *input,
+					union ixgbe_atr_input *input,
 					struct ixgbe_atr_input_masks *input_masks,
 					u16 soft_id, u8 queue)
 {
-	u32 fdircmd = 0;
 	u32 fdirhash;
-	u32 src_ipv4 = 0, dst_ipv4 = 0;
-	u32 src_ipv6_1, src_ipv6_2, src_ipv6_3, src_ipv6_4;
-	u16 src_port, dst_port, vlan_id, flex_bytes;
-	u16 bucket_hash;
-	u8  l4type;
-	u8  fdirm = 0;
-
-	/* Get our input values */
-	ixgbe_atr_get_l4type_82599(input, &l4type);
+	u32 fdircmd;
+	u32 fdirport, fdirtcpm;
+	u32 fdirvlan;
+	/* start with VLAN, flex bytes, VM pool, and IPv6 destination masked */
+	u32 fdirm = IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP | IXGBE_FDIRM_FLEX |
+		    IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6;
 
 	/*
-	 * Check l4type formatting, and bail out before we touch the hardware
+	 * Check flow_type formatting, and bail out before we touch the hardware
 	 * if there's a configuration issue
 	 */
-	switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
-	case IXGBE_ATR_L4TYPE_TCP:
-		fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP;
-		break;
-	case IXGBE_ATR_L4TYPE_UDP:
-		fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP;
-		break;
-	case IXGBE_ATR_L4TYPE_SCTP:
-		fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP;
+	switch (input->formatted.flow_type) {
+	case IXGBE_ATR_FLOW_TYPE_IPV4:
+		/* use the L4 protocol mask for raw IPv4/IPv6 traffic */
+		fdirm |= IXGBE_FDIRM_L4P;
+	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
+		if (input_masks->dst_port_mask || input_masks->src_port_mask) {
+			hw_dbg(hw, " Error on src/dst port mask\n");
+			return IXGBE_ERR_CONFIG;
+		}
+	case IXGBE_ATR_FLOW_TYPE_TCPV4:
+	case IXGBE_ATR_FLOW_TYPE_UDPV4:
 		break;
 	default:
-		hw_dbg(hw, "Error on l4type input\n");
+		hw_dbg(hw, " Error on flow type input\n");
 		return IXGBE_ERR_CONFIG;
 	}
 
-	bucket_hash = ixgbe_atr_compute_hash_82599(input,
-						   IXGBE_ATR_BUCKET_HASH_KEY);
-
-	/* bucket_hash is only 15 bits */
-	bucket_hash &= IXGBE_ATR_HASH_MASK;
-
-	ixgbe_atr_get_vlan_id_82599(input, &vlan_id);
-	ixgbe_atr_get_src_port_82599(input, &src_port);
-	ixgbe_atr_get_dst_port_82599(input, &dst_port);
-	ixgbe_atr_get_flex_byte_82599(input, &flex_bytes);
-
-	fdirhash = soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;
-
-	/* Now figure out if we're IPv4 or IPv6 */
-	if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK) {
-		/* IPv6 */
-		ixgbe_atr_get_src_ipv6_82599(input, &src_ipv6_1, &src_ipv6_2,
-					     &src_ipv6_3, &src_ipv6_4);
-
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), src_ipv6_1);
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), src_ipv6_2);
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), src_ipv6_3);
-		/* The last 4 bytes is the same register as IPv4 */
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv6_4);
-
-		fdircmd |= IXGBE_FDIRCMD_IPV6;
-		fdircmd |= IXGBE_FDIRCMD_IPv6DMATCH;
-	} else {
-		/* IPv4 */
-		ixgbe_atr_get_src_ipv4_82599(input, &src_ipv4);
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv4);
-	}
-
-	ixgbe_atr_get_dst_ipv4_82599(input, &dst_ipv4);
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, dst_ipv4);
-
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, (vlan_id |
-			(flex_bytes << IXGBE_FDIRVLAN_FLEX_SHIFT)));
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, (src_port |
-			(dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT)));
-
 	/*
-	 * Program the relevant mask registers.  L4type cannot be
-	 * masked out in this implementation.
+	 * Program the relevant mask registers.  If src/dst_port or src/dst_addr
+	 * are zero, then assume a full mask for that field.  Also assume that
+	 * a VLAN of 0 is unspecified, so mask that out as well.  L4type
+	 * cannot be masked out in this implementation.
 	 *
 	 * This also assumes IPv4 only.  IPv6 masking isn't supported at this
 	 * point in time.
 	 */
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, input_masks->src_ip_mask);
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, input_masks->dst_ip_mask);
-
-	switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
-	case IXGBE_ATR_L4TYPE_TCP:
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, input_masks->src_port_mask);
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
-				(IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) |
-				(input_masks->dst_port_mask << 16)));
+
+	/* Program FDIRM */
+	switch (ntohs(input_masks->vlan_id_mask) & 0xEFFF) {
+	case 0xEFFF:
+		/* Unmask VLAN ID - bit 0 and fall through to unmask prio */
+		fdirm &= ~IXGBE_FDIRM_VLANID;
+	case 0xE000:
+		/* Unmask VLAN prio - bit 1 */
+		fdirm &= ~IXGBE_FDIRM_VLANP;
 		break;
-	case IXGBE_ATR_L4TYPE_UDP:
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, input_masks->src_port_mask);
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
-				(IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
-				(input_masks->src_port_mask << 16)));
+	case 0x0FFF:
+		/* Unmask VLAN ID - bit 0 */
+		fdirm &= ~IXGBE_FDIRM_VLANID;
 		break;
-	default:
-		/* this already would have failed above */
+	case 0x0000:
+		/* do nothing, vlans already masked */
 		break;
+	default:
+		hw_dbg(hw, " Error on VLAN mask\n");
+		return IXGBE_ERR_CONFIG;
 	}
 
-	/* Program the last mask register, FDIRM */
-	if (input_masks->vlan_id_mask)
-		/* Mask both VLAN and VLANP - bits 0 and 1 */
-		fdirm |= 0x3;
-
-	if (input_masks->data_mask)
-		/* Flex bytes need masking, so mask the whole thing - bit 4 */
-		fdirm |= 0x10;
+	if (input_masks->flex_mask & 0xFFFF) {
+		if ((input_masks->flex_mask & 0xFFFF) != 0xFFFF) {
+			hw_dbg(hw, " Error on flexible byte mask\n");
+			return IXGBE_ERR_CONFIG;
+		}
+		/* Unmask Flex Bytes - bit 4 */
+		fdirm &= ~IXGBE_FDIRM_FLEX;
+	}
 
 	/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
-	fdirm |= 0x24;
-
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
 
-	fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW;
-	fdircmd |= IXGBE_FDIRCMD_FILTER_UPDATE;
-	fdircmd |= IXGBE_FDIRCMD_LAST;
-	fdircmd |= IXGBE_FDIRCMD_QUEUE_EN;
-	fdircmd |= queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+	/* store the TCP/UDP port masks, bit reversed from port layout */
+	fdirtcpm = ixgbe_get_fdirtcpm_82599(input_masks);
+
+	/* write both the same so that UDP and TCP use the same mask */
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
+
+	/* store source and destination IP masks (big-enian) */
+	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
+			     ~input_masks->src_ip_mask[0]);
+	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
+			     ~input_masks->dst_ip_mask[0]);
+
+	/* Apply masks to input data */
+	input->formatted.vlan_id &= input_masks->vlan_id_mask;
+	input->formatted.flex_bytes &= input_masks->flex_mask;
+	input->formatted.src_port &= input_masks->src_port_mask;
+	input->formatted.dst_port &= input_masks->dst_port_mask;
+	input->formatted.src_ip[0] &= input_masks->src_ip_mask[0];
+	input->formatted.dst_ip[0] &= input_masks->dst_ip_mask[0];
+
+	/* record vlan (little-endian) and flex_bytes(big-endian) */
+	fdirvlan =
+		IXGBE_STORE_AS_BE16(ntohs(input->formatted.flex_bytes));
+	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
+	fdirvlan |= ntohs(input->formatted.vlan_id);
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
+
+	/* record source and destination port (little-endian)*/
+	fdirport = ntohs(input->formatted.dst_port);
+	fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
+	fdirport |= ntohs(input->formatted.src_port);
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
+
+	/* record the first 32 bits of the destination address (big-endian) */
+	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
+
+	/* record the source address (big-endian) */
+	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
+
+	/* configure FDIRCMD register */
+	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
+		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
+	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+
+	/* we only want the bucket hash so drop the upper 16 bits */
+	fdirhash = ixgbe_atr_compute_hash_82599(input,
+						IXGBE_ATR_BUCKET_HASH_KEY);
+	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
 
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
 
 	return 0;
 }
+
 /**
  * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
  * @hw: pointer to hardware structure
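
Two small helpers introduced above reward a closer look. ixgbe_get_fdirtcpm_82599() bit-reverses the packed 16-bit port masks because the FDIRTCPM register indexes mask bits in the opposite order from the port layout; the swap is the classic O(log n) divide-and-conquer reversal (swap adjacent bits, then pairs, nibbles, bytes, each half-word independently). IXGBE_STORE_AS_BE32() byte-swaps a value so that a register that is big-endian on the wire ends up correct after a little-endian register write. A standalone demonstration of both (a sketch, not driver code):

#include <stdint.h>
#include <stdio.h>

/* reverse the bits within each 16-bit half of a 32-bit word, as
 * ixgbe_get_fdirtcpm_82599() does for the packed src/dst port masks */
static uint32_t bitrev16_pair(uint32_t m)
{
	m = ((m & 0x55555555) << 1) | ((m & 0xAAAAAAAA) >> 1);
	m = ((m & 0x33333333) << 2) | ((m & 0xCCCCCCCC) >> 2);
	m = ((m & 0x0F0F0F0F) << 4) | ((m & 0xF0F0F0F0) >> 4);
	return ((m & 0x00FF00FF) << 8) | ((m & 0xFF00FF00) >> 8);
}

/* byte swap, the core of IXGBE_STORE_AS_BE32() */
static uint32_t store_as_be32(uint32_t v)
{
	return (v >> 24) | ((v & 0x00FF0000) >> 8) |
	       ((v & 0x0000FF00) << 8) | (v << 24);
}

int main(void)
{
	/* mask bit 0 of the low (src) half lands on bit 15,
	 * bit 0 of the high (dst) half lands on bit 31 */
	printf("%08x\n", bitrev16_pair(0x00000001));	/* 00008000 */
	printf("%08x\n", bitrev16_pair(0x00010000));	/* 80000000 */
	printf("%08x\n", store_as_be32(0x11223344));	/* 44332211 */
	return 0;
}
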
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 23ff23e8b393..2002ea88ca2a 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -1477,9 +1477,7 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
 	reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
 	reg_ctl &= ~IXGBE_RXCTRL_RXEN;
 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl);
-	reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->reg_idx));
-	reg_ctl &= ~IXGBE_RXDCTL_ENABLE;
-	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->reg_idx), reg_ctl);
+	ixgbe_disable_rx_queue(adapter, rx_ring);
 
 	/* now Tx */
 	reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx));
@@ -2279,10 +2277,11 @@ static int ixgbe_set_rx_ntuple(struct net_device *dev,
 			      struct ethtool_rx_ntuple *cmd)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
-	struct ethtool_rx_ntuple_flow_spec fs = cmd->fs;
-	struct ixgbe_atr_input input_struct;
+	struct ethtool_rx_ntuple_flow_spec *fs = &cmd->fs;
+	union ixgbe_atr_input input_struct;
 	struct ixgbe_atr_input_masks input_masks;
 	int target_queue;
+	int err;
 
 	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
 		return -EOPNOTSUPP;
@@ -2291,67 +2290,122 @@ static int ixgbe_set_rx_ntuple(struct net_device *dev,
 	 * Don't allow programming if the action is a queue greater than
 	 * the number of online Tx queues.
 	 */
-	if ((fs.action >= adapter->num_tx_queues) ||
-	    (fs.action < ETHTOOL_RXNTUPLE_ACTION_DROP))
+	if ((fs->action >= adapter->num_tx_queues) ||
+	    (fs->action < ETHTOOL_RXNTUPLE_ACTION_DROP))
 		return -EINVAL;
 
-	memset(&input_struct, 0, sizeof(struct ixgbe_atr_input));
+	memset(&input_struct, 0, sizeof(union ixgbe_atr_input));
 	memset(&input_masks, 0, sizeof(struct ixgbe_atr_input_masks));
 
-	input_masks.src_ip_mask = fs.m_u.tcp_ip4_spec.ip4src;
-	input_masks.dst_ip_mask = fs.m_u.tcp_ip4_spec.ip4dst;
-	input_masks.src_port_mask = fs.m_u.tcp_ip4_spec.psrc;
-	input_masks.dst_port_mask = fs.m_u.tcp_ip4_spec.pdst;
-	input_masks.vlan_id_mask = fs.vlan_tag_mask;
-	/* only use the lowest 2 bytes for flex bytes */
-	input_masks.data_mask = (fs.data_mask & 0xffff);
-
-	switch (fs.flow_type) {
+	/* record flow type */
+	switch (fs->flow_type) {
+	case IPV4_FLOW:
+		input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
+		break;
 	case TCP_V4_FLOW:
-		ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_TCP);
+		input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
 		break;
 	case UDP_V4_FLOW:
-		ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_UDP);
+		input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
 		break;
 	case SCTP_V4_FLOW:
-		ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_SCTP);
+		input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
 		break;
 	default:
 		return -1;
 	}
 
-	/* Mask bits from the inputs based on user-supplied mask */
-	ixgbe_atr_set_src_ipv4_82599(&input_struct,
-	            (fs.h_u.tcp_ip4_spec.ip4src & ~fs.m_u.tcp_ip4_spec.ip4src));
-	ixgbe_atr_set_dst_ipv4_82599(&input_struct,
-	            (fs.h_u.tcp_ip4_spec.ip4dst & ~fs.m_u.tcp_ip4_spec.ip4dst));
-	/* 82599 expects these to be byte-swapped for perfect filtering */
-	ixgbe_atr_set_src_port_82599(&input_struct,
-	       ((ntohs(fs.h_u.tcp_ip4_spec.psrc)) & ~fs.m_u.tcp_ip4_spec.psrc));
-	ixgbe_atr_set_dst_port_82599(&input_struct,
-	       ((ntohs(fs.h_u.tcp_ip4_spec.pdst)) & ~fs.m_u.tcp_ip4_spec.pdst));
-
-	/* VLAN and Flex bytes are either completely masked or not */
-	if (!fs.vlan_tag_mask)
-		ixgbe_atr_set_vlan_id_82599(&input_struct, fs.vlan_tag);
-
-	if (!input_masks.data_mask)
-		/* make sure we only use the first 2 bytes of user data */
-		ixgbe_atr_set_flex_byte_82599(&input_struct,
-					      (fs.data & 0xffff));
+	/* copy vlan tag minus the CFI bit */
+	if ((fs->vlan_tag & 0xEFFF) || (~fs->vlan_tag_mask & 0xEFFF)) {
+		input_struct.formatted.vlan_id = htons(fs->vlan_tag & 0xEFFF);
+		if (!fs->vlan_tag_mask) {
+			input_masks.vlan_id_mask = htons(0xEFFF);
+		} else {
+			switch (~fs->vlan_tag_mask & 0xEFFF) {
+			/* all of these are valid vlan-mask values */
+			case 0xEFFF:
+			case 0xE000:
+			case 0x0FFF:
+			case 0x0000:
+				input_masks.vlan_id_mask =
+					htons(~fs->vlan_tag_mask);
+				break;
+			/* exit with error if vlan-mask is invalid */
+			default:
+				e_err(drv, "Partial VLAN ID or "
+				      "priority mask in vlan-mask is not "
+				      "supported by hardware\n");
+				return -1;
+			}
+		}
+	}
+
+	/* make sure we only use the first 2 bytes of user data */
+	if ((fs->data & 0xFFFF) || (~fs->data_mask & 0xFFFF)) {
+		input_struct.formatted.flex_bytes = htons(fs->data & 0xFFFF);
+		if (!(fs->data_mask & 0xFFFF)) {
+			input_masks.flex_mask = 0xFFFF;
+		} else if (~fs->data_mask & 0xFFFF) {
+			e_err(drv, "Partial user-def-mask is not "
+			      "supported by hardware\n");
+			return -1;
+		}
+	}
+
+	/*
+	 * Copy input into formatted structures
+	 *
+	 * These assignments are based on the following logic
+	 * If neither input or mask are set assume value is masked out.
+	 * If input is set, but mask is not mask should default to accept all.
+	 * If input is not set, but mask is set then mask likely results in 0.
+	 * If input is set and mask is set then assign both.
+	 */
+	if (fs->h_u.tcp_ip4_spec.ip4src || ~fs->m_u.tcp_ip4_spec.ip4src) {
+		input_struct.formatted.src_ip[0] = fs->h_u.tcp_ip4_spec.ip4src;
+		if (!fs->m_u.tcp_ip4_spec.ip4src)
+			input_masks.src_ip_mask[0] = 0xFFFFFFFF;
+		else
+			input_masks.src_ip_mask[0] =
+				~fs->m_u.tcp_ip4_spec.ip4src;
+	}
+	if (fs->h_u.tcp_ip4_spec.ip4dst || ~fs->m_u.tcp_ip4_spec.ip4dst) {
+		input_struct.formatted.dst_ip[0] = fs->h_u.tcp_ip4_spec.ip4dst;
+		if (!fs->m_u.tcp_ip4_spec.ip4dst)
+			input_masks.dst_ip_mask[0] = 0xFFFFFFFF;
+		else
+			input_masks.dst_ip_mask[0] =
+				~fs->m_u.tcp_ip4_spec.ip4dst;
+	}
+	if (fs->h_u.tcp_ip4_spec.psrc || ~fs->m_u.tcp_ip4_spec.psrc) {
+		input_struct.formatted.src_port = fs->h_u.tcp_ip4_spec.psrc;
+		if (!fs->m_u.tcp_ip4_spec.psrc)
+			input_masks.src_port_mask = 0xFFFF;
+		else
+			input_masks.src_port_mask = ~fs->m_u.tcp_ip4_spec.psrc;
+	}
+	if (fs->h_u.tcp_ip4_spec.pdst || ~fs->m_u.tcp_ip4_spec.pdst) {
+		input_struct.formatted.dst_port = fs->h_u.tcp_ip4_spec.pdst;
+		if (!fs->m_u.tcp_ip4_spec.pdst)
+			input_masks.dst_port_mask = 0xFFFF;
+		else
+			input_masks.dst_port_mask = ~fs->m_u.tcp_ip4_spec.pdst;
+	}
 
 	/* determine if we need to drop or route the packet */
-	if (fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP)
+	if (fs->action == ETHTOOL_RXNTUPLE_ACTION_DROP)
 		target_queue = MAX_RX_QUEUES - 1;
 	else
-		target_queue = fs.action;
+		target_queue = fs->action;
 
 	spin_lock(&adapter->fdir_perfect_lock);
-	ixgbe_fdir_add_perfect_filter_82599(&adapter->hw, &input_struct,
-					    &input_masks, 0, target_queue);
+	err = ixgbe_fdir_add_perfect_filter_82599(&adapter->hw,
+						  &input_struct,
+						  &input_masks, 0,
+						  target_queue);
 	spin_unlock(&adapter->fdir_perfect_lock);
 
-	return 0;
+	return err ? -1 : 0;
 }
 
 static const struct ethtool_ops ixgbe_ethtool_ops = {
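
The long comment added in ixgbe_set_rx_ntuple() ("If neither input or mask are set...") encodes a simple per-field truth table: value 0 with an all-ignore mask means skip the field; a value with no mask means exact match, so the mask defaults to all ones; and since ethtool supplies "don't care" masks while the hardware wants "care" masks, the stored mask is the inversion. A compact model of that defaulting rule (sketch, hypothetical helper name):

#include <stdint.h>
#include <stdio.h>

/* Model of the ntuple field-defaulting logic above. Returns the
 * "care" mask to program for one 32-bit field, or 0 when the field
 * should be ignored entirely.
 */
static uint32_t field_mask(uint32_t value, uint32_t user_mask)
{
	if (!value && !~user_mask)
		return 0;		/* unset + all-ignore: skip field */
	if (!user_mask)
		return 0xFFFFFFFF;	/* value set, no mask: exact match */
	return ~user_mask;		/* invert ethtool's ignore-mask */
}

int main(void)
{
	printf("%08x\n", field_mask(0xc0a80001, 0));		/* ffffffff */
	printf("%08x\n", field_mask(0xc0a80000, 0x000000ff));	/* ffffff00 */
	printf("%08x\n", field_mask(0, 0xffffffff));		/* 00000000 */
	return 0;
}
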
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 38ab4f3f8197..a060610a42db 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -3024,6 +3024,36 @@ static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
 	}
 }
 
+void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
+			    struct ixgbe_ring *ring)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	int wait_loop = IXGBE_MAX_RX_DESC_POLL;
+	u32 rxdctl;
+	u8 reg_idx = ring->reg_idx;
+
+	rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+	rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+
+	/* write value back with RXDCTL.ENABLE bit cleared */
+	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
+
+	if (hw->mac.type == ixgbe_mac_82598EB &&
+	    !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
+		return;
+
+	/* the hardware may take up to 100us to really disable the rx queue */
+	do {
+		udelay(10);
+		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+	} while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
+
+	if (!wait_loop) {
+		e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within "
+		      "the polling period\n", reg_idx);
+	}
+}
+
 void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
 			     struct ixgbe_ring *ring)
 {
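
ixgbe_disable_rx_queue() exists because clearing RXDCTL.ENABLE is not synchronous: as the comment notes, the hardware can take up to 100us to actually stop the queue, so the function polls the bit back (up to IXGBE_MAX_RX_DESC_POLL iterations of udelay(10)) before callers touch ring state; the 82598 link-down special case skips the poll because that MAC cannot complete the disable without link. The general disable-then-poll shape, as a standalone sketch with a toy MMIO model:

#include <stdint.h>
#include <stdio.h>

#define MAX_POLL 10	/* in the spirit of IXGBE_MAX_RX_DESC_POLL */

/* Toy MMIO model: the "queue" keeps reporting enabled for a few reads
 * after the disable is requested, like hardware that needs time to
 * drain. Sketch only; a driver uses readl()/writel() on real registers.
 */
static uint32_t reg = 0x1;	/* bit 0 plays RXDCTL.ENABLE */
static int disable_pending, reads;

static uint32_t read_reg(void)
{
	if (disable_pending && ++reads >= 3)
		reg &= ~1u;		/* queue finally reports stopped */
	return reg;
}

static void write_reg(uint32_t v)
{
	if (!(v & 1u))
		disable_pending = 1;	/* hardware starts draining */
}

static int disable_queue(void)
{
	int wait_loop = MAX_POLL;
	uint32_t ctl = read_reg();

	write_reg(ctl & ~1u);		/* request the disable */

	do {
		/* the driver udelay(10)s here between polls */
		ctl = read_reg();
	} while (--wait_loop && (ctl & 1u));

	return wait_loop ? 0 : -1;	/* -1: never saw the bit clear */
}

int main(void)
{
	printf("disable_queue() = %d\n", disable_queue());
	return 0;
}
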
@@ -3034,9 +3064,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
 
 	/* disable queue to avoid issues while updating state */
 	rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
-	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx),
-			rxdctl & ~IXGBE_RXDCTL_ENABLE);
-	IXGBE_WRITE_FLUSH(hw);
+	ixgbe_disable_rx_queue(adapter, ring);
 
 	IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
 	IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
@@ -4064,7 +4092,11 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
 
-	IXGBE_WRITE_FLUSH(hw);
+	/* disable all enabled rx queues */
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		/* this call also flushes the previous write */
+		ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);
+
 	msleep(10);
 
 	netif_tx_stop_all_queues(netdev);
@@ -4789,6 +4821,12 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
 
 	adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
 	adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
+	if (adapter->flags & (IXGBE_FLAG_FDIR_HASH_CAPABLE |
+			      IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
+		e_err(probe,
+		      "Flow Director is not supported while multiple "
+		      "queues are disabled.  Disabling Flow Director\n");
+	}
 	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
 	adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
 	adapter->atr_sample_rate = 0;
@@ -5094,16 +5132,11 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
 		adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
 	if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
 		adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
-	if (dev->features & NETIF_F_NTUPLE) {
-		/* Flow Director perfect filter enabled */
-		adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
-		adapter->atr_sample_rate = 0;
-		spin_lock_init(&adapter->fdir_perfect_lock);
-	} else {
-		/* Flow Director hash filters enabled */
-		adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
-		adapter->atr_sample_rate = 20;
-	}
+	/* n-tuple support exists, always init our spinlock */
+	spin_lock_init(&adapter->fdir_perfect_lock);
+	/* Flow Director hash filters enabled */
+	adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+	adapter->atr_sample_rate = 20;
 	adapter->ring_feature[RING_F_FDIR].indices =
 						 IXGBE_MAX_FDIR_INDICES;
 	adapter->fdir_pballoc = 0;
@@ -6474,38 +6507,92 @@ static void ixgbe_tx_queue(struct ixgbe_ring *tx_ring,
6474 writel(i, tx_ring->tail); 6507 writel(i, tx_ring->tail);
6475} 6508}
6476 6509
6477static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb, 6510static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb,
6478 u8 queue, u32 tx_flags, __be16 protocol) 6511 u32 tx_flags, __be16 protocol)
6479{ 6512{
6480 struct ixgbe_atr_input atr_input; 6513 struct ixgbe_q_vector *q_vector = ring->q_vector;
6481 struct iphdr *iph = ip_hdr(skb); 6514 union ixgbe_atr_hash_dword input = { .dword = 0 };
6482 struct ethhdr *eth = (struct ethhdr *)skb->data; 6515 union ixgbe_atr_hash_dword common = { .dword = 0 };
6516 union {
6517 unsigned char *network;
6518 struct iphdr *ipv4;
6519 struct ipv6hdr *ipv6;
6520 } hdr;
6483 struct tcphdr *th; 6521 struct tcphdr *th;
6484 u16 vlan_id; 6522 __be16 vlan_id;
6485 6523
 6486 /* Right now, we support IPv4 w/ TCP only */ 6524 /* if ring doesn't have an interrupt vector, cannot perform ATR */
6487 if (protocol != htons(ETH_P_IP) || 6525 if (!q_vector)
6488 iph->protocol != IPPROTO_TCP)
6489 return; 6526 return;
6490 6527
6491 memset(&atr_input, 0, sizeof(struct ixgbe_atr_input)); 6528 /* do nothing if sampling is disabled */
6529 if (!ring->atr_sample_rate)
6530 return;
6492 6531
6493 vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >> 6532 ring->atr_count++;
6494 IXGBE_TX_FLAGS_VLAN_SHIFT; 6533
6534 /* snag network header to get L4 type and address */
6535 hdr.network = skb_network_header(skb);
6536
6537 /* Currently only IPv4/IPv6 with TCP is supported */
6538 if ((protocol != __constant_htons(ETH_P_IPV6) ||
6539 hdr.ipv6->nexthdr != IPPROTO_TCP) &&
6540 (protocol != __constant_htons(ETH_P_IP) ||
6541 hdr.ipv4->protocol != IPPROTO_TCP))
6542 return;
6495 6543
6496 th = tcp_hdr(skb); 6544 th = tcp_hdr(skb);
6497 6545
6498 ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id); 6546 /* skip this packet since the socket is closing */
6499 ixgbe_atr_set_src_port_82599(&atr_input, th->dest); 6547 if (th->fin)
6500 ixgbe_atr_set_dst_port_82599(&atr_input, th->source); 6548 return;
6501 ixgbe_atr_set_flex_byte_82599(&atr_input, eth->h_proto); 6549
6502 ixgbe_atr_set_l4type_82599(&atr_input, IXGBE_ATR_L4TYPE_TCP); 6550 /* sample on all syn packets or once every atr sample count */
6503 /* src and dst are inverted, think how the receiver sees them */ 6551 if (!th->syn && (ring->atr_count < ring->atr_sample_rate))
6504 ixgbe_atr_set_src_ipv4_82599(&atr_input, iph->daddr); 6552 return;
6505 ixgbe_atr_set_dst_ipv4_82599(&atr_input, iph->saddr); 6553
6554 /* reset sample count */
6555 ring->atr_count = 0;
6556
6557 vlan_id = htons(tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT);
6558
6559 /*
6560 * src and dst are inverted, think how the receiver sees them
6561 *
6562 * The input is broken into two sections, a non-compressed section
6563 * containing vm_pool, vlan_id, and flow_type. The rest of the data
6564 * is XORed together and stored in the compressed dword.
6565 */
6566 input.formatted.vlan_id = vlan_id;
6567
6568 /*
6569 * since src port and flex bytes occupy the same word XOR them together
6570 * and write the value to source port portion of compressed dword
6571 */
6572 if (vlan_id)
6573 common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q);
6574 else
6575 common.port.src ^= th->dest ^ protocol;
6576 common.port.dst ^= th->source;
6577
6578 if (protocol == __constant_htons(ETH_P_IP)) {
6579 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
6580 common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
6581 } else {
6582 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
6583 common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
6584 hdr.ipv6->saddr.s6_addr32[1] ^
6585 hdr.ipv6->saddr.s6_addr32[2] ^
6586 hdr.ipv6->saddr.s6_addr32[3] ^
6587 hdr.ipv6->daddr.s6_addr32[0] ^
6588 hdr.ipv6->daddr.s6_addr32[1] ^
6589 hdr.ipv6->daddr.s6_addr32[2] ^
6590 hdr.ipv6->daddr.s6_addr32[3];
6591 }
6506 6592
6507 /* This assumes the Rx queue and Tx queue are bound to the same CPU */ 6593 /* This assumes the Rx queue and Tx queue are bound to the same CPU */
6508 ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue); 6594 ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
6595 input, common, ring->queue_index);
6509} 6596}
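
The rewritten ixgbe_atr() no longer fills a 42-byte input stream field by field; it folds the flow into two compressed dwords, XORing ports with the flex/protocol word and both addresses with each other. A self-contained sketch of that compression for the IPv4/TCP case; addresses and ports are examples, and the packing of the non-compressed word is illustrative since the driver uses a union:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

int main(void)
{
	uint32_t saddr = htonl(0xc0a80001);  /* 192.168.0.1, example */
	uint32_t daddr = htonl(0xc0a80002);  /* 192.168.0.2, example */
	uint16_t sport = htons(34567);
	uint16_t dport = htons(80);
	uint16_t proto = htons(0x0800);      /* ETH_P_IP */

	/* non-compressed dword: flow type (TCPv4 = 0x2) and vlan id */
	uint32_t input = 0x2u << 8;

	/* compressed dword 1: ports and the flex/protocol word XORed */
	uint32_t ports = ((uint32_t)(sport ^ proto) << 16) | dport;
	/* compressed dword 2: both addresses folded into one value */
	uint32_t ip = saddr ^ daddr;

	printf("input=0x%08x ports=0x%08x ip=0x%08x\n",
	       (unsigned)input, (unsigned)ports, (unsigned)ip);
	return 0;
}
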
6510 6597
6511static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size) 6598static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
@@ -6676,16 +6763,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
6676 count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first, hdr_len); 6763 count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first, hdr_len);
6677 if (count) { 6764 if (count) {
6678 /* add the ATR filter if ATR is on */ 6765 /* add the ATR filter if ATR is on */
6679 if (tx_ring->atr_sample_rate) { 6766 if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
6680 ++tx_ring->atr_count; 6767 ixgbe_atr(tx_ring, skb, tx_flags, protocol);
6681 if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
6682 test_bit(__IXGBE_TX_FDIR_INIT_DONE,
6683 &tx_ring->state)) {
6684 ixgbe_atr(adapter, skb, tx_ring->queue_index,
6685 tx_flags, protocol);
6686 tx_ring->atr_count = 0;
6687 }
6688 }
6689 txq = netdev_get_tx_queue(netdev, tx_ring->queue_index); 6768 txq = netdev_get_tx_queue(netdev, tx_ring->queue_index);
6690 txq->tx_bytes += skb->len; 6769 txq->tx_bytes += skb->len;
6691 txq->tx_packets++; 6770 txq->tx_packets++;
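
The transmit hot path now delegates all sampling bookkeeping to ixgbe_atr() itself: every packet bumps atr_count, SYNs are always sampled, and other packets are sampled once per atr_sample_rate. A compact sketch of that policy:

#include <stdbool.h>
#include <stdio.h>

struct ring { unsigned atr_count, atr_sample_rate; };

static bool should_sample(struct ring *r, bool syn)
{
	if (!r->atr_sample_rate)
		return false;                    /* sampling disabled */
	r->atr_count++;
	if (!syn && r->atr_count < r->atr_sample_rate)
		return false;
	r->atr_count = 0;                        /* sampled: restart window */
	return true;
}

int main(void)
{
	struct ring r = { .atr_count = 0, .atr_sample_rate = 4 };

	for (int i = 1; i <= 8; i++)
		if (should_sample(&r, i == 1))   /* packet 1 is the SYN */
			printf("sampled packet %d\n", i);
	return 0;
}
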
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 446f3467d3c7..fd3358f54139 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -1947,10 +1947,9 @@ enum ixgbe_fdir_pballoc_type {
1947#define IXGBE_FDIRM_VLANID 0x00000001 1947#define IXGBE_FDIRM_VLANID 0x00000001
1948#define IXGBE_FDIRM_VLANP 0x00000002 1948#define IXGBE_FDIRM_VLANP 0x00000002
1949#define IXGBE_FDIRM_POOL 0x00000004 1949#define IXGBE_FDIRM_POOL 0x00000004
1950#define IXGBE_FDIRM_L3P 0x00000008 1950#define IXGBE_FDIRM_L4P 0x00000008
1951#define IXGBE_FDIRM_L4P 0x00000010 1951#define IXGBE_FDIRM_FLEX 0x00000010
1952#define IXGBE_FDIRM_FLEX 0x00000020 1952#define IXGBE_FDIRM_DIPv6 0x00000020
1953#define IXGBE_FDIRM_DIPv6 0x00000040
1954 1953
1955#define IXGBE_FDIRFREE_FREE_MASK 0xFFFF 1954#define IXGBE_FDIRFREE_FREE_MASK 0xFFFF
1956#define IXGBE_FDIRFREE_FREE_SHIFT 0 1955#define IXGBE_FDIRFREE_FREE_SHIFT 0
@@ -1990,6 +1989,7 @@ enum ixgbe_fdir_pballoc_type {
1990#define IXGBE_FDIRCMD_LAST 0x00000800 1989#define IXGBE_FDIRCMD_LAST 0x00000800
1991#define IXGBE_FDIRCMD_COLLISION 0x00001000 1990#define IXGBE_FDIRCMD_COLLISION 0x00001000
1992#define IXGBE_FDIRCMD_QUEUE_EN 0x00008000 1991#define IXGBE_FDIRCMD_QUEUE_EN 0x00008000
1992#define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT 5
1993#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16 1993#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16
1994#define IXGBE_FDIRCMD_VT_POOL_SHIFT 24 1994#define IXGBE_FDIRCMD_VT_POOL_SHIFT 24
1995#define IXGBE_FDIR_INIT_DONE_POLL 10 1995#define IXGBE_FDIR_INIT_DONE_POLL 10
@@ -2147,51 +2147,80 @@ typedef u32 ixgbe_physical_layer;
2147#define FC_LOW_WATER(MTU) (2 * (2 * PAUSE_MTU(MTU) + PAUSE_RTT)) 2147#define FC_LOW_WATER(MTU) (2 * (2 * PAUSE_MTU(MTU) + PAUSE_RTT))
2148 2148
2149/* Software ATR hash keys */ 2149/* Software ATR hash keys */
2150#define IXGBE_ATR_BUCKET_HASH_KEY 0xE214AD3D 2150#define IXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2
2151#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x14364D17 2151#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614
2152
2153/* Software ATR input stream offsets and masks */
2154#define IXGBE_ATR_VLAN_OFFSET 0
2155#define IXGBE_ATR_SRC_IPV6_OFFSET 2
2156#define IXGBE_ATR_SRC_IPV4_OFFSET 14
2157#define IXGBE_ATR_DST_IPV6_OFFSET 18
2158#define IXGBE_ATR_DST_IPV4_OFFSET 30
2159#define IXGBE_ATR_SRC_PORT_OFFSET 34
2160#define IXGBE_ATR_DST_PORT_OFFSET 36
2161#define IXGBE_ATR_FLEX_BYTE_OFFSET 38
2162#define IXGBE_ATR_VM_POOL_OFFSET 40
2163#define IXGBE_ATR_L4TYPE_OFFSET 41
2164 2152
2153/* Software ATR input stream values and masks */
2154#define IXGBE_ATR_HASH_MASK 0x7fff
2165#define IXGBE_ATR_L4TYPE_MASK 0x3 2155#define IXGBE_ATR_L4TYPE_MASK 0x3
2166#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4
2167#define IXGBE_ATR_L4TYPE_UDP 0x1 2156#define IXGBE_ATR_L4TYPE_UDP 0x1
2168#define IXGBE_ATR_L4TYPE_TCP 0x2 2157#define IXGBE_ATR_L4TYPE_TCP 0x2
2169#define IXGBE_ATR_L4TYPE_SCTP 0x3 2158#define IXGBE_ATR_L4TYPE_SCTP 0x3
2170#define IXGBE_ATR_HASH_MASK 0x7fff 2159#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4
2160enum ixgbe_atr_flow_type {
2161 IXGBE_ATR_FLOW_TYPE_IPV4 = 0x0,
2162 IXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1,
2163 IXGBE_ATR_FLOW_TYPE_TCPV4 = 0x2,
2164 IXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3,
2165 IXGBE_ATR_FLOW_TYPE_IPV6 = 0x4,
2166 IXGBE_ATR_FLOW_TYPE_UDPV6 = 0x5,
2167 IXGBE_ATR_FLOW_TYPE_TCPV6 = 0x6,
2168 IXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7,
2169};
2171 2170
2172/* Flow Director ATR input struct. */ 2171/* Flow Director ATR input struct. */
2173struct ixgbe_atr_input { 2172union ixgbe_atr_input {
2174 /* Byte layout in order, all values with MSB first: 2173 /*
2174 * Byte layout in order, all values with MSB first:
2175 * 2175 *
2176 * vm_pool - 1 byte
2177 * flow_type - 1 byte
2176 * vlan_id - 2 bytes 2178 * vlan_id - 2 bytes
2177 * src_ip - 16 bytes 2179 * src_ip - 16 bytes
2178 * dst_ip - 16 bytes 2180 * dst_ip - 16 bytes
2179 * src_port - 2 bytes 2181 * src_port - 2 bytes
2180 * dst_port - 2 bytes 2182 * dst_port - 2 bytes
2181 * flex_bytes - 2 bytes 2183 * flex_bytes - 2 bytes
 2182 * vm_pool - 1 byte 2184 * rsvd0 - 2 bytes - reserved space, must be 0.
2183 * l4type - 1 byte
2184 */ 2185 */
2185 u8 byte_stream[42]; 2186 struct {
2187 u8 vm_pool;
2188 u8 flow_type;
2189 __be16 vlan_id;
2190 __be32 dst_ip[4];
2191 __be32 src_ip[4];
2192 __be16 src_port;
2193 __be16 dst_port;
2194 __be16 flex_bytes;
2195 __be16 rsvd0;
2196 } formatted;
2197 __be32 dword_stream[11];
2198};
2199
2200/* Flow Director compressed ATR hash input struct */
2201union ixgbe_atr_hash_dword {
2202 struct {
2203 u8 vm_pool;
2204 u8 flow_type;
2205 __be16 vlan_id;
2206 } formatted;
2207 __be32 ip;
2208 struct {
2209 __be16 src;
2210 __be16 dst;
2211 } port;
2212 __be16 flex_bytes;
2213 __be32 dword;
2186}; 2214};
2187 2215
2188struct ixgbe_atr_input_masks { 2216struct ixgbe_atr_input_masks {
2189 u32 src_ip_mask; 2217 __be16 rsvd0;
2190 u32 dst_ip_mask; 2218 __be16 vlan_id_mask;
2191 u16 src_port_mask; 2219 __be32 dst_ip_mask[4];
2192 u16 dst_port_mask; 2220 __be32 src_ip_mask[4];
2193 u16 vlan_id_mask; 2221 __be16 src_port_mask;
2194 u16 data_mask; 2222 __be16 dst_port_mask;
2223 __be16 flex_mask;
2195}; 2224};
2196 2225
2197enum ixgbe_eeprom_type { 2226enum ixgbe_eeprom_type {
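
The unions above let the same 32 bits be filled as vm_pool/flow_type/vlan_id fields and then handed to hardware as one dword. A user-space sketch of that aliasing; field widths mirror the header, the values are examples:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

union atr_hash_dword {
	struct {
		uint8_t  vm_pool;
		uint8_t  flow_type;
		uint16_t vlan_id;    /* kept big-endian, as on the wire */
	} formatted;
	uint32_t dword;
};

int main(void)
{
	union atr_hash_dword input = { .dword = 0 };

	input.formatted.flow_type = 0x2;        /* TCPv4 */
	input.formatted.vlan_id   = htons(100);
	printf("hash input dword: 0x%08x\n", (unsigned)input.dword);
	return 0;
}
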
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index 8f4bf1f07c11..3a4277f6fac4 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -178,6 +178,7 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
178 } else { 178 } else {
179 int i; 179 int i;
180 180
181 buf->direct.buf = NULL;
181 buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE; 182 buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
182 buf->npages = buf->nbufs; 183 buf->npages = buf->nbufs;
183 buf->page_shift = PAGE_SHIFT; 184 buf->page_shift = PAGE_SHIFT;
@@ -229,7 +230,7 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
229 dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf, 230 dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
230 buf->direct.map); 231 buf->direct.map);
231 else { 232 else {
232 if (BITS_PER_LONG == 64) 233 if (BITS_PER_LONG == 64 && buf->direct.buf)
233 vunmap(buf->direct.buf); 234 vunmap(buf->direct.buf);
234 235
235 for (i = 0; i < buf->nbufs; ++i) 236 for (i = 0; i < buf->nbufs; ++i)
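
Both mlx4 hunks above implement one invariant: buf->direct.buf must be NULL whenever no aggregate mapping exists, so the free path can simply test the pointer. The same pattern in plain C, with malloc/free standing in for vmap/vunmap:

#include <stdio.h>
#include <stdlib.h>

struct buf {
	void  *direct;   /* aggregate mapping, like buf->direct.buf */
	void **pages;
	int    nbufs;
};

static void buf_free(struct buf *b)
{
	if (b->direct)                    /* only unmap what was mapped */
		free(b->direct);
	for (int i = 0; i < b->nbufs; i++)
		free(b->pages[i]);
	free(b->pages);
}

int main(void)
{
	struct buf b = { .direct = NULL, .nbufs = 2 };  /* explicit NULL */

	b.pages = calloc(b.nbufs, sizeof(*b.pages));
	for (int i = 0; i < b.nbufs; i++)
		b.pages[i] = malloc(64);

	buf_free(&b);   /* safe even though no aggregate mapping exists */
	return 0;
}
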
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 6d6806b361e3..897f576b8b17 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -972,7 +972,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
972 int i; 972 int i;
973 int err; 973 int err;
974 974
975 dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num); 975 dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
976 prof->tx_ring_num, prof->rx_ring_num);
976 if (dev == NULL) { 977 if (dev == NULL) {
977 mlx4_err(mdev, "Net device allocation failed\n"); 978 mlx4_err(mdev, "Net device allocation failed\n");
978 return -ENOMEM; 979 return -ENOMEM;
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index 7a7e18ba278a..5de1db897835 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -289,10 +289,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
289 MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET); 289 MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
290 dev_cap->bf_reg_size = 1 << (field & 0x1f); 290 dev_cap->bf_reg_size = 1 << (field & 0x1f);
291 MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET); 291 MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
292 if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size)) { 292 if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size))
293 mlx4_warn(dev, "firmware bug: log2 # of blue flame regs is invalid (%d), forcing 3\n", field & 0x1f);
294 field = 3; 293 field = 3;
295 }
296 dev_cap->bf_regs_per_page = 1 << (field & 0x3f); 294 dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
297 mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n", 295 mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
298 dev_cap->bf_reg_size, dev_cap->bf_regs_per_page); 296 dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 2c158910f7ea..e953793a33ff 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1536,6 +1536,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
1536 PCMCIA_DEVICE_PROD_ID12("CONTEC", "C-NET(PC)C-10L", 0x21cab552, 0xf6f90722), 1536 PCMCIA_DEVICE_PROD_ID12("CONTEC", "C-NET(PC)C-10L", 0x21cab552, 0xf6f90722),
1537 PCMCIA_DEVICE_PROD_ID12("corega", "FEther PCC-TXF", 0x0a21501a, 0xa51564a2), 1537 PCMCIA_DEVICE_PROD_ID12("corega", "FEther PCC-TXF", 0x0a21501a, 0xa51564a2),
1538 PCMCIA_DEVICE_PROD_ID12("corega", "Ether CF-TD", 0x0a21501a, 0x6589340a), 1538 PCMCIA_DEVICE_PROD_ID12("corega", "Ether CF-TD", 0x0a21501a, 0x6589340a),
1539 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega Ether CF-TD LAN Card", 0x5261440f, 0x8797663b),
1539 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-T", 0x5261440f, 0xfa9d85bd), 1540 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-T", 0x5261440f, 0xfa9d85bd),
1540 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-TD", 0x5261440f, 0xc49bd73d), 1541 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-TD", 0x5261440f, 0xc49bd73d),
1541 PCMCIA_DEVICE_PROD_ID12("Corega K.K.", "corega EtherII PCC-TD", 0xd4fdcbd8, 0xc49bd73d), 1542 PCMCIA_DEVICE_PROD_ID12("Corega K.K.", "corega EtherII PCC-TD", 0xd4fdcbd8, 0xc49bd73d),
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c
index 78d70a6481bf..a1b82c9c67d2 100644
--- a/drivers/net/ppp_async.c
+++ b/drivers/net/ppp_async.c
@@ -32,6 +32,7 @@
32#include <linux/init.h> 32#include <linux/init.h>
33#include <linux/jiffies.h> 33#include <linux/jiffies.h>
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <asm/unaligned.h>
35#include <asm/uaccess.h> 36#include <asm/uaccess.h>
36#include <asm/string.h> 37#include <asm/string.h>
37 38
@@ -542,7 +543,7 @@ ppp_async_encode(struct asyncppp *ap)
542 data = ap->tpkt->data; 543 data = ap->tpkt->data;
543 count = ap->tpkt->len; 544 count = ap->tpkt->len;
544 fcs = ap->tfcs; 545 fcs = ap->tfcs;
545 proto = (data[0] << 8) + data[1]; 546 proto = get_unaligned_be16(data);
546 547
547 /* 548 /*
 548 * LCP packets with code values between 1 (configure-request) 549
@@ -963,7 +964,7 @@ static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
963 code = data[0]; 964 code = data[0];
964 if (code != CONFACK && code != CONFREQ) 965 if (code != CONFACK && code != CONFREQ)
965 return; 966 return;
966 dlen = (data[2] << 8) + data[3]; 967 dlen = get_unaligned_be16(data + 2);
967 if (len < dlen) 968 if (len < dlen)
968 return; /* packet got truncated or length is bogus */ 969 return; /* packet got truncated or length is bogus */
969 970
@@ -997,15 +998,14 @@ static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
997 while (dlen >= 2 && dlen >= data[1] && data[1] >= 2) { 998 while (dlen >= 2 && dlen >= data[1] && data[1] >= 2) {
998 switch (data[0]) { 999 switch (data[0]) {
999 case LCP_MRU: 1000 case LCP_MRU:
1000 val = (data[2] << 8) + data[3]; 1001 val = get_unaligned_be16(data + 2);
1001 if (inbound) 1002 if (inbound)
1002 ap->mru = val; 1003 ap->mru = val;
1003 else 1004 else
1004 ap->chan.mtu = val; 1005 ap->chan.mtu = val;
1005 break; 1006 break;
1006 case LCP_ASYNCMAP: 1007 case LCP_ASYNCMAP:
1007 val = (data[2] << 24) + (data[3] << 16) 1008 val = get_unaligned_be32(data + 2);
1008 + (data[4] << 8) + data[5];
1009 if (inbound) 1009 if (inbound)
1010 ap->raccm = val; 1010 ap->raccm = val;
1011 else 1011 else
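
get_unaligned_be16()/get_unaligned_be32() replace the open-coded shift-and-add reads above and stay safe on architectures that fault on misaligned loads. A portable sketch of what the helpers compute; the frame bytes are invented, 0xc021 is the LCP protocol number:

#include <stdint.h>
#include <stdio.h>

static uint16_t be16_load(const uint8_t *p)
{
	return (uint16_t)((p[0] << 8) | p[1]);
}

static uint32_t be32_load(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

int main(void)
{
	/* invented frame: LCP protocol followed by a 4-byte async map */
	uint8_t frame[] = { 0xc0, 0x21, 0x00, 0x00, 0x0a, 0x0b };

	printf("proto = 0x%04x\n", be16_load(frame));       /* 0xc021 */
	printf("accm  = 0x%08x\n", (unsigned)be32_load(frame + 2));
	return 0;
}
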
diff --git a/drivers/net/ppp_deflate.c b/drivers/net/ppp_deflate.c
index 695bc83e0cfd..43583309a65d 100644
--- a/drivers/net/ppp_deflate.c
+++ b/drivers/net/ppp_deflate.c
@@ -41,6 +41,7 @@
41#include <linux/ppp-comp.h> 41#include <linux/ppp-comp.h>
42 42
43#include <linux/zlib.h> 43#include <linux/zlib.h>
44#include <asm/unaligned.h>
44 45
45/* 46/*
46 * State for a Deflate (de)compressor. 47 * State for a Deflate (de)compressor.
@@ -232,11 +233,9 @@ static int z_compress(void *arg, unsigned char *rptr, unsigned char *obuf,
232 */ 233 */
233 wptr[0] = PPP_ADDRESS(rptr); 234 wptr[0] = PPP_ADDRESS(rptr);
234 wptr[1] = PPP_CONTROL(rptr); 235 wptr[1] = PPP_CONTROL(rptr);
235 wptr[2] = PPP_COMP >> 8; 236 put_unaligned_be16(PPP_COMP, wptr + 2);
236 wptr[3] = PPP_COMP;
237 wptr += PPP_HDRLEN; 237 wptr += PPP_HDRLEN;
238 wptr[0] = state->seqno >> 8; 238 put_unaligned_be16(state->seqno, wptr);
239 wptr[1] = state->seqno;
240 wptr += DEFLATE_OVHD; 239 wptr += DEFLATE_OVHD;
241 olen = PPP_HDRLEN + DEFLATE_OVHD; 240 olen = PPP_HDRLEN + DEFLATE_OVHD;
242 state->strm.next_out = wptr; 241 state->strm.next_out = wptr;
@@ -451,7 +450,7 @@ static int z_decompress(void *arg, unsigned char *ibuf, int isize,
451 } 450 }
452 451
453 /* Check the sequence number. */ 452 /* Check the sequence number. */
454 seq = (ibuf[PPP_HDRLEN] << 8) + ibuf[PPP_HDRLEN+1]; 453 seq = get_unaligned_be16(ibuf + PPP_HDRLEN);
455 if (seq != (state->seqno & 0xffff)) { 454 if (seq != (state->seqno & 0xffff)) {
456 if (state->debug) 455 if (state->debug)
457 printk(KERN_DEBUG "z_decompress%d: bad seq # %d, expected %d\n", 456 printk(KERN_DEBUG "z_decompress%d: bad seq # %d, expected %d\n",
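
put_unaligned_be16() is the store-side twin used in z_compress() above for the PPP_COMP protocol field and the deflate sequence number. A sketch of the byte-wise store; PPP_COMP is 0x00fd, the sequence number is an example:

#include <stdint.h>
#include <stdio.h>

#define PPP_COMP 0x00fd   /* compressed-datagram protocol number */

static void be16_store(uint16_t v, uint8_t *p)
{
	p[0] = (uint8_t)(v >> 8);   /* most significant byte first */
	p[1] = (uint8_t)v;
}

int main(void)
{
	uint8_t hdr[4] = { 0 };

	be16_store(PPP_COMP, hdr);     /* protocol field */
	be16_store(0x1234, hdr + 2);   /* deflate sequence number */
	printf("%02x %02x %02x %02x\n", hdr[0], hdr[1], hdr[2], hdr[3]);
	return 0;
}
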
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 6456484c0299..c7a6c4466978 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -46,6 +46,7 @@
46#include <linux/device.h> 46#include <linux/device.h>
47#include <linux/mutex.h> 47#include <linux/mutex.h>
48#include <linux/slab.h> 48#include <linux/slab.h>
49#include <asm/unaligned.h>
49#include <net/slhc_vj.h> 50#include <net/slhc_vj.h>
50#include <asm/atomic.h> 51#include <asm/atomic.h>
51 52
@@ -210,7 +211,7 @@ struct ppp_net {
210}; 211};
211 212
212/* Get the PPP protocol number from a skb */ 213/* Get the PPP protocol number from a skb */
213#define PPP_PROTO(skb) (((skb)->data[0] << 8) + (skb)->data[1]) 214#define PPP_PROTO(skb) get_unaligned_be16((skb)->data)
214 215
215/* We limit the length of ppp->file.rq to this (arbitrary) value */ 216/* We limit the length of ppp->file.rq to this (arbitrary) value */
216#define PPP_MAX_RQLEN 32 217#define PPP_MAX_RQLEN 32
@@ -964,8 +965,7 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
964 965
965 pp = skb_push(skb, 2); 966 pp = skb_push(skb, 2);
966 proto = npindex_to_proto[npi]; 967 proto = npindex_to_proto[npi];
967 pp[0] = proto >> 8; 968 put_unaligned_be16(proto, pp);
968 pp[1] = proto;
969 969
970 netif_stop_queue(dev); 970 netif_stop_queue(dev);
971 skb_queue_tail(&ppp->file.xq, skb); 971 skb_queue_tail(&ppp->file.xq, skb);
@@ -1473,8 +1473,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1473 q = skb_put(frag, flen + hdrlen); 1473 q = skb_put(frag, flen + hdrlen);
1474 1474
1475 /* make the MP header */ 1475 /* make the MP header */
1476 q[0] = PPP_MP >> 8; 1476 put_unaligned_be16(PPP_MP, q);
1477 q[1] = PPP_MP;
1478 if (ppp->flags & SC_MP_XSHORTSEQ) { 1477 if (ppp->flags & SC_MP_XSHORTSEQ) {
1479 q[2] = bits + ((ppp->nxseq >> 8) & 0xf); 1478 q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
1480 q[3] = ppp->nxseq; 1479 q[3] = ppp->nxseq;
diff --git a/drivers/net/ppp_mppe.c b/drivers/net/ppp_mppe.c
index 6d1a1b80cc3e..9a1849a83e2a 100644
--- a/drivers/net/ppp_mppe.c
+++ b/drivers/net/ppp_mppe.c
@@ -55,6 +55,7 @@
55#include <linux/ppp_defs.h> 55#include <linux/ppp_defs.h>
56#include <linux/ppp-comp.h> 56#include <linux/ppp-comp.h>
57#include <linux/scatterlist.h> 57#include <linux/scatterlist.h>
58#include <asm/unaligned.h>
58 59
59#include "ppp_mppe.h" 60#include "ppp_mppe.h"
60 61
@@ -395,16 +396,14 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf,
395 */ 396 */
396 obuf[0] = PPP_ADDRESS(ibuf); 397 obuf[0] = PPP_ADDRESS(ibuf);
397 obuf[1] = PPP_CONTROL(ibuf); 398 obuf[1] = PPP_CONTROL(ibuf);
398 obuf[2] = PPP_COMP >> 8; /* isize + MPPE_OVHD + 1 */ 399 put_unaligned_be16(PPP_COMP, obuf + 2);
399 obuf[3] = PPP_COMP; /* isize + MPPE_OVHD + 2 */
400 obuf += PPP_HDRLEN; 400 obuf += PPP_HDRLEN;
401 401
402 state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE; 402 state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE;
403 if (state->debug >= 7) 403 if (state->debug >= 7)
404 printk(KERN_DEBUG "mppe_compress[%d]: ccount %d\n", state->unit, 404 printk(KERN_DEBUG "mppe_compress[%d]: ccount %d\n", state->unit,
405 state->ccount); 405 state->ccount);
406 obuf[0] = state->ccount >> 8; 406 put_unaligned_be16(state->ccount, obuf);
407 obuf[1] = state->ccount & 0xff;
408 407
409 if (!state->stateful || /* stateless mode */ 408 if (!state->stateful || /* stateless mode */
410 ((state->ccount & 0xff) == 0xff) || /* "flag" packet */ 409 ((state->ccount & 0xff) == 0xff) || /* "flag" packet */
diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c
index 4c95ec3fb8d4..4e6b72f57de8 100644
--- a/drivers/net/ppp_synctty.c
+++ b/drivers/net/ppp_synctty.c
@@ -45,6 +45,7 @@
45#include <linux/completion.h> 45#include <linux/completion.h>
46#include <linux/init.h> 46#include <linux/init.h>
47#include <linux/slab.h> 47#include <linux/slab.h>
48#include <asm/unaligned.h>
48#include <asm/uaccess.h> 49#include <asm/uaccess.h>
49 50
50#define PPP_VERSION "2.4.2" 51#define PPP_VERSION "2.4.2"
@@ -563,7 +564,7 @@ ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *skb)
563 int islcp; 564 int islcp;
564 565
565 data = skb->data; 566 data = skb->data;
566 proto = (data[0] << 8) + data[1]; 567 proto = get_unaligned_be16(data);
567 568
568 /* LCP packets with codes between 1 (configure-request) 569 /* LCP packets with codes between 1 (configure-request)
569 * and 7 (code-reject) must be sent as though no options 570 * and 7 (code-reject) must be sent as though no options
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index 9c2a02d204dc..44e316fd67b8 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -34,8 +34,8 @@
34 34
35#define _QLCNIC_LINUX_MAJOR 5 35#define _QLCNIC_LINUX_MAJOR 5
36#define _QLCNIC_LINUX_MINOR 0 36#define _QLCNIC_LINUX_MINOR 0
37#define _QLCNIC_LINUX_SUBVERSION 14 37#define _QLCNIC_LINUX_SUBVERSION 15
38#define QLCNIC_LINUX_VERSIONID "5.0.14" 38#define QLCNIC_LINUX_VERSIONID "5.0.15"
39#define QLCNIC_DRV_IDC_VER 0x01 39#define QLCNIC_DRV_IDC_VER 0x01
40#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ 40#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
41 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) 41 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -289,6 +289,26 @@ struct uni_data_desc{
289 u32 reserved[5]; 289 u32 reserved[5];
290}; 290};
291 291
292/* Flash Defines and Structures */
293#define QLCNIC_FLT_LOCATION 0x3F1000
294#define QLCNIC_FW_IMAGE_REGION 0x74
295struct qlcnic_flt_header {
296 u16 version;
297 u16 len;
298 u16 checksum;
299 u16 reserved;
300};
301
302struct qlcnic_flt_entry {
303 u8 region;
304 u8 reserved0;
305 u8 attrib;
306 u8 reserved1;
307 u32 size;
308 u32 start_addr;
309 u32 end_add;
310};
311
292/* Magic number to let user know flash is programmed */ 312/* Magic number to let user know flash is programmed */
293#define QLCNIC_BDINFO_MAGIC 0x12345678 313#define QLCNIC_BDINFO_MAGIC 0x12345678
294 314
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
index 1e7af709d395..4c14510e2a87 100644
--- a/drivers/net/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -672,7 +672,7 @@ qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
672 if (data[1]) 672 if (data[1])
673 eth_test->flags |= ETH_TEST_FL_FAILED; 673 eth_test->flags |= ETH_TEST_FL_FAILED;
674 674
675 if (eth_test->flags == ETH_TEST_FL_OFFLINE) { 675 if (eth_test->flags & ETH_TEST_FL_OFFLINE) {
676 data[2] = qlcnic_irq_test(dev); 676 data[2] = qlcnic_irq_test(dev);
677 if (data[2]) 677 if (data[2])
678 eth_test->flags |= ETH_TEST_FL_FAILED; 678 eth_test->flags |= ETH_TEST_FL_FAILED;
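
The one-character qlcnic fix above matters because eth_test->flags is a bit mask: once ETH_TEST_FL_FAILED has been ORed in, an '==' comparison against ETH_TEST_FL_OFFLINE can never match again. A tiny demonstration with invented flag values:

#include <stdio.h>

#define FL_OFFLINE 0x1u
#define FL_FAILED  0x2u

int main(void)
{
	unsigned flags = FL_OFFLINE | FL_FAILED;  /* a test already failed */

	printf("'==' test: %s\n", flags == FL_OFFLINE ? "offline" : "missed");
	printf("'&'  test: %s\n", flags & FL_OFFLINE  ? "offline" : "missed");
	return 0;
}
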
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
index 9b9c7c39d3ee..a7f1d5b7e811 100644
--- a/drivers/net/qlcnic/qlcnic_init.c
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -627,12 +627,73 @@ qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) {
627 return 0; 627 return 0;
628} 628}
629 629
630static int qlcnic_get_flt_entry(struct qlcnic_adapter *adapter, u8 region,
631 struct qlcnic_flt_entry *region_entry)
632{
633 struct qlcnic_flt_header flt_hdr;
634 struct qlcnic_flt_entry *flt_entry;
635 int i = 0, ret;
636 u32 entry_size;
637
638 memset(region_entry, 0, sizeof(struct qlcnic_flt_entry));
639 ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION,
640 (u8 *)&flt_hdr,
641 sizeof(struct qlcnic_flt_header));
642 if (ret) {
643 dev_warn(&adapter->pdev->dev,
644 "error reading flash layout header\n");
645 return -EIO;
646 }
647
648 entry_size = flt_hdr.len - sizeof(struct qlcnic_flt_header);
649 flt_entry = (struct qlcnic_flt_entry *)vzalloc(entry_size);
650 if (flt_entry == NULL) {
651 dev_warn(&adapter->pdev->dev, "error allocating memory\n");
652 return -EIO;
653 }
654
655 ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION +
656 sizeof(struct qlcnic_flt_header),
657 (u8 *)flt_entry, entry_size);
658 if (ret) {
659 dev_warn(&adapter->pdev->dev,
660 "error reading flash layout entries\n");
661 goto err_out;
662 }
663
664 while (i < (entry_size/sizeof(struct qlcnic_flt_entry))) {
665 if (flt_entry[i].region == region)
666 break;
667 i++;
668 }
669 if (i >= (entry_size/sizeof(struct qlcnic_flt_entry))) {
670 dev_warn(&adapter->pdev->dev,
671 "region=%x not found in %d regions\n", region, i);
672 ret = -EIO;
673 goto err_out;
674 }
675 memcpy(region_entry, &flt_entry[i], sizeof(struct qlcnic_flt_entry));
676
677err_out:
678 vfree(flt_entry);
679 return ret;
680}
681
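
qlcnic_get_flt_entry() reads a small header to size the table, then linearly scans fixed-size entries for a region id. A self-contained sketch of the same lookup over an in-memory byte buffer, which stands in for the flash reads; the 0x74 firmware-region id comes from the header above, the rest of the data is invented:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct flt_header { uint16_t version, len, checksum, reserved; };
struct flt_entry  { uint8_t region, rsvd0, attrib, rsvd1;
		    uint32_t size, start_addr, end_addr; };

static int find_region(const uint8_t *flash, uint8_t region,
		       struct flt_entry *out)
{
	struct flt_header hdr;

	memcpy(&hdr, flash, sizeof(hdr));   /* header gives the table size */

	size_t nentries = (hdr.len - sizeof(hdr)) / sizeof(*out);
	const uint8_t *p = flash + sizeof(hdr);

	for (size_t i = 0; i < nentries; i++, p += sizeof(*out)) {
		memcpy(out, p, sizeof(*out));
		if (out->region == region)
			return 0;
	}
	return -1;                          /* region not in the table */
}

int main(void)
{
	uint8_t flash[64] = { 0 };
	struct flt_header hdr = { .version = 1,
		.len = sizeof(hdr) + 2 * sizeof(struct flt_entry) };
	struct flt_entry fw = { .region = 0x74, .start_addr = 0x10000 };
	struct flt_entry found;

	memcpy(flash, &hdr, sizeof(hdr));
	memcpy(flash + sizeof(hdr) + sizeof(fw), &fw, sizeof(fw));

	if (!find_region(flash, 0x74, &found))
		printf("fw image region at 0x%x\n", (unsigned)found.start_addr);
	return 0;
}
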
630int 682int
631qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter) 683qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter)
632{ 684{
685 struct qlcnic_flt_entry fw_entry;
633 u32 ver = -1, min_ver; 686 u32 ver = -1, min_ver;
687 int ret;
634 688
635 qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET, (int *)&ver); 689 ret = qlcnic_get_flt_entry(adapter, QLCNIC_FW_IMAGE_REGION, &fw_entry);
690 if (!ret)
691 /* 0-4:-signature, 4-8:-fw version */
692 qlcnic_rom_fast_read(adapter, fw_entry.start_addr + 4,
693 (int *)&ver);
694 else
695 qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET,
696 (int *)&ver);
636 697
637 ver = QLCNIC_DECODE_VERSION(ver); 698 ver = QLCNIC_DECODE_VERSION(ver);
638 min_ver = QLCNIC_MIN_FW_VERSION; 699 min_ver = QLCNIC_MIN_FW_VERSION;
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index 11e3a46c0911..37c04b4fade3 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -31,15 +31,15 @@ static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
31 31
32static struct workqueue_struct *qlcnic_wq; 32static struct workqueue_struct *qlcnic_wq;
33static int qlcnic_mac_learn; 33static int qlcnic_mac_learn;
34module_param(qlcnic_mac_learn, int, 0644); 34module_param(qlcnic_mac_learn, int, 0444);
35MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=disabled, 1=enabled)"); 35MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=disabled, 1=enabled)");
36 36
37static int use_msi = 1; 37static int use_msi = 1;
38module_param(use_msi, int, 0644); 38module_param(use_msi, int, 0444);
39MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)"); 39MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
40 40
41static int use_msi_x = 1; 41static int use_msi_x = 1;
42module_param(use_msi_x, int, 0644); 42module_param(use_msi_x, int, 0444);
43MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)"); 43MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
44 44
45static int auto_fw_reset = AUTO_FW_RESET_ENABLED; 45static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
@@ -47,11 +47,11 @@ module_param(auto_fw_reset, int, 0644);
47MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)"); 47MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
48 48
49static int load_fw_file; 49static int load_fw_file;
50module_param(load_fw_file, int, 0644); 50module_param(load_fw_file, int, 0444);
51MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)"); 51MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)");
52 52
53static int qlcnic_config_npars; 53static int qlcnic_config_npars;
54module_param(qlcnic_config_npars, int, 0644); 54module_param(qlcnic_config_npars, int, 0444);
55MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)"); 55MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)");
56 56
57static int __devinit qlcnic_probe(struct pci_dev *pdev, 57static int __devinit qlcnic_probe(struct pci_dev *pdev,
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 27a7c20f64cd..bb8645ab247c 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -1632,36 +1632,134 @@ rtl_phy_write_fw(struct rtl8169_private *tp, const struct firmware *fw)
1632{ 1632{
1633 __le32 *phytable = (__le32 *)fw->data; 1633 __le32 *phytable = (__le32 *)fw->data;
1634 struct net_device *dev = tp->dev; 1634 struct net_device *dev = tp->dev;
1635 size_t i; 1635 size_t index, fw_size = fw->size / sizeof(*phytable);
1636 u32 predata, count;
1636 1637
1637 if (fw->size % sizeof(*phytable)) { 1638 if (fw->size % sizeof(*phytable)) {
1638 netif_err(tp, probe, dev, "odd sized firmware %zd\n", fw->size); 1639 netif_err(tp, probe, dev, "odd sized firmware %zd\n", fw->size);
1639 return; 1640 return;
1640 } 1641 }
1641 1642
1642 for (i = 0; i < fw->size / sizeof(*phytable); i++) { 1643 for (index = 0; index < fw_size; index++) {
1643 u32 action = le32_to_cpu(phytable[i]); 1644 u32 action = le32_to_cpu(phytable[index]);
1645 u32 regno = (action & 0x0fff0000) >> 16;
1644 1646
1645 if (!action) 1647 switch(action & 0xf0000000) {
1648 case PHY_READ:
1649 case PHY_DATA_OR:
1650 case PHY_DATA_AND:
1651 case PHY_READ_EFUSE:
1652 case PHY_CLEAR_READCOUNT:
1653 case PHY_WRITE:
1654 case PHY_WRITE_PREVIOUS:
1655 case PHY_DELAY_MS:
1656 break;
1657
1658 case PHY_BJMPN:
1659 if (regno > index) {
1660 netif_err(tp, probe, tp->dev,
1661 "Out of range of firmware\n");
1662 return;
1663 }
1664 break;
1665 case PHY_READCOUNT_EQ_SKIP:
1666 if (index + 2 >= fw_size) {
1667 netif_err(tp, probe, tp->dev,
1668 "Out of range of firmware\n");
1669 return;
1670 }
1671 break;
1672 case PHY_COMP_EQ_SKIPN:
1673 case PHY_COMP_NEQ_SKIPN:
1674 case PHY_SKIPN:
1675 if (index + 1 + regno >= fw_size) {
1676 netif_err(tp, probe, tp->dev,
1677 "Out of range of firmware\n");
1678 return;
1679 }
1646 break; 1680 break;
1647 1681
1648 if ((action & 0xf0000000) != PHY_WRITE) { 1682 case PHY_READ_MAC_BYTE:
1649 netif_err(tp, probe, dev, 1683 case PHY_WRITE_MAC_BYTE:
1650 "unknown action 0x%08x\n", action); 1684 case PHY_WRITE_ERI_WORD:
1685 default:
1686 netif_err(tp, probe, tp->dev,
1687 "Invalid action 0x%08x\n", action);
1651 return; 1688 return;
1652 } 1689 }
1653 } 1690 }
1654 1691
1655 while (i-- != 0) { 1692 predata = 0;
1656 u32 action = le32_to_cpu(*phytable); 1693 count = 0;
1694
1695 for (index = 0; index < fw_size; ) {
1696 u32 action = le32_to_cpu(phytable[index]);
1657 u32 data = action & 0x0000ffff; 1697 u32 data = action & 0x0000ffff;
1658 u32 reg = (action & 0x0fff0000) >> 16; 1698 u32 regno = (action & 0x0fff0000) >> 16;
1699
1700 if (!action)
1701 break;
1659 1702
1660 switch(action & 0xf0000000) { 1703 switch(action & 0xf0000000) {
1704 case PHY_READ:
1705 predata = rtl_readphy(tp, regno);
1706 count++;
1707 index++;
1708 break;
1709 case PHY_DATA_OR:
1710 predata |= data;
1711 index++;
1712 break;
1713 case PHY_DATA_AND:
1714 predata &= data;
1715 index++;
1716 break;
1717 case PHY_BJMPN:
1718 index -= regno;
1719 break;
1720 case PHY_READ_EFUSE:
1721 predata = rtl8168d_efuse_read(tp->mmio_addr, regno);
1722 index++;
1723 break;
1724 case PHY_CLEAR_READCOUNT:
1725 count = 0;
1726 index++;
1727 break;
1661 case PHY_WRITE: 1728 case PHY_WRITE:
1662 rtl_writephy(tp, reg, data); 1729 rtl_writephy(tp, regno, data);
1663 phytable++; 1730 index++;
1731 break;
1732 case PHY_READCOUNT_EQ_SKIP:
1733 if (count == data)
1734 index += 2;
1735 else
1736 index += 1;
1737 break;
1738 case PHY_COMP_EQ_SKIPN:
1739 if (predata == data)
1740 index += regno;
1741 index++;
1664 break; 1742 break;
1743 case PHY_COMP_NEQ_SKIPN:
1744 if (predata != data)
1745 index += regno;
1746 index++;
1747 break;
1748 case PHY_WRITE_PREVIOUS:
1749 rtl_writephy(tp, regno, predata);
1750 index++;
1751 break;
1752 case PHY_SKIPN:
1753 index += regno + 1;
1754 break;
1755 case PHY_DELAY_MS:
1756 mdelay(data);
1757 index++;
1758 break;
1759
1760 case PHY_READ_MAC_BYTE:
1761 case PHY_WRITE_MAC_BYTE:
1762 case PHY_WRITE_ERI_WORD:
1665 default: 1763 default:
1666 BUG(); 1764 BUG();
1667 } 1765 }
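
The r8169 rework above runs the PHY firmware table in two passes: a validation loop that bounds-checks every action and jump target, then an execution loop, so a malformed image is rejected before any register is touched. A sketch of the same shape with invented opcodes; this is not the r8169 action encoding:

#include <stdint.h>
#include <stdio.h>

enum { OP_END, OP_WRITE, OP_SKIP };  /* hypothetical opcodes */

static int validate(const uint32_t *tbl, size_t n)
{
	for (size_t i = 0; i < n; i++) {
		uint32_t op = tbl[i] >> 28, arg = (tbl[i] >> 16) & 0xfff;

		switch (op) {
		case OP_END:
		case OP_WRITE:
			break;
		case OP_SKIP:
			if (i + 1 + arg >= n)
				return -1;   /* jump lands past the table */
			break;
		default:
			return -1;           /* unknown action word */
		}
	}
	return 0;
}

static void run(const uint32_t *tbl, size_t n)
{
	for (size_t i = 0; i < n; ) {
		uint32_t op = tbl[i] >> 28, arg = (tbl[i] >> 16) & 0xfff;
		uint32_t data = tbl[i] & 0xffff;

		switch (op) {
		case OP_END:
			return;
		case OP_WRITE:
			printf("write reg %u = 0x%04x\n",
			       (unsigned)arg, (unsigned)data);
			i++;
			break;
		case OP_SKIP:
			i += arg + 1;
			break;
		}
	}
}

int main(void)
{
	const uint32_t fw[] = {
		(uint32_t)OP_SKIP  << 28 | 1u << 16,           /* skip next */
		(uint32_t)OP_WRITE << 28 | 5u << 16 | 0xdead,
		(uint32_t)OP_WRITE << 28 | 6u << 16 | 0xbeef,
		(uint32_t)OP_END   << 28,
	};
	size_t n = sizeof(fw) / sizeof(fw[0]);

	if (validate(fw, n) == 0)   /* reject bad tables before running */
		run(fw, n);
	return 0;
}
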
@@ -3069,15 +3167,6 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3069 rtl8168_driver_start(tp); 3167 rtl8168_driver_start(tp);
3070 } 3168 }
3071 3169
3072 rtl8169_init_phy(dev, tp);
3073
3074 /*
3075 * Pretend we are using VLANs; This bypasses a nasty bug where
3076 * Interrupts stop flowing on high load on 8110SCd controllers.
3077 */
3078 if (tp->mac_version == RTL_GIGA_MAC_VER_05)
3079 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | RxVlan);
3080
3081 device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL); 3170 device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
3082 3171
3083 if (pci_dev_run_wake(pdev)) 3172 if (pci_dev_run_wake(pdev))
@@ -3127,6 +3216,7 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
3127static int rtl8169_open(struct net_device *dev) 3216static int rtl8169_open(struct net_device *dev)
3128{ 3217{
3129 struct rtl8169_private *tp = netdev_priv(dev); 3218 struct rtl8169_private *tp = netdev_priv(dev);
3219 void __iomem *ioaddr = tp->mmio_addr;
3130 struct pci_dev *pdev = tp->pci_dev; 3220 struct pci_dev *pdev = tp->pci_dev;
3131 int retval = -ENOMEM; 3221 int retval = -ENOMEM;
3132 3222
@@ -3162,6 +3252,15 @@ static int rtl8169_open(struct net_device *dev)
3162 3252
3163 napi_enable(&tp->napi); 3253 napi_enable(&tp->napi);
3164 3254
3255 rtl8169_init_phy(dev, tp);
3256
3257 /*
3258 * Pretend we are using VLANs; This bypasses a nasty bug where
3259 * Interrupts stop flowing on high load on 8110SCd controllers.
3260 */
3261 if (tp->mac_version == RTL_GIGA_MAC_VER_05)
3262 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | RxVlan);
3263
3165 rtl_pll_power_up(tp); 3264 rtl_pll_power_up(tp);
3166 3265
3167 rtl_hw_start(dev); 3266 rtl_hw_start(dev);
@@ -3171,7 +3270,7 @@ static int rtl8169_open(struct net_device *dev)
3171 tp->saved_wolopts = 0; 3270 tp->saved_wolopts = 0;
3172 pm_runtime_put_noidle(&pdev->dev); 3271 pm_runtime_put_noidle(&pdev->dev);
3173 3272
3174 rtl8169_check_link_status(dev, tp, tp->mmio_addr); 3273 rtl8169_check_link_status(dev, tp, ioaddr);
3175out: 3274out:
3176 return retval; 3275 return retval;
3177 3276
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 39996bf3b247..7d85a38377a1 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -46,10 +46,6 @@
46 46
47#include <asm/irq.h> 47#include <asm/irq.h>
48 48
49#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
50#define SKY2_VLAN_TAG_USED 1
51#endif
52
53#include "sky2.h" 49#include "sky2.h"
54 50
55#define DRV_NAME "sky2" 51#define DRV_NAME "sky2"
@@ -1326,40 +1322,35 @@ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1326 return err; 1322 return err;
1327} 1323}
1328 1324
1329#ifdef SKY2_VLAN_TAG_USED 1325#define NETIF_F_ALL_VLAN (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX)
1330static void sky2_set_vlan_mode(struct sky2_hw *hw, u16 port, bool onoff) 1326
1327static void sky2_vlan_mode(struct net_device *dev)
1331{ 1328{
1332 if (onoff) { 1329 struct sky2_port *sky2 = netdev_priv(dev);
1330 struct sky2_hw *hw = sky2->hw;
1331 u16 port = sky2->port;
1332
1333 if (dev->features & NETIF_F_HW_VLAN_RX)
1333 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), 1334 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
1334 RX_VLAN_STRIP_ON); 1335 RX_VLAN_STRIP_ON);
1335 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), 1336 else
1336 TX_VLAN_TAG_ON);
1337 } else {
1338 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), 1337 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
1339 RX_VLAN_STRIP_OFF); 1338 RX_VLAN_STRIP_OFF);
1339
1340 dev->vlan_features = dev->features &~ NETIF_F_ALL_VLAN;
1341 if (dev->features & NETIF_F_HW_VLAN_TX)
1342 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
1343 TX_VLAN_TAG_ON);
1344 else {
1340 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), 1345 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
1341 TX_VLAN_TAG_OFF); 1346 TX_VLAN_TAG_OFF);
1347
1348 /* Can't do transmit offload of vlan without hw vlan */
1349 dev->vlan_features &= ~(NETIF_F_TSO | NETIF_F_SG
1350 | NETIF_F_ALL_CSUM);
1342 } 1351 }
1343} 1352}
1344 1353
1345static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
1346{
1347 struct sky2_port *sky2 = netdev_priv(dev);
1348 struct sky2_hw *hw = sky2->hw;
1349 u16 port = sky2->port;
1350
1351 netif_tx_lock_bh(dev);
1352 napi_disable(&hw->napi);
1353
1354 sky2->vlgrp = grp;
1355 sky2_set_vlan_mode(hw, port, grp != NULL);
1356
1357 sky2_read32(hw, B0_Y2_SP_LISR);
1358 napi_enable(&hw->napi);
1359 netif_tx_unlock_bh(dev);
1360}
1361#endif
1362
1363/* Amount of required worst case padding in rx buffer */ 1354/* Amount of required worst case padding in rx buffer */
1364static inline unsigned sky2_rx_pad(const struct sky2_hw *hw) 1355static inline unsigned sky2_rx_pad(const struct sky2_hw *hw)
1365{ 1356{
@@ -1635,9 +1626,7 @@ static void sky2_hw_up(struct sky2_port *sky2)
1635 sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map, 1626 sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
1636 sky2->tx_ring_size - 1); 1627 sky2->tx_ring_size - 1);
1637 1628
1638#ifdef SKY2_VLAN_TAG_USED 1629 sky2_vlan_mode(sky2->netdev);
1639 sky2_set_vlan_mode(hw, port, sky2->vlgrp != NULL);
1640#endif
1641 1630
1642 sky2_rx_start(sky2); 1631 sky2_rx_start(sky2);
1643} 1632}
@@ -1780,7 +1769,7 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
1780 } 1769 }
1781 1770
1782 ctrl = 0; 1771 ctrl = 0;
1783#ifdef SKY2_VLAN_TAG_USED 1772
1784 /* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */ 1773 /* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */
1785 if (vlan_tx_tag_present(skb)) { 1774 if (vlan_tx_tag_present(skb)) {
1786 if (!le) { 1775 if (!le) {
@@ -1792,7 +1781,6 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
1792 le->length = cpu_to_be16(vlan_tx_tag_get(skb)); 1781 le->length = cpu_to_be16(vlan_tx_tag_get(skb));
1793 ctrl |= INS_VLAN; 1782 ctrl |= INS_VLAN;
1794 } 1783 }
1795#endif
1796 1784
1797 /* Handle TCP checksum offload */ 1785 /* Handle TCP checksum offload */
1798 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1786 if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -2432,11 +2420,8 @@ static struct sk_buff *sky2_receive(struct net_device *dev,
2432 struct sk_buff *skb = NULL; 2420 struct sk_buff *skb = NULL;
2433 u16 count = (status & GMR_FS_LEN) >> 16; 2421 u16 count = (status & GMR_FS_LEN) >> 16;
2434 2422
2435#ifdef SKY2_VLAN_TAG_USED 2423 if (status & GMR_FS_VLAN)
2436 /* Account for vlan tag */ 2424 count -= VLAN_HLEN; /* Account for vlan tag */
2437 if (sky2->vlgrp && (status & GMR_FS_VLAN))
2438 count -= VLAN_HLEN;
2439#endif
2440 2425
2441 netif_printk(sky2, rx_status, KERN_DEBUG, dev, 2426 netif_printk(sky2, rx_status, KERN_DEBUG, dev,
2442 "rx slot %u status 0x%x len %d\n", 2427 "rx slot %u status 0x%x len %d\n",
@@ -2504,17 +2489,9 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last)
2504static inline void sky2_skb_rx(const struct sky2_port *sky2, 2489static inline void sky2_skb_rx(const struct sky2_port *sky2,
2505 u32 status, struct sk_buff *skb) 2490 u32 status, struct sk_buff *skb)
2506{ 2491{
2507#ifdef SKY2_VLAN_TAG_USED 2492 if (status & GMR_FS_VLAN)
2508 u16 vlan_tag = be16_to_cpu(sky2->rx_tag); 2493 __vlan_hwaccel_put_tag(skb, be16_to_cpu(sky2->rx_tag));
2509 if (sky2->vlgrp && (status & GMR_FS_VLAN)) { 2494
2510 if (skb->ip_summed == CHECKSUM_NONE)
2511 vlan_hwaccel_receive_skb(skb, sky2->vlgrp, vlan_tag);
2512 else
2513 vlan_gro_receive(&sky2->hw->napi, sky2->vlgrp,
2514 vlan_tag, skb);
2515 return;
2516 }
2517#endif
2518 if (skb->ip_summed == CHECKSUM_NONE) 2495 if (skb->ip_summed == CHECKSUM_NONE)
2519 netif_receive_skb(skb); 2496 netif_receive_skb(skb);
2520 else 2497 else
@@ -2631,7 +2608,6 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
2631 goto exit_loop; 2608 goto exit_loop;
2632 break; 2609 break;
2633 2610
2634#ifdef SKY2_VLAN_TAG_USED
2635 case OP_RXVLAN: 2611 case OP_RXVLAN:
2636 sky2->rx_tag = length; 2612 sky2->rx_tag = length;
2637 break; 2613 break;
@@ -2639,7 +2615,6 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
2639 case OP_RXCHKSVLAN: 2615 case OP_RXCHKSVLAN:
2640 sky2->rx_tag = length; 2616 sky2->rx_tag = length;
2641 /* fall through */ 2617 /* fall through */
2642#endif
2643 case OP_RXCHKS: 2618 case OP_RXCHKS:
2644 if (likely(sky2->flags & SKY2_FLAG_RX_CHECKSUM)) 2619 if (likely(sky2->flags & SKY2_FLAG_RX_CHECKSUM))
2645 sky2_rx_checksum(sky2, status); 2620 sky2_rx_checksum(sky2, status);
@@ -3042,6 +3017,10 @@ static int __devinit sky2_init(struct sky2_hw *hw)
3042 | SKY2_HW_NEW_LE 3017 | SKY2_HW_NEW_LE
3043 | SKY2_HW_AUTO_TX_SUM 3018 | SKY2_HW_AUTO_TX_SUM
3044 | SKY2_HW_ADV_POWER_CTL; 3019 | SKY2_HW_ADV_POWER_CTL;
3020
 3021 /* The workaround for FE+ status conflicts with VLAN tag detection. */
3022 if (hw->chip_rev == CHIP_REV_YU_FE2_A0)
3023 hw->flags |= SKY2_HW_VLAN_BROKEN;
3045 break; 3024 break;
3046 3025
3047 case CHIP_ID_YUKON_SUPR: 3026 case CHIP_ID_YUKON_SUPR:
@@ -3411,18 +3390,15 @@ static u32 sky2_supported_modes(const struct sky2_hw *hw)
3411 u32 modes = SUPPORTED_10baseT_Half 3390 u32 modes = SUPPORTED_10baseT_Half
3412 | SUPPORTED_10baseT_Full 3391 | SUPPORTED_10baseT_Full
3413 | SUPPORTED_100baseT_Half 3392 | SUPPORTED_100baseT_Half
3414 | SUPPORTED_100baseT_Full 3393 | SUPPORTED_100baseT_Full;
3415 | SUPPORTED_Autoneg | SUPPORTED_TP;
3416 3394
3417 if (hw->flags & SKY2_HW_GIGABIT) 3395 if (hw->flags & SKY2_HW_GIGABIT)
3418 modes |= SUPPORTED_1000baseT_Half 3396 modes |= SUPPORTED_1000baseT_Half
3419 | SUPPORTED_1000baseT_Full; 3397 | SUPPORTED_1000baseT_Full;
3420 return modes; 3398 return modes;
3421 } else 3399 } else
3422 return SUPPORTED_1000baseT_Half 3400 return SUPPORTED_1000baseT_Half
3423 | SUPPORTED_1000baseT_Full 3401 | SUPPORTED_1000baseT_Full;
3424 | SUPPORTED_Autoneg
3425 | SUPPORTED_FIBRE;
3426} 3402}
3427 3403
3428static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) 3404static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
@@ -3436,9 +3412,11 @@ static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
3436 if (sky2_is_copper(hw)) { 3412 if (sky2_is_copper(hw)) {
3437 ecmd->port = PORT_TP; 3413 ecmd->port = PORT_TP;
3438 ecmd->speed = sky2->speed; 3414 ecmd->speed = sky2->speed;
3415 ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_TP;
3439 } else { 3416 } else {
3440 ecmd->speed = SPEED_1000; 3417 ecmd->speed = SPEED_1000;
3441 ecmd->port = PORT_FIBRE; 3418 ecmd->port = PORT_FIBRE;
3419 ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_FIBRE;
3442 } 3420 }
3443 3421
3444 ecmd->advertising = sky2->advertising; 3422 ecmd->advertising = sky2->advertising;
@@ -3455,8 +3433,19 @@ static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
3455 u32 supported = sky2_supported_modes(hw); 3433 u32 supported = sky2_supported_modes(hw);
3456 3434
3457 if (ecmd->autoneg == AUTONEG_ENABLE) { 3435 if (ecmd->autoneg == AUTONEG_ENABLE) {
3436 if (ecmd->advertising & ~supported)
3437 return -EINVAL;
3438
3439 if (sky2_is_copper(hw))
3440 sky2->advertising = ecmd->advertising |
3441 ADVERTISED_TP |
3442 ADVERTISED_Autoneg;
3443 else
3444 sky2->advertising = ecmd->advertising |
3445 ADVERTISED_FIBRE |
3446 ADVERTISED_Autoneg;
3447
3458 sky2->flags |= SKY2_FLAG_AUTO_SPEED; 3448 sky2->flags |= SKY2_FLAG_AUTO_SPEED;
3459 ecmd->advertising = supported;
3460 sky2->duplex = -1; 3449 sky2->duplex = -1;
3461 sky2->speed = -1; 3450 sky2->speed = -1;
3462 } else { 3451 } else {
@@ -3500,8 +3489,6 @@ static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
3500 sky2->flags &= ~SKY2_FLAG_AUTO_SPEED; 3489 sky2->flags &= ~SKY2_FLAG_AUTO_SPEED;
3501 } 3490 }
3502 3491
3503 sky2->advertising = ecmd->advertising;
3504
3505 if (netif_running(dev)) { 3492 if (netif_running(dev)) {
3506 sky2_phy_reinit(sky2); 3493 sky2_phy_reinit(sky2);
3507 sky2_set_multicast(dev); 3494 sky2_set_multicast(dev);
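
The new autoneg path above first rejects any advertised mode outside the supported mask, then forces the mandatory Autoneg/port bits back into sky2->advertising. A minimal sketch of that validate-then-augment step, with invented mode bits:

#include <stdint.h>
#include <stdio.h>

#define ADV_10H     0x01u
#define ADV_10F     0x02u
#define ADV_100F    0x04u
#define ADV_AUTONEG 0x40u

static int set_advertising(uint32_t *adv, uint32_t req, uint32_t supported)
{
	if (req & ~supported)
		return -1;               /* an unsupported mode was asked */
	*adv = req | ADV_AUTONEG;        /* always advertise autoneg too */
	return 0;
}

int main(void)
{
	uint32_t adv = 0, supported = ADV_10H | ADV_10F | ADV_100F;

	if (!set_advertising(&adv, ADV_100F, supported))
		printf("advertising = 0x%x\n", (unsigned)adv);
	if (set_advertising(&adv, 0x80u, supported))
		printf("rejected unsupported mode\n");
	return 0;
}
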
@@ -4229,15 +4216,28 @@ static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom
4229static int sky2_set_flags(struct net_device *dev, u32 data) 4216static int sky2_set_flags(struct net_device *dev, u32 data)
4230{ 4217{
4231 struct sky2_port *sky2 = netdev_priv(dev); 4218 struct sky2_port *sky2 = netdev_priv(dev);
4232 u32 supported = 4219 unsigned long old_feat = dev->features;
4233 (sky2->hw->flags & SKY2_HW_RSS_BROKEN) ? 0 : ETH_FLAG_RXHASH; 4220 u32 supported = 0;
4234 int rc; 4221 int rc;
4235 4222
4223 if (!(sky2->hw->flags & SKY2_HW_RSS_BROKEN))
4224 supported |= ETH_FLAG_RXHASH;
4225
4226 if (!(sky2->hw->flags & SKY2_HW_VLAN_BROKEN))
4227 supported |= ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN;
4228
4229 printk(KERN_DEBUG "sky2 set_flags: supported %x data %x\n",
4230 supported, data);
4231
4236 rc = ethtool_op_set_flags(dev, data, supported); 4232 rc = ethtool_op_set_flags(dev, data, supported);
4237 if (rc) 4233 if (rc)
4238 return rc; 4234 return rc;
4239 4235
4240 rx_set_rss(dev); 4236 if ((old_feat ^ dev->features) & NETIF_F_RXHASH)
4237 rx_set_rss(dev);
4238
4239 if ((old_feat ^ dev->features) & NETIF_F_ALL_VLAN)
4240 sky2_vlan_mode(dev);
4241 4241
4242 return 0; 4242 return 0;
4243} 4243}
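
The old_feat ^ dev->features test above is a change-detection idiom: XOR the feature words before and after the ethtool update and reprogram only the blocks whose bits actually toggled. A compact sketch with invented bits:

#include <stdint.h>
#include <stdio.h>

#define F_RXHASH   0x4u
#define F_ALL_VLAN 0x3u   /* rx strip | tx insert */

static void apply_flags(uint32_t *features, uint32_t wanted)
{
	uint32_t old = *features;

	*features = wanted;

	/* reprogram only the blocks whose bits actually toggled */
	if ((old ^ *features) & F_RXHASH)
		printf("reprogram RSS hashing\n");
	if ((old ^ *features) & F_ALL_VLAN)
		printf("reprogram VLAN offload\n");
}

int main(void)
{
	uint32_t features = F_RXHASH;

	apply_flags(&features, F_RXHASH | F_ALL_VLAN); /* only VLAN changes */
	return 0;
}
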
@@ -4273,6 +4273,7 @@ static const struct ethtool_ops sky2_ethtool_ops = {
4273 .get_sset_count = sky2_get_sset_count, 4273 .get_sset_count = sky2_get_sset_count,
4274 .get_ethtool_stats = sky2_get_ethtool_stats, 4274 .get_ethtool_stats = sky2_get_ethtool_stats,
4275 .set_flags = sky2_set_flags, 4275 .set_flags = sky2_set_flags,
4276 .get_flags = ethtool_op_get_flags,
4276}; 4277};
4277 4278
4278#ifdef CONFIG_SKY2_DEBUG 4279#ifdef CONFIG_SKY2_DEBUG
@@ -4554,9 +4555,6 @@ static const struct net_device_ops sky2_netdev_ops[2] = {
4554 .ndo_change_mtu = sky2_change_mtu, 4555 .ndo_change_mtu = sky2_change_mtu,
4555 .ndo_tx_timeout = sky2_tx_timeout, 4556 .ndo_tx_timeout = sky2_tx_timeout,
4556 .ndo_get_stats64 = sky2_get_stats, 4557 .ndo_get_stats64 = sky2_get_stats,
4557#ifdef SKY2_VLAN_TAG_USED
4558 .ndo_vlan_rx_register = sky2_vlan_rx_register,
4559#endif
4560#ifdef CONFIG_NET_POLL_CONTROLLER 4558#ifdef CONFIG_NET_POLL_CONTROLLER
4561 .ndo_poll_controller = sky2_netpoll, 4559 .ndo_poll_controller = sky2_netpoll,
4562#endif 4560#endif
@@ -4572,9 +4570,6 @@ static const struct net_device_ops sky2_netdev_ops[2] = {
4572 .ndo_change_mtu = sky2_change_mtu, 4570 .ndo_change_mtu = sky2_change_mtu,
4573 .ndo_tx_timeout = sky2_tx_timeout, 4571 .ndo_tx_timeout = sky2_tx_timeout,
4574 .ndo_get_stats64 = sky2_get_stats, 4572 .ndo_get_stats64 = sky2_get_stats,
4575#ifdef SKY2_VLAN_TAG_USED
4576 .ndo_vlan_rx_register = sky2_vlan_rx_register,
4577#endif
4578 }, 4573 },
4579}; 4574};
4580 4575
@@ -4625,7 +4620,8 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
4625 sky2->port = port; 4620 sky2->port = port;
4626 4621
4627 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG 4622 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG
4628 | NETIF_F_TSO | NETIF_F_GRO; 4623 | NETIF_F_TSO | NETIF_F_GRO;
4624
4629 if (highmem) 4625 if (highmem)
4630 dev->features |= NETIF_F_HIGHDMA; 4626 dev->features |= NETIF_F_HIGHDMA;
4631 4627
@@ -4633,13 +4629,8 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
4633 if (!(hw->flags & SKY2_HW_RSS_BROKEN)) 4629 if (!(hw->flags & SKY2_HW_RSS_BROKEN))
4634 dev->features |= NETIF_F_RXHASH; 4630 dev->features |= NETIF_F_RXHASH;
4635 4631
4636#ifdef SKY2_VLAN_TAG_USED 4632 if (!(hw->flags & SKY2_HW_VLAN_BROKEN))
4637 /* The workaround for FE+ status conflicts with VLAN tag detection. */
4638 if (!(sky2->hw->chip_id == CHIP_ID_YUKON_FE_P &&
4639 sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0)) {
4640 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 4633 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
4641 }
4642#endif
4643 4634
4644 /* read the mac address */ 4635 /* read the mac address */
4645 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN); 4636 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN);
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 80bdc404f1ea..6861b0e8db9a 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -2236,11 +2236,8 @@ struct sky2_port {
2236 u16 rx_pending; 2236 u16 rx_pending;
2237 u16 rx_data_size; 2237 u16 rx_data_size;
2238 u16 rx_nfrags; 2238 u16 rx_nfrags;
2239
2240#ifdef SKY2_VLAN_TAG_USED
2241 u16 rx_tag; 2239 u16 rx_tag;
2242 struct vlan_group *vlgrp; 2240
2243#endif
2244 struct { 2241 struct {
2245 unsigned long last; 2242 unsigned long last;
2246 u32 mac_rp; 2243 u32 mac_rp;
@@ -2284,6 +2281,7 @@ struct sky2_hw {
2284#define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */ 2281#define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */
2285#define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */ 2282#define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */
2286#define SKY2_HW_RSS_BROKEN 0x00000100 2283#define SKY2_HW_RSS_BROKEN 0x00000100
2284#define SKY2_HW_VLAN_BROKEN 0x00000200
2287 2285
2288 u8 chip_id; 2286 u8 chip_id;
2289 u8 chip_rev; 2287 u8 chip_rev;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index cdbeec9f83ea..546de5749824 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -488,7 +488,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
488 488
489 if (unlikely(!netif_carrier_ok(dev) || 489 if (unlikely(!netif_carrier_ok(dev) ||
490 (frags > 1 && !xennet_can_sg(dev)) || 490 (frags > 1 && !xennet_can_sg(dev)) ||
491 netif_needs_gso(dev, skb))) { 491 netif_needs_gso(skb, netif_skb_features(skb)))) {
492 spin_unlock_irq(&np->tx_lock); 492 spin_unlock_irq(&np->tx_lock);
493 goto drop; 493 goto drop;
494 } 494 }
diff --git a/drivers/ps3/Makefile b/drivers/ps3/Makefile
index ccea15c11c19..50cb1e1b4a12 100644
--- a/drivers/ps3/Makefile
+++ b/drivers/ps3/Makefile
@@ -1,6 +1,6 @@
1obj-$(CONFIG_PS3_VUART) += ps3-vuart.o 1obj-$(CONFIG_PS3_VUART) += ps3-vuart.o
2obj-$(CONFIG_PS3_PS3AV) += ps3av_mod.o 2obj-$(CONFIG_PS3_PS3AV) += ps3av_mod.o
3ps3av_mod-objs += ps3av.o ps3av_cmd.o 3ps3av_mod-y := ps3av.o ps3av_cmd.o
4obj-$(CONFIG_PPC_PS3) += sys-manager-core.o 4obj-$(CONFIG_PPC_PS3) += sys-manager-core.o
5obj-$(CONFIG_PS3_SYS_MANAGER) += ps3-sys-manager.o 5obj-$(CONFIG_PS3_SYS_MANAGER) += ps3-sys-manager.o
6obj-$(CONFIG_PS3_STORAGE) += ps3stor_lib.o 6obj-$(CONFIG_PS3_STORAGE) += ps3stor_lib.o
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index e6539cbabb35..9583cbcc6b79 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -16,6 +16,7 @@
16#include <linux/kdev_t.h> 16#include <linux/kdev_t.h>
17#include <linux/idr.h> 17#include <linux/idr.h>
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/workqueue.h>
19 20
20#include "rtc-core.h" 21#include "rtc-core.h"
21 22
@@ -152,6 +153,18 @@ struct rtc_device *rtc_device_register(const char *name, struct device *dev,
152 spin_lock_init(&rtc->irq_task_lock); 153 spin_lock_init(&rtc->irq_task_lock);
153 init_waitqueue_head(&rtc->irq_queue); 154 init_waitqueue_head(&rtc->irq_queue);
154 155
156 /* Init timerqueue */
157 timerqueue_init_head(&rtc->timerqueue);
158 INIT_WORK(&rtc->irqwork, rtc_timer_do_work);
159 /* Init aie timer */
160 rtc_timer_init(&rtc->aie_timer, rtc_aie_update_irq, (void *)rtc);
161 /* Init uie timer */
162 rtc_timer_init(&rtc->uie_rtctimer, rtc_uie_update_irq, (void *)rtc);
163 /* Init pie timer */
164 hrtimer_init(&rtc->pie_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
165 rtc->pie_timer.function = rtc_pie_update_irq;
166 rtc->pie_enabled = 0;
167
155 strlcpy(rtc->name, name, RTC_DEVICE_NAME_SIZE); 168 strlcpy(rtc->name, name, RTC_DEVICE_NAME_SIZE);
156 dev_set_name(&rtc->dev, "rtc%d", id); 169 dev_set_name(&rtc->dev, "rtc%d", id);
157 170
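
The registration hunk above sets up a timerqueue head so every alarm source feeds one expiry-ordered queue whose next event is trivial to find. A deliberately simplified sketch of that structure using a sorted singly linked list; the kernel's timerqueue is rbtree-backed:

#include <stdio.h>

struct rtc_timer  { long long expires; struct rtc_timer *next; };
struct timerqueue { struct rtc_timer *head; };

static void tq_init(struct timerqueue *q)
{
	q->head = NULL;
}

static void tq_add(struct timerqueue *q, struct rtc_timer *t)
{
	struct rtc_timer **p = &q->head;

	while (*p && (*p)->expires <= t->expires)  /* keep sorted by expiry */
		p = &(*p)->next;
	t->next = *p;
	*p = t;
}

int main(void)
{
	struct timerqueue q;
	struct rtc_timer aie = { .expires = 500 };
	struct rtc_timer uie = { .expires = 100 };

	tq_init(&q);
	tq_add(&q, &aie);
	tq_add(&q, &uie);
	printf("next expiry: %lld\n", q.head->expires);  /* prints 100 */
	return 0;
}
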
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index a0c816238aa9..90384b9f6b2c 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -14,15 +14,11 @@
14#include <linux/rtc.h> 14#include <linux/rtc.h>
15#include <linux/sched.h> 15#include <linux/sched.h>
16#include <linux/log2.h> 16#include <linux/log2.h>
17#include <linux/workqueue.h>
17 18
18int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm) 19static int __rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
19{ 20{
20 int err; 21 int err;
21
22 err = mutex_lock_interruptible(&rtc->ops_lock);
23 if (err)
24 return err;
25
26 if (!rtc->ops) 22 if (!rtc->ops)
27 err = -ENODEV; 23 err = -ENODEV;
28 else if (!rtc->ops->read_time) 24 else if (!rtc->ops->read_time)
@@ -31,7 +27,18 @@ int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
31 memset(tm, 0, sizeof(struct rtc_time)); 27 memset(tm, 0, sizeof(struct rtc_time));
32 err = rtc->ops->read_time(rtc->dev.parent, tm); 28 err = rtc->ops->read_time(rtc->dev.parent, tm);
33 } 29 }
30 return err;
31}
32
33int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
34{
35 int err;
34 36
37 err = mutex_lock_interruptible(&rtc->ops_lock);
38 if (err)
39 return err;
40
41 err = __rtc_read_time(rtc, tm);
35 mutex_unlock(&rtc->ops_lock); 42 mutex_unlock(&rtc->ops_lock);
36 return err; 43 return err;
37} 44}
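
The __rtc_read_time() split above follows the usual double-underscore convention: the helper assumes ops_lock is already held so other lock holders can reuse it, while the public wrapper takes and drops the mutex. The same shape in user-space C with a pthread mutex:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ops_lock = PTHREAD_MUTEX_INITIALIZER;
static int hw_time = 42;            /* stands in for the RTC hardware */

static int __read_time(int *t)      /* caller must hold ops_lock */
{
	*t = hw_time;
	return 0;
}

static int read_time(int *t)        /* public entry: lock, read, unlock */
{
	int err;

	pthread_mutex_lock(&ops_lock);
	err = __read_time(t);
	pthread_mutex_unlock(&ops_lock);
	return err;
}

int main(void)
{
	int t;

	if (!read_time(&t))
		printf("time = %d\n", t);
	return 0;
}
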
@@ -106,188 +113,54 @@ int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs)
106} 113}
107EXPORT_SYMBOL_GPL(rtc_set_mmss); 114EXPORT_SYMBOL_GPL(rtc_set_mmss);
108 115
109static int rtc_read_alarm_internal(struct rtc_device *rtc, struct rtc_wkalrm *alarm) 116int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
110{ 117{
111 int err; 118 int err;
112 119
113 err = mutex_lock_interruptible(&rtc->ops_lock); 120 err = mutex_lock_interruptible(&rtc->ops_lock);
114 if (err) 121 if (err)
115 return err; 122 return err;
116 123 alarm->enabled = rtc->aie_timer.enabled;
117 if (rtc->ops == NULL) 124 if (alarm->enabled)
118 err = -ENODEV; 125 alarm->time = rtc_ktime_to_tm(rtc->aie_timer.node.expires);
119 else if (!rtc->ops->read_alarm)
120 err = -EINVAL;
121 else {
122 memset(alarm, 0, sizeof(struct rtc_wkalrm));
123 err = rtc->ops->read_alarm(rtc->dev.parent, alarm);
124 }
125
126 mutex_unlock(&rtc->ops_lock); 126 mutex_unlock(&rtc->ops_lock);
127 return err; 127
128 return 0;
128} 129}
130EXPORT_SYMBOL_GPL(rtc_read_alarm);
129 131
130int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) 132int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
131{ 133{
134 struct rtc_time tm;
135 long now, scheduled;
132 int err; 136 int err;
133 struct rtc_time before, now;
134 int first_time = 1;
135 unsigned long t_now, t_alm;
136 enum { none, day, month, year } missing = none;
137 unsigned days;
138
139 /* The lower level RTC driver may return -1 in some fields,
140 * creating invalid alarm->time values, for reasons like:
141 *
142 * - The hardware may not be capable of filling them in;
143 * many alarms match only on time-of-day fields, not
144 * day/month/year calendar data.
145 *
146 * - Some hardware uses illegal values as "wildcard" match
147 * values, which non-Linux firmware (like a BIOS) may try
148 * to set up as e.g. "alarm 15 minutes after each hour".
149 * Linux uses only oneshot alarms.
150 *
151 * When we see that here, we deal with it by using values from
152 * a current RTC timestamp for any missing (-1) values. The
153 * RTC driver prevents "periodic alarm" modes.
154 *
155 * But this can be racey, because some fields of the RTC timestamp
156 * may have wrapped in the interval since we read the RTC alarm,
157 * which would lead to us inserting inconsistent values in place
158 * of the -1 fields.
159 *
160 * Reading the alarm and timestamp in the reverse sequence
161 * would have the same race condition, and not solve the issue.
162 *
163 * So, we must first read the RTC timestamp,
164 * then read the RTC alarm value,
165 * and then read a second RTC timestamp.
166 *
167 * If any fields of the second timestamp have changed
168 * when compared with the first timestamp, then we know
169 * our timestamp may be inconsistent with that used by
170 * the low-level rtc_read_alarm_internal() function.
171 *
172 * So, when the two timestamps disagree, we just loop and do
173 * the process again to get a fully consistent set of values.
174 *
175 * This could all instead be done in the lower level driver,
176 * but since more than one lower level RTC implementation needs it,
177 * then it's probably best best to do it here instead of there..
178 */
179 137
180 /* Get the "before" timestamp */ 138 err = rtc_valid_tm(&alarm->time);
181 err = rtc_read_time(rtc, &before); 139 if (err)
182 if (err < 0)
183 return err; 140 return err;
184 do { 141 rtc_tm_to_time(&alarm->time, &scheduled);
185 if (!first_time)
186 memcpy(&before, &now, sizeof(struct rtc_time));
187 first_time = 0;
188
189 /* get the RTC alarm values, which may be incomplete */
190 err = rtc_read_alarm_internal(rtc, alarm);
191 if (err)
192 return err;
193 if (!alarm->enabled)
194 return 0;
195
196 /* full-function RTCs won't have such missing fields */
197 if (rtc_valid_tm(&alarm->time) == 0)
198 return 0;
199
200 /* get the "after" timestamp, to detect wrapped fields */
201 err = rtc_read_time(rtc, &now);
202 if (err < 0)
203 return err;
204
205 /* note that tm_sec is a "don't care" value here: */
206 } while ( before.tm_min != now.tm_min
207 || before.tm_hour != now.tm_hour
208 || before.tm_mon != now.tm_mon
209 || before.tm_year != now.tm_year);
210
211 /* Fill in the missing alarm fields using the timestamp; we
212 * know there's at least one since alarm->time is invalid.
213 */
214 if (alarm->time.tm_sec == -1)
215 alarm->time.tm_sec = now.tm_sec;
216 if (alarm->time.tm_min == -1)
217 alarm->time.tm_min = now.tm_min;
218 if (alarm->time.tm_hour == -1)
219 alarm->time.tm_hour = now.tm_hour;
220
221 /* For simplicity, only support date rollover for now */
222 if (alarm->time.tm_mday == -1) {
223 alarm->time.tm_mday = now.tm_mday;
224 missing = day;
225 }
226 if (alarm->time.tm_mon == -1) {
227 alarm->time.tm_mon = now.tm_mon;
228 if (missing == none)
229 missing = month;
230 }
231 if (alarm->time.tm_year == -1) {
232 alarm->time.tm_year = now.tm_year;
233 if (missing == none)
234 missing = year;
235 }
236
237 /* with luck, no rollover is needed */
238 rtc_tm_to_time(&now, &t_now);
239 rtc_tm_to_time(&alarm->time, &t_alm);
240 if (t_now < t_alm)
241 goto done;
242
243 switch (missing) {
244 142
245 /* 24 hour rollover ... if it's now 10am Monday, an alarm that 143 /* Make sure we're not setting alarms in the past */
246 * that will trigger at 5am will do so at 5am Tuesday, which 144 err = __rtc_read_time(rtc, &tm);
247 * could also be in the next month or year. This is a common 145 rtc_tm_to_time(&tm, &now);
248 * case, especially for PCs. 146 if (scheduled <= now)
249 */ 147 return -ETIME;
250 case day: 148 /*
251 dev_dbg(&rtc->dev, "alarm rollover: %s\n", "day"); 149 * XXX - We just checked to make sure the alarm time is not
252 t_alm += 24 * 60 * 60; 150 * in the past, but there is still a race window where if
253 rtc_time_to_tm(t_alm, &alarm->time); 151 * the alarm is set for the next second and the second ticks
254 break; 152 * over right here, before we set the alarm.
255
256 /* Month rollover ... if it's the 31th, an alarm on the 3rd will
257 * be next month. An alarm matching on the 30th, 29th, or 28th
258 * may end up in the month after that! Many newer PCs support
259 * this type of alarm.
260 */ 153 */
261 case month:
262 dev_dbg(&rtc->dev, "alarm rollover: %s\n", "month");
263 do {
264 if (alarm->time.tm_mon < 11)
265 alarm->time.tm_mon++;
266 else {
267 alarm->time.tm_mon = 0;
268 alarm->time.tm_year++;
269 }
270 days = rtc_month_days(alarm->time.tm_mon,
271 alarm->time.tm_year);
272 } while (days < alarm->time.tm_mday);
273 break;
274
275 /* Year rollover ... easy except for leap years! */
276 case year:
277 dev_dbg(&rtc->dev, "alarm rollover: %s\n", "year");
278 do {
279 alarm->time.tm_year++;
280 } while (rtc_valid_tm(&alarm->time) != 0);
281 break;
282
283 default:
284 dev_warn(&rtc->dev, "alarm rollover not handled\n");
285 }
286 154
287done: 155 if (!rtc->ops)
288 return 0; 156 err = -ENODEV;
157 else if (!rtc->ops->set_alarm)
158 err = -EINVAL;
159 else
160 err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
161
162 return err;
289} 163}
290EXPORT_SYMBOL_GPL(rtc_read_alarm);
291 164
292int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) 165int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
293{ 166{
@@ -300,16 +173,18 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
300 err = mutex_lock_interruptible(&rtc->ops_lock); 173 err = mutex_lock_interruptible(&rtc->ops_lock);
301 if (err) 174 if (err)
302 return err; 175 return err;
303 176 if (rtc->aie_timer.enabled) {
304 if (!rtc->ops) 177 rtc_timer_remove(rtc, &rtc->aie_timer);
305 err = -ENODEV; 178 rtc->aie_timer.enabled = 0;
306 else if (!rtc->ops->set_alarm) 179 }
307 err = -EINVAL; 180 rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time);
308 else 181 rtc->aie_timer.period = ktime_set(0, 0);
309 err = rtc->ops->set_alarm(rtc->dev.parent, alarm); 182 if (alarm->enabled) {
310 183 rtc->aie_timer.enabled = 1;
184 rtc_timer_enqueue(rtc, &rtc->aie_timer);
185 }
311 mutex_unlock(&rtc->ops_lock); 186 mutex_unlock(&rtc->ops_lock);
312 return err; 187 return 0;
313} 188}
314EXPORT_SYMBOL_GPL(rtc_set_alarm); 189EXPORT_SYMBOL_GPL(rtc_set_alarm);
315 190
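
Callers keep the existing rtc_wkalrm interface; the alarm is simply parked in the aie_timer and enqueued. A hedged usage sketch (the rtc pointer is assumed to come from rtc_class_open(), and the naive minute increment ignores wrap-around):

	struct rtc_wkalrm alrm;
	struct rtc_time now;

	rtc_read_time(rtc, &now);
	alrm.time = now;
	alrm.time.tm_min += 1;		/* sketch only: may exceed 59 */
	alrm.enabled = 1;
	rtc_set_alarm(rtc, &alrm);	/* enqueues rtc->aie_timer as above */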
@@ -319,6 +194,16 @@ int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled)
319 if (err) 194 if (err)
320 return err; 195 return err;
321 196
197 if (rtc->aie_timer.enabled != enabled) {
198 if (enabled) {
199 rtc->aie_timer.enabled = 1;
200 rtc_timer_enqueue(rtc, &rtc->aie_timer);
201 } else {
202 rtc_timer_remove(rtc, &rtc->aie_timer);
203 rtc->aie_timer.enabled = 0;
204 }
205 }
206
322 if (!rtc->ops) 207 if (!rtc->ops)
323 err = -ENODEV; 208 err = -ENODEV;
324 else if (!rtc->ops->alarm_irq_enable) 209 else if (!rtc->ops->alarm_irq_enable)
@@ -337,52 +222,53 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
337 if (err) 222 if (err)
338 return err; 223 return err;
339 224
340#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL 225 /* make sure we're changing state */
341 if (enabled == 0 && rtc->uie_irq_active) { 226 if (rtc->uie_rtctimer.enabled == enabled)
342 mutex_unlock(&rtc->ops_lock); 227 goto out;
343 return rtc_dev_update_irq_enable_emul(rtc, enabled); 228
229 if (enabled) {
230 struct rtc_time tm;
231 ktime_t now, onesec;
232
233 __rtc_read_time(rtc, &tm);
234 onesec = ktime_set(1, 0);
235 now = rtc_tm_to_ktime(tm);
236 rtc->uie_rtctimer.node.expires = ktime_add(now, onesec);
237 rtc->uie_rtctimer.period = ktime_set(1, 0);
238 rtc->uie_rtctimer.enabled = 1;
239 rtc_timer_enqueue(rtc, &rtc->uie_rtctimer);
240 } else {
241 rtc_timer_remove(rtc, &rtc->uie_rtctimer);
242 rtc->uie_rtctimer.enabled = 0;
344 } 243 }
345#endif
346
347 if (!rtc->ops)
348 err = -ENODEV;
349 else if (!rtc->ops->update_irq_enable)
350 err = -EINVAL;
351 else
352 err = rtc->ops->update_irq_enable(rtc->dev.parent, enabled);
353 244
245out:
354 mutex_unlock(&rtc->ops_lock); 246 mutex_unlock(&rtc->ops_lock);
355
356#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
357 /*
358 * Enable emulation if the driver did not provide
359 * the update_irq_enable function pointer or if returned
360 * -EINVAL to signal that it has been configured without
361 * interrupts or that are not available at the moment.
362 */
363 if (err == -EINVAL)
364 err = rtc_dev_update_irq_enable_emul(rtc, enabled);
365#endif
366 return err; 247 return err;
248
367} 249}
368EXPORT_SYMBOL_GPL(rtc_update_irq_enable); 250EXPORT_SYMBOL_GPL(rtc_update_irq_enable);
369 251
252
370/** 253/**
371 * rtc_update_irq - report RTC periodic, alarm, and/or update irqs 254 * rtc_handle_legacy_irq - AIE, UIE and PIE event hook
372 * @rtc: the rtc device 255 * @rtc: pointer to the rtc device
373 * @num: how many irqs are being reported (usually one) 256 *
374 * @events: mask of RTC_IRQF with one or more of RTC_PF, RTC_AF, RTC_UF 257 * This function is called when an AIE, UIE or PIE mode interrupt
375 * Context: any 258 * has occured (or been emulated).
259 *
260 * Triggers the registered irq_task function callback.
376 */ 261 */
377void rtc_update_irq(struct rtc_device *rtc, 262static void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode)
378 unsigned long num, unsigned long events)
379{ 263{
380 unsigned long flags; 264 unsigned long flags;
381 265
266 /* mark one irq of the appropriate mode */
382 spin_lock_irqsave(&rtc->irq_lock, flags); 267 spin_lock_irqsave(&rtc->irq_lock, flags);
383 rtc->irq_data = (rtc->irq_data + (num << 8)) | events; 268 rtc->irq_data = (rtc->irq_data + (num << 8)) | (RTC_IRQF|mode);
384 spin_unlock_irqrestore(&rtc->irq_lock, flags); 269 spin_unlock_irqrestore(&rtc->irq_lock, flags);
385 270
271 /* call the task func */
386 spin_lock_irqsave(&rtc->irq_task_lock, flags); 272 spin_lock_irqsave(&rtc->irq_task_lock, flags);
387 if (rtc->irq_task) 273 if (rtc->irq_task)
388 rtc->irq_task->func(rtc->irq_task->private_data); 274 rtc->irq_task->func(rtc->irq_task->private_data);
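
rtc_handle_legacy_irq() preserves the historical irq_data encoding that userspace reads from /dev/rtcN: the low byte holds the event flags, the upper bits count delivered interrupts. A worked example of the update expression used above:

	/* two events already recorded as periodic ticks */
	unsigned long irq_data = (2 << 8) | RTC_IRQF | RTC_PF;

	/* one more RTC_PF event, exactly as in rtc_handle_legacy_irq() */
	irq_data = (irq_data + (1 << 8)) | (RTC_IRQF | RTC_PF);
	/* count is now 3; the flag byte is unchanged */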
@@ -391,6 +277,69 @@ void rtc_update_irq(struct rtc_device *rtc,
391 wake_up_interruptible(&rtc->irq_queue); 277 wake_up_interruptible(&rtc->irq_queue);
392 kill_fasync(&rtc->async_queue, SIGIO, POLL_IN); 278 kill_fasync(&rtc->async_queue, SIGIO, POLL_IN);
393} 279}
280
281
282/**
283 * rtc_aie_update_irq - AIE mode rtctimer hook
284 * @private: pointer to the rtc_device
285 *
286 * This function is called when the aie_timer expires.
287 */
288void rtc_aie_update_irq(void *private)
289{
290 struct rtc_device *rtc = (struct rtc_device *)private;
291 rtc_handle_legacy_irq(rtc, 1, RTC_AF);
292}
293
294
295/**
296 * rtc_uie_update_irq - UIE mode rtctimer hook
297 * @private: pointer to the rtc_device
298 *
299 * This function is called when the uie_timer expires.
300 */
301void rtc_uie_update_irq(void *private)
302{
303 struct rtc_device *rtc = (struct rtc_device *)private;
304 rtc_handle_legacy_irq(rtc, 1, RTC_UF);
305}
306
307
308/**
309 * rtc_pie_update_irq - PIE mode hrtimer hook
310 * @timer: pointer to the pie mode hrtimer
311 *
312 * This function is used to emulate PIE mode interrupts
313 * using an hrtimer. This function is called when the periodic
314 * hrtimer expires.
315 */
316enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer)
317{
318 struct rtc_device *rtc;
319 ktime_t period;
320 int count;
321 rtc = container_of(timer, struct rtc_device, pie_timer);
322
323 period = ktime_set(0, NSEC_PER_SEC/rtc->irq_freq);
324 count = hrtimer_forward_now(timer, period);
325
326 rtc_handle_legacy_irq(rtc, count, RTC_PF);
327
328 return HRTIMER_RESTART;
329}
330
331/**
332 * rtc_update_irq - Triggered when an RTC interrupt occurs.
333 * @rtc: the rtc device
334 * @num: how many irqs are being reported (usually one)
335 * @events: mask of RTC_IRQF with one or more of RTC_PF, RTC_AF, RTC_UF
336 * Context: any
337 */
338void rtc_update_irq(struct rtc_device *rtc,
339 unsigned long num, unsigned long events)
340{
341 schedule_work(&rtc->irqwork);
342}
394EXPORT_SYMBOL_GPL(rtc_update_irq); 343EXPORT_SYMBOL_GPL(rtc_update_irq);
395 344
396static int __rtc_match(struct device *dev, void *data) 345static int __rtc_match(struct device *dev, void *data)
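
rtc_pie_update_irq() derives its period from irq_freq and relies on hrtimer_forward_now() to account for overruns: the return value is the number of periods the timer is advanced by, which becomes the `num` reported to rtc_handle_legacy_irq(). Sketch of the period math at an assumed 8 Hz:

	ktime_t period = ktime_set(0, NSEC_PER_SEC / 8);	/* 125000000 ns */

	/* an on-time expiry advances by one period and reports one RTC_PF
	 * event; a delayed callback folds the missed periods into a
	 * single report with a larger count */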
@@ -477,18 +426,20 @@ int rtc_irq_set_state(struct rtc_device *rtc, struct rtc_task *task, int enabled
477 int err = 0; 426 int err = 0;
478 unsigned long flags; 427 unsigned long flags;
479 428
480 if (rtc->ops->irq_set_state == NULL)
481 return -ENXIO;
482
483 spin_lock_irqsave(&rtc->irq_task_lock, flags); 429 spin_lock_irqsave(&rtc->irq_task_lock, flags);
484 if (rtc->irq_task != NULL && task == NULL) 430 if (rtc->irq_task != NULL && task == NULL)
485 err = -EBUSY; 431 err = -EBUSY;
486 if (rtc->irq_task != task) 432 if (rtc->irq_task != task)
487 err = -EACCES; 433 err = -EACCES;
488 spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
489 434
490 if (err == 0) 435 if (enabled) {
491 err = rtc->ops->irq_set_state(rtc->dev.parent, enabled); 436 ktime_t period = ktime_set(0, NSEC_PER_SEC/rtc->irq_freq);
437 hrtimer_start(&rtc->pie_timer, period, HRTIMER_MODE_REL);
438 } else {
439 hrtimer_cancel(&rtc->pie_timer);
440 }
441 rtc->pie_enabled = enabled;
442 spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
492 443
493 return err; 444 return err;
494} 445}
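
With the PIE path virtualized, rtc_irq_set_state() and rtc_irq_set_freq() no longer need irq_set_state/irq_set_freq driver hooks; they only manage the hrtimer. A hedged sketch of the calling sequence for a 16 Hz periodic interrupt (the task pointer is assumed to be a registered struct rtc_task):

	rtc_irq_set_freq(rtc, task, 16);	/* store irq_freq, retrigger if running */
	rtc_irq_set_state(rtc, task, 1);	/* arm pie_timer at 1/16 s */
	/* ... */
	rtc_irq_set_state(rtc, task, 0);	/* hrtimer_cancel(&rtc->pie_timer) */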
@@ -509,21 +460,194 @@ int rtc_irq_set_freq(struct rtc_device *rtc, struct rtc_task *task, int freq)
509 int err = 0; 460 int err = 0;
510 unsigned long flags; 461 unsigned long flags;
511 462
512 if (rtc->ops->irq_set_freq == NULL)
513 return -ENXIO;
514
515 spin_lock_irqsave(&rtc->irq_task_lock, flags); 463 spin_lock_irqsave(&rtc->irq_task_lock, flags);
516 if (rtc->irq_task != NULL && task == NULL) 464 if (rtc->irq_task != NULL && task == NULL)
517 err = -EBUSY; 465 err = -EBUSY;
518 if (rtc->irq_task != task) 466 if (rtc->irq_task != task)
519 err = -EACCES; 467 err = -EACCES;
520 spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
521
522 if (err == 0) { 468 if (err == 0) {
523 err = rtc->ops->irq_set_freq(rtc->dev.parent, freq); 469 rtc->irq_freq = freq;
524 if (err == 0) 470 if (rtc->pie_enabled) {
525 rtc->irq_freq = freq; 471 ktime_t period;
472 hrtimer_cancel(&rtc->pie_timer);
473 period = ktime_set(0, NSEC_PER_SEC/rtc->irq_freq);
474 hrtimer_start(&rtc->pie_timer, period,
475 HRTIMER_MODE_REL);
476 }
526 } 477 }
478 spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
527 return err; 479 return err;
528} 480}
529EXPORT_SYMBOL_GPL(rtc_irq_set_freq); 481EXPORT_SYMBOL_GPL(rtc_irq_set_freq);
482
483/**
484 * rtc_timer_enqueue - Adds a rtc_timer to the rtc_device timerqueue
485 * @rtc: rtc device
486 * @timer: timer being added.
487 *
488 * Enqueues a timer onto the rtc device's timerqueue and sets
489 * the next alarm event appropriately.
490 *
491 * Must hold ops_lock for proper serialization of timerqueue
492 */
493void rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
494{
495 timerqueue_add(&rtc->timerqueue, &timer->node);
496 if (&timer->node == timerqueue_getnext(&rtc->timerqueue)) {
497 struct rtc_wkalrm alarm;
498 int err;
499 alarm.time = rtc_ktime_to_tm(timer->node.expires);
500 alarm.enabled = 1;
501 err = __rtc_set_alarm(rtc, &alarm);
502 if (err == -ETIME)
503 schedule_work(&rtc->irqwork);
504 }
505}
506
507/**
508 * rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue
509 * @rtc: rtc device
510 * @timer: timer being removed.
511 *
512 * Removes a timer from the rtc device's timerqueue and sets
513 * the next alarm event appropriately.
514 *
515 * Must hold ops_lock for proper serialization of timerqueue
516 */
517void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
518{
519 struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue);
520 timerqueue_del(&rtc->timerqueue, &timer->node);
521
522 if (next == &timer->node) {
523 struct rtc_wkalrm alarm;
524 int err;
525 next = timerqueue_getnext(&rtc->timerqueue);
526 if (!next)
527 return;
528 alarm.time = rtc_ktime_to_tm(next->expires);
529 alarm.enabled = 1;
530 err = __rtc_set_alarm(rtc, &alarm);
531 if (err == -ETIME)
532 schedule_work(&rtc->irqwork);
533 }
534}
535
536/**
537 * rtc_timer_do_work - Expires rtc timers
538 * @work: the work_struct embedded in the rtc_device
539 * whose timerqueue is to be serviced.
540 *
541 * Expires rtc timers. Reprograms next alarm event if needed.
542 * Called from the irqwork workqueue task.
543 *
544 * Serializes access to timerqueue via ops_lock mutex
545 */
546void rtc_timer_do_work(struct work_struct *work)
547{
548 struct rtc_timer *timer;
549 struct timerqueue_node *next;
550 ktime_t now;
551 struct rtc_time tm;
552
553 struct rtc_device *rtc =
554 container_of(work, struct rtc_device, irqwork);
555
556 mutex_lock(&rtc->ops_lock);
557again:
558 __rtc_read_time(rtc, &tm);
559 now = rtc_tm_to_ktime(tm);
560 while ((next = timerqueue_getnext(&rtc->timerqueue))) {
561 if (next->expires.tv64 > now.tv64)
562 break;
563
564 /* expire timer */
565 timer = container_of(next, struct rtc_timer, node);
566 timerqueue_del(&rtc->timerqueue, &timer->node);
567 timer->enabled = 0;
568 if (timer->task.func)
569 timer->task.func(timer->task.private_data);
570
571 /* Re-add/fwd periodic timers */
572 if (ktime_to_ns(timer->period)) {
573 timer->node.expires = ktime_add(timer->node.expires,
574 timer->period);
575 timer->enabled = 1;
576 timerqueue_add(&rtc->timerqueue, &timer->node);
577 }
578 }
579
580 /* Set next alarm */
581 if (next) {
582 struct rtc_wkalrm alarm;
583 int err;
584 alarm.time = rtc_ktime_to_tm(next->expires);
585 alarm.enabled = 1;
586 err = __rtc_set_alarm(rtc, &alarm);
587 if (err == -ETIME)
588 goto again;
589 }
590
591 mutex_unlock(&rtc->ops_lock);
592}
593
594
595/* rtc_timer_init - Initializes an rtc_timer
596 * @timer: timer to be initialized
597 * @f: function pointer to be called when timer fires
598 * @data: private data passed to function pointer
599 *
600 * Kernel interface for initializing an rtc_timer.
601 */
602void rtc_timer_init(struct rtc_timer *timer, void (*f)(void* p), void* data)
603{
604 timerqueue_init(&timer->node);
605 timer->enabled = 0;
606 timer->task.func = f;
607 timer->task.private_data = data;
608}
609
610/* rtc_timer_start - Sets an rtc_timer to fire in the future
611 * @rtc: rtc device to be used
612 * @timer: timer being set
613 * @expires: time at which to expire the timer
614 * @period: period at which the timer recurs
615 *
616 * Kernel interface to set an rtc_timer
617 */
618int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer* timer,
619 ktime_t expires, ktime_t period)
620{
621 int ret = 0;
622 mutex_lock(&rtc->ops_lock);
623 if (timer->enabled)
624 rtc_timer_remove(rtc, timer);
625
626 timer->node.expires = expires;
627 timer->period = period;
628
629 timer->enabled = 1;
630 rtc_timer_enqueue(rtc, timer);
631
632 mutex_unlock(&rtc->ops_lock);
633 return ret;
634}
635
636/* rtc_timer_cancel - Stops an rtc_timer
637 * @rtc: rtc device to be used
638 * @timer: timer being cancelled
639 *
640 * Kernel interface to cancel an rtc_timer
641 */
642int rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer* timer)
643{
644 int ret = 0;
645 mutex_lock(&rtc->ops_lock);
646 if (timer->enabled)
647 rtc_timer_remove(rtc, timer);
648 timer->enabled = 0;
649 mutex_unlock(&rtc->ops_lock);
650 return ret;
651}
652
653
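
The rtc_timer_* functions added above are the primitive the AIE and UIE paths are built on, and they can also be driven directly. A hedged sketch of direct use; my_timer_fired() and the expiry arithmetic are illustrative, and `now` is assumed to hold the current RTC time read via rtc_read_time():

	static void my_timer_fired(void *priv)	/* hypothetical callback */
	{
		struct rtc_device *rtc = priv;

		dev_info(&rtc->dev, "rtc timer expired\n");
	}

	static struct rtc_timer my_timer;

	/* first expiry 10 s from now, then every 5 s */
	rtc_timer_init(&my_timer, my_timer_fired, rtc);
	rtc_timer_start(rtc, &my_timer,
			ktime_add(rtc_tm_to_ktime(now), ktime_set(10, 0)),
			ktime_set(5, 0));

	/* later */
	rtc_timer_cancel(rtc, &my_timer);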
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 5856167a0c90..7e6ce626b7f1 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -687,7 +687,8 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
687#if defined(CONFIG_ATARI) 687#if defined(CONFIG_ATARI)
688 address_space = 64; 688 address_space = 64;
689#elif defined(__i386__) || defined(__x86_64__) || defined(__arm__) \ 689#elif defined(__i386__) || defined(__x86_64__) || defined(__arm__) \
690 || defined(__sparc__) || defined(__mips__) 690 || defined(__sparc__) || defined(__mips__) \
691 || defined(__powerpc__)
691 address_space = 128; 692 address_space = 128;
692#else 693#else
693#warning Assuming 128 bytes of RTC+NVRAM address space, not 64 bytes. 694#warning Assuming 128 bytes of RTC+NVRAM address space, not 64 bytes.
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index 0cc0984d155b..212b16edafc0 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -46,105 +46,6 @@ static int rtc_dev_open(struct inode *inode, struct file *file)
46 return err; 46 return err;
47} 47}
48 48
49#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
50/*
51 * Routine to poll RTC seconds field for change as often as possible,
52 * after first RTC_UIE use timer to reduce polling
53 */
54static void rtc_uie_task(struct work_struct *work)
55{
56 struct rtc_device *rtc =
57 container_of(work, struct rtc_device, uie_task);
58 struct rtc_time tm;
59 int num = 0;
60 int err;
61
62 err = rtc_read_time(rtc, &tm);
63
64 spin_lock_irq(&rtc->irq_lock);
65 if (rtc->stop_uie_polling || err) {
66 rtc->uie_task_active = 0;
67 } else if (rtc->oldsecs != tm.tm_sec) {
68 num = (tm.tm_sec + 60 - rtc->oldsecs) % 60;
69 rtc->oldsecs = tm.tm_sec;
70 rtc->uie_timer.expires = jiffies + HZ - (HZ/10);
71 rtc->uie_timer_active = 1;
72 rtc->uie_task_active = 0;
73 add_timer(&rtc->uie_timer);
74 } else if (schedule_work(&rtc->uie_task) == 0) {
75 rtc->uie_task_active = 0;
76 }
77 spin_unlock_irq(&rtc->irq_lock);
78 if (num)
79 rtc_update_irq(rtc, num, RTC_UF | RTC_IRQF);
80}
81static void rtc_uie_timer(unsigned long data)
82{
83 struct rtc_device *rtc = (struct rtc_device *)data;
84 unsigned long flags;
85
86 spin_lock_irqsave(&rtc->irq_lock, flags);
87 rtc->uie_timer_active = 0;
88 rtc->uie_task_active = 1;
89 if ((schedule_work(&rtc->uie_task) == 0))
90 rtc->uie_task_active = 0;
91 spin_unlock_irqrestore(&rtc->irq_lock, flags);
92}
93
94static int clear_uie(struct rtc_device *rtc)
95{
96 spin_lock_irq(&rtc->irq_lock);
97 if (rtc->uie_irq_active) {
98 rtc->stop_uie_polling = 1;
99 if (rtc->uie_timer_active) {
100 spin_unlock_irq(&rtc->irq_lock);
101 del_timer_sync(&rtc->uie_timer);
102 spin_lock_irq(&rtc->irq_lock);
103 rtc->uie_timer_active = 0;
104 }
105 if (rtc->uie_task_active) {
106 spin_unlock_irq(&rtc->irq_lock);
107 flush_work_sync(&rtc->uie_task);
108 spin_lock_irq(&rtc->irq_lock);
109 }
110 rtc->uie_irq_active = 0;
111 }
112 spin_unlock_irq(&rtc->irq_lock);
113 return 0;
114}
115
116static int set_uie(struct rtc_device *rtc)
117{
118 struct rtc_time tm;
119 int err;
120
121 err = rtc_read_time(rtc, &tm);
122 if (err)
123 return err;
124 spin_lock_irq(&rtc->irq_lock);
125 if (!rtc->uie_irq_active) {
126 rtc->uie_irq_active = 1;
127 rtc->stop_uie_polling = 0;
128 rtc->oldsecs = tm.tm_sec;
129 rtc->uie_task_active = 1;
130 if (schedule_work(&rtc->uie_task) == 0)
131 rtc->uie_task_active = 0;
132 }
133 rtc->irq_data = 0;
134 spin_unlock_irq(&rtc->irq_lock);
135 return 0;
136}
137
138int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc, unsigned int enabled)
139{
140 if (enabled)
141 return set_uie(rtc);
142 else
143 return clear_uie(rtc);
144}
145EXPORT_SYMBOL(rtc_dev_update_irq_enable_emul);
146
147#endif /* CONFIG_RTC_INTF_DEV_UIE_EMUL */
148 49
149static ssize_t 50static ssize_t
150rtc_dev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) 51rtc_dev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
@@ -493,11 +394,6 @@ void rtc_dev_prepare(struct rtc_device *rtc)
493 394
494 rtc->dev.devt = MKDEV(MAJOR(rtc_devt), rtc->id); 395 rtc->dev.devt = MKDEV(MAJOR(rtc_devt), rtc->id);
495 396
496#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
497 INIT_WORK(&rtc->uie_task, rtc_uie_task);
498 setup_timer(&rtc->uie_timer, rtc_uie_timer, (unsigned long)rtc);
499#endif
500
501 cdev_init(&rtc->char_dev, &rtc_dev_fops); 397 cdev_init(&rtc->char_dev, &rtc_dev_fops);
502 rtc->char_dev.owner = rtc->owner; 398 rtc->char_dev.owner = rtc->owner;
503} 399}
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index d827ce570a8c..0d559b6416dd 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -106,9 +106,9 @@ struct ds1307 {
106 struct i2c_client *client; 106 struct i2c_client *client;
107 struct rtc_device *rtc; 107 struct rtc_device *rtc;
108 struct work_struct work; 108 struct work_struct work;
109 s32 (*read_block_data)(struct i2c_client *client, u8 command, 109 s32 (*read_block_data)(const struct i2c_client *client, u8 command,
110 u8 length, u8 *values); 110 u8 length, u8 *values);
111 s32 (*write_block_data)(struct i2c_client *client, u8 command, 111 s32 (*write_block_data)(const struct i2c_client *client, u8 command,
112 u8 length, const u8 *values); 112 u8 length, const u8 *values);
113}; 113};
114 114
@@ -158,8 +158,8 @@ MODULE_DEVICE_TABLE(i2c, ds1307_id);
158 158
159#define BLOCK_DATA_MAX_TRIES 10 159#define BLOCK_DATA_MAX_TRIES 10
160 160
161static s32 ds1307_read_block_data_once(struct i2c_client *client, u8 command, 161static s32 ds1307_read_block_data_once(const struct i2c_client *client,
162 u8 length, u8 *values) 162 u8 command, u8 length, u8 *values)
163{ 163{
164 s32 i, data; 164 s32 i, data;
165 165
@@ -172,7 +172,7 @@ static s32 ds1307_read_block_data_once(struct i2c_client *client, u8 command,
172 return i; 172 return i;
173} 173}
174 174
175static s32 ds1307_read_block_data(struct i2c_client *client, u8 command, 175static s32 ds1307_read_block_data(const struct i2c_client *client, u8 command,
176 u8 length, u8 *values) 176 u8 length, u8 *values)
177{ 177{
178 u8 oldvalues[I2C_SMBUS_BLOCK_MAX]; 178 u8 oldvalues[I2C_SMBUS_BLOCK_MAX];
@@ -198,7 +198,7 @@ static s32 ds1307_read_block_data(struct i2c_client *client, u8 command,
198 return length; 198 return length;
199} 199}
200 200
201static s32 ds1307_write_block_data(struct i2c_client *client, u8 command, 201static s32 ds1307_write_block_data(const struct i2c_client *client, u8 command,
202 u8 length, const u8 *values) 202 u8 length, const u8 *values)
203{ 203{
204 u8 currvalues[I2C_SMBUS_BLOCK_MAX]; 204 u8 currvalues[I2C_SMBUS_BLOCK_MAX];
diff --git a/drivers/rtc/rtc-lib.c b/drivers/rtc/rtc-lib.c
index 773851f338b8..075f1708deae 100644
--- a/drivers/rtc/rtc-lib.c
+++ b/drivers/rtc/rtc-lib.c
@@ -117,4 +117,32 @@ int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time)
117} 117}
118EXPORT_SYMBOL(rtc_tm_to_time); 118EXPORT_SYMBOL(rtc_tm_to_time);
119 119
120/*
121 * Convert rtc_time to ktime
122 */
123ktime_t rtc_tm_to_ktime(struct rtc_time tm)
124{
125 time_t time;
126 rtc_tm_to_time(&tm, &time);
127 return ktime_set(time, 0);
128}
129EXPORT_SYMBOL_GPL(rtc_tm_to_ktime);
130
131/*
132 * Convert ktime to rtc_time
133 */
134struct rtc_time rtc_ktime_to_tm(ktime_t kt)
135{
136 struct timespec ts;
137 struct rtc_time ret;
138
139 ts = ktime_to_timespec(kt);
140 /* Round up any ns */
141 if (ts.tv_nsec)
142 ts.tv_sec++;
143 rtc_time_to_tm(ts.tv_sec, &ret);
144 return ret;
145}
146EXPORT_SYMBOL_GPL(rtc_ktime_to_tm);
147
120MODULE_LICENSE("GPL"); 148MODULE_LICENSE("GPL");
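
rtc_ktime_to_tm() rounds any sub-second remainder up before converting, so a timer expiry is never translated to an RTC second that has already passed. A round-trip sketch:

	/* 1000000000 s plus 500 ns becomes 1000000001 s after rounding */
	struct rtc_time tm = rtc_ktime_to_tm(ktime_set(1000000000, 500));

	/* converting back yields a whole-second ktime */
	ktime_t kt = rtc_tm_to_ktime(tm);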
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 188aff6d263f..c1df7676a73d 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -776,24 +776,7 @@ config BFIN_UART0_CTSRTS
776 bool "Enable UART0 hardware flow control" 776 bool "Enable UART0 hardware flow control"
777 depends on SERIAL_BFIN_UART0 777 depends on SERIAL_BFIN_UART0
778 help 778 help
779 Enable hardware flow control in the driver. Using GPIO emulate the CTS/RTS 779 Enable hardware flow control in the driver.
780 signal.
781
782config UART0_CTS_PIN
783 int "UART0 CTS pin"
784 depends on BFIN_UART0_CTSRTS && !BF548
785 default 23
786 help
787 The default pin is GPIO_GP7.
788 Refer to arch/blackfin/mach-*/include/mach/gpio.h to see the GPIO map.
789
790config UART0_RTS_PIN
791 int "UART0 RTS pin"
792 depends on BFIN_UART0_CTSRTS && !BF548
793 default 22
794 help
795 The default pin is GPIO_GP6.
796 Refer to arch/blackfin/mach-*/include/mach/gpio.h to see the GPIO map.
797 780
798config SERIAL_BFIN_UART1 781config SERIAL_BFIN_UART1
799 bool "Enable UART1" 782 bool "Enable UART1"
@@ -805,22 +788,7 @@ config BFIN_UART1_CTSRTS
805 bool "Enable UART1 hardware flow control" 788 bool "Enable UART1 hardware flow control"
806 depends on SERIAL_BFIN_UART1 789 depends on SERIAL_BFIN_UART1
807 help 790 help
808 Enable hardware flow control in the driver. Using GPIO emulate the CTS/RTS 791 Enable hardware flow control in the driver.
809 signal.
810
811config UART1_CTS_PIN
812 int "UART1 CTS pin"
813 depends on BFIN_UART1_CTSRTS && !BF548
814 default -1
815 help
816 Refer to arch/blackfin/mach-*/include/mach/gpio.h to see the GPIO map.
817
818config UART1_RTS_PIN
819 int "UART1 RTS pin"
820 depends on BFIN_UART1_CTSRTS && !BF548
821 default -1
822 help
823 Refer to arch/blackfin/mach-*/include/mach/gpio.h to see the GPIO map.
824 792
825config SERIAL_BFIN_UART2 793config SERIAL_BFIN_UART2
826 bool "Enable UART2" 794 bool "Enable UART2"
@@ -832,22 +800,7 @@ config BFIN_UART2_CTSRTS
832 bool "Enable UART2 hardware flow control" 800 bool "Enable UART2 hardware flow control"
833 depends on SERIAL_BFIN_UART2 801 depends on SERIAL_BFIN_UART2
834 help 802 help
835 Enable hardware flow control in the driver. Using GPIO emulate the CTS/RTS 803 Enable hardware flow control in the driver.
836 signal.
837
838config UART2_CTS_PIN
839 int "UART2 CTS pin"
840 depends on BFIN_UART2_CTSRTS && !BF548
841 default -1
842 help
843 Refer to arch/blackfin/mach-*/include/mach/gpio.h to see the GPIO map.
844
845config UART2_RTS_PIN
846 int "UART2 RTS pin"
847 depends on BFIN_UART2_CTSRTS && !BF548
848 default -1
849 help
850 Refer to arch/blackfin/mach-*/include/mach/gpio.h to see the GPIO map.
851 804
852config SERIAL_BFIN_UART3 805config SERIAL_BFIN_UART3
853 bool "Enable UART3" 806 bool "Enable UART3"
@@ -859,22 +812,7 @@ config BFIN_UART3_CTSRTS
859 bool "Enable UART3 hardware flow control" 812 bool "Enable UART3 hardware flow control"
860 depends on SERIAL_BFIN_UART3 813 depends on SERIAL_BFIN_UART3
861 help 814 help
862 Enable hardware flow control in the driver. Using GPIO emulate the CTS/RTS 815 Enable hardware flow control in the driver.
863 signal.
864
865config UART3_CTS_PIN
866 int "UART3 CTS pin"
867 depends on BFIN_UART3_CTSRTS && !BF548
868 default -1
869 help
870 Refer to arch/blackfin/mach-*/include/mach/gpio.h to see the GPIO map.
871
872config UART3_RTS_PIN
873 int "UART3 RTS pin"
874 depends on BFIN_UART3_CTSRTS && !BF548
875 default -1
876 help
877 Refer to arch/blackfin/mach-*/include/mach/gpio.h to see the GPIO map.
878 816
879config SERIAL_IMX 817config SERIAL_IMX
880 bool "IMX serial port support" 818 bool "IMX serial port support"
diff --git a/drivers/serial/bfin_5xx.c b/drivers/serial/bfin_5xx.c
index 19cac9f610fd..e381b895b04d 100644
--- a/drivers/serial/bfin_5xx.c
+++ b/drivers/serial/bfin_5xx.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Blackfin On-Chip Serial Driver 2 * Blackfin On-Chip Serial Driver
3 * 3 *
4 * Copyright 2006-2008 Analog Devices Inc. 4 * Copyright 2006-2010 Analog Devices Inc.
5 * 5 *
6 * Enter bugs at http://blackfin.uclinux.org/ 6 * Enter bugs at http://blackfin.uclinux.org/
7 * 7 *
@@ -12,6 +12,9 @@
12#define SUPPORT_SYSRQ 12#define SUPPORT_SYSRQ
13#endif 13#endif
14 14
15#define DRIVER_NAME "bfin-uart"
16#define pr_fmt(fmt) DRIVER_NAME ": " fmt
17
15#include <linux/module.h> 18#include <linux/module.h>
16#include <linux/ioport.h> 19#include <linux/ioport.h>
17#include <linux/gfp.h> 20#include <linux/gfp.h>
@@ -23,21 +26,20 @@
23#include <linux/tty.h> 26#include <linux/tty.h>
24#include <linux/tty_flip.h> 27#include <linux/tty_flip.h>
25#include <linux/serial_core.h> 28#include <linux/serial_core.h>
26#include <linux/dma-mapping.h> 29#include <linux/gpio.h>
27 30#include <linux/irq.h>
28#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
29 defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
30#include <linux/kgdb.h> 31#include <linux/kgdb.h>
31#include <asm/irq_regs.h> 32#include <linux/slab.h>
32#endif 33#include <linux/dma-mapping.h>
33
34#include <asm/gpio.h>
35#include <mach/bfin_serial_5xx.h>
36 34
37#include <asm/dma.h> 35#include <asm/portmux.h>
38#include <asm/io.h>
39#include <asm/irq.h>
40#include <asm/cacheflush.h> 36#include <asm/cacheflush.h>
37#include <asm/dma.h>
38
39#define port_membase(uart) (((struct bfin_serial_port *)(uart))->port.membase)
40#define get_lsr_cache(uart) (((struct bfin_serial_port *)(uart))->lsr)
41#define put_lsr_cache(uart, v) (((struct bfin_serial_port *)(uart))->lsr = (v))
42#include <asm/bfin_serial.h>
41 43
42#ifdef CONFIG_SERIAL_BFIN_MODULE 44#ifdef CONFIG_SERIAL_BFIN_MODULE
43# undef CONFIG_EARLY_PRINTK 45# undef CONFIG_EARLY_PRINTK
@@ -48,12 +50,11 @@
48#endif 50#endif
49 51
50/* UART name and device definitions */ 52/* UART name and device definitions */
51#define BFIN_SERIAL_NAME "ttyBF" 53#define BFIN_SERIAL_DEV_NAME "ttyBF"
52#define BFIN_SERIAL_MAJOR 204 54#define BFIN_SERIAL_MAJOR 204
53#define BFIN_SERIAL_MINOR 64 55#define BFIN_SERIAL_MINOR 64
54 56
55static struct bfin_serial_port bfin_serial_ports[BFIN_UART_NR_PORTS]; 57static struct bfin_serial_port *bfin_serial_ports[BFIN_UART_NR_PORTS];
56static int nr_active_ports = ARRAY_SIZE(bfin_serial_resource);
57 58
58#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \ 59#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
59 defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE) 60 defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
@@ -743,14 +744,14 @@ static int bfin_serial_startup(struct uart_port *port)
743 } 744 }
744 } 745 }
745 if (uart->rts_pin >= 0) { 746 if (uart->rts_pin >= 0) {
746 gpio_request(uart->rts_pin, DRIVER_NAME);
747 gpio_direction_output(uart->rts_pin, 0); 747 gpio_direction_output(uart->rts_pin, 0);
748 } 748 }
749#endif 749#endif
750#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS 750#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
751 if (request_irq(uart->status_irq, 751 if (uart->cts_pin >= 0 && request_irq(uart->status_irq,
752 bfin_serial_mctrl_cts_int, 752 bfin_serial_mctrl_cts_int,
753 IRQF_DISABLED, "BFIN_UART_MODEM_STATUS", uart)) { 753 IRQF_DISABLED, "BFIN_UART_MODEM_STATUS", uart)) {
754 uart->cts_pin = -1;
754 pr_info("Unable to attach BlackFin UART Modem Status interrupt.\n"); 755 pr_info("Unable to attach BlackFin UART Modem Status interrupt.\n");
755 } 756 }
756 757
@@ -796,11 +797,9 @@ static void bfin_serial_shutdown(struct uart_port *port)
796#ifdef CONFIG_SERIAL_BFIN_CTSRTS 797#ifdef CONFIG_SERIAL_BFIN_CTSRTS
797 if (uart->cts_pin >= 0) 798 if (uart->cts_pin >= 0)
798 free_irq(gpio_to_irq(uart->cts_pin), uart); 799 free_irq(gpio_to_irq(uart->cts_pin), uart);
799 if (uart->rts_pin >= 0)
800 gpio_free(uart->rts_pin);
801#endif 800#endif
802#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS 801#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
803 if (UART_GET_IER(uart) & EDSSI) 802 if (uart->cts_pin >= 0)
804 free_irq(uart->status_irq, uart); 803 free_irq(uart->status_irq, uart);
805#endif 804#endif
806} 805}
@@ -962,33 +961,33 @@ bfin_serial_verify_port(struct uart_port *port, struct serial_struct *ser)
962 */ 961 */
963static void bfin_serial_set_ldisc(struct uart_port *port, int ld) 962static void bfin_serial_set_ldisc(struct uart_port *port, int ld)
964{ 963{
965 int line = port->line; 964 struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
966 unsigned short val; 965 unsigned short val;
967 966
968 switch (ld) { 967 switch (ld) {
969 case N_IRDA: 968 case N_IRDA:
970 val = UART_GET_GCTL(&bfin_serial_ports[line]); 969 val = UART_GET_GCTL(uart);
971 val |= (IREN | RPOLC); 970 val |= (IREN | RPOLC);
972 UART_PUT_GCTL(&bfin_serial_ports[line], val); 971 UART_PUT_GCTL(uart, val);
973 break; 972 break;
974 default: 973 default:
975 val = UART_GET_GCTL(&bfin_serial_ports[line]); 974 val = UART_GET_GCTL(uart);
976 val &= ~(IREN | RPOLC); 975 val &= ~(IREN | RPOLC);
977 UART_PUT_GCTL(&bfin_serial_ports[line], val); 976 UART_PUT_GCTL(uart, val);
978 } 977 }
979} 978}
980 979
981static void bfin_serial_reset_irda(struct uart_port *port) 980static void bfin_serial_reset_irda(struct uart_port *port)
982{ 981{
983 int line = port->line; 982 struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
984 unsigned short val; 983 unsigned short val;
985 984
986 val = UART_GET_GCTL(&bfin_serial_ports[line]); 985 val = UART_GET_GCTL(uart);
987 val &= ~(IREN | RPOLC); 986 val &= ~(IREN | RPOLC);
988 UART_PUT_GCTL(&bfin_serial_ports[line], val); 987 UART_PUT_GCTL(uart, val);
989 SSYNC(); 988 SSYNC();
990 val |= (IREN | RPOLC); 989 val |= (IREN | RPOLC);
991 UART_PUT_GCTL(&bfin_serial_ports[line], val); 990 UART_PUT_GCTL(uart, val);
992 SSYNC(); 991 SSYNC();
993} 992}
994 993
@@ -1070,85 +1069,6 @@ static struct uart_ops bfin_serial_pops = {
1070#endif 1069#endif
1071}; 1070};
1072 1071
1073static void __init bfin_serial_hw_init(void)
1074{
1075#ifdef CONFIG_SERIAL_BFIN_UART0
1076 peripheral_request(P_UART0_TX, DRIVER_NAME);
1077 peripheral_request(P_UART0_RX, DRIVER_NAME);
1078#endif
1079
1080#ifdef CONFIG_SERIAL_BFIN_UART1
1081 peripheral_request(P_UART1_TX, DRIVER_NAME);
1082 peripheral_request(P_UART1_RX, DRIVER_NAME);
1083
1084# if defined(CONFIG_BFIN_UART1_CTSRTS) && defined(CONFIG_BF54x)
1085 peripheral_request(P_UART1_RTS, DRIVER_NAME);
1086 peripheral_request(P_UART1_CTS, DRIVER_NAME);
1087# endif
1088#endif
1089
1090#ifdef CONFIG_SERIAL_BFIN_UART2
1091 peripheral_request(P_UART2_TX, DRIVER_NAME);
1092 peripheral_request(P_UART2_RX, DRIVER_NAME);
1093#endif
1094
1095#ifdef CONFIG_SERIAL_BFIN_UART3
1096 peripheral_request(P_UART3_TX, DRIVER_NAME);
1097 peripheral_request(P_UART3_RX, DRIVER_NAME);
1098
1099# if defined(CONFIG_BFIN_UART3_CTSRTS) && defined(CONFIG_BF54x)
1100 peripheral_request(P_UART3_RTS, DRIVER_NAME);
1101 peripheral_request(P_UART3_CTS, DRIVER_NAME);
1102# endif
1103#endif
1104}
1105
1106static void __init bfin_serial_init_ports(void)
1107{
1108 static int first = 1;
1109 int i;
1110
1111 if (!first)
1112 return;
1113 first = 0;
1114
1115 bfin_serial_hw_init();
1116
1117 for (i = 0; i < nr_active_ports; i++) {
1118 spin_lock_init(&bfin_serial_ports[i].port.lock);
1119 bfin_serial_ports[i].port.uartclk = get_sclk();
1120 bfin_serial_ports[i].port.fifosize = BFIN_UART_TX_FIFO_SIZE;
1121 bfin_serial_ports[i].port.ops = &bfin_serial_pops;
1122 bfin_serial_ports[i].port.line = i;
1123 bfin_serial_ports[i].port.iotype = UPIO_MEM;
1124 bfin_serial_ports[i].port.membase =
1125 (void __iomem *)bfin_serial_resource[i].uart_base_addr;
1126 bfin_serial_ports[i].port.mapbase =
1127 bfin_serial_resource[i].uart_base_addr;
1128 bfin_serial_ports[i].port.irq =
1129 bfin_serial_resource[i].uart_irq;
1130 bfin_serial_ports[i].status_irq =
1131 bfin_serial_resource[i].uart_status_irq;
1132 bfin_serial_ports[i].port.flags = UPF_BOOT_AUTOCONF;
1133#ifdef CONFIG_SERIAL_BFIN_DMA
1134 bfin_serial_ports[i].tx_done = 1;
1135 bfin_serial_ports[i].tx_count = 0;
1136 bfin_serial_ports[i].tx_dma_channel =
1137 bfin_serial_resource[i].uart_tx_dma_channel;
1138 bfin_serial_ports[i].rx_dma_channel =
1139 bfin_serial_resource[i].uart_rx_dma_channel;
1140 init_timer(&(bfin_serial_ports[i].rx_dma_timer));
1141#endif
1142#if defined(CONFIG_SERIAL_BFIN_CTSRTS) || \
1143 defined(CONFIG_SERIAL_BFIN_HARD_CTSRTS)
1144 bfin_serial_ports[i].cts_pin =
1145 bfin_serial_resource[i].uart_cts_pin;
1146 bfin_serial_ports[i].rts_pin =
1147 bfin_serial_resource[i].uart_rts_pin;
1148#endif
1149 }
1150}
1151
1152#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK) 1072#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
1153/* 1073/*
1154 * If the port was already initialised (eg, by a boot loader), 1074 * If the port was already initialised (eg, by a boot loader),
@@ -1196,6 +1116,34 @@ bfin_serial_console_get_options(struct bfin_serial_port *uart, int *baud,
1196 1116
1197static struct uart_driver bfin_serial_reg; 1117static struct uart_driver bfin_serial_reg;
1198 1118
1119static void bfin_serial_console_putchar(struct uart_port *port, int ch)
1120{
1121 struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
1122 while (!(UART_GET_LSR(uart) & THRE))
1123 barrier();
1124 UART_PUT_CHAR(uart, ch);
1125}
1126
1127#endif /* defined (CONFIG_SERIAL_BFIN_CONSOLE) ||
1128 defined (CONFIG_EARLY_PRINTK) */
1129
1130#ifdef CONFIG_SERIAL_BFIN_CONSOLE
1131#define CLASS_BFIN_CONSOLE "bfin-console"
1132/*
1133 * Interrupts are disabled on entering
1134 */
1135static void
1136bfin_serial_console_write(struct console *co, const char *s, unsigned int count)
1137{
1138 struct bfin_serial_port *uart = bfin_serial_ports[co->index];
1139 unsigned long flags;
1140
1141 spin_lock_irqsave(&uart->port.lock, flags);
1142 uart_console_write(&uart->port, s, count, bfin_serial_console_putchar);
1143 spin_unlock_irqrestore(&uart->port.lock, flags);
1144
1145}
1146
1199static int __init 1147static int __init
1200bfin_serial_console_setup(struct console *co, char *options) 1148bfin_serial_console_setup(struct console *co, char *options)
1201{ 1149{
@@ -1215,9 +1163,12 @@ bfin_serial_console_setup(struct console *co, char *options)
1215 * if so, search for the first available port that does have 1163 * if so, search for the first available port that does have
1216 * console support. 1164 * console support.
1217 */ 1165 */
1218 if (co->index == -1 || co->index >= nr_active_ports) 1166 if (co->index < 0 || co->index >= BFIN_UART_NR_PORTS)
1219 co->index = 0; 1167 return -ENODEV;
1220 uart = &bfin_serial_ports[co->index]; 1168
1169 uart = bfin_serial_ports[co->index];
1170 if (!uart)
1171 return -ENODEV;
1221 1172
1222 if (options) 1173 if (options)
1223 uart_parse_options(options, &baud, &parity, &bits, &flow); 1174 uart_parse_options(options, &baud, &parity, &bits, &flow);
@@ -1226,36 +1177,9 @@ bfin_serial_console_setup(struct console *co, char *options)
1226 1177
1227 return uart_set_options(&uart->port, co, baud, parity, bits, flow); 1178 return uart_set_options(&uart->port, co, baud, parity, bits, flow);
1228} 1179}
1229#endif /* defined (CONFIG_SERIAL_BFIN_CONSOLE) ||
1230 defined (CONFIG_EARLY_PRINTK) */
1231
1232#ifdef CONFIG_SERIAL_BFIN_CONSOLE
1233static void bfin_serial_console_putchar(struct uart_port *port, int ch)
1234{
1235 struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
1236 while (!(UART_GET_LSR(uart) & THRE))
1237 barrier();
1238 UART_PUT_CHAR(uart, ch);
1239 SSYNC();
1240}
1241
1242/*
1243 * Interrupts are disabled on entering
1244 */
1245static void
1246bfin_serial_console_write(struct console *co, const char *s, unsigned int count)
1247{
1248 struct bfin_serial_port *uart = &bfin_serial_ports[co->index];
1249 unsigned long flags;
1250
1251 spin_lock_irqsave(&uart->port.lock, flags);
1252 uart_console_write(&uart->port, s, count, bfin_serial_console_putchar);
1253 spin_unlock_irqrestore(&uart->port.lock, flags);
1254
1255}
1256 1180
1257static struct console bfin_serial_console = { 1181static struct console bfin_serial_console = {
1258 .name = BFIN_SERIAL_NAME, 1182 .name = BFIN_SERIAL_DEV_NAME,
1259 .write = bfin_serial_console_write, 1183 .write = bfin_serial_console_write,
1260 .device = uart_console_device, 1184 .device = uart_console_device,
1261 .setup = bfin_serial_console_setup, 1185 .setup = bfin_serial_console_setup,
@@ -1263,44 +1187,30 @@ static struct console bfin_serial_console = {
1263 .index = -1, 1187 .index = -1,
1264 .data = &bfin_serial_reg, 1188 .data = &bfin_serial_reg,
1265}; 1189};
1266
1267static int __init bfin_serial_rs_console_init(void)
1268{
1269 bfin_serial_init_ports();
1270 register_console(&bfin_serial_console);
1271
1272 return 0;
1273}
1274console_initcall(bfin_serial_rs_console_init);
1275
1276#define BFIN_SERIAL_CONSOLE &bfin_serial_console 1190#define BFIN_SERIAL_CONSOLE &bfin_serial_console
1277#else 1191#else
1278#define BFIN_SERIAL_CONSOLE NULL 1192#define BFIN_SERIAL_CONSOLE NULL
1279#endif /* CONFIG_SERIAL_BFIN_CONSOLE */ 1193#endif /* CONFIG_SERIAL_BFIN_CONSOLE */
1280 1194
1195#ifdef CONFIG_EARLY_PRINTK
1196static struct bfin_serial_port bfin_earlyprintk_port;
1197#define CLASS_BFIN_EARLYPRINTK "bfin-earlyprintk"
1281 1198
1282#ifdef CONFIG_EARLY_PRINTK 1199/*
1283static __init void early_serial_putc(struct uart_port *port, int ch) 1200 * Interrupts are disabled on entering
1201 */
1202static void
1203bfin_earlyprintk_console_write(struct console *co, const char *s, unsigned int count)
1284{ 1204{
1285 unsigned timeout = 0xffff; 1205 unsigned long flags;
1286 struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
1287
1288 while ((!(UART_GET_LSR(uart) & THRE)) && --timeout)
1289 cpu_relax();
1290 UART_PUT_CHAR(uart, ch);
1291}
1292 1206
1293static __init void early_serial_write(struct console *con, const char *s, 1207 if (bfin_earlyprintk_port.port.line != co->index)
1294 unsigned int n) 1208 return;
1295{
1296 struct bfin_serial_port *uart = &bfin_serial_ports[con->index];
1297 unsigned int i;
1298 1209
1299 for (i = 0; i < n; i++, s++) { 1210 spin_lock_irqsave(&bfin_earlyprintk_port.port.lock, flags);
1300 if (*s == '\n') 1211 uart_console_write(&bfin_earlyprintk_port.port, s, count,
1301 early_serial_putc(&uart->port, '\r'); 1212 bfin_serial_console_putchar);
1302 early_serial_putc(&uart->port, *s); 1213 spin_unlock_irqrestore(&bfin_earlyprintk_port.port.lock, flags);
1303 }
1304} 1214}
1305 1215
1306/* 1216/*
@@ -1311,113 +1221,204 @@ static __init void early_serial_write(struct console *con, const char *s,
1311 */ 1221 */
1312static struct __initdata console bfin_early_serial_console = { 1222static struct __initdata console bfin_early_serial_console = {
1313 .name = "early_BFuart", 1223 .name = "early_BFuart",
1314 .write = early_serial_write, 1224 .write = bfin_earlyprintk_console_write,
1315 .device = uart_console_device, 1225 .device = uart_console_device,
1316 .flags = CON_PRINTBUFFER, 1226 .flags = CON_PRINTBUFFER,
1317 .index = -1, 1227 .index = -1,
1318 .data = &bfin_serial_reg, 1228 .data = &bfin_serial_reg,
1319}; 1229};
1320
1321struct console __init *bfin_earlyserial_init(unsigned int port,
1322 unsigned int cflag)
1323{
1324 struct bfin_serial_port *uart;
1325 struct ktermios t;
1326
1327#ifdef CONFIG_SERIAL_BFIN_CONSOLE
1328 /*
1329 * If we are using early serial, don't let the normal console rewind
1330 * log buffer, since that causes things to be printed multiple times
1331 */
1332 bfin_serial_console.flags &= ~CON_PRINTBUFFER;
1333#endif 1230#endif
1334 1231
1335 if (port == -1 || port >= nr_active_ports)
1336 port = 0;
1337 bfin_serial_init_ports();
1338 bfin_early_serial_console.index = port;
1339 uart = &bfin_serial_ports[port];
1340 t.c_cflag = cflag;
1341 t.c_iflag = 0;
1342 t.c_oflag = 0;
1343 t.c_lflag = ICANON;
1344 t.c_line = port;
1345 bfin_serial_set_termios(&uart->port, &t, &t);
1346 return &bfin_early_serial_console;
1347}
1348
1349#endif /* CONFIG_EARLY_PRINTK */
1350
1351static struct uart_driver bfin_serial_reg = { 1232static struct uart_driver bfin_serial_reg = {
1352 .owner = THIS_MODULE, 1233 .owner = THIS_MODULE,
1353 .driver_name = "bfin-uart", 1234 .driver_name = DRIVER_NAME,
1354 .dev_name = BFIN_SERIAL_NAME, 1235 .dev_name = BFIN_SERIAL_DEV_NAME,
1355 .major = BFIN_SERIAL_MAJOR, 1236 .major = BFIN_SERIAL_MAJOR,
1356 .minor = BFIN_SERIAL_MINOR, 1237 .minor = BFIN_SERIAL_MINOR,
1357 .nr = BFIN_UART_NR_PORTS, 1238 .nr = BFIN_UART_NR_PORTS,
1358 .cons = BFIN_SERIAL_CONSOLE, 1239 .cons = BFIN_SERIAL_CONSOLE,
1359}; 1240};
1360 1241
1361static int bfin_serial_suspend(struct platform_device *dev, pm_message_t state) 1242static int bfin_serial_suspend(struct platform_device *pdev, pm_message_t state)
1362{ 1243{
1363 int i; 1244 struct bfin_serial_port *uart = platform_get_drvdata(pdev);
1364 1245
1365 for (i = 0; i < nr_active_ports; i++) { 1246 return uart_suspend_port(&bfin_serial_reg, &uart->port);
1366 if (bfin_serial_ports[i].port.dev != &dev->dev) 1247}
1367 continue;
1368 uart_suspend_port(&bfin_serial_reg, &bfin_serial_ports[i].port);
1369 }
1370 1248
1371 return 0; 1249static int bfin_serial_resume(struct platform_device *pdev)
1250{
1251 struct bfin_serial_port *uart = platform_get_drvdata(pdev);
1252
1253 return uart_resume_port(&bfin_serial_reg, &uart->port);
1372} 1254}
1373 1255
1374static int bfin_serial_resume(struct platform_device *dev) 1256static int bfin_serial_probe(struct platform_device *pdev)
1375{ 1257{
1376 int i; 1258 struct resource *res;
1259 struct bfin_serial_port *uart = NULL;
1260 int ret = 0;
1377 1261
1378 for (i = 0; i < nr_active_ports; i++) { 1262 if (pdev->id < 0 || pdev->id >= BFIN_UART_NR_PORTS) {
1379 if (bfin_serial_ports[i].port.dev != &dev->dev) 1263 dev_err(&pdev->dev, "Wrong bfin uart platform device id.\n");
1380 continue; 1264 return -ENOENT;
1381 uart_resume_port(&bfin_serial_reg, &bfin_serial_ports[i].port);
1382 } 1265 }
1383 1266
1384 return 0; 1267 if (bfin_serial_ports[pdev->id] == NULL) {
1385}
1386 1268
1387static int bfin_serial_probe(struct platform_device *dev) 1269 uart = kzalloc(sizeof(*uart), GFP_KERNEL);
1388{ 1270 if (!uart) {
1389 struct resource *res = dev->resource; 1271 dev_err(&pdev->dev,
1390 int i; 1272 "failed to allocate bfin_serial_port\n");
1273 return -ENOMEM;
1274 }
1275 bfin_serial_ports[pdev->id] = uart;
1391 1276
1392 for (i = 0; i < dev->num_resources; i++, res++) 1277#ifdef CONFIG_EARLY_PRINTK
1393 if (res->flags & IORESOURCE_MEM) 1278 if (!(bfin_earlyprintk_port.port.membase
1394 break; 1279 && bfin_earlyprintk_port.port.line == pdev->id)) {
1280 /*
1281 * If the peripheral pins of the current port were already allocated
1282 * in the earlyprintk probe stage, don't request them again.
1283 */
1284#endif
1285 ret = peripheral_request_list(
1286 (unsigned short *)pdev->dev.platform_data, DRIVER_NAME);
1287 if (ret) {
1288 dev_err(&pdev->dev,
1289 "fail to request bfin serial peripherals\n");
1290 goto out_error_free_mem;
1291 }
1292#ifdef CONFIG_EARLY_PRINTK
1293 }
1294#endif
1295
1296 spin_lock_init(&uart->port.lock);
1297 uart->port.uartclk = get_sclk();
1298 uart->port.fifosize = BFIN_UART_TX_FIFO_SIZE;
1299 uart->port.ops = &bfin_serial_pops;
1300 uart->port.line = pdev->id;
1301 uart->port.iotype = UPIO_MEM;
1302 uart->port.flags = UPF_BOOT_AUTOCONF;
1303
1304 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1305 if (res == NULL) {
1306 dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
1307 ret = -ENOENT;
1308 goto out_error_free_peripherals;
1309 }
1310
1311 uart->port.membase = ioremap(res->start,
1312 res->end - res->start);
1313 if (!uart->port.membase) {
1314 dev_err(&pdev->dev, "Cannot map uart IO\n");
1315 ret = -ENXIO;
1316 goto out_error_free_peripherals;
1317 }
1318 uart->port.mapbase = res->start;
1395 1319
1396 if (i < dev->num_resources) { 1320 uart->port.irq = platform_get_irq(pdev, 0);
1397 for (i = 0; i < nr_active_ports; i++, res++) { 1321 if (uart->port.irq < 0) {
1398 if (bfin_serial_ports[i].port.mapbase != res->start) 1322 dev_err(&pdev->dev, "No uart RX/TX IRQ specified\n");
1399 continue; 1323 ret = -ENOENT;
1400 bfin_serial_ports[i].port.dev = &dev->dev; 1324 goto out_error_unmap;
1401 uart_add_one_port(&bfin_serial_reg, &bfin_serial_ports[i].port);
1402 } 1325 }
1326
1327 uart->status_irq = platform_get_irq(pdev, 1);
1328 if (uart->status_irq < 0) {
1329 dev_err(&pdev->dev, "No uart status IRQ specified\n");
1330 ret = -ENOENT;
1331 goto out_error_unmap;
1332 }
1333
1334#ifdef CONFIG_SERIAL_BFIN_DMA
1335 uart->tx_done = 1;
1336 uart->tx_count = 0;
1337
1338 res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1339 if (res == NULL) {
1340 dev_err(&pdev->dev, "No uart TX DMA channel specified\n");
1341 ret = -ENOENT;
1342 goto out_error_unmap;
1343 }
1344 uart->tx_dma_channel = res->start;
1345
1346 res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1347 if (res == NULL) {
1348 dev_err(&pdev->dev, "No uart RX DMA channel specified\n");
1349 ret = -ENOENT;
1350 goto out_error_unmap;
1351 }
1352 uart->rx_dma_channel = res->start;
1353
1354 init_timer(&(uart->rx_dma_timer));
1355#endif
1356
1357#if defined(CONFIG_SERIAL_BFIN_CTSRTS) || \
1358 defined(CONFIG_SERIAL_BFIN_HARD_CTSRTS)
1359 res = platform_get_resource(pdev, IORESOURCE_IO, 0);
1360 if (res == NULL)
1361 uart->cts_pin = -1;
1362 else
1363 uart->cts_pin = res->start;
1364
1365 res = platform_get_resource(pdev, IORESOURCE_IO, 1);
1366 if (res == NULL)
1367 uart->rts_pin = -1;
1368 else
1369 uart->rts_pin = res->start;
1370# if defined(CONFIG_SERIAL_BFIN_CTSRTS)
1371 if (uart->rts_pin >= 0)
1372 gpio_request(uart->rts_pin, DRIVER_NAME);
1373# endif
1374#endif
1403 } 1375 }
1404 1376
1405 return 0; 1377#ifdef CONFIG_SERIAL_BFIN_CONSOLE
1378 if (!is_early_platform_device(pdev)) {
1379#endif
1380 uart = bfin_serial_ports[pdev->id];
1381 uart->port.dev = &pdev->dev;
1382 dev_set_drvdata(&pdev->dev, uart);
1383 ret = uart_add_one_port(&bfin_serial_reg, &uart->port);
1384#ifdef CONFIG_SERIAL_BFIN_CONSOLE
1385 }
1386#endif
1387
1388 if (!ret)
1389 return 0;
1390
1391 if (uart) {
1392out_error_unmap:
1393 iounmap(uart->port.membase);
1394out_error_free_peripherals:
1395 peripheral_free_list(
1396 (unsigned short *)pdev->dev.platform_data);
1397out_error_free_mem:
1398 kfree(uart);
1399 bfin_serial_ports[pdev->id] = NULL;
1400 }
1401
1402 return ret;
1406} 1403}
1407 1404
1408static int bfin_serial_remove(struct platform_device *dev) 1405static int __devexit bfin_serial_remove(struct platform_device *pdev)
1409{ 1406{
1410 int i; 1407 struct bfin_serial_port *uart = platform_get_drvdata(pdev);
1411 1408
1412 for (i = 0; i < nr_active_ports; i++) { 1409 dev_set_drvdata(&pdev->dev, NULL);
1413 if (bfin_serial_ports[i].port.dev != &dev->dev) 1410
1414 continue; 1411 if (uart) {
1415 uart_remove_one_port(&bfin_serial_reg, &bfin_serial_ports[i].port); 1412 uart_remove_one_port(&bfin_serial_reg, &uart->port);
1416 bfin_serial_ports[i].port.dev = NULL; 1413#ifdef CONFIG_SERIAL_BFIN_CTSRTS
1417#if defined(CONFIG_SERIAL_BFIN_CTSRTS) 1414 if (uart->rts_pin >= 0)
1418 gpio_free(bfin_serial_ports[i].cts_pin); 1415 gpio_free(uart->rts_pin);
1419 gpio_free(bfin_serial_ports[i].rts_pin);
1420#endif 1416#endif
1417 iounmap(uart->port.membase);
1418 peripheral_free_list(
1419 (unsigned short *)pdev->dev.platform_data);
1420 kfree(uart);
1421 bfin_serial_ports[pdev->id] = NULL;
1421 } 1422 }
1422 1423
1423 return 0; 1424 return 0;
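
The probe rework replaces the compile-time bfin_serial_resource[] table with standard platform resources, so each board now describes its UARTs to the driver. A hedged sketch of what a matching board-side device might look like; the address, IRQ and DMA channel names are illustrative for a BF5xx part, not taken from this patch:

	static struct resource bfin_uart0_resources[] = {
		{ .start = 0xFFC00400, .end = 0xFFC004FF, .flags = IORESOURCE_MEM },
		{ .start = IRQ_UART0_RX, .flags = IORESOURCE_IRQ },	/* rx/tx irq */
		{ .start = IRQ_UART0_ERROR, .flags = IORESOURCE_IRQ },	/* status irq */
		{ .start = CH_UART0_TX, .flags = IORESOURCE_DMA },
		{ .start = CH_UART0_RX, .flags = IORESOURCE_DMA },
	};

	static struct platform_device bfin_uart0_device = {
		.name = "bfin-uart",
		.id = 0,
		.num_resources = ARRAY_SIZE(bfin_uart0_resources),
		.resource = bfin_uart0_resources,
		/* .dev.platform_data would carry the P_UART0_* pin list
		 * consumed by peripheral_request_list() */
	};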
@@ -1425,31 +1426,160 @@ static int bfin_serial_remove(struct platform_device *dev)
1425 1426
1426static struct platform_driver bfin_serial_driver = { 1427static struct platform_driver bfin_serial_driver = {
1427 .probe = bfin_serial_probe, 1428 .probe = bfin_serial_probe,
1428 .remove = bfin_serial_remove, 1429 .remove = __devexit_p(bfin_serial_remove),
1429 .suspend = bfin_serial_suspend, 1430 .suspend = bfin_serial_suspend,
1430 .resume = bfin_serial_resume, 1431 .resume = bfin_serial_resume,
1431 .driver = { 1432 .driver = {
1432 .name = "bfin-uart", 1433 .name = DRIVER_NAME,
1433 .owner = THIS_MODULE, 1434 .owner = THIS_MODULE,
1434 }, 1435 },
1435}; 1436};
1436 1437
1437static int __init bfin_serial_init(void) 1438#if defined(CONFIG_SERIAL_BFIN_CONSOLE)
1439static __initdata struct early_platform_driver early_bfin_serial_driver = {
1440 .class_str = CLASS_BFIN_CONSOLE,
1441 .pdrv = &bfin_serial_driver,
1442 .requested_id = EARLY_PLATFORM_ID_UNSET,
1443};
1444
1445static int __init bfin_serial_rs_console_init(void)
1446{
1447 early_platform_driver_register(&early_bfin_serial_driver, DRIVER_NAME);
1448
1449 early_platform_driver_probe(CLASS_BFIN_CONSOLE, BFIN_UART_NR_PORTS, 0);
1450
1451 register_console(&bfin_serial_console);
1452
1453 return 0;
1454}
1455console_initcall(bfin_serial_rs_console_init);
1456#endif
1457
1458#ifdef CONFIG_EARLY_PRINTK
1459/*
1460 * Memory can't be allocated dynamically during the earlyprintk init stage,
1461 * so probe earlyprintk individually with a static uart port variable.
1462 */
1463static int bfin_earlyprintk_probe(struct platform_device *pdev)
1438{ 1464{
1465 struct resource *res;
1439 int ret; 1466 int ret;
1440 1467
1441 pr_info("Serial: Blackfin serial driver\n"); 1468 if (pdev->id < 0 || pdev->id >= BFIN_UART_NR_PORTS) {
1469 dev_err(&pdev->dev, "Wrong earlyprintk platform device id.\n");
1470 return -ENOENT;
1471 }
1472
1473 ret = peripheral_request_list(
1474 (unsigned short *)pdev->dev.platform_data, DRIVER_NAME);
1475 if (ret) {
1476 dev_err(&pdev->dev,
1477 "fail to request bfin serial peripherals\n");
1478 return ret;
1479 }
1480
1481 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1482 if (res == NULL) {
1483 dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
1484 ret = -ENOENT;
1485 goto out_error_free_peripherals;
1486 }
1487
1488 bfin_earlyprintk_port.port.membase = ioremap(res->start,
1489 res->end - res->start);
1490 if (!bfin_earlyprintk_port.port.membase) {
1491 dev_err(&pdev->dev, "Cannot map uart IO\n");
1492 ret = -ENXIO;
1493 goto out_error_free_peripherals;
1494 }
1495 bfin_earlyprintk_port.port.mapbase = res->start;
1496 bfin_earlyprintk_port.port.line = pdev->id;
1497 bfin_earlyprintk_port.port.uartclk = get_sclk();
1498 bfin_earlyprintk_port.port.fifosize = BFIN_UART_TX_FIFO_SIZE;
1499 spin_lock_init(&bfin_earlyprintk_port.port.lock);
1500
1501 return 0;
1502
1503out_error_free_peripherals:
1504 peripheral_free_list(
1505 (unsigned short *)pdev->dev.platform_data);
1506
1507 return ret;
1508}
1509
1510static struct platform_driver bfin_earlyprintk_driver = {
1511 .probe = bfin_earlyprintk_probe,
1512 .driver = {
1513 .name = DRIVER_NAME,
1514 .owner = THIS_MODULE,
1515 },
1516};
1517
1518static __initdata struct early_platform_driver early_bfin_earlyprintk_driver = {
1519 .class_str = CLASS_BFIN_EARLYPRINTK,
1520 .pdrv = &bfin_earlyprintk_driver,
1521 .requested_id = EARLY_PLATFORM_ID_UNSET,
1522};
1523
1524struct console __init *bfin_earlyserial_init(unsigned int port,
1525 unsigned int cflag)
1526{
1527 struct ktermios t;
1528 char port_name[20];
1442 1529
1443 bfin_serial_init_ports(); 1530 if (port < 0 || port >= BFIN_UART_NR_PORTS)
1531 return NULL;
1532
1533 /*
1534 * Only probe resource of the given port in earlyprintk boot arg.
1535 * The expected port id should be indicated in port name string.
1536 */
1537 snprintf(port_name, 20, DRIVER_NAME ".%d", port);
1538 early_platform_driver_register(&early_bfin_earlyprintk_driver,
1539 port_name);
1540 early_platform_driver_probe(CLASS_BFIN_EARLYPRINTK, 1, 0);
1541
1542 if (!bfin_earlyprintk_port.port.membase)
1543 return NULL;
1544
1545#ifdef CONFIG_SERIAL_BFIN_CONSOLE
1546 /*
1547 * If we are using early serial, don't let the normal console rewind
1548 * log buffer, since that causes things to be printed multiple times
1549 */
1550 bfin_serial_console.flags &= ~CON_PRINTBUFFER;
1551#endif
1552
1553 bfin_early_serial_console.index = port;
1554 t.c_cflag = cflag;
1555 t.c_iflag = 0;
1556 t.c_oflag = 0;
1557 t.c_lflag = ICANON;
1558 t.c_line = port;
1559 bfin_serial_set_termios(&bfin_earlyprintk_port.port, &t, &t);
1560
1561 return &bfin_early_serial_console;
1562}
1563#endif /* CONFIG_EARLY_PRINTK */
1564
1565static int __init bfin_serial_init(void)
1566{
1567 int ret;
1568
1569 pr_info("Blackfin serial driver\n");
1444 1570
1445 ret = uart_register_driver(&bfin_serial_reg); 1571 ret = uart_register_driver(&bfin_serial_reg);
1446 if (ret == 0) { 1572 if (ret) {
1447 ret = platform_driver_register(&bfin_serial_driver); 1573 pr_err("failed to register %s:%d\n",
1448 if (ret) { 1574 bfin_serial_reg.driver_name, ret);
1449 pr_debug("uart register failed\n"); 1575 }
1450 uart_unregister_driver(&bfin_serial_reg); 1576
1451 } 1577 ret = platform_driver_register(&bfin_serial_driver);
1578 if (ret) {
1579 pr_err("fail to register bfin uart\n");
1580 uart_unregister_driver(&bfin_serial_reg);
1452 } 1581 }
1582
1453 return ret; 1583 return ret;
1454} 1584}
1455 1585
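The early console machinery added above only fires if the board code has queued the UART as an early platform device before console_initcall time. Below is a minimal, hedged sketch of that board-file side; the device and function names are illustrative assumptions, not part of this patch:

/* Hedged sketch: queue the UART so early_platform_driver_probe() above
 * can match it against the early "bfin-uart" driver. */
static struct platform_device *board_early_devices[] __initdata = {
	&bfin_uart0_device,		/* assumed board-file device */
};

void __init board_setup_early_console(void)
{
	early_platform_add_devices(board_early_devices,
				   ARRAY_SIZE(board_early_devices));
}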
@@ -1463,7 +1593,7 @@ static void __exit bfin_serial_exit(void)
 module_init(bfin_serial_init);
 module_exit(bfin_serial_exit);
 
-MODULE_AUTHOR("Aubrey.Li <aubrey.li@analog.com>");
+MODULE_AUTHOR("Sonic Zhang, Aubrey Li");
 MODULE_DESCRIPTION("Blackfin generic serial port driver");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_CHARDEV_MAJOR(BFIN_SERIAL_MAJOR);
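For completeness, a plausible consumer of the new bfin_earlyserial_init(), loosely modeled on an arch earlyprintk setup path. The function name, fixed port id, and cflag value are assumptions for illustration only:

/* Hedged sketch: turn an "earlyprintk=serial,uart0,57600" boot argument
 * into a registered early console. bfin_earlyserial_init() configures the
 * static earlyprintk port and hands back a console to register. */
static int __init sketch_setup_early_printk(char *buf)
{
	struct console *early_console;
	unsigned int port = 0;			/* would be parsed from buf */
	unsigned int cflag = B57600 | CS8;	/* likewise */

	early_console = bfin_earlyserial_init(port, cflag);
	if (!early_console)
		return -ENODEV;

	register_console(early_console);
	return 0;
}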
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index dea7b5bf6e2c..24b966d5061a 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -469,7 +469,7 @@ static int hpwdt_pretimeout(struct notifier_block *nb, unsigned long ulReason,
 	unsigned long rom_pl;
 	static int die_nmi_called;
 
-	if (ulReason != DIE_NMI && ulReason != DIE_NMI_IPI)
+	if (ulReason != DIE_NMIUNKNOWN)
 		goto out;
 
 	if (!hpwdt_nmi_decoding)
diff --git a/fs/9p/Kconfig b/fs/9p/Kconfig
index 7e0511476797..814ac4e213a8 100644
--- a/fs/9p/Kconfig
+++ b/fs/9p/Kconfig
@@ -9,6 +9,8 @@ config 9P_FS
 
 	  If unsure, say N.
 
+if 9P_FS
+
 config 9P_FSCACHE
 	bool "Enable 9P client caching support (EXPERIMENTAL)"
 	depends on EXPERIMENTAL
@@ -20,7 +22,6 @@ config 9P_FSCACHE
 
 config 9P_FS_POSIX_ACL
 	bool "9P POSIX Access Control Lists"
-	depends on 9P_FS
 	select FS_POSIX_ACL
 	help
 	  POSIX Access Control Lists (ACLs) support permissions for users and
@@ -30,3 +31,5 @@ config 9P_FS_POSIX_ACL
 	  Linux website <http://acl.bestbits.at/>.
 
 	  If you don't know what Access Control Lists are, say N
+
+endif
diff --git a/fs/9p/Makefile b/fs/9p/Makefile
index f8ba37effd1b..ab8c12780634 100644
--- a/fs/9p/Makefile
+++ b/fs/9p/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_9P_FS) := 9p.o
 9p-objs := \
 	vfs_super.o \
 	vfs_inode.o \
+	vfs_inode_dotl.o \
 	vfs_addr.o \
 	vfs_file.o \
 	vfs_dir.o \
diff --git a/fs/9p/acl.c b/fs/9p/acl.c
index 6e58c4ca1e6e..02a2cf616318 100644
--- a/fs/9p/acl.c
+++ b/fs/9p/acl.c
@@ -28,7 +28,7 @@ static struct posix_acl *__v9fs_get_acl(struct p9_fid *fid, char *name)
 {
 	ssize_t size;
 	void *value = NULL;
-	struct posix_acl *acl = NULL;;
+	struct posix_acl *acl = NULL;
 
 	size = v9fs_fid_xattr_get(fid, name, NULL, 0);
 	if (size > 0) {
@@ -365,7 +365,7 @@ static int v9fs_xattr_set_acl(struct dentry *dentry, const char *name,
 	case ACL_TYPE_DEFAULT:
 		name = POSIX_ACL_XATTR_DEFAULT;
 		if (!S_ISDIR(inode->i_mode)) {
-			retval = -EINVAL;
+			retval = acl ? -EINVAL : 0;
 			goto err_out;
 		}
 		break;
diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h
index cb6396855e2d..c4b5d8864f0d 100644
--- a/fs/9p/v9fs.h
+++ b/fs/9p/v9fs.h
@@ -113,9 +113,27 @@ struct v9fs_session_info {
 
 struct p9_fid *v9fs_session_init(struct v9fs_session_info *, const char *,
 								char *);
-void v9fs_session_close(struct v9fs_session_info *v9ses);
-void v9fs_session_cancel(struct v9fs_session_info *v9ses);
-void v9fs_session_begin_cancel(struct v9fs_session_info *v9ses);
+extern void v9fs_session_close(struct v9fs_session_info *v9ses);
+extern void v9fs_session_cancel(struct v9fs_session_info *v9ses);
+extern void v9fs_session_begin_cancel(struct v9fs_session_info *v9ses);
+extern struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
+			struct nameidata *nameidata);
+extern int v9fs_vfs_unlink(struct inode *i, struct dentry *d);
+extern int v9fs_vfs_rmdir(struct inode *i, struct dentry *d);
+extern int v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+			struct inode *new_dir, struct dentry *new_dentry);
+extern void v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd,
+			void *p);
+extern struct inode *v9fs_inode(struct v9fs_session_info *v9ses,
+				struct p9_fid *fid,
+				struct super_block *sb);
+
+extern const struct inode_operations v9fs_dir_inode_operations_dotl;
+extern const struct inode_operations v9fs_file_inode_operations_dotl;
+extern const struct inode_operations v9fs_symlink_inode_operations_dotl;
+extern struct inode *v9fs_inode_dotl(struct v9fs_session_info *v9ses,
+				struct p9_fid *fid,
+				struct super_block *sb);
 
 /* other default globals */
 #define V9FS_PORT	564
@@ -138,3 +156,21 @@ static inline int v9fs_proto_dotl(struct v9fs_session_info *v9ses)
 {
 	return v9ses->flags & V9FS_PROTO_2000L;
 }
+
+/**
+ * v9fs_inode_from_fid - Helper routine to populate an inode by
+ * issuing an attribute request
+ * @v9ses: session information
+ * @fid: fid to issue attribute request for
+ * @sb: superblock on which to create inode
+ *
+ */
+static inline struct inode *
+v9fs_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid,
+		    struct super_block *sb)
+{
+	if (v9fs_proto_dotl(v9ses))
+		return v9fs_inode_dotl(v9ses, fid, sb);
+	else
+		return v9fs_inode(v9ses, fid, sb);
+}
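Moving v9fs_inode_from_fid() into v9fs.h lets both inode files share the walk-then-stat pattern this patch uses throughout. A minimal sketch of that pattern follows; the helper name is illustrative, and clunking the fid on the error path is left to the caller:

/* Hedged sketch: walk one path element to obtain an unopened fid, then let
 * v9fs_inode_from_fid() dispatch to the 9P2000.L or legacy stat path. */
static struct inode *
sketch_walk_to_inode(struct v9fs_session_info *v9ses, struct p9_fid *dfid,
		     char *name, struct super_block *sb)
{
	struct p9_fid *fid;

	fid = p9_client_walk(dfid, 1, &name, 1);
	if (IS_ERR(fid))
		return ERR_CAST(fid);
	return v9fs_inode_from_fid(v9ses, fid, sb);
}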
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 59782981b225..5076eeb95502 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -49,15 +49,8 @@
49 49
50static const struct inode_operations v9fs_dir_inode_operations; 50static const struct inode_operations v9fs_dir_inode_operations;
51static const struct inode_operations v9fs_dir_inode_operations_dotu; 51static const struct inode_operations v9fs_dir_inode_operations_dotu;
52static const struct inode_operations v9fs_dir_inode_operations_dotl;
53static const struct inode_operations v9fs_file_inode_operations; 52static const struct inode_operations v9fs_file_inode_operations;
54static const struct inode_operations v9fs_file_inode_operations_dotl;
55static const struct inode_operations v9fs_symlink_inode_operations; 53static const struct inode_operations v9fs_symlink_inode_operations;
56static const struct inode_operations v9fs_symlink_inode_operations_dotl;
57
58static int
59v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode,
60 dev_t rdev);
61 54
62/** 55/**
63 * unixmode2p9mode - convert unix mode bits to plan 9 56 * unixmode2p9mode - convert unix mode bits to plan 9
@@ -251,41 +244,6 @@ void v9fs_destroy_inode(struct inode *inode)
251#endif 244#endif
252 245
253/** 246/**
254 * v9fs_get_fsgid_for_create - Helper function to get the gid for creating a
255 * new file system object. This checks the S_ISGID to determine the owning
256 * group of the new file system object.
257 */
258
259static gid_t v9fs_get_fsgid_for_create(struct inode *dir_inode)
260{
261 BUG_ON(dir_inode == NULL);
262
263 if (dir_inode->i_mode & S_ISGID) {
264 /* set_gid bit is set.*/
265 return dir_inode->i_gid;
266 }
267 return current_fsgid();
268}
269
270/**
271 * v9fs_dentry_from_dir_inode - helper function to get the dentry from
272 * dir inode.
273 *
274 */
275
276static struct dentry *v9fs_dentry_from_dir_inode(struct inode *inode)
277{
278 struct dentry *dentry;
279
280 spin_lock(&inode->i_lock);
281 /* Directory should have only one entry. */
282 BUG_ON(S_ISDIR(inode->i_mode) && !list_is_singular(&inode->i_dentry));
283 dentry = list_entry(inode->i_dentry.next, struct dentry, d_alias);
284 spin_unlock(&inode->i_lock);
285 return dentry;
286}
287
288/**
289 * v9fs_get_inode - helper function to setup an inode 247 * v9fs_get_inode - helper function to setup an inode
290 * @sb: superblock 248 * @sb: superblock
291 * @mode: mode to setup inode with 249 * @mode: mode to setup inode with
@@ -454,7 +412,7 @@ void v9fs_evict_inode(struct inode *inode)
 #endif
 }
 
-static struct inode *
+struct inode *
 v9fs_inode(struct v9fs_session_info *v9ses, struct p9_fid *fid,
 	   struct super_block *sb)
 {
@@ -489,60 +447,6 @@ error:
489 return ERR_PTR(err); 447 return ERR_PTR(err);
490} 448}
491 449
492static struct inode *
493v9fs_inode_dotl(struct v9fs_session_info *v9ses, struct p9_fid *fid,
494 struct super_block *sb)
495{
496 struct inode *ret = NULL;
497 int err;
498 struct p9_stat_dotl *st;
499
500 st = p9_client_getattr_dotl(fid, P9_STATS_BASIC);
501 if (IS_ERR(st))
502 return ERR_CAST(st);
503
504 ret = v9fs_get_inode(sb, st->st_mode);
505 if (IS_ERR(ret)) {
506 err = PTR_ERR(ret);
507 goto error;
508 }
509
510 v9fs_stat2inode_dotl(st, ret);
511 ret->i_ino = v9fs_qid2ino(&st->qid);
512#ifdef CONFIG_9P_FSCACHE
513 v9fs_vcookie_set_qid(ret, &st->qid);
514 v9fs_cache_inode_get_cookie(ret);
515#endif
516 err = v9fs_get_acl(ret, fid);
517 if (err) {
518 iput(ret);
519 goto error;
520 }
521 kfree(st);
522 return ret;
523error:
524 kfree(st);
525 return ERR_PTR(err);
526}
527
528/**
529 * v9fs_inode_from_fid - Helper routine to populate an inode by
530 * issuing a attribute request
531 * @v9ses: session information
532 * @fid: fid to issue attribute request for
533 * @sb: superblock on which to create inode
534 *
535 */
536static inline struct inode *
537v9fs_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid,
538 struct super_block *sb)
539{
540 if (v9fs_proto_dotl(v9ses))
541 return v9fs_inode_dotl(v9ses, fid, sb);
542 else
543 return v9fs_inode(v9ses, fid, sb);
544}
545
546/** 450/**
547 * v9fs_remove - helper function to remove files and directories 451 * v9fs_remove - helper function to remove files and directories
548 * @dir: directory inode that is being deleted 452 * @dir: directory inode that is being deleted
@@ -633,12 +537,6 @@ v9fs_create(struct v9fs_session_info *v9ses, struct inode *dir,
633 P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err); 537 P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err);
634 goto error; 538 goto error;
635 } 539 }
636
637 if (v9ses->cache)
638 d_set_d_op(dentry, &v9fs_cached_dentry_operations);
639 else
640 d_set_d_op(dentry, &v9fs_dentry_operations);
641
642 d_instantiate(dentry, inode); 540 d_instantiate(dentry, inode);
643 err = v9fs_fid_add(dentry, fid); 541 err = v9fs_fid_add(dentry, fid);
644 if (err < 0) 542 if (err < 0)
@@ -657,144 +555,6 @@ error:
657} 555}
658 556
659/** 557/**
660 * v9fs_vfs_create_dotl - VFS hook to create files for 9P2000.L protocol.
661 * @dir: directory inode that is being created
662 * @dentry: dentry that is being deleted
663 * @mode: create permissions
664 * @nd: path information
665 *
666 */
667
668static int
669v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
670 struct nameidata *nd)
671{
672 int err = 0;
673 char *name = NULL;
674 gid_t gid;
675 int flags;
676 mode_t mode;
677 struct v9fs_session_info *v9ses;
678 struct p9_fid *fid = NULL;
679 struct p9_fid *dfid, *ofid;
680 struct file *filp;
681 struct p9_qid qid;
682 struct inode *inode;
683 struct posix_acl *pacl = NULL, *dacl = NULL;
684
685 v9ses = v9fs_inode2v9ses(dir);
686 if (nd && nd->flags & LOOKUP_OPEN)
687 flags = nd->intent.open.flags - 1;
688 else {
689 /*
690 * create call without LOOKUP_OPEN is due
691 * to mknod of regular files. So use mknod
692 * operation.
693 */
694 return v9fs_vfs_mknod_dotl(dir, dentry, omode, 0);
695 }
696
697 name = (char *) dentry->d_name.name;
698 P9_DPRINTK(P9_DEBUG_VFS, "v9fs_vfs_create_dotl: name:%s flags:0x%x "
699 "mode:0x%x\n", name, flags, omode);
700
701 dfid = v9fs_fid_lookup(dentry->d_parent);
702 if (IS_ERR(dfid)) {
703 err = PTR_ERR(dfid);
704 P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
705 return err;
706 }
707
708 /* clone a fid to use for creation */
709 ofid = p9_client_walk(dfid, 0, NULL, 1);
710 if (IS_ERR(ofid)) {
711 err = PTR_ERR(ofid);
712 P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
713 return err;
714 }
715
716 gid = v9fs_get_fsgid_for_create(dir);
717
718 mode = omode;
719 /* Update mode based on ACL value */
720 err = v9fs_acl_mode(dir, &mode, &dacl, &pacl);
721 if (err) {
722 P9_DPRINTK(P9_DEBUG_VFS,
723 "Failed to get acl values in creat %d\n", err);
724 goto error;
725 }
726 err = p9_client_create_dotl(ofid, name, flags, mode, gid, &qid);
727 if (err < 0) {
728 P9_DPRINTK(P9_DEBUG_VFS,
729 "p9_client_open_dotl failed in creat %d\n",
730 err);
731 goto error;
732 }
733 /* instantiate inode and assign the unopened fid to the dentry */
734 if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE ||
735 (nd && nd->flags & LOOKUP_OPEN)) {
736 fid = p9_client_walk(dfid, 1, &name, 1);
737 if (IS_ERR(fid)) {
738 err = PTR_ERR(fid);
739 P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
740 err);
741 fid = NULL;
742 goto error;
743 }
744
745 inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
746 if (IS_ERR(inode)) {
747 err = PTR_ERR(inode);
748 P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
749 err);
750 goto error;
751 }
752 d_set_d_op(dentry, &v9fs_cached_dentry_operations);
753 d_instantiate(dentry, inode);
754 err = v9fs_fid_add(dentry, fid);
755 if (err < 0)
756 goto error;
757 /* The fid would get clunked via a dput */
758 fid = NULL;
759 } else {
760 /*
761 * Not in cached mode. No need to populate
762 * inode with stat. We need to get an inode
763 * so that we can set the acl with dentry
764 */
765 inode = v9fs_get_inode(dir->i_sb, mode);
766 if (IS_ERR(inode)) {
767 err = PTR_ERR(inode);
768 goto error;
769 }
770 d_set_d_op(dentry, &v9fs_dentry_operations);
771 d_instantiate(dentry, inode);
772 }
773 /* Now set the ACL based on the default value */
774 v9fs_set_create_acl(dentry, dacl, pacl);
775
776 /* if we are opening a file, assign the open fid to the file */
777 if (nd && nd->flags & LOOKUP_OPEN) {
778 filp = lookup_instantiate_filp(nd, dentry, generic_file_open);
779 if (IS_ERR(filp)) {
780 p9_client_clunk(ofid);
781 return PTR_ERR(filp);
782 }
783 filp->private_data = ofid;
784 } else
785 p9_client_clunk(ofid);
786
787 return 0;
788
789error:
790 if (ofid)
791 p9_client_clunk(ofid);
792 if (fid)
793 p9_client_clunk(fid);
794 return err;
795}
796
797/**
798 * v9fs_vfs_create - VFS hook to create files 558 * v9fs_vfs_create - VFS hook to create files
799 * @dir: directory inode that is being created 559 * @dir: directory inode that is being created
800 * @dentry: dentry that is being deleted 560 * @dentry: dentry that is being deleted
@@ -884,107 +644,6 @@ static int v9fs_vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
884 return err; 644 return err;
885} 645}
886 646
887
888/**
889 * v9fs_vfs_mkdir_dotl - VFS mkdir hook to create a directory
890 * @dir: inode that is being unlinked
891 * @dentry: dentry that is being unlinked
892 * @mode: mode for new directory
893 *
894 */
895
896static int v9fs_vfs_mkdir_dotl(struct inode *dir,
897 struct dentry *dentry, int omode)
898{
899 int err;
900 struct v9fs_session_info *v9ses;
901 struct p9_fid *fid = NULL, *dfid = NULL;
902 gid_t gid;
903 char *name;
904 mode_t mode;
905 struct inode *inode;
906 struct p9_qid qid;
907 struct dentry *dir_dentry;
908 struct posix_acl *dacl = NULL, *pacl = NULL;
909
910 P9_DPRINTK(P9_DEBUG_VFS, "name %s\n", dentry->d_name.name);
911 err = 0;
912 v9ses = v9fs_inode2v9ses(dir);
913
914 omode |= S_IFDIR;
915 if (dir->i_mode & S_ISGID)
916 omode |= S_ISGID;
917
918 dir_dentry = v9fs_dentry_from_dir_inode(dir);
919 dfid = v9fs_fid_lookup(dir_dentry);
920 if (IS_ERR(dfid)) {
921 err = PTR_ERR(dfid);
922 P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
923 dfid = NULL;
924 goto error;
925 }
926
927 gid = v9fs_get_fsgid_for_create(dir);
928 mode = omode;
929 /* Update mode based on ACL value */
930 err = v9fs_acl_mode(dir, &mode, &dacl, &pacl);
931 if (err) {
932 P9_DPRINTK(P9_DEBUG_VFS,
933 "Failed to get acl values in mkdir %d\n", err);
934 goto error;
935 }
936 name = (char *) dentry->d_name.name;
937 err = p9_client_mkdir_dotl(dfid, name, mode, gid, &qid);
938 if (err < 0)
939 goto error;
940
941 /* instantiate inode and assign the unopened fid to the dentry */
942 if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
943 fid = p9_client_walk(dfid, 1, &name, 1);
944 if (IS_ERR(fid)) {
945 err = PTR_ERR(fid);
946 P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
947 err);
948 fid = NULL;
949 goto error;
950 }
951
952 inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
953 if (IS_ERR(inode)) {
954 err = PTR_ERR(inode);
955 P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
956 err);
957 goto error;
958 }
959 d_set_d_op(dentry, &v9fs_cached_dentry_operations);
960 d_instantiate(dentry, inode);
961 err = v9fs_fid_add(dentry, fid);
962 if (err < 0)
963 goto error;
964 fid = NULL;
965 } else {
966 /*
967 * Not in cached mode. No need to populate
968 * inode with stat. We need to get an inode
969 * so that we can set the acl with dentry
970 */
971 inode = v9fs_get_inode(dir->i_sb, mode);
972 if (IS_ERR(inode)) {
973 err = PTR_ERR(inode);
974 goto error;
975 }
976 d_set_d_op(dentry, &v9fs_dentry_operations);
977 d_instantiate(dentry, inode);
978 }
979 /* Now set the ACL based on the default value */
980 v9fs_set_create_acl(dentry, dacl, pacl);
981
982error:
983 if (fid)
984 p9_client_clunk(fid);
985 return err;
986}
987
988/** 647/**
989 * v9fs_vfs_lookup - VFS lookup hook to "walk" to a new inode 648 * v9fs_vfs_lookup - VFS lookup hook to "walk" to a new inode
990 * @dir: inode that is being walked from 649 * @dir: inode that is being walked from
@@ -993,7 +652,7 @@ error:
  *
  */
 
-static struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
+struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
 				      struct nameidata *nameidata)
 {
 	struct super_block *sb;
@@ -1063,7 +722,7 @@ error:
  *
  */
 
-static int v9fs_vfs_unlink(struct inode *i, struct dentry *d)
+int v9fs_vfs_unlink(struct inode *i, struct dentry *d)
 {
 	return v9fs_remove(i, d, 0);
 }
@@ -1075,7 +734,7 @@ static int v9fs_vfs_unlink(struct inode *i, struct dentry *d)
  *
  */
 
-static int v9fs_vfs_rmdir(struct inode *i, struct dentry *d)
+int v9fs_vfs_rmdir(struct inode *i, struct dentry *d)
 {
 	return v9fs_remove(i, d, 1);
 }
@@ -1089,7 +748,7 @@ static int v9fs_vfs_rmdir(struct inode *i, struct dentry *d)
  *
  */
 
-static int
+int
 v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 		struct inode *new_dir, struct dentry *new_dentry)
 {
@@ -1196,42 +855,6 @@ v9fs_vfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
1196 return 0; 855 return 0;
1197} 856}
1198 857
1199static int
1200v9fs_vfs_getattr_dotl(struct vfsmount *mnt, struct dentry *dentry,
1201 struct kstat *stat)
1202{
1203 int err;
1204 struct v9fs_session_info *v9ses;
1205 struct p9_fid *fid;
1206 struct p9_stat_dotl *st;
1207
1208 P9_DPRINTK(P9_DEBUG_VFS, "dentry: %p\n", dentry);
1209 err = -EPERM;
1210 v9ses = v9fs_inode2v9ses(dentry->d_inode);
1211 if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
1212 return simple_getattr(mnt, dentry, stat);
1213
1214 fid = v9fs_fid_lookup(dentry);
1215 if (IS_ERR(fid))
1216 return PTR_ERR(fid);
1217
1218 /* Ask for all the fields in stat structure. Server will return
1219 * whatever it supports
1220 */
1221
1222 st = p9_client_getattr_dotl(fid, P9_STATS_ALL);
1223 if (IS_ERR(st))
1224 return PTR_ERR(st);
1225
1226 v9fs_stat2inode_dotl(st, dentry->d_inode);
1227 generic_fillattr(dentry->d_inode, stat);
1228 /* Change block size to what the server returned */
1229 stat->blksize = st->st_blksize;
1230
1231 kfree(st);
1232 return 0;
1233}
1234
1235/** 858/**
1236 * v9fs_vfs_setattr - set file metadata 859 * v9fs_vfs_setattr - set file metadata
1237 * @dentry: file whose metadata to set 860 * @dentry: file whose metadata to set
@@ -1291,64 +914,6 @@ static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr)
1291} 914}
1292 915
1293/** 916/**
1294 * v9fs_vfs_setattr_dotl - set file metadata
1295 * @dentry: file whose metadata to set
1296 * @iattr: metadata assignment structure
1297 *
1298 */
1299
1300int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
1301{
1302 int retval;
1303 struct v9fs_session_info *v9ses;
1304 struct p9_fid *fid;
1305 struct p9_iattr_dotl p9attr;
1306
1307 P9_DPRINTK(P9_DEBUG_VFS, "\n");
1308
1309 retval = inode_change_ok(dentry->d_inode, iattr);
1310 if (retval)
1311 return retval;
1312
1313 p9attr.valid = iattr->ia_valid;
1314 p9attr.mode = iattr->ia_mode;
1315 p9attr.uid = iattr->ia_uid;
1316 p9attr.gid = iattr->ia_gid;
1317 p9attr.size = iattr->ia_size;
1318 p9attr.atime_sec = iattr->ia_atime.tv_sec;
1319 p9attr.atime_nsec = iattr->ia_atime.tv_nsec;
1320 p9attr.mtime_sec = iattr->ia_mtime.tv_sec;
1321 p9attr.mtime_nsec = iattr->ia_mtime.tv_nsec;
1322
1323 retval = -EPERM;
1324 v9ses = v9fs_inode2v9ses(dentry->d_inode);
1325 fid = v9fs_fid_lookup(dentry);
1326 if (IS_ERR(fid))
1327 return PTR_ERR(fid);
1328
1329 retval = p9_client_setattr(fid, &p9attr);
1330 if (retval < 0)
1331 return retval;
1332
1333 if ((iattr->ia_valid & ATTR_SIZE) &&
1334 iattr->ia_size != i_size_read(dentry->d_inode)) {
1335 retval = vmtruncate(dentry->d_inode, iattr->ia_size);
1336 if (retval)
1337 return retval;
1338 }
1339
1340 setattr_copy(dentry->d_inode, iattr);
1341 mark_inode_dirty(dentry->d_inode);
1342 if (iattr->ia_valid & ATTR_MODE) {
1343 /* We also want to update ACL when we update mode bits */
1344 retval = v9fs_acl_chmod(dentry);
1345 if (retval < 0)
1346 return retval;
1347 }
1348 return 0;
1349}
1350
1351/**
1352 * v9fs_stat2inode - populate an inode structure with mistat info 917 * v9fs_stat2inode - populate an inode structure with mistat info
1353 * @stat: Plan 9 metadata (mistat) structure 918 * @stat: Plan 9 metadata (mistat) structure
1354 * @inode: inode to populate 919 * @inode: inode to populate
@@ -1426,77 +991,6 @@ v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
1426} 991}
1427 992
1428/** 993/**
1429 * v9fs_stat2inode_dotl - populate an inode structure with stat info
1430 * @stat: stat structure
1431 * @inode: inode to populate
1432 * @sb: superblock of filesystem
1433 *
1434 */
1435
1436void
1437v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
1438{
1439
1440 if ((stat->st_result_mask & P9_STATS_BASIC) == P9_STATS_BASIC) {
1441 inode->i_atime.tv_sec = stat->st_atime_sec;
1442 inode->i_atime.tv_nsec = stat->st_atime_nsec;
1443 inode->i_mtime.tv_sec = stat->st_mtime_sec;
1444 inode->i_mtime.tv_nsec = stat->st_mtime_nsec;
1445 inode->i_ctime.tv_sec = stat->st_ctime_sec;
1446 inode->i_ctime.tv_nsec = stat->st_ctime_nsec;
1447 inode->i_uid = stat->st_uid;
1448 inode->i_gid = stat->st_gid;
1449 inode->i_nlink = stat->st_nlink;
1450 inode->i_mode = stat->st_mode;
1451 inode->i_rdev = new_decode_dev(stat->st_rdev);
1452
1453 if ((S_ISBLK(inode->i_mode)) || (S_ISCHR(inode->i_mode)))
1454 init_special_inode(inode, inode->i_mode, inode->i_rdev);
1455
1456 i_size_write(inode, stat->st_size);
1457 inode->i_blocks = stat->st_blocks;
1458 } else {
1459 if (stat->st_result_mask & P9_STATS_ATIME) {
1460 inode->i_atime.tv_sec = stat->st_atime_sec;
1461 inode->i_atime.tv_nsec = stat->st_atime_nsec;
1462 }
1463 if (stat->st_result_mask & P9_STATS_MTIME) {
1464 inode->i_mtime.tv_sec = stat->st_mtime_sec;
1465 inode->i_mtime.tv_nsec = stat->st_mtime_nsec;
1466 }
1467 if (stat->st_result_mask & P9_STATS_CTIME) {
1468 inode->i_ctime.tv_sec = stat->st_ctime_sec;
1469 inode->i_ctime.tv_nsec = stat->st_ctime_nsec;
1470 }
1471 if (stat->st_result_mask & P9_STATS_UID)
1472 inode->i_uid = stat->st_uid;
1473 if (stat->st_result_mask & P9_STATS_GID)
1474 inode->i_gid = stat->st_gid;
1475 if (stat->st_result_mask & P9_STATS_NLINK)
1476 inode->i_nlink = stat->st_nlink;
1477 if (stat->st_result_mask & P9_STATS_MODE) {
1478 inode->i_mode = stat->st_mode;
1479 if ((S_ISBLK(inode->i_mode)) ||
1480 (S_ISCHR(inode->i_mode)))
1481 init_special_inode(inode, inode->i_mode,
1482 inode->i_rdev);
1483 }
1484 if (stat->st_result_mask & P9_STATS_RDEV)
1485 inode->i_rdev = new_decode_dev(stat->st_rdev);
1486 if (stat->st_result_mask & P9_STATS_SIZE)
1487 i_size_write(inode, stat->st_size);
1488 if (stat->st_result_mask & P9_STATS_BLOCKS)
1489 inode->i_blocks = stat->st_blocks;
1490 }
1491 if (stat->st_result_mask & P9_STATS_GEN)
1492 inode->i_generation = stat->st_gen;
1493
1494 /* Currently we don't support P9_STATS_BTIME and P9_STATS_DATA_VERSION
1495 * because the inode structure does not have fields for them.
1496 */
1497}
1498
1499/**
1500 * v9fs_qid2ino - convert qid into inode number 994 * v9fs_qid2ino - convert qid into inode number
1501 * @qid: qid to hash 995 * @qid: qid to hash
1502 * 996 *
@@ -1602,7 +1096,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
  *
  */
 
-static void
+void
 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
 {
 	char *s = nd_get_link(nd);
@@ -1646,94 +1140,6 @@ static int v9fs_vfs_mkspecial(struct inode *dir, struct dentry *dentry,
1646} 1140}
1647 1141
1648/** 1142/**
1649 * v9fs_vfs_symlink_dotl - helper function to create symlinks
1650 * @dir: directory inode containing symlink
1651 * @dentry: dentry for symlink
1652 * @symname: symlink data
1653 *
1654 * See Also: 9P2000.L RFC for more information
1655 *
1656 */
1657
1658static int
1659v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry,
1660 const char *symname)
1661{
1662 struct v9fs_session_info *v9ses;
1663 struct p9_fid *dfid;
1664 struct p9_fid *fid = NULL;
1665 struct inode *inode;
1666 struct p9_qid qid;
1667 char *name;
1668 int err;
1669 gid_t gid;
1670
1671 name = (char *) dentry->d_name.name;
1672 P9_DPRINTK(P9_DEBUG_VFS, "v9fs_vfs_symlink_dotl : %lu,%s,%s\n",
1673 dir->i_ino, name, symname);
1674 v9ses = v9fs_inode2v9ses(dir);
1675
1676 dfid = v9fs_fid_lookup(dentry->d_parent);
1677 if (IS_ERR(dfid)) {
1678 err = PTR_ERR(dfid);
1679 P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
1680 return err;
1681 }
1682
1683 gid = v9fs_get_fsgid_for_create(dir);
1684
1685 /* Server doesn't alter fid on TSYMLINK. Hence no need to clone it. */
1686 err = p9_client_symlink(dfid, name, (char *)symname, gid, &qid);
1687
1688 if (err < 0) {
1689 P9_DPRINTK(P9_DEBUG_VFS, "p9_client_symlink failed %d\n", err);
1690 goto error;
1691 }
1692
1693 if (v9ses->cache) {
1694 /* Now walk from the parent so we can get an unopened fid. */
1695 fid = p9_client_walk(dfid, 1, &name, 1);
1696 if (IS_ERR(fid)) {
1697 err = PTR_ERR(fid);
1698 P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
1699 err);
1700 fid = NULL;
1701 goto error;
1702 }
1703
1704 /* instantiate inode and assign the unopened fid to dentry */
1705 inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
1706 if (IS_ERR(inode)) {
1707 err = PTR_ERR(inode);
1708 P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
1709 err);
1710 goto error;
1711 }
1712 d_set_d_op(dentry, &v9fs_cached_dentry_operations);
1713 d_instantiate(dentry, inode);
1714 err = v9fs_fid_add(dentry, fid);
1715 if (err < 0)
1716 goto error;
1717 fid = NULL;
1718 } else {
1719 /* Not in cached mode. No need to populate inode with stat */
1720 inode = v9fs_get_inode(dir->i_sb, S_IFLNK);
1721 if (IS_ERR(inode)) {
1722 err = PTR_ERR(inode);
1723 goto error;
1724 }
1725 d_set_d_op(dentry, &v9fs_dentry_operations);
1726 d_instantiate(dentry, inode);
1727 }
1728
1729error:
1730 if (fid)
1731 p9_client_clunk(fid);
1732
1733 return err;
1734}
1735
1736/**
1737 * v9fs_vfs_symlink - helper function to create symlinks 1143 * v9fs_vfs_symlink - helper function to create symlinks
1738 * @dir: directory inode containing symlink 1144 * @dir: directory inode containing symlink
1739 * @dentry: dentry for symlink 1145 * @dentry: dentry for symlink
@@ -1792,77 +1198,6 @@ clunk_fid:
1792} 1198}
1793 1199
1794/** 1200/**
1795 * v9fs_vfs_link_dotl - create a hardlink for dotl
1796 * @old_dentry: dentry for file to link to
1797 * @dir: inode destination for new link
1798 * @dentry: dentry for link
1799 *
1800 */
1801
1802static int
1803v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir,
1804 struct dentry *dentry)
1805{
1806 int err;
1807 struct p9_fid *dfid, *oldfid;
1808 char *name;
1809 struct v9fs_session_info *v9ses;
1810 struct dentry *dir_dentry;
1811
1812 P9_DPRINTK(P9_DEBUG_VFS, "dir ino: %lu, old_name: %s, new_name: %s\n",
1813 dir->i_ino, old_dentry->d_name.name,
1814 dentry->d_name.name);
1815
1816 v9ses = v9fs_inode2v9ses(dir);
1817 dir_dentry = v9fs_dentry_from_dir_inode(dir);
1818 dfid = v9fs_fid_lookup(dir_dentry);
1819 if (IS_ERR(dfid))
1820 return PTR_ERR(dfid);
1821
1822 oldfid = v9fs_fid_lookup(old_dentry);
1823 if (IS_ERR(oldfid))
1824 return PTR_ERR(oldfid);
1825
1826 name = (char *) dentry->d_name.name;
1827
1828 err = p9_client_link(dfid, oldfid, (char *)dentry->d_name.name);
1829
1830 if (err < 0) {
1831 P9_DPRINTK(P9_DEBUG_VFS, "p9_client_link failed %d\n", err);
1832 return err;
1833 }
1834
1835 if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
1836 /* Get the latest stat info from server. */
1837 struct p9_fid *fid;
1838 struct p9_stat_dotl *st;
1839
1840 fid = v9fs_fid_lookup(old_dentry);
1841 if (IS_ERR(fid))
1842 return PTR_ERR(fid);
1843
1844 st = p9_client_getattr_dotl(fid, P9_STATS_BASIC);
1845 if (IS_ERR(st))
1846 return PTR_ERR(st);
1847
1848 v9fs_stat2inode_dotl(st, old_dentry->d_inode);
1849
1850 kfree(st);
1851 } else {
1852 /* Caching disabled. No need to get upto date stat info.
1853 * This dentry will be released immediately. So, just hold the
1854 * inode
1855 */
1856 ihold(old_dentry->d_inode);
1857 }
1858
1859 d_set_d_op(dentry, old_dentry->d_op);
1860 d_instantiate(dentry, old_dentry->d_inode);
1861
1862 return err;
1863}
1864
1865/**
1866 * v9fs_vfs_mknod - create a special file 1201 * v9fs_vfs_mknod - create a special file
1867 * @dir: inode destination for new link 1202 * @dir: inode destination for new link
1868 * @dentry: dentry for file 1203 * @dentry: dentry for file
@@ -1907,160 +1242,6 @@ v9fs_vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev)
1907 return retval; 1242 return retval;
1908} 1243}
1909 1244
1910/**
1911 * v9fs_vfs_mknod_dotl - create a special file
1912 * @dir: inode destination for new link
1913 * @dentry: dentry for file
1914 * @mode: mode for creation
1915 * @rdev: device associated with special file
1916 *
1917 */
1918static int
1919v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode,
1920 dev_t rdev)
1921{
1922 int err;
1923 char *name;
1924 mode_t mode;
1925 struct v9fs_session_info *v9ses;
1926 struct p9_fid *fid = NULL, *dfid = NULL;
1927 struct inode *inode;
1928 gid_t gid;
1929 struct p9_qid qid;
1930 struct dentry *dir_dentry;
1931 struct posix_acl *dacl = NULL, *pacl = NULL;
1932
1933 P9_DPRINTK(P9_DEBUG_VFS,
1934 " %lu,%s mode: %x MAJOR: %u MINOR: %u\n", dir->i_ino,
1935 dentry->d_name.name, omode, MAJOR(rdev), MINOR(rdev));
1936
1937 if (!new_valid_dev(rdev))
1938 return -EINVAL;
1939
1940 v9ses = v9fs_inode2v9ses(dir);
1941 dir_dentry = v9fs_dentry_from_dir_inode(dir);
1942 dfid = v9fs_fid_lookup(dir_dentry);
1943 if (IS_ERR(dfid)) {
1944 err = PTR_ERR(dfid);
1945 P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
1946 dfid = NULL;
1947 goto error;
1948 }
1949
1950 gid = v9fs_get_fsgid_for_create(dir);
1951 mode = omode;
1952 /* Update mode based on ACL value */
1953 err = v9fs_acl_mode(dir, &mode, &dacl, &pacl);
1954 if (err) {
1955 P9_DPRINTK(P9_DEBUG_VFS,
1956 "Failed to get acl values in mknod %d\n", err);
1957 goto error;
1958 }
1959 name = (char *) dentry->d_name.name;
1960
1961 err = p9_client_mknod_dotl(dfid, name, mode, rdev, gid, &qid);
1962 if (err < 0)
1963 goto error;
1964
1965 /* instantiate inode and assign the unopened fid to the dentry */
1966 if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
1967 fid = p9_client_walk(dfid, 1, &name, 1);
1968 if (IS_ERR(fid)) {
1969 err = PTR_ERR(fid);
1970 P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
1971 err);
1972 fid = NULL;
1973 goto error;
1974 }
1975
1976 inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
1977 if (IS_ERR(inode)) {
1978 err = PTR_ERR(inode);
1979 P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
1980 err);
1981 goto error;
1982 }
1983 d_set_d_op(dentry, &v9fs_cached_dentry_operations);
1984 d_instantiate(dentry, inode);
1985 err = v9fs_fid_add(dentry, fid);
1986 if (err < 0)
1987 goto error;
1988 fid = NULL;
1989 } else {
1990 /*
1991 * Not in cached mode. No need to populate inode with stat.
1992 * socket syscall returns a fd, so we need instantiate
1993 */
1994 inode = v9fs_get_inode(dir->i_sb, mode);
1995 if (IS_ERR(inode)) {
1996 err = PTR_ERR(inode);
1997 goto error;
1998 }
1999 d_set_d_op(dentry, &v9fs_dentry_operations);
2000 d_instantiate(dentry, inode);
2001 }
2002 /* Now set the ACL based on the default value */
2003 v9fs_set_create_acl(dentry, dacl, pacl);
2004error:
2005 if (fid)
2006 p9_client_clunk(fid);
2007 return err;
2008}
2009
2010static int
2011v9fs_vfs_readlink_dotl(struct dentry *dentry, char *buffer, int buflen)
2012{
2013 int retval;
2014 struct p9_fid *fid;
2015 char *target = NULL;
2016
2017 P9_DPRINTK(P9_DEBUG_VFS, " %s\n", dentry->d_name.name);
2018 retval = -EPERM;
2019 fid = v9fs_fid_lookup(dentry);
2020 if (IS_ERR(fid))
2021 return PTR_ERR(fid);
2022
2023 retval = p9_client_readlink(fid, &target);
2024 if (retval < 0)
2025 return retval;
2026
2027 strncpy(buffer, target, buflen);
2028 P9_DPRINTK(P9_DEBUG_VFS, "%s -> %s\n", dentry->d_name.name, buffer);
2029
2030 retval = strnlen(buffer, buflen);
2031 return retval;
2032}
2033
2034/**
2035 * v9fs_vfs_follow_link_dotl - follow a symlink path
2036 * @dentry: dentry for symlink
2037 * @nd: nameidata
2038 *
2039 */
2040
2041static void *
2042v9fs_vfs_follow_link_dotl(struct dentry *dentry, struct nameidata *nd)
2043{
2044 int len = 0;
2045 char *link = __getname();
2046
2047 P9_DPRINTK(P9_DEBUG_VFS, "%s n", dentry->d_name.name);
2048
2049 if (!link)
2050 link = ERR_PTR(-ENOMEM);
2051 else {
2052 len = v9fs_vfs_readlink_dotl(dentry, link, PATH_MAX);
2053 if (len < 0) {
2054 __putname(link);
2055 link = ERR_PTR(len);
2056 } else
2057 link[min(len, PATH_MAX-1)] = 0;
2058 }
2059 nd_set_link(nd, link);
2060
2061 return NULL;
2062}
2063
2064static const struct inode_operations v9fs_dir_inode_operations_dotu = { 1245static const struct inode_operations v9fs_dir_inode_operations_dotu = {
2065 .create = v9fs_vfs_create, 1246 .create = v9fs_vfs_create,
2066 .lookup = v9fs_vfs_lookup, 1247 .lookup = v9fs_vfs_lookup,
@@ -2075,25 +1256,6 @@ static const struct inode_operations v9fs_dir_inode_operations_dotu = {
2075 .setattr = v9fs_vfs_setattr, 1256 .setattr = v9fs_vfs_setattr,
2076}; 1257};
2077 1258
2078static const struct inode_operations v9fs_dir_inode_operations_dotl = {
2079 .create = v9fs_vfs_create_dotl,
2080 .lookup = v9fs_vfs_lookup,
2081 .link = v9fs_vfs_link_dotl,
2082 .symlink = v9fs_vfs_symlink_dotl,
2083 .unlink = v9fs_vfs_unlink,
2084 .mkdir = v9fs_vfs_mkdir_dotl,
2085 .rmdir = v9fs_vfs_rmdir,
2086 .mknod = v9fs_vfs_mknod_dotl,
2087 .rename = v9fs_vfs_rename,
2088 .getattr = v9fs_vfs_getattr_dotl,
2089 .setattr = v9fs_vfs_setattr_dotl,
2090 .setxattr = generic_setxattr,
2091 .getxattr = generic_getxattr,
2092 .removexattr = generic_removexattr,
2093 .listxattr = v9fs_listxattr,
2094 .check_acl = v9fs_check_acl,
2095};
2096
2097static const struct inode_operations v9fs_dir_inode_operations = { 1259static const struct inode_operations v9fs_dir_inode_operations = {
2098 .create = v9fs_vfs_create, 1260 .create = v9fs_vfs_create,
2099 .lookup = v9fs_vfs_lookup, 1261 .lookup = v9fs_vfs_lookup,
@@ -2111,16 +1273,6 @@ static const struct inode_operations v9fs_file_inode_operations = {
2111 .setattr = v9fs_vfs_setattr, 1273 .setattr = v9fs_vfs_setattr,
2112}; 1274};
2113 1275
2114static const struct inode_operations v9fs_file_inode_operations_dotl = {
2115 .getattr = v9fs_vfs_getattr_dotl,
2116 .setattr = v9fs_vfs_setattr_dotl,
2117 .setxattr = generic_setxattr,
2118 .getxattr = generic_getxattr,
2119 .removexattr = generic_removexattr,
2120 .listxattr = v9fs_listxattr,
2121 .check_acl = v9fs_check_acl,
2122};
2123
2124static const struct inode_operations v9fs_symlink_inode_operations = { 1276static const struct inode_operations v9fs_symlink_inode_operations = {
2125 .readlink = generic_readlink, 1277 .readlink = generic_readlink,
2126 .follow_link = v9fs_vfs_follow_link, 1278 .follow_link = v9fs_vfs_follow_link,
@@ -2129,14 +1281,3 @@ static const struct inode_operations v9fs_symlink_inode_operations = {
2129 .setattr = v9fs_vfs_setattr, 1281 .setattr = v9fs_vfs_setattr,
2130}; 1282};
2131 1283
2132static const struct inode_operations v9fs_symlink_inode_operations_dotl = {
2133 .readlink = v9fs_vfs_readlink_dotl,
2134 .follow_link = v9fs_vfs_follow_link_dotl,
2135 .put_link = v9fs_vfs_put_link,
2136 .getattr = v9fs_vfs_getattr_dotl,
2137 .setattr = v9fs_vfs_setattr_dotl,
2138 .setxattr = generic_setxattr,
2139 .getxattr = generic_getxattr,
2140 .removexattr = generic_removexattr,
2141 .listxattr = v9fs_listxattr,
2142};
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
new file mode 100644
index 000000000000..fe3ffa9aace4
--- /dev/null
+++ b/fs/9p/vfs_inode_dotl.c
@@ -0,0 +1,824 @@
1/*
2 * linux/fs/9p/vfs_inode_dotl.c
3 *
4 * This file contains vfs inode ops for the 9P2000.L protocol.
5 *
6 * Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
7 * Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2
11 * as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to:
20 * Free Software Foundation
21 * 51 Franklin Street, Fifth Floor
22 * Boston, MA 02111-1301 USA
23 *
24 */
25
26#include <linux/module.h>
27#include <linux/errno.h>
28#include <linux/fs.h>
29#include <linux/file.h>
30#include <linux/pagemap.h>
31#include <linux/stat.h>
32#include <linux/string.h>
33#include <linux/inet.h>
34#include <linux/namei.h>
35#include <linux/idr.h>
36#include <linux/sched.h>
37#include <linux/slab.h>
38#include <linux/xattr.h>
39#include <linux/posix_acl.h>
40#include <net/9p/9p.h>
41#include <net/9p/client.h>
42
43#include "v9fs.h"
44#include "v9fs_vfs.h"
45#include "fid.h"
46#include "cache.h"
47#include "xattr.h"
48#include "acl.h"
49
50static int
51v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode,
52 dev_t rdev);
53
54/**
55 * v9fs_get_fsgid_for_create - Helper function to get the gid for creating a
56 * new file system object. This checks the S_ISGID bit to determine the
57 * owning group of the new file system object.
58 */
59
60static gid_t v9fs_get_fsgid_for_create(struct inode *dir_inode)
61{
62 BUG_ON(dir_inode == NULL);
63
64 if (dir_inode->i_mode & S_ISGID) {
65 /* set_gid bit is set.*/
66 return dir_inode->i_gid;
67 }
68 return current_fsgid();
69}
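/*
 * Editorial sketch, not part of this patch: the group-ownership policy the
 * helper above implements, spelled out branch by branch. The function name
 * is hypothetical and only illustrates the expected behavior.
 */
static void sketch_fsgid_policy(struct inode *dir)
{
	gid_t gid = v9fs_get_fsgid_for_create(dir);

	if (dir->i_mode & S_ISGID)
		/* setgid directory: the new object inherits the dir's group */
		BUG_ON(gid != dir->i_gid);
	else
		/* plain directory: the new object gets the creator's fsgid */
		BUG_ON(gid != current_fsgid());
}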
70
71/**
72 * v9fs_dentry_from_dir_inode - helper function to get the dentry from
73 * dir inode.
74 *
75 */
76
77static struct dentry *v9fs_dentry_from_dir_inode(struct inode *inode)
78{
79 struct dentry *dentry;
80
81 spin_lock(&inode->i_lock);
82 /* Directory should have only one entry. */
83 BUG_ON(S_ISDIR(inode->i_mode) && !list_is_singular(&inode->i_dentry));
84 dentry = list_entry(inode->i_dentry.next, struct dentry, d_alias);
85 spin_unlock(&inode->i_lock);
86 return dentry;
87}
88
89struct inode *
90v9fs_inode_dotl(struct v9fs_session_info *v9ses, struct p9_fid *fid,
91 struct super_block *sb)
92{
93 struct inode *ret = NULL;
94 int err;
95 struct p9_stat_dotl *st;
96
97 st = p9_client_getattr_dotl(fid, P9_STATS_BASIC);
98 if (IS_ERR(st))
99 return ERR_CAST(st);
100
101 ret = v9fs_get_inode(sb, st->st_mode);
102 if (IS_ERR(ret)) {
103 err = PTR_ERR(ret);
104 goto error;
105 }
106
107 v9fs_stat2inode_dotl(st, ret);
108 ret->i_ino = v9fs_qid2ino(&st->qid);
109#ifdef CONFIG_9P_FSCACHE
110 v9fs_vcookie_set_qid(ret, &st->qid);
111 v9fs_cache_inode_get_cookie(ret);
112#endif
113 err = v9fs_get_acl(ret, fid);
114 if (err) {
115 iput(ret);
116 goto error;
117 }
118 kfree(st);
119 return ret;
120error:
121 kfree(st);
122 return ERR_PTR(err);
123}
124
125/**
126 * v9fs_vfs_create_dotl - VFS hook to create files for 9P2000.L protocol.
127 * @dir: directory inode in which the file is being created
128 * @dentry: dentry for the file being created
129 * @omode: create permissions
130 * @nd: path information
131 *
132 */
133
134static int
135v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
136 struct nameidata *nd)
137{
138 int err = 0;
139 char *name = NULL;
140 gid_t gid;
141 int flags;
142 mode_t mode;
143 struct v9fs_session_info *v9ses;
144 struct p9_fid *fid = NULL;
145 struct p9_fid *dfid, *ofid;
146 struct file *filp;
147 struct p9_qid qid;
148 struct inode *inode;
149 struct posix_acl *pacl = NULL, *dacl = NULL;
150
151 v9ses = v9fs_inode2v9ses(dir);
152 if (nd && nd->flags & LOOKUP_OPEN)
153 flags = nd->intent.open.flags - 1;
154 else {
155 /*
156 * create call without LOOKUP_OPEN is due
157 * to mknod of regular files. So use mknod
158 * operation.
159 */
160 return v9fs_vfs_mknod_dotl(dir, dentry, omode, 0);
161 }
162
163 name = (char *) dentry->d_name.name;
164 P9_DPRINTK(P9_DEBUG_VFS, "v9fs_vfs_create_dotl: name:%s flags:0x%x "
165 "mode:0x%x\n", name, flags, omode);
166
167 dfid = v9fs_fid_lookup(dentry->d_parent);
168 if (IS_ERR(dfid)) {
169 err = PTR_ERR(dfid);
170 P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
171 return err;
172 }
173
174 /* clone a fid to use for creation */
175 ofid = p9_client_walk(dfid, 0, NULL, 1);
176 if (IS_ERR(ofid)) {
177 err = PTR_ERR(ofid);
178 P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
179 return err;
180 }
181
182 gid = v9fs_get_fsgid_for_create(dir);
183
184 mode = omode;
185 /* Update mode based on ACL value */
186 err = v9fs_acl_mode(dir, &mode, &dacl, &pacl);
187 if (err) {
188 P9_DPRINTK(P9_DEBUG_VFS,
189 "Failed to get acl values in creat %d\n", err);
190 goto error;
191 }
192 err = p9_client_create_dotl(ofid, name, flags, mode, gid, &qid);
193 if (err < 0) {
194 P9_DPRINTK(P9_DEBUG_VFS,
195 "p9_client_open_dotl failed in creat %d\n",
196 err);
197 goto error;
198 }
199
200 /* instantiate inode and assign the unopened fid to the dentry */
201 fid = p9_client_walk(dfid, 1, &name, 1);
202 if (IS_ERR(fid)) {
203 err = PTR_ERR(fid);
204 P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
205 fid = NULL;
206 goto error;
207 }
208 inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
209 if (IS_ERR(inode)) {
210 err = PTR_ERR(inode);
211 P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err);
212 goto error;
213 }
214 d_instantiate(dentry, inode);
215 err = v9fs_fid_add(dentry, fid);
216 if (err < 0)
217 goto error;
218
219 /* Now set the ACL based on the default value */
220 v9fs_set_create_acl(dentry, dacl, pacl);
221
222 /* Since we are opening a file, assign the open fid to the file */
223 filp = lookup_instantiate_filp(nd, dentry, generic_file_open);
224 if (IS_ERR(filp)) {
225 p9_client_clunk(ofid);
226 return PTR_ERR(filp);
227 }
228 filp->private_data = ofid;
229 return 0;
230
231error:
232 if (ofid)
233 p9_client_clunk(ofid);
234 if (fid)
235 p9_client_clunk(fid);
236 return err;
237}
238
239/**
240 * v9fs_vfs_mkdir_dotl - VFS mkdir hook to create a directory
241 * @dir: inode of the parent directory
242 * @dentry: dentry for the new directory
243 * @omode: mode for new directory
244 *
245 */
246
247static int v9fs_vfs_mkdir_dotl(struct inode *dir,
248 struct dentry *dentry, int omode)
249{
250 int err;
251 struct v9fs_session_info *v9ses;
252 struct p9_fid *fid = NULL, *dfid = NULL;
253 gid_t gid;
254 char *name;
255 mode_t mode;
256 struct inode *inode;
257 struct p9_qid qid;
258 struct dentry *dir_dentry;
259 struct posix_acl *dacl = NULL, *pacl = NULL;
260
261 P9_DPRINTK(P9_DEBUG_VFS, "name %s\n", dentry->d_name.name);
262 err = 0;
263 v9ses = v9fs_inode2v9ses(dir);
264
265 omode |= S_IFDIR;
266 if (dir->i_mode & S_ISGID)
267 omode |= S_ISGID;
268
269 dir_dentry = v9fs_dentry_from_dir_inode(dir);
270 dfid = v9fs_fid_lookup(dir_dentry);
271 if (IS_ERR(dfid)) {
272 err = PTR_ERR(dfid);
273 P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
274 dfid = NULL;
275 goto error;
276 }
277
278 gid = v9fs_get_fsgid_for_create(dir);
279 mode = omode;
280 /* Update mode based on ACL value */
281 err = v9fs_acl_mode(dir, &mode, &dacl, &pacl);
282 if (err) {
283 P9_DPRINTK(P9_DEBUG_VFS,
284 "Failed to get acl values in mkdir %d\n", err);
285 goto error;
286 }
287 name = (char *) dentry->d_name.name;
288 err = p9_client_mkdir_dotl(dfid, name, mode, gid, &qid);
289 if (err < 0)
290 goto error;
291
292 /* instantiate inode and assign the unopened fid to the dentry */
293 if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
294 fid = p9_client_walk(dfid, 1, &name, 1);
295 if (IS_ERR(fid)) {
296 err = PTR_ERR(fid);
297 P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
298 err);
299 fid = NULL;
300 goto error;
301 }
302
303 inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
304 if (IS_ERR(inode)) {
305 err = PTR_ERR(inode);
306 P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
307 err);
308 goto error;
309 }
310 d_instantiate(dentry, inode);
311 err = v9fs_fid_add(dentry, fid);
312 if (err < 0)
313 goto error;
314 fid = NULL;
315 } else {
316 /*
317 * Not in cached mode. No need to populate
318 * inode with stat. We need to get an inode
319 * so that we can set the acl with dentry
320 */
321 inode = v9fs_get_inode(dir->i_sb, mode);
322 if (IS_ERR(inode)) {
323 err = PTR_ERR(inode);
324 goto error;
325 }
326 d_instantiate(dentry, inode);
327 }
328 /* Now set the ACL based on the default value */
329 v9fs_set_create_acl(dentry, dacl, pacl);
330
331error:
332 if (fid)
333 p9_client_clunk(fid);
334 return err;
335}
336
337static int
338v9fs_vfs_getattr_dotl(struct vfsmount *mnt, struct dentry *dentry,
339 struct kstat *stat)
340{
341 int err;
342 struct v9fs_session_info *v9ses;
343 struct p9_fid *fid;
344 struct p9_stat_dotl *st;
345
346 P9_DPRINTK(P9_DEBUG_VFS, "dentry: %p\n", dentry);
347 err = -EPERM;
348 v9ses = v9fs_inode2v9ses(dentry->d_inode);
349 if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
350 return simple_getattr(mnt, dentry, stat);
351
352 fid = v9fs_fid_lookup(dentry);
353 if (IS_ERR(fid))
354 return PTR_ERR(fid);
355
356 /* Ask for all the fields in stat structure. Server will return
357 * whatever it supports
358 */
359
360 st = p9_client_getattr_dotl(fid, P9_STATS_ALL);
361 if (IS_ERR(st))
362 return PTR_ERR(st);
363
364 v9fs_stat2inode_dotl(st, dentry->d_inode);
365 generic_fillattr(dentry->d_inode, stat);
366 /* Change block size to what the server returned */
367 stat->blksize = st->st_blksize;
368
369 kfree(st);
370 return 0;
371}
372
373/**
374 * v9fs_vfs_setattr_dotl - set file metadata
375 * @dentry: file whose metadata to set
376 * @iattr: metadata assignment structure
377 *
378 */
379
380int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
381{
382 int retval;
383 struct v9fs_session_info *v9ses;
384 struct p9_fid *fid;
385 struct p9_iattr_dotl p9attr;
386
387 P9_DPRINTK(P9_DEBUG_VFS, "\n");
388
389 retval = inode_change_ok(dentry->d_inode, iattr);
390 if (retval)
391 return retval;
392
393 p9attr.valid = iattr->ia_valid;
394 p9attr.mode = iattr->ia_mode;
395 p9attr.uid = iattr->ia_uid;
396 p9attr.gid = iattr->ia_gid;
397 p9attr.size = iattr->ia_size;
398 p9attr.atime_sec = iattr->ia_atime.tv_sec;
399 p9attr.atime_nsec = iattr->ia_atime.tv_nsec;
400 p9attr.mtime_sec = iattr->ia_mtime.tv_sec;
401 p9attr.mtime_nsec = iattr->ia_mtime.tv_nsec;
402
403 retval = -EPERM;
404 v9ses = v9fs_inode2v9ses(dentry->d_inode);
405 fid = v9fs_fid_lookup(dentry);
406 if (IS_ERR(fid))
407 return PTR_ERR(fid);
408
409 retval = p9_client_setattr(fid, &p9attr);
410 if (retval < 0)
411 return retval;
412
413 if ((iattr->ia_valid & ATTR_SIZE) &&
414 iattr->ia_size != i_size_read(dentry->d_inode)) {
415 retval = vmtruncate(dentry->d_inode, iattr->ia_size);
416 if (retval)
417 return retval;
418 }
419
420 setattr_copy(dentry->d_inode, iattr);
421 mark_inode_dirty(dentry->d_inode);
422 if (iattr->ia_valid & ATTR_MODE) {
423 /* We also want to update ACL when we update mode bits */
424 retval = v9fs_acl_chmod(dentry);
425 if (retval < 0)
426 return retval;
427 }
428 return 0;
429}
430
431/**
432 * v9fs_stat2inode_dotl - populate an inode structure with stat info
433 * @stat: stat structure
434 * @inode: inode to populate
436 *
437 */
438
439void
440v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
441{
442
443 if ((stat->st_result_mask & P9_STATS_BASIC) == P9_STATS_BASIC) {
444 inode->i_atime.tv_sec = stat->st_atime_sec;
445 inode->i_atime.tv_nsec = stat->st_atime_nsec;
446 inode->i_mtime.tv_sec = stat->st_mtime_sec;
447 inode->i_mtime.tv_nsec = stat->st_mtime_nsec;
448 inode->i_ctime.tv_sec = stat->st_ctime_sec;
449 inode->i_ctime.tv_nsec = stat->st_ctime_nsec;
450 inode->i_uid = stat->st_uid;
451 inode->i_gid = stat->st_gid;
452 inode->i_nlink = stat->st_nlink;
453 inode->i_mode = stat->st_mode;
454 inode->i_rdev = new_decode_dev(stat->st_rdev);
455
456 if ((S_ISBLK(inode->i_mode)) || (S_ISCHR(inode->i_mode)))
457 init_special_inode(inode, inode->i_mode, inode->i_rdev);
458
459 i_size_write(inode, stat->st_size);
460 inode->i_blocks = stat->st_blocks;
461 } else {
462 if (stat->st_result_mask & P9_STATS_ATIME) {
463 inode->i_atime.tv_sec = stat->st_atime_sec;
464 inode->i_atime.tv_nsec = stat->st_atime_nsec;
465 }
466 if (stat->st_result_mask & P9_STATS_MTIME) {
467 inode->i_mtime.tv_sec = stat->st_mtime_sec;
468 inode->i_mtime.tv_nsec = stat->st_mtime_nsec;
469 }
470 if (stat->st_result_mask & P9_STATS_CTIME) {
471 inode->i_ctime.tv_sec = stat->st_ctime_sec;
472 inode->i_ctime.tv_nsec = stat->st_ctime_nsec;
473 }
474 if (stat->st_result_mask & P9_STATS_UID)
475 inode->i_uid = stat->st_uid;
476 if (stat->st_result_mask & P9_STATS_GID)
477 inode->i_gid = stat->st_gid;
478 if (stat->st_result_mask & P9_STATS_NLINK)
479 inode->i_nlink = stat->st_nlink;
480 if (stat->st_result_mask & P9_STATS_MODE) {
481 inode->i_mode = stat->st_mode;
482 if ((S_ISBLK(inode->i_mode)) ||
483 (S_ISCHR(inode->i_mode)))
484 init_special_inode(inode, inode->i_mode,
485 inode->i_rdev);
486 }
487 if (stat->st_result_mask & P9_STATS_RDEV)
488 inode->i_rdev = new_decode_dev(stat->st_rdev);
489 if (stat->st_result_mask & P9_STATS_SIZE)
490 i_size_write(inode, stat->st_size);
491 if (stat->st_result_mask & P9_STATS_BLOCKS)
492 inode->i_blocks = stat->st_blocks;
493 }
494 if (stat->st_result_mask & P9_STATS_GEN)
495 inode->i_generation = stat->st_gen;
496
497 /* Currently we don't support P9_STATS_BTIME and P9_STATS_DATA_VERSION
498 * because the inode structure does not have fields for them.
499 */
500}
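/*
 * Editorial sketch, not part of this patch: the mask test above is an
 * all-bits check, not an any-bit check, since P9_STATS_BASIC is the union
 * of the basic field bits. A reply carrying only atime|mtime therefore
 * takes the field-by-field path. The helper name is hypothetical.
 */
static bool sketch_has_all_basic_stats(const struct p9_stat_dotl *st)
{
	return (st->st_result_mask & P9_STATS_BASIC) == P9_STATS_BASIC;
}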
501
502static int
503v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry,
504 const char *symname)
505{
506 struct v9fs_session_info *v9ses;
507 struct p9_fid *dfid;
508 struct p9_fid *fid = NULL;
509 struct inode *inode;
510 struct p9_qid qid;
511 char *name;
512 int err;
513 gid_t gid;
514
515 name = (char *) dentry->d_name.name;
516 P9_DPRINTK(P9_DEBUG_VFS, "v9fs_vfs_symlink_dotl : %lu,%s,%s\n",
517 dir->i_ino, name, symname);
518 v9ses = v9fs_inode2v9ses(dir);
519
520 dfid = v9fs_fid_lookup(dentry->d_parent);
521 if (IS_ERR(dfid)) {
522 err = PTR_ERR(dfid);
523 P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
524 return err;
525 }
526
527 gid = v9fs_get_fsgid_for_create(dir);
528
529 /* Server doesn't alter fid on TSYMLINK. Hence no need to clone it. */
530 err = p9_client_symlink(dfid, name, (char *)symname, gid, &qid);
531
532 if (err < 0) {
533 P9_DPRINTK(P9_DEBUG_VFS, "p9_client_symlink failed %d\n", err);
534 goto error;
535 }
536
537 if (v9ses->cache) {
538 /* Now walk from the parent so we can get an unopened fid. */
539 fid = p9_client_walk(dfid, 1, &name, 1);
540 if (IS_ERR(fid)) {
541 err = PTR_ERR(fid);
542 P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
543 err);
544 fid = NULL;
545 goto error;
546 }
547
548 /* instantiate inode and assign the unopened fid to dentry */
549 inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
550 if (IS_ERR(inode)) {
551 err = PTR_ERR(inode);
552 P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
553 err);
554 goto error;
555 }
556 d_instantiate(dentry, inode);
557 err = v9fs_fid_add(dentry, fid);
558 if (err < 0)
559 goto error;
560 fid = NULL;
561 } else {
562 /* Not in cached mode. No need to populate inode with stat */
563 inode = v9fs_get_inode(dir->i_sb, S_IFLNK);
564 if (IS_ERR(inode)) {
565 err = PTR_ERR(inode);
566 goto error;
567 }
568 d_instantiate(dentry, inode);
569 }
570
571error:
572 if (fid)
573 p9_client_clunk(fid);
574
575 return err;
576}
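/*
 * Hedged aside on the walk above (and the identical one in mknod below):
 * the final argument of p9_client_walk() is "clone", so passing 1 leaves
 * dfid usable afterwards and returns a fresh, unopened fid that can be
 * cached on the dentry. The call reads:
 */
	fid = p9_client_walk(dfid, 1, &name, 1);	/* clone = 1: dfid stays valid */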
577
578/**
579 * v9fs_vfs_link_dotl - create a hardlink for dotl
580 * @old_dentry: dentry for file to link to
581 * @dir: inode destination for new link
582 * @dentry: dentry for link
583 *
584 */
585
586static int
587v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir,
588 struct dentry *dentry)
589{
590 int err;
591 struct p9_fid *dfid, *oldfid;
592 char *name;
593 struct v9fs_session_info *v9ses;
594 struct dentry *dir_dentry;
595
596 P9_DPRINTK(P9_DEBUG_VFS, "dir ino: %lu, old_name: %s, new_name: %s\n",
597 dir->i_ino, old_dentry->d_name.name,
598 dentry->d_name.name);
599
600 v9ses = v9fs_inode2v9ses(dir);
601 dir_dentry = v9fs_dentry_from_dir_inode(dir);
602 dfid = v9fs_fid_lookup(dir_dentry);
603 if (IS_ERR(dfid))
604 return PTR_ERR(dfid);
605
606 oldfid = v9fs_fid_lookup(old_dentry);
607 if (IS_ERR(oldfid))
608 return PTR_ERR(oldfid);
609
610 name = (char *) dentry->d_name.name;
611
 612 err = p9_client_link(dfid, oldfid, name);
613
614 if (err < 0) {
615 P9_DPRINTK(P9_DEBUG_VFS, "p9_client_link failed %d\n", err);
616 return err;
617 }
618
619 if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
620 /* Get the latest stat info from server. */
621 struct p9_fid *fid;
622 struct p9_stat_dotl *st;
623
624 fid = v9fs_fid_lookup(old_dentry);
625 if (IS_ERR(fid))
626 return PTR_ERR(fid);
627
628 st = p9_client_getattr_dotl(fid, P9_STATS_BASIC);
629 if (IS_ERR(st))
630 return PTR_ERR(st);
631
632 v9fs_stat2inode_dotl(st, old_dentry->d_inode);
633
634 kfree(st);
635 } else {
 636 /* Caching disabled. No need to get up-to-date stat info.
 637 * This dentry will be released immediately, so just hold
 638 * the inode.
639 */
640 ihold(old_dentry->d_inode);
641 }
642 d_instantiate(dentry, old_dentry->d_inode);
643
644 return err;
645}
646
647/**
648 * v9fs_vfs_mknod_dotl - create a special file
649 * @dir: inode destination for new link
650 * @dentry: dentry for file
 651 * @omode: mode for creation
652 * @rdev: device associated with special file
653 *
654 */
655static int
656v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode,
657 dev_t rdev)
658{
659 int err;
660 char *name;
661 mode_t mode;
662 struct v9fs_session_info *v9ses;
663 struct p9_fid *fid = NULL, *dfid = NULL;
664 struct inode *inode;
665 gid_t gid;
666 struct p9_qid qid;
667 struct dentry *dir_dentry;
668 struct posix_acl *dacl = NULL, *pacl = NULL;
669
670 P9_DPRINTK(P9_DEBUG_VFS,
671 " %lu,%s mode: %x MAJOR: %u MINOR: %u\n", dir->i_ino,
672 dentry->d_name.name, omode, MAJOR(rdev), MINOR(rdev));
673
674 if (!new_valid_dev(rdev))
675 return -EINVAL;
676
677 v9ses = v9fs_inode2v9ses(dir);
678 dir_dentry = v9fs_dentry_from_dir_inode(dir);
679 dfid = v9fs_fid_lookup(dir_dentry);
680 if (IS_ERR(dfid)) {
681 err = PTR_ERR(dfid);
682 P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
683 dfid = NULL;
684 goto error;
685 }
686
687 gid = v9fs_get_fsgid_for_create(dir);
688 mode = omode;
689 /* Update mode based on ACL value */
690 err = v9fs_acl_mode(dir, &mode, &dacl, &pacl);
691 if (err) {
692 P9_DPRINTK(P9_DEBUG_VFS,
693 "Failed to get acl values in mknod %d\n", err);
694 goto error;
695 }
696 name = (char *) dentry->d_name.name;
697
698 err = p9_client_mknod_dotl(dfid, name, mode, rdev, gid, &qid);
699 if (err < 0)
700 goto error;
701
702 /* instantiate inode and assign the unopened fid to the dentry */
703 if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
704 fid = p9_client_walk(dfid, 1, &name, 1);
705 if (IS_ERR(fid)) {
706 err = PTR_ERR(fid);
707 P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
708 err);
709 fid = NULL;
710 goto error;
711 }
712
713 inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
714 if (IS_ERR(inode)) {
715 err = PTR_ERR(inode);
716 P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
717 err);
718 goto error;
719 }
720 d_instantiate(dentry, inode);
721 err = v9fs_fid_add(dentry, fid);
722 if (err < 0)
723 goto error;
724 fid = NULL;
725 } else {
726 /*
727 * Not in cached mode. No need to populate inode with stat.
 728 * The socket syscall returns an fd, so we still need to instantiate the dentry.
729 */
730 inode = v9fs_get_inode(dir->i_sb, mode);
731 if (IS_ERR(inode)) {
732 err = PTR_ERR(inode);
733 goto error;
734 }
735 d_instantiate(dentry, inode);
736 }
737 /* Now set the ACL based on the default value */
738 v9fs_set_create_acl(dentry, dacl, pacl);
739error:
740 if (fid)
741 p9_client_clunk(fid);
742 return err;
743}
744
745/**
746 * v9fs_vfs_follow_link_dotl - follow a symlink path
747 * @dentry: dentry for symlink
748 * @nd: nameidata
749 *
750 */
751
752static void *
753v9fs_vfs_follow_link_dotl(struct dentry *dentry, struct nameidata *nd)
754{
755 int retval;
756 struct p9_fid *fid;
757 char *link = __getname();
758 char *target;
759
760 P9_DPRINTK(P9_DEBUG_VFS, "%s\n", dentry->d_name.name);
761
762 if (!link) {
763 link = ERR_PTR(-ENOMEM);
764 goto ndset;
765 }
766 fid = v9fs_fid_lookup(dentry);
767 if (IS_ERR(fid)) {
768 __putname(link);
769 link = ERR_PTR(PTR_ERR(fid));
770 goto ndset;
771 }
772 retval = p9_client_readlink(fid, &target);
773 if (!retval) {
774 strcpy(link, target);
775 kfree(target);
776 goto ndset;
777 }
778 __putname(link);
779 link = ERR_PTR(retval);
780ndset:
781 nd_set_link(nd, link);
782 return NULL;
783}
784
785const struct inode_operations v9fs_dir_inode_operations_dotl = {
786 .create = v9fs_vfs_create_dotl,
787 .lookup = v9fs_vfs_lookup,
788 .link = v9fs_vfs_link_dotl,
789 .symlink = v9fs_vfs_symlink_dotl,
790 .unlink = v9fs_vfs_unlink,
791 .mkdir = v9fs_vfs_mkdir_dotl,
792 .rmdir = v9fs_vfs_rmdir,
793 .mknod = v9fs_vfs_mknod_dotl,
794 .rename = v9fs_vfs_rename,
795 .getattr = v9fs_vfs_getattr_dotl,
796 .setattr = v9fs_vfs_setattr_dotl,
797 .setxattr = generic_setxattr,
798 .getxattr = generic_getxattr,
799 .removexattr = generic_removexattr,
800 .listxattr = v9fs_listxattr,
801 .check_acl = v9fs_check_acl,
802};
803
804const struct inode_operations v9fs_file_inode_operations_dotl = {
805 .getattr = v9fs_vfs_getattr_dotl,
806 .setattr = v9fs_vfs_setattr_dotl,
807 .setxattr = generic_setxattr,
808 .getxattr = generic_getxattr,
809 .removexattr = generic_removexattr,
810 .listxattr = v9fs_listxattr,
811 .check_acl = v9fs_check_acl,
812};
813
814const struct inode_operations v9fs_symlink_inode_operations_dotl = {
815 .readlink = generic_readlink,
816 .follow_link = v9fs_vfs_follow_link_dotl,
817 .put_link = v9fs_vfs_put_link,
818 .getattr = v9fs_vfs_getattr_dotl,
819 .setattr = v9fs_vfs_setattr_dotl,
820 .setxattr = generic_setxattr,
821 .getxattr = generic_getxattr,
822 .removexattr = generic_removexattr,
823 .listxattr = v9fs_listxattr,
824};
diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c
index 43ec7df84336..d288773871b3 100644
--- a/fs/9p/xattr.c
+++ b/fs/9p/xattr.c
@@ -133,7 +133,7 @@ int v9fs_xattr_set(struct dentry *dentry, const char *name,
133 "p9_client_xattrcreate failed %d\n", retval); 133 "p9_client_xattrcreate failed %d\n", retval);
134 goto error; 134 goto error;
135 } 135 }
136 msize = fid->clnt->msize;; 136 msize = fid->clnt->msize;
137 while (value_len) { 137 while (value_len) {
138 if (value_len > (msize - P9_IOHDRSZ)) 138 if (value_len > (msize - P9_IOHDRSZ))
139 write_count = msize - P9_IOHDRSZ; 139 write_count = msize - P9_IOHDRSZ;
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index 2709b34206ab..47cda410b548 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -28,21 +28,30 @@
28 28
29typedef struct ext2_dir_entry_2 ext2_dirent; 29typedef struct ext2_dir_entry_2 ext2_dirent;
30 30
31/*
32 * Tests against MAX_REC_LEN etc were put in place for 64k block
33 * sizes; if that is not possible on this arch, we can skip
34 * those tests and speed things up.
35 */
31static inline unsigned ext2_rec_len_from_disk(__le16 dlen) 36static inline unsigned ext2_rec_len_from_disk(__le16 dlen)
32{ 37{
33 unsigned len = le16_to_cpu(dlen); 38 unsigned len = le16_to_cpu(dlen);
34 39
40#if (PAGE_CACHE_SIZE >= 65536)
35 if (len == EXT2_MAX_REC_LEN) 41 if (len == EXT2_MAX_REC_LEN)
36 return 1 << 16; 42 return 1 << 16;
43#endif
37 return len; 44 return len;
38} 45}
39 46
40static inline __le16 ext2_rec_len_to_disk(unsigned len) 47static inline __le16 ext2_rec_len_to_disk(unsigned len)
41{ 48{
49#if (PAGE_CACHE_SIZE >= 65536)
42 if (len == (1 << 16)) 50 if (len == (1 << 16))
43 return cpu_to_le16(EXT2_MAX_REC_LEN); 51 return cpu_to_le16(EXT2_MAX_REC_LEN);
44 else 52 else
45 BUG_ON(len > (1 << 16)); 53 BUG_ON(len > (1 << 16));
54#endif
46 return cpu_to_le16(len); 55 return cpu_to_le16(len);
47} 56}
48 57
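/*
 * Hedged user-space model of the pair above (not part of the patch): it
 * checks that the on-disk encoding round-trips, assuming
 * EXT2_MAX_REC_LEN == (1 << 16) - 1 and a page size of at least 64KiB so
 * the special case is compiled in; endianness handling is omitted.
 */
#include <assert.h>
#include <stdint.h>

#define EXT2_MAX_REC_LEN ((1 << 16) - 1)

static unsigned rec_len_from_disk(uint16_t dlen)
{
	return dlen == EXT2_MAX_REC_LEN ? 1 << 16 : dlen;
}

static uint16_t rec_len_to_disk(unsigned len)
{
	return len == (1 << 16) ? EXT2_MAX_REC_LEN : (uint16_t)len;
}

int main(void)
{
	assert(rec_len_from_disk(rec_len_to_disk(1 << 16)) == 1 << 16);
	assert(rec_len_from_disk(rec_len_to_disk(4096)) == 4096);
	return 0;
}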
@@ -129,15 +138,15 @@ static void ext2_check_page(struct page *page, int quiet)
129 p = (ext2_dirent *)(kaddr + offs); 138 p = (ext2_dirent *)(kaddr + offs);
130 rec_len = ext2_rec_len_from_disk(p->rec_len); 139 rec_len = ext2_rec_len_from_disk(p->rec_len);
131 140
132 if (rec_len < EXT2_DIR_REC_LEN(1)) 141 if (unlikely(rec_len < EXT2_DIR_REC_LEN(1)))
133 goto Eshort; 142 goto Eshort;
134 if (rec_len & 3) 143 if (unlikely(rec_len & 3))
135 goto Ealign; 144 goto Ealign;
136 if (rec_len < EXT2_DIR_REC_LEN(p->name_len)) 145 if (unlikely(rec_len < EXT2_DIR_REC_LEN(p->name_len)))
137 goto Enamelen; 146 goto Enamelen;
138 if (((offs + rec_len - 1) ^ offs) & ~(chunk_size-1)) 147 if (unlikely(((offs + rec_len - 1) ^ offs) & ~(chunk_size-1)))
139 goto Espan; 148 goto Espan;
140 if (le32_to_cpu(p->inode) > max_inumber) 149 if (unlikely(le32_to_cpu(p->inode) > max_inumber))
141 goto Einumber; 150 goto Einumber;
142 } 151 }
143 if (offs != limit) 152 if (offs != limit)
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index f8aecd2e3297..2e1d8341d827 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -67,7 +67,7 @@ static struct dentry *ext2_lookup(struct inode * dir, struct dentry *dentry, str
67 inode = NULL; 67 inode = NULL;
68 if (ino) { 68 if (ino) {
69 inode = ext2_iget(dir->i_sb, ino); 69 inode = ext2_iget(dir->i_sb, ino);
70 if (unlikely(IS_ERR(inode))) { 70 if (IS_ERR(inode)) {
71 if (PTR_ERR(inode) == -ESTALE) { 71 if (PTR_ERR(inode) == -ESTALE) {
72 ext2_error(dir->i_sb, __func__, 72 ext2_error(dir->i_sb, __func__,
73 "deleted inode referenced: %lu", 73 "deleted inode referenced: %lu",
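/*
 * Hedged note on the hunk above: the unlikely() wrapper is dropped because
 * IS_ERR() already contains the hint. For reference, the err.h definition
 * of the era reads roughly:
 */
static inline long __must_check IS_ERR(const void *ptr)
{
	return IS_ERR_VALUE((unsigned long)ptr);	/* IS_ERR_VALUE() wraps unlikely() */
}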
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index e0c6380ff992..7731695e65d9 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -43,9 +43,10 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data);
43static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf); 43static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf);
44static int ext2_sync_fs(struct super_block *sb, int wait); 44static int ext2_sync_fs(struct super_block *sb, int wait);
45 45
46void ext2_error (struct super_block * sb, const char * function, 46void ext2_error(struct super_block *sb, const char *function,
47 const char * fmt, ...) 47 const char *fmt, ...)
48{ 48{
49 struct va_format vaf;
49 va_list args; 50 va_list args;
50 struct ext2_sb_info *sbi = EXT2_SB(sb); 51 struct ext2_sb_info *sbi = EXT2_SB(sb);
51 struct ext2_super_block *es = sbi->s_es; 52 struct ext2_super_block *es = sbi->s_es;
@@ -59,9 +60,13 @@ void ext2_error (struct super_block * sb, const char * function,
59 } 60 }
60 61
61 va_start(args, fmt); 62 va_start(args, fmt);
62 printk(KERN_CRIT "EXT2-fs (%s): error: %s: ", sb->s_id, function); 63
63 vprintk(fmt, args); 64 vaf.fmt = fmt;
64 printk("\n"); 65 vaf.va = &args;
66
67 printk(KERN_CRIT "EXT2-fs (%s): error: %s: %pV\n",
68 sb->s_id, function, &vaf);
69
65 va_end(args); 70 va_end(args);
66 71
67 if (test_opt(sb, ERRORS_PANIC)) 72 if (test_opt(sb, ERRORS_PANIC))
@@ -76,12 +81,16 @@ void ext2_error (struct super_block * sb, const char * function,
76void ext2_msg(struct super_block *sb, const char *prefix, 81void ext2_msg(struct super_block *sb, const char *prefix,
77 const char *fmt, ...) 82 const char *fmt, ...)
78{ 83{
84 struct va_format vaf;
79 va_list args; 85 va_list args;
80 86
81 va_start(args, fmt); 87 va_start(args, fmt);
82 printk("%sEXT2-fs (%s): ", prefix, sb->s_id); 88
83 vprintk(fmt, args); 89 vaf.fmt = fmt;
84 printk("\n"); 90 vaf.va = &args;
91
92 printk("%sEXT2-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
93
85 va_end(args); 94 va_end(args);
86} 95}
87 96
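/*
 * Minimal sketch of the %pV idiom adopted above (illustrative, not part of
 * the patch): wrapping the caller's fmt/args in struct va_format lets a
 * single printk emit the whole message atomically, where the old
 * printk + vprintk + printk triple could interleave with output from
 * other CPUs.
 */
static void example_msg(const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_INFO "example: %pV\n", &vaf);
	va_end(args);
}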
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index f84700be3274..c2e4dce984d2 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -199,14 +199,6 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_get",
199 goto found; 199 goto found;
200 entry = next; 200 entry = next;
201 } 201 }
202 /* Check the remaining name entries */
203 while (!IS_LAST_ENTRY(entry)) {
204 struct ext2_xattr_entry *next =
205 EXT2_XATTR_NEXT(entry);
206 if ((char *)next >= end)
207 goto bad_block;
208 entry = next;
209 }
210 if (ext2_xattr_cache_insert(bh)) 202 if (ext2_xattr_cache_insert(bh))
211 ea_idebug(inode, "cache insert failed"); 203 ea_idebug(inode, "cache insert failed");
212 error = -ENODATA; 204 error = -ENODATA;
@@ -355,7 +347,7 @@ static void ext2_xattr_update_super_block(struct super_block *sb)
355/* 347/*
356 * ext2_xattr_set() 348 * ext2_xattr_set()
357 * 349 *
358 * Create, replace or remove an extended attribute for this inode. Buffer 350 * Create, replace or remove an extended attribute for this inode. Value
359 * is NULL to remove an existing extended attribute, and non-NULL to 351 * is NULL to remove an existing extended attribute, and non-NULL to
360 * either replace an existing extended attribute, or create a new extended 352 * either replace an existing extended attribute, or create a new extended
361 * attribute. The flags XATTR_REPLACE and XATTR_CREATE 353 * attribute. The flags XATTR_REPLACE and XATTR_CREATE
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index b3db22649426..045995c8ce5a 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -20,6 +20,7 @@
20#include <linux/ext3_jbd.h> 20#include <linux/ext3_jbd.h>
21#include <linux/quotaops.h> 21#include <linux/quotaops.h>
22#include <linux/buffer_head.h> 22#include <linux/buffer_head.h>
23#include <linux/blkdev.h>
23 24
24/* 25/*
25 * balloc.c contains the blocks allocation and deallocation routines 26 * balloc.c contains the blocks allocation and deallocation routines
@@ -39,6 +40,21 @@
39 40
40#define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1) 41#define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1)
41 42
43/*
44 * Calculate the block group number and offset, given a block number
45 */
46static void ext3_get_group_no_and_offset(struct super_block *sb,
47 ext3_fsblk_t blocknr, unsigned long *blockgrpp, ext3_grpblk_t *offsetp)
48{
49 struct ext3_super_block *es = EXT3_SB(sb)->s_es;
50
51 blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
52 if (offsetp)
53 *offsetp = blocknr % EXT3_BLOCKS_PER_GROUP(sb);
54 if (blockgrpp)
55 *blockgrpp = blocknr / EXT3_BLOCKS_PER_GROUP(sb);
56}
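/*
 * Worked example (illustrative): assuming 32768 blocks per group and
 * s_first_data_block == 1, the helper above maps block 100000 to
 *
 *	(100000 - 1) / 32768 == 3	(group)
 *	(100000 - 1) % 32768 == 1695	(offset within the group)
 */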
57
42/** 58/**
43 * ext3_get_group_desc() -- load group descriptor from disk 59 * ext3_get_group_desc() -- load group descriptor from disk
44 * @sb: super block 60 * @sb: super block
@@ -1885,3 +1901,253 @@ unsigned long ext3_bg_num_gdb(struct super_block *sb, int group)
1885 return ext3_bg_num_gdb_meta(sb,group); 1901 return ext3_bg_num_gdb_meta(sb,group);
1886 1902
1887} 1903}
1904
1905/**
1906 * ext3_trim_all_free -- function to trim all free space in alloc. group
1907 * @sb: super block for file system
1908 * @group: allocation group to trim
1909 * @start: first group block to examine
1910 * @max: last group block to examine
1912 * @minblocks: minimum extent block count
1913 *
 1914 * ext3_trim_all_free walks through the group's block bitmap searching for
 1915 * free blocks. When a free block is found, it claims it and as many
 1916 * consecutive free blocks as possible to build the biggest free extent,
 1917 * stopping at the first used block. It then issues a TRIM command on that
 1918 * extent and frees it in the block bitmap, until the whole group is scanned.
1919 */
1920ext3_grpblk_t ext3_trim_all_free(struct super_block *sb, unsigned int group,
1921 ext3_grpblk_t start, ext3_grpblk_t max,
1922 ext3_grpblk_t minblocks)
1923{
1924 handle_t *handle;
1925 ext3_grpblk_t next, free_blocks, bit, freed, count = 0;
1926 ext3_fsblk_t discard_block;
1927 struct ext3_sb_info *sbi;
1928 struct buffer_head *gdp_bh, *bitmap_bh = NULL;
1929 struct ext3_group_desc *gdp;
1930 int err = 0, ret = 0;
1931
1932 /*
1933 * We will update one block bitmap, and one group descriptor
1934 */
1935 handle = ext3_journal_start_sb(sb, 2);
1936 if (IS_ERR(handle))
1937 return PTR_ERR(handle);
1938
1939 bitmap_bh = read_block_bitmap(sb, group);
1940 if (!bitmap_bh) {
1941 err = -EIO;
1942 goto err_out;
1943 }
1944
1945 BUFFER_TRACE(bitmap_bh, "getting undo access");
1946 err = ext3_journal_get_undo_access(handle, bitmap_bh);
1947 if (err)
1948 goto err_out;
1949
1950 gdp = ext3_get_group_desc(sb, group, &gdp_bh);
1951 if (!gdp) {
1952 err = -EIO;
1953 goto err_out;
1954 }
1955
1956 BUFFER_TRACE(gdp_bh, "get_write_access");
1957 err = ext3_journal_get_write_access(handle, gdp_bh);
1958 if (err)
1959 goto err_out;
1960
1961 free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
1962 sbi = EXT3_SB(sb);
1963
1964 /* Walk through the whole group */
1965 while (start < max) {
1966 start = bitmap_search_next_usable_block(start, bitmap_bh, max);
1967 if (start < 0)
1968 break;
1969 next = start;
1970
1971 /*
1972 * Allocate contiguous free extents by setting bits in the
1973 * block bitmap
1974 */
1975 while (next < max
1976 && claim_block(sb_bgl_lock(sbi, group),
1977 next, bitmap_bh)) {
1978 next++;
1979 }
1980
1981 /* We did not claim any blocks */
1982 if (next == start)
1983 continue;
1984
1985 discard_block = (ext3_fsblk_t)start +
1986 ext3_group_first_block_no(sb, group);
1987
1988 /* Update counters */
1989 spin_lock(sb_bgl_lock(sbi, group));
1990 le16_add_cpu(&gdp->bg_free_blocks_count, start - next);
1991 spin_unlock(sb_bgl_lock(sbi, group));
1992 percpu_counter_sub(&sbi->s_freeblocks_counter, next - start);
1993
1994 /* Do not issue a TRIM on extents smaller than minblocks */
1995 if ((next - start) < minblocks)
1996 goto free_extent;
1997
1998 /* Send the TRIM command down to the device */
1999 err = sb_issue_discard(sb, discard_block, next - start,
2000 GFP_NOFS, 0);
2001 count += (next - start);
2002free_extent:
2003 freed = 0;
2004
2005 /*
2006 * Clear bits in the bitmap
2007 */
2008 for (bit = start; bit < next; bit++) {
2009 BUFFER_TRACE(bitmap_bh, "clear bit");
2010 if (!ext3_clear_bit_atomic(sb_bgl_lock(sbi, group),
2011 bit, bitmap_bh->b_data)) {
2012 ext3_error(sb, __func__,
2013 "bit already cleared for block "E3FSBLK,
2014 (unsigned long)bit);
2015 BUFFER_TRACE(bitmap_bh, "bit already cleared");
2016 } else {
2017 freed++;
2018 }
2019 }
2020
 2021 /* Update counters */
2022 spin_lock(sb_bgl_lock(sbi, group));
2023 le16_add_cpu(&gdp->bg_free_blocks_count, freed);
2024 spin_unlock(sb_bgl_lock(sbi, group));
2025 percpu_counter_add(&sbi->s_freeblocks_counter, freed);
2026
2027 start = next;
2028 if (err < 0) {
2029 if (err != -EOPNOTSUPP)
 2030 ext3_warning(sb, __func__, "Discard command "
 2031 "returned error %d", err);
2032 break;
2033 }
2034
2035 if (fatal_signal_pending(current)) {
2036 err = -ERESTARTSYS;
2037 break;
2038 }
2039
2040 cond_resched();
2041
2042 /* No more suitable extents */
2043 if ((free_blocks - count) < minblocks)
2044 break;
2045 }
2046
2047 /* We dirtied the bitmap block */
2048 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
2049 ret = ext3_journal_dirty_metadata(handle, bitmap_bh);
2050 if (!err)
2051 err = ret;
2052
2053 /* And the group descriptor block */
2054 BUFFER_TRACE(gdp_bh, "dirtied group descriptor block");
2055 ret = ext3_journal_dirty_metadata(handle, gdp_bh);
2056 if (!err)
2057 err = ret;
2058
2059 ext3_debug("trimmed %d blocks in the group %d\n",
2060 count, group);
2061
2062err_out:
2063 if (err)
2064 count = err;
2065 ext3_journal_stop(handle);
2066 brelse(bitmap_bh);
2067
2068 return count;
2069}
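/*
 * Worked example (illustrative): with minblocks == 8, a run of 5 free
 * blocks is claimed, jumps to free_extent without a TRIM (too short) and
 * is released again; a run of 32 free blocks is claimed, passed to
 * sb_issue_discard(), added to count, and then released in the bitmap.
 */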
2070
2071/**
 2072 * ext3_trim_fs() -- handler for the FITRIM ioctl
 2073 * @sb: superblock for filesystem
 2074 * @range: fstrim_range structure describing the region to trim; start,
 2075 * len and minlen are in bytes, and len is updated on return with
 2076 * the number of bytes trimmed
 2077 *
 2078 * ext3_trim_fs goes through all allocation groups containing bytes from
 2079 * start to start+len. For each such group, ext3_trim_all_free() is
 2080 * invoked to trim all free space.
2081 */
2082int ext3_trim_fs(struct super_block *sb, struct fstrim_range *range)
2083{
2084 ext3_grpblk_t last_block, first_block, free_blocks;
2085 unsigned long first_group, last_group;
2086 unsigned long group, ngroups;
2087 struct ext3_group_desc *gdp;
2088 struct ext3_super_block *es = EXT3_SB(sb)->s_es;
2089 uint64_t start, len, minlen, trimmed;
2090 ext3_fsblk_t max_blks = le32_to_cpu(es->s_blocks_count);
2091 int ret = 0;
2092
2093 start = range->start >> sb->s_blocksize_bits;
2094 len = range->len >> sb->s_blocksize_bits;
2095 minlen = range->minlen >> sb->s_blocksize_bits;
2096 trimmed = 0;
2097
2098 if (unlikely(minlen > EXT3_BLOCKS_PER_GROUP(sb)))
2099 return -EINVAL;
2100 if (start >= max_blks)
2101 goto out;
2102 if (start < le32_to_cpu(es->s_first_data_block)) {
2103 len -= le32_to_cpu(es->s_first_data_block) - start;
2104 start = le32_to_cpu(es->s_first_data_block);
2105 }
2106 if (start + len > max_blks)
2107 len = max_blks - start;
2108
2109 ngroups = EXT3_SB(sb)->s_groups_count;
2110 smp_rmb();
2111
2112 /* Determine first and last group to examine based on start and len */
2113 ext3_get_group_no_and_offset(sb, (ext3_fsblk_t) start,
2114 &first_group, &first_block);
2115 ext3_get_group_no_and_offset(sb, (ext3_fsblk_t) (start + len),
2116 &last_group, &last_block);
2117 last_group = (last_group > ngroups - 1) ? ngroups - 1 : last_group;
2118 last_block = EXT3_BLOCKS_PER_GROUP(sb);
2119
2120 if (first_group > last_group)
2121 return -EINVAL;
2122
2123 for (group = first_group; group <= last_group; group++) {
2124 gdp = ext3_get_group_desc(sb, group, NULL);
2125 if (!gdp)
2126 break;
2127
2128 free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
2129 if (free_blocks < minlen)
2130 continue;
2131
2132 if (len >= EXT3_BLOCKS_PER_GROUP(sb))
2133 len -= (EXT3_BLOCKS_PER_GROUP(sb) - first_block);
2134 else
2135 last_block = first_block + len;
2136
2137 ret = ext3_trim_all_free(sb, group, first_block,
2138 last_block, minlen);
2139 if (ret < 0)
2140 break;
2141
2142 trimmed += ret;
2143 first_block = 0;
2144 }
2145
2146 if (ret >= 0)
2147 ret = 0;
2148
2149out:
2150 range->len = trimmed * sb->s_blocksize;
2151
2152 return ret;
2153}
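/*
 * Hedged user-space example: ext3_trim_fs() is reached through the FITRIM
 * ioctl wired up in fs/ext3/ioctl.c below. It requires CAP_SYS_ADMIN and a
 * kernel carrying this patch; all struct fstrim_range fields are in bytes,
 * and len is updated on return with the number of bytes trimmed.
 */
#include <fcntl.h>
#include <linux/fs.h>		/* FITRIM, struct fstrim_range */
#include <stdio.h>
#include <sys/ioctl.h>

int main(void)
{
	struct fstrim_range range = {
		.start = 0,
		.len = ~0ULL,	/* whole filesystem */
		.minlen = 4096,	/* skip extents shorter than one 4K block */
	};
	int fd = open("/mnt", O_RDONLY);

	if (fd < 0 || ioctl(fd, FITRIM, &range) < 0) {
		perror("FITRIM");
		return 1;
	}
	printf("trimmed %llu bytes\n", (unsigned long long)range.len);
	return 0;
}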
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
index e2e72c367cf6..34f0a072b935 100644
--- a/fs/ext3/dir.c
+++ b/fs/ext3/dir.c
@@ -69,25 +69,26 @@ int ext3_check_dir_entry (const char * function, struct inode * dir,
69 const char * error_msg = NULL; 69 const char * error_msg = NULL;
70 const int rlen = ext3_rec_len_from_disk(de->rec_len); 70 const int rlen = ext3_rec_len_from_disk(de->rec_len);
71 71
72 if (rlen < EXT3_DIR_REC_LEN(1)) 72 if (unlikely(rlen < EXT3_DIR_REC_LEN(1)))
73 error_msg = "rec_len is smaller than minimal"; 73 error_msg = "rec_len is smaller than minimal";
74 else if (rlen % 4 != 0) 74 else if (unlikely(rlen % 4 != 0))
75 error_msg = "rec_len % 4 != 0"; 75 error_msg = "rec_len % 4 != 0";
76 else if (rlen < EXT3_DIR_REC_LEN(de->name_len)) 76 else if (unlikely(rlen < EXT3_DIR_REC_LEN(de->name_len)))
77 error_msg = "rec_len is too small for name_len"; 77 error_msg = "rec_len is too small for name_len";
78 else if (((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize) 78 else if (unlikely((((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize)))
79 error_msg = "directory entry across blocks"; 79 error_msg = "directory entry across blocks";
80 else if (le32_to_cpu(de->inode) > 80 else if (unlikely(le32_to_cpu(de->inode) >
81 le32_to_cpu(EXT3_SB(dir->i_sb)->s_es->s_inodes_count)) 81 le32_to_cpu(EXT3_SB(dir->i_sb)->s_es->s_inodes_count)))
82 error_msg = "inode out of bounds"; 82 error_msg = "inode out of bounds";
83 83
84 if (error_msg != NULL) 84 if (unlikely(error_msg != NULL))
85 ext3_error (dir->i_sb, function, 85 ext3_error (dir->i_sb, function,
86 "bad entry in directory #%lu: %s - " 86 "bad entry in directory #%lu: %s - "
87 "offset=%lu, inode=%lu, rec_len=%d, name_len=%d", 87 "offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
88 dir->i_ino, error_msg, offset, 88 dir->i_ino, error_msg, offset,
89 (unsigned long) le32_to_cpu(de->inode), 89 (unsigned long) le32_to_cpu(de->inode),
90 rlen, de->name_len); 90 rlen, de->name_len);
91
91 return error_msg == NULL ? 1 : 0; 92 return error_msg == NULL ? 1 : 0;
92} 93}
93 94
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index a9580617edd2..ae94f6d949f5 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -2145,13 +2145,15 @@ static void ext3_clear_blocks(handle_t *handle, struct inode *inode,
2145 if (try_to_extend_transaction(handle, inode)) { 2145 if (try_to_extend_transaction(handle, inode)) {
2146 if (bh) { 2146 if (bh) {
2147 BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata"); 2147 BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
2148 ext3_journal_dirty_metadata(handle, bh); 2148 if (ext3_journal_dirty_metadata(handle, bh))
2149 return;
2149 } 2150 }
2150 ext3_mark_inode_dirty(handle, inode); 2151 ext3_mark_inode_dirty(handle, inode);
2151 truncate_restart_transaction(handle, inode); 2152 truncate_restart_transaction(handle, inode);
2152 if (bh) { 2153 if (bh) {
2153 BUFFER_TRACE(bh, "retaking write access"); 2154 BUFFER_TRACE(bh, "retaking write access");
2154 ext3_journal_get_write_access(handle, bh); 2155 if (ext3_journal_get_write_access(handle, bh))
2156 return;
2155 } 2157 }
2156 } 2158 }
2157 2159
diff --git a/fs/ext3/ioctl.c b/fs/ext3/ioctl.c
index 88974814783a..fc080dd561f7 100644
--- a/fs/ext3/ioctl.c
+++ b/fs/ext3/ioctl.c
@@ -276,7 +276,29 @@ group_add_out:
276 mnt_drop_write(filp->f_path.mnt); 276 mnt_drop_write(filp->f_path.mnt);
277 return err; 277 return err;
278 } 278 }
279 case FITRIM: {
279 280
281 struct super_block *sb = inode->i_sb;
282 struct fstrim_range range;
283 int ret = 0;
284
285 if (!capable(CAP_SYS_ADMIN))
286 return -EPERM;
287
288 if (copy_from_user(&range, (struct fstrim_range *)arg,
289 sizeof(range)))
290 return -EFAULT;
291
292 ret = ext3_trim_fs(sb, &range);
293 if (ret < 0)
294 return ret;
295
296 if (copy_to_user((struct fstrim_range *)arg, &range,
297 sizeof(range)))
298 return -EFAULT;
299
300 return 0;
301 }
280 302
281 default: 303 default:
282 return -ENOTTY; 304 return -ENOTTY;
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index bce9dce639b8..b27ba71810ec 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -858,6 +858,7 @@ static struct buffer_head *ext3_find_entry(struct inode *dir,
858 struct buffer_head * bh_use[NAMEI_RA_SIZE]; 858 struct buffer_head * bh_use[NAMEI_RA_SIZE];
859 struct buffer_head * bh, *ret = NULL; 859 struct buffer_head * bh, *ret = NULL;
860 unsigned long start, block, b; 860 unsigned long start, block, b;
861 const u8 *name = entry->name;
861 int ra_max = 0; /* Number of bh's in the readahead 862 int ra_max = 0; /* Number of bh's in the readahead
862 buffer, bh_use[] */ 863 buffer, bh_use[] */
863 int ra_ptr = 0; /* Current index into readahead 864 int ra_ptr = 0; /* Current index into readahead
@@ -871,6 +872,16 @@ static struct buffer_head *ext3_find_entry(struct inode *dir,
871 namelen = entry->len; 872 namelen = entry->len;
872 if (namelen > EXT3_NAME_LEN) 873 if (namelen > EXT3_NAME_LEN)
873 return NULL; 874 return NULL;
875 if ((namelen <= 2) && (name[0] == '.') &&
876 (name[1] == '.' || name[1] == 0)) {
877 /*
878 * "." or ".." will only be in the first block
879 * NFS may look up ".."; "." should be handled by the VFS
880 */
881 block = start = 0;
882 nblocks = 1;
883 goto restart;
884 }
874 if (is_dx(dir)) { 885 if (is_dx(dir)) {
875 bh = ext3_dx_find_entry(dir, entry, res_dir, &err); 886 bh = ext3_dx_find_entry(dir, entry, res_dir, &err);
876 /* 887 /*
@@ -961,55 +972,35 @@ static struct buffer_head * ext3_dx_find_entry(struct inode *dir,
961 struct qstr *entry, struct ext3_dir_entry_2 **res_dir, 972 struct qstr *entry, struct ext3_dir_entry_2 **res_dir,
962 int *err) 973 int *err)
963{ 974{
964 struct super_block * sb; 975 struct super_block *sb = dir->i_sb;
965 struct dx_hash_info hinfo; 976 struct dx_hash_info hinfo;
966 u32 hash;
967 struct dx_frame frames[2], *frame; 977 struct dx_frame frames[2], *frame;
968 struct ext3_dir_entry_2 *de, *top;
969 struct buffer_head *bh; 978 struct buffer_head *bh;
970 unsigned long block; 979 unsigned long block;
971 int retval; 980 int retval;
972 int namelen = entry->len;
973 const u8 *name = entry->name;
974 981
975 sb = dir->i_sb; 982 if (!(frame = dx_probe(entry, dir, &hinfo, frames, err)))
976 /* NFS may look up ".." - look at dx_root directory block */ 983 return NULL;
977 if (namelen > 2 || name[0] != '.'|| (namelen == 2 && name[1] != '.')) {
978 if (!(frame = dx_probe(entry, dir, &hinfo, frames, err)))
979 return NULL;
980 } else {
981 frame = frames;
982 frame->bh = NULL; /* for dx_release() */
983 frame->at = (struct dx_entry *)frames; /* hack for zero entry*/
984 dx_set_block(frame->at, 0); /* dx_root block is 0 */
985 }
986 hash = hinfo.hash;
987 do { 984 do {
988 block = dx_get_block(frame->at); 985 block = dx_get_block(frame->at);
989 if (!(bh = ext3_bread (NULL,dir, block, 0, err))) 986 if (!(bh = ext3_bread (NULL,dir, block, 0, err)))
990 goto errout; 987 goto errout;
991 de = (struct ext3_dir_entry_2 *) bh->b_data;
992 top = (struct ext3_dir_entry_2 *) ((char *) de + sb->s_blocksize -
993 EXT3_DIR_REC_LEN(0));
994 for (; de < top; de = ext3_next_entry(de)) {
995 int off = (block << EXT3_BLOCK_SIZE_BITS(sb))
996 + ((char *) de - bh->b_data);
997
998 if (!ext3_check_dir_entry(__func__, dir, de, bh, off)) {
999 brelse(bh);
1000 *err = ERR_BAD_DX_DIR;
1001 goto errout;
1002 }
1003 988
1004 if (ext3_match(namelen, name, de)) { 989 retval = search_dirblock(bh, dir, entry,
1005 *res_dir = de; 990 block << EXT3_BLOCK_SIZE_BITS(sb),
1006 dx_release(frames); 991 res_dir);
1007 return bh; 992 if (retval == 1) {
1008 } 993 dx_release(frames);
994 return bh;
1009 } 995 }
1010 brelse (bh); 996 brelse(bh);
997 if (retval == -1) {
998 *err = ERR_BAD_DX_DIR;
999 goto errout;
1000 }
1001
1011 /* Check to see if we should continue to search */ 1002 /* Check to see if we should continue to search */
1012 retval = ext3_htree_next_block(dir, hash, frame, 1003 retval = ext3_htree_next_block(dir, hinfo.hash, frame,
1013 frames, NULL); 1004 frames, NULL);
1014 if (retval < 0) { 1005 if (retval < 0) {
1015 ext3_warning(sb, __func__, 1006 ext3_warning(sb, __func__,
@@ -1047,7 +1038,7 @@ static struct dentry *ext3_lookup(struct inode * dir, struct dentry *dentry, str
1047 return ERR_PTR(-EIO); 1038 return ERR_PTR(-EIO);
1048 } 1039 }
1049 inode = ext3_iget(dir->i_sb, ino); 1040 inode = ext3_iget(dir->i_sb, ino);
1050 if (unlikely(IS_ERR(inode))) { 1041 if (IS_ERR(inode)) {
1051 if (PTR_ERR(inode) == -ESTALE) { 1042 if (PTR_ERR(inode) == -ESTALE) {
1052 ext3_error(dir->i_sb, __func__, 1043 ext3_error(dir->i_sb, __func__,
1053 "deleted inode referenced: %lu", 1044 "deleted inode referenced: %lu",
@@ -1607,7 +1598,9 @@ static int ext3_dx_add_entry(handle_t *handle, struct dentry *dentry,
1607 if (err) 1598 if (err)
1608 goto journal_error; 1599 goto journal_error;
1609 } 1600 }
1610 ext3_journal_dirty_metadata(handle, frames[0].bh); 1601 err = ext3_journal_dirty_metadata(handle, frames[0].bh);
1602 if (err)
1603 goto journal_error;
1611 } 1604 }
1612 de = do_split(handle, dir, &bh, frame, &hinfo, &err); 1605 de = do_split(handle, dir, &bh, frame, &hinfo, &err);
1613 if (!de) 1606 if (!de)
@@ -1644,8 +1637,13 @@ static int ext3_delete_entry (handle_t *handle,
1644 if (!ext3_check_dir_entry("ext3_delete_entry", dir, de, bh, i)) 1637 if (!ext3_check_dir_entry("ext3_delete_entry", dir, de, bh, i))
1645 return -EIO; 1638 return -EIO;
1646 if (de == de_del) { 1639 if (de == de_del) {
1640 int err;
1641
1647 BUFFER_TRACE(bh, "get_write_access"); 1642 BUFFER_TRACE(bh, "get_write_access");
1648 ext3_journal_get_write_access(handle, bh); 1643 err = ext3_journal_get_write_access(handle, bh);
1644 if (err)
1645 goto journal_error;
1646
1649 if (pde) 1647 if (pde)
1650 pde->rec_len = ext3_rec_len_to_disk( 1648 pde->rec_len = ext3_rec_len_to_disk(
1651 ext3_rec_len_from_disk(pde->rec_len) + 1649 ext3_rec_len_from_disk(pde->rec_len) +
@@ -1654,7 +1652,12 @@ static int ext3_delete_entry (handle_t *handle,
1654 de->inode = 0; 1652 de->inode = 0;
1655 dir->i_version++; 1653 dir->i_version++;
1656 BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata"); 1654 BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
1657 ext3_journal_dirty_metadata(handle, bh); 1655 err = ext3_journal_dirty_metadata(handle, bh);
1656 if (err) {
1657journal_error:
1658 ext3_std_error(dir->i_sb, err);
1659 return err;
1660 }
1658 return 0; 1661 return 0;
1659 } 1662 }
1660 i += ext3_rec_len_from_disk(de->rec_len); 1663 i += ext3_rec_len_from_disk(de->rec_len);
@@ -1762,7 +1765,7 @@ static int ext3_mkdir(struct inode * dir, struct dentry * dentry, int mode)
1762{ 1765{
1763 handle_t *handle; 1766 handle_t *handle;
1764 struct inode * inode; 1767 struct inode * inode;
1765 struct buffer_head * dir_block; 1768 struct buffer_head * dir_block = NULL;
1766 struct ext3_dir_entry_2 * de; 1769 struct ext3_dir_entry_2 * de;
1767 int err, retries = 0; 1770 int err, retries = 0;
1768 1771
@@ -1790,15 +1793,14 @@ retry:
1790 inode->i_fop = &ext3_dir_operations; 1793 inode->i_fop = &ext3_dir_operations;
1791 inode->i_size = EXT3_I(inode)->i_disksize = inode->i_sb->s_blocksize; 1794 inode->i_size = EXT3_I(inode)->i_disksize = inode->i_sb->s_blocksize;
1792 dir_block = ext3_bread (handle, inode, 0, 1, &err); 1795 dir_block = ext3_bread (handle, inode, 0, 1, &err);
1793 if (!dir_block) { 1796 if (!dir_block)
1794 drop_nlink(inode); /* is this nlink == 0? */ 1797 goto out_clear_inode;
1795 unlock_new_inode(inode); 1798
1796 ext3_mark_inode_dirty(handle, inode);
1797 iput (inode);
1798 goto out_stop;
1799 }
1800 BUFFER_TRACE(dir_block, "get_write_access"); 1799 BUFFER_TRACE(dir_block, "get_write_access");
1801 ext3_journal_get_write_access(handle, dir_block); 1800 err = ext3_journal_get_write_access(handle, dir_block);
1801 if (err)
1802 goto out_clear_inode;
1803
1802 de = (struct ext3_dir_entry_2 *) dir_block->b_data; 1804 de = (struct ext3_dir_entry_2 *) dir_block->b_data;
1803 de->inode = cpu_to_le32(inode->i_ino); 1805 de->inode = cpu_to_le32(inode->i_ino);
1804 de->name_len = 1; 1806 de->name_len = 1;
@@ -1814,11 +1816,16 @@ retry:
1814 ext3_set_de_type(dir->i_sb, de, S_IFDIR); 1816 ext3_set_de_type(dir->i_sb, de, S_IFDIR);
1815 inode->i_nlink = 2; 1817 inode->i_nlink = 2;
1816 BUFFER_TRACE(dir_block, "call ext3_journal_dirty_metadata"); 1818 BUFFER_TRACE(dir_block, "call ext3_journal_dirty_metadata");
1817 ext3_journal_dirty_metadata(handle, dir_block); 1819 err = ext3_journal_dirty_metadata(handle, dir_block);
1818 brelse (dir_block); 1820 if (err)
1819 ext3_mark_inode_dirty(handle, inode); 1821 goto out_clear_inode;
1820 err = ext3_add_entry (handle, dentry, inode); 1822
1823 err = ext3_mark_inode_dirty(handle, inode);
1824 if (!err)
1825 err = ext3_add_entry (handle, dentry, inode);
1826
1821 if (err) { 1827 if (err) {
1828out_clear_inode:
1822 inode->i_nlink = 0; 1829 inode->i_nlink = 0;
1823 unlock_new_inode(inode); 1830 unlock_new_inode(inode);
1824 ext3_mark_inode_dirty(handle, inode); 1831 ext3_mark_inode_dirty(handle, inode);
@@ -1827,10 +1834,14 @@ retry:
1827 } 1834 }
1828 inc_nlink(dir); 1835 inc_nlink(dir);
1829 ext3_update_dx_flag(dir); 1836 ext3_update_dx_flag(dir);
1830 ext3_mark_inode_dirty(handle, dir); 1837 err = ext3_mark_inode_dirty(handle, dir);
1838 if (err)
1839 goto out_clear_inode;
1840
1831 d_instantiate(dentry, inode); 1841 d_instantiate(dentry, inode);
1832 unlock_new_inode(inode); 1842 unlock_new_inode(inode);
1833out_stop: 1843out_stop:
1844 brelse(dir_block);
1834 ext3_journal_stop(handle); 1845 ext3_journal_stop(handle);
1835 if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries)) 1846 if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
1836 goto retry; 1847 goto retry;
@@ -2353,7 +2364,9 @@ static int ext3_rename (struct inode * old_dir, struct dentry *old_dentry,
2353 goto end_rename; 2364 goto end_rename;
2354 } else { 2365 } else {
2355 BUFFER_TRACE(new_bh, "get write access"); 2366 BUFFER_TRACE(new_bh, "get write access");
2356 ext3_journal_get_write_access(handle, new_bh); 2367 retval = ext3_journal_get_write_access(handle, new_bh);
2368 if (retval)
2369 goto journal_error;
2357 new_de->inode = cpu_to_le32(old_inode->i_ino); 2370 new_de->inode = cpu_to_le32(old_inode->i_ino);
2358 if (EXT3_HAS_INCOMPAT_FEATURE(new_dir->i_sb, 2371 if (EXT3_HAS_INCOMPAT_FEATURE(new_dir->i_sb,
2359 EXT3_FEATURE_INCOMPAT_FILETYPE)) 2372 EXT3_FEATURE_INCOMPAT_FILETYPE))
@@ -2362,7 +2375,9 @@ static int ext3_rename (struct inode * old_dir, struct dentry *old_dentry,
2362 new_dir->i_ctime = new_dir->i_mtime = CURRENT_TIME_SEC; 2375 new_dir->i_ctime = new_dir->i_mtime = CURRENT_TIME_SEC;
2363 ext3_mark_inode_dirty(handle, new_dir); 2376 ext3_mark_inode_dirty(handle, new_dir);
2364 BUFFER_TRACE(new_bh, "call ext3_journal_dirty_metadata"); 2377 BUFFER_TRACE(new_bh, "call ext3_journal_dirty_metadata");
2365 ext3_journal_dirty_metadata(handle, new_bh); 2378 retval = ext3_journal_dirty_metadata(handle, new_bh);
2379 if (retval)
2380 goto journal_error;
2366 brelse(new_bh); 2381 brelse(new_bh);
2367 new_bh = NULL; 2382 new_bh = NULL;
2368 } 2383 }
@@ -2411,10 +2426,17 @@ static int ext3_rename (struct inode * old_dir, struct dentry *old_dentry,
2411 ext3_update_dx_flag(old_dir); 2426 ext3_update_dx_flag(old_dir);
2412 if (dir_bh) { 2427 if (dir_bh) {
2413 BUFFER_TRACE(dir_bh, "get_write_access"); 2428 BUFFER_TRACE(dir_bh, "get_write_access");
2414 ext3_journal_get_write_access(handle, dir_bh); 2429 retval = ext3_journal_get_write_access(handle, dir_bh);
2430 if (retval)
2431 goto journal_error;
2415 PARENT_INO(dir_bh->b_data) = cpu_to_le32(new_dir->i_ino); 2432 PARENT_INO(dir_bh->b_data) = cpu_to_le32(new_dir->i_ino);
2416 BUFFER_TRACE(dir_bh, "call ext3_journal_dirty_metadata"); 2433 BUFFER_TRACE(dir_bh, "call ext3_journal_dirty_metadata");
2417 ext3_journal_dirty_metadata(handle, dir_bh); 2434 retval = ext3_journal_dirty_metadata(handle, dir_bh);
2435 if (retval) {
2436journal_error:
2437 ext3_std_error(new_dir->i_sb, retval);
2438 goto end_rename;
2439 }
2418 drop_nlink(old_dir); 2440 drop_nlink(old_dir);
2419 if (new_inode) { 2441 if (new_inode) {
2420 drop_nlink(new_inode); 2442 drop_nlink(new_inode);
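/*
 * Hedged summary of the pattern these namei.c hunks (and the resize.c ones
 * below) apply -- sb, bh and err stand for whatever the call site holds:
 * journal helpers can fail, so their results are now checked and funneled
 * to ext3_std_error() instead of being silently dropped.
 */
	err = ext3_journal_get_write_access(handle, bh);
	if (!err)
		err = ext3_journal_dirty_metadata(handle, bh);
	if (err)
		ext3_std_error(sb, err);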
diff --git a/fs/ext3/resize.c b/fs/ext3/resize.c
index e746d30b1232..108b142e11ed 100644
--- a/fs/ext3/resize.c
+++ b/fs/ext3/resize.c
@@ -249,7 +249,11 @@ static int setup_new_group_blocks(struct super_block *sb,
249 memcpy(gdb->b_data, sbi->s_group_desc[i]->b_data, gdb->b_size); 249 memcpy(gdb->b_data, sbi->s_group_desc[i]->b_data, gdb->b_size);
250 set_buffer_uptodate(gdb); 250 set_buffer_uptodate(gdb);
251 unlock_buffer(gdb); 251 unlock_buffer(gdb);
252 ext3_journal_dirty_metadata(handle, gdb); 252 err = ext3_journal_dirty_metadata(handle, gdb);
253 if (err) {
254 brelse(gdb);
255 goto exit_bh;
256 }
253 ext3_set_bit(bit, bh->b_data); 257 ext3_set_bit(bit, bh->b_data);
254 brelse(gdb); 258 brelse(gdb);
255 } 259 }
@@ -269,7 +273,11 @@ static int setup_new_group_blocks(struct super_block *sb,
269 err = PTR_ERR(gdb); 273 err = PTR_ERR(gdb);
270 goto exit_bh; 274 goto exit_bh;
271 } 275 }
272 ext3_journal_dirty_metadata(handle, gdb); 276 err = ext3_journal_dirty_metadata(handle, gdb);
277 if (err) {
278 brelse(gdb);
279 goto exit_bh;
280 }
273 ext3_set_bit(bit, bh->b_data); 281 ext3_set_bit(bit, bh->b_data);
274 brelse(gdb); 282 brelse(gdb);
275 } 283 }
@@ -295,7 +303,11 @@ static int setup_new_group_blocks(struct super_block *sb,
295 err = PTR_ERR(it); 303 err = PTR_ERR(it);
296 goto exit_bh; 304 goto exit_bh;
297 } 305 }
298 ext3_journal_dirty_metadata(handle, it); 306 err = ext3_journal_dirty_metadata(handle, it);
307 if (err) {
308 brelse(it);
309 goto exit_bh;
310 }
299 brelse(it); 311 brelse(it);
300 ext3_set_bit(bit, bh->b_data); 312 ext3_set_bit(bit, bh->b_data);
301 } 313 }
@@ -306,7 +318,9 @@ static int setup_new_group_blocks(struct super_block *sb,
306 318
307 mark_bitmap_end(input->blocks_count, EXT3_BLOCKS_PER_GROUP(sb), 319 mark_bitmap_end(input->blocks_count, EXT3_BLOCKS_PER_GROUP(sb),
308 bh->b_data); 320 bh->b_data);
309 ext3_journal_dirty_metadata(handle, bh); 321 err = ext3_journal_dirty_metadata(handle, bh);
322 if (err)
323 goto exit_bh;
310 brelse(bh); 324 brelse(bh);
311 325
312 /* Mark unused entries in inode bitmap used */ 326 /* Mark unused entries in inode bitmap used */
@@ -319,7 +333,7 @@ static int setup_new_group_blocks(struct super_block *sb,
319 333
320 mark_bitmap_end(EXT3_INODES_PER_GROUP(sb), EXT3_BLOCKS_PER_GROUP(sb), 334 mark_bitmap_end(EXT3_INODES_PER_GROUP(sb), EXT3_BLOCKS_PER_GROUP(sb),
321 bh->b_data); 335 bh->b_data);
322 ext3_journal_dirty_metadata(handle, bh); 336 err = ext3_journal_dirty_metadata(handle, bh);
323exit_bh: 337exit_bh:
324 brelse(bh); 338 brelse(bh);
325 339
@@ -503,12 +517,19 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
503 * reserved inode, and will become GDT blocks (primary and backup). 517 * reserved inode, and will become GDT blocks (primary and backup).
504 */ 518 */
505 data[gdb_num % EXT3_ADDR_PER_BLOCK(sb)] = 0; 519 data[gdb_num % EXT3_ADDR_PER_BLOCK(sb)] = 0;
506 ext3_journal_dirty_metadata(handle, dind); 520 err = ext3_journal_dirty_metadata(handle, dind);
521 if (err)
522 goto exit_group_desc;
507 brelse(dind); 523 brelse(dind);
524 dind = NULL;
508 inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >> 9; 525 inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >> 9;
509 ext3_mark_iloc_dirty(handle, inode, &iloc); 526 err = ext3_mark_iloc_dirty(handle, inode, &iloc);
527 if (err)
528 goto exit_group_desc;
510 memset((*primary)->b_data, 0, sb->s_blocksize); 529 memset((*primary)->b_data, 0, sb->s_blocksize);
511 ext3_journal_dirty_metadata(handle, *primary); 530 err = ext3_journal_dirty_metadata(handle, *primary);
531 if (err)
532 goto exit_group_desc;
512 533
513 o_group_desc = EXT3_SB(sb)->s_group_desc; 534 o_group_desc = EXT3_SB(sb)->s_group_desc;
514 memcpy(n_group_desc, o_group_desc, 535 memcpy(n_group_desc, o_group_desc,
@@ -519,10 +540,14 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
519 kfree(o_group_desc); 540 kfree(o_group_desc);
520 541
521 le16_add_cpu(&es->s_reserved_gdt_blocks, -1); 542 le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
522 ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh); 543 err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
544 if (err)
545 goto exit_inode;
523 546
524 return 0; 547 return 0;
525 548
549exit_group_desc:
550 kfree(n_group_desc);
526exit_inode: 551exit_inode:
527 //ext3_journal_release_buffer(handle, iloc.bh); 552 //ext3_journal_release_buffer(handle, iloc.bh);
528 brelse(iloc.bh); 553 brelse(iloc.bh);
@@ -706,16 +731,20 @@ static void update_backups(struct super_block *sb,
706 } 731 }
707 ext3_debug("update metadata backup %#04lx\n", 732 ext3_debug("update metadata backup %#04lx\n",
708 (unsigned long)bh->b_blocknr); 733 (unsigned long)bh->b_blocknr);
709 if ((err = ext3_journal_get_write_access(handle, bh))) 734 if ((err = ext3_journal_get_write_access(handle, bh))) {
735 brelse(bh);
710 break; 736 break;
737 }
711 lock_buffer(bh); 738 lock_buffer(bh);
712 memcpy(bh->b_data, data, size); 739 memcpy(bh->b_data, data, size);
713 if (rest) 740 if (rest)
714 memset(bh->b_data + size, 0, rest); 741 memset(bh->b_data + size, 0, rest);
715 set_buffer_uptodate(bh); 742 set_buffer_uptodate(bh);
716 unlock_buffer(bh); 743 unlock_buffer(bh);
717 ext3_journal_dirty_metadata(handle, bh); 744 err = ext3_journal_dirty_metadata(handle, bh);
718 brelse(bh); 745 brelse(bh);
746 if (err)
747 break;
719 } 748 }
720 if ((err2 = ext3_journal_stop(handle)) && !err) 749 if ((err2 = ext3_journal_stop(handle)) && !err)
721 err = err2; 750 err = err2;
@@ -922,7 +951,9 @@ int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input)
922 /* Update the global fs size fields */ 951 /* Update the global fs size fields */
923 sbi->s_groups_count++; 952 sbi->s_groups_count++;
924 953
925 ext3_journal_dirty_metadata(handle, primary); 954 err = ext3_journal_dirty_metadata(handle, primary);
955 if (err)
956 goto exit_journal;
926 957
927 /* Update the reserved block counts only once the new group is 958 /* Update the reserved block counts only once the new group is
928 * active. */ 959 * active. */
@@ -934,7 +965,7 @@ int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input)
934 percpu_counter_add(&sbi->s_freeinodes_counter, 965 percpu_counter_add(&sbi->s_freeinodes_counter,
935 EXT3_INODES_PER_GROUP(sb)); 966 EXT3_INODES_PER_GROUP(sb));
936 967
937 ext3_journal_dirty_metadata(handle, sbi->s_sbh); 968 err = ext3_journal_dirty_metadata(handle, sbi->s_sbh);
938 969
939exit_journal: 970exit_journal:
940 mutex_unlock(&sbi->s_resize_lock); 971 mutex_unlock(&sbi->s_resize_lock);
@@ -1064,8 +1095,14 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es,
1064 goto exit_put; 1095 goto exit_put;
1065 } 1096 }
1066 es->s_blocks_count = cpu_to_le32(o_blocks_count + add); 1097 es->s_blocks_count = cpu_to_le32(o_blocks_count + add);
1067 ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh); 1098 err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
1068 mutex_unlock(&EXT3_SB(sb)->s_resize_lock); 1099 mutex_unlock(&EXT3_SB(sb)->s_resize_lock);
1100 if (err) {
1101 ext3_warning(sb, __func__,
1102 "error %d on journal dirty metadata", err);
1103 ext3_journal_stop(handle);
1104 goto exit_put;
1105 }
1069 ext3_debug("freeing blocks "E3FSBLK" through "E3FSBLK"\n", 1106 ext3_debug("freeing blocks "E3FSBLK" through "E3FSBLK"\n",
1070 o_blocks_count, o_blocks_count + add); 1107 o_blocks_count, o_blocks_count + add);
1071 ext3_free_blocks_sb(handle, sb, o_blocks_count, add, &freed_blocks); 1108 ext3_free_blocks_sb(handle, sb, o_blocks_count, add, &freed_blocks);
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 77ce1616f725..b7d0554631e4 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -143,12 +143,16 @@ void ext3_journal_abort_handle(const char *caller, const char *err_fn,
143void ext3_msg(struct super_block *sb, const char *prefix, 143void ext3_msg(struct super_block *sb, const char *prefix,
144 const char *fmt, ...) 144 const char *fmt, ...)
145{ 145{
146 struct va_format vaf;
146 va_list args; 147 va_list args;
147 148
148 va_start(args, fmt); 149 va_start(args, fmt);
149 printk("%sEXT3-fs (%s): ", prefix, sb->s_id); 150
150 vprintk(fmt, args); 151 vaf.fmt = fmt;
151 printk("\n"); 152 vaf.va = &args;
153
154 printk("%sEXT3-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
155
152 va_end(args); 156 va_end(args);
153} 157}
154 158
@@ -195,15 +199,20 @@ static void ext3_handle_error(struct super_block *sb)
195 sb->s_id); 199 sb->s_id);
196} 200}
197 201
198void ext3_error (struct super_block * sb, const char * function, 202void ext3_error(struct super_block *sb, const char *function,
199 const char * fmt, ...) 203 const char *fmt, ...)
200{ 204{
205 struct va_format vaf;
201 va_list args; 206 va_list args;
202 207
203 va_start(args, fmt); 208 va_start(args, fmt);
204 printk(KERN_CRIT "EXT3-fs error (device %s): %s: ",sb->s_id, function); 209
205 vprintk(fmt, args); 210 vaf.fmt = fmt;
206 printk("\n"); 211 vaf.va = &args;
212
213 printk(KERN_CRIT "EXT3-fs error (device %s): %s: %pV\n",
214 sb->s_id, function, &vaf);
215
207 va_end(args); 216 va_end(args);
208 217
209 ext3_handle_error(sb); 218 ext3_handle_error(sb);
@@ -274,15 +283,20 @@ void __ext3_std_error (struct super_block * sb, const char * function,
274 * case we take the easy way out and panic immediately. 283 * case we take the easy way out and panic immediately.
275 */ 284 */
276 285
277void ext3_abort (struct super_block * sb, const char * function, 286void ext3_abort(struct super_block *sb, const char *function,
278 const char * fmt, ...) 287 const char *fmt, ...)
279{ 288{
289 struct va_format vaf;
280 va_list args; 290 va_list args;
281 291
282 va_start(args, fmt); 292 va_start(args, fmt);
283 printk(KERN_CRIT "EXT3-fs (%s): error: %s: ", sb->s_id, function); 293
284 vprintk(fmt, args); 294 vaf.fmt = fmt;
285 printk("\n"); 295 vaf.va = &args;
296
297 printk(KERN_CRIT "EXT3-fs (%s): error: %s: %pV\n",
298 sb->s_id, function, &vaf);
299
286 va_end(args); 300 va_end(args);
287 301
288 if (test_opt(sb, ERRORS_PANIC)) 302 if (test_opt(sb, ERRORS_PANIC))
@@ -300,16 +314,20 @@ void ext3_abort (struct super_block * sb, const char * function,
300 journal_abort(EXT3_SB(sb)->s_journal, -EIO); 314 journal_abort(EXT3_SB(sb)->s_journal, -EIO);
301} 315}
302 316
303void ext3_warning (struct super_block * sb, const char * function, 317void ext3_warning(struct super_block *sb, const char *function,
304 const char * fmt, ...) 318 const char *fmt, ...)
305{ 319{
320 struct va_format vaf;
306 va_list args; 321 va_list args;
307 322
308 va_start(args, fmt); 323 va_start(args, fmt);
309 printk(KERN_WARNING "EXT3-fs (%s): warning: %s: ", 324
310 sb->s_id, function); 325 vaf.fmt = fmt;
311 vprintk(fmt, args); 326 vaf.va = &args;
312 printk("\n"); 327
328 printk(KERN_WARNING "EXT3-fs (%s): warning: %s: %pV\n",
329 sb->s_id, function, &vaf);
330
313 va_end(args); 331 va_end(args);
314} 332}
315 333
@@ -1848,13 +1866,15 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
1848 goto failed_mount; 1866 goto failed_mount;
1849 } 1867 }
1850 1868
1851 if (generic_check_addressable(sb->s_blocksize_bits, 1869 err = generic_check_addressable(sb->s_blocksize_bits,
1852 le32_to_cpu(es->s_blocks_count))) { 1870 le32_to_cpu(es->s_blocks_count));
1871 if (err) {
1853 ext3_msg(sb, KERN_ERR, 1872 ext3_msg(sb, KERN_ERR,
1854 "error: filesystem is too large to mount safely"); 1873 "error: filesystem is too large to mount safely");
1855 if (sizeof(sector_t) < 8) 1874 if (sizeof(sector_t) < 8)
1856 ext3_msg(sb, KERN_ERR, 1875 ext3_msg(sb, KERN_ERR,
1857 "error: CONFIG_LBDAF not enabled"); 1876 "error: CONFIG_LBDAF not enabled");
1877 ret = err;
1858 goto failed_mount; 1878 goto failed_mount;
1859 } 1879 }
1860 1880
@@ -2297,7 +2317,7 @@ static int ext3_load_journal(struct super_block *sb,
2297 EXT3_SB(sb)->s_journal = journal; 2317 EXT3_SB(sb)->s_journal = journal;
2298 ext3_clear_journal_err(sb, es); 2318 ext3_clear_journal_err(sb, es);
2299 2319
2300 if (journal_devnum && 2320 if (!really_read_only && journal_devnum &&
2301 journal_devnum != le32_to_cpu(es->s_journal_dev)) { 2321 journal_devnum != le32_to_cpu(es->s_journal_dev)) {
2302 es->s_journal_dev = cpu_to_le32(journal_devnum); 2322 es->s_journal_dev = cpu_to_le32(journal_devnum);
2303 2323
diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
index e69dc6dfaa89..32e6cc23bd9a 100644
--- a/fs/ext3/xattr.c
+++ b/fs/ext3/xattr.c
@@ -925,7 +925,7 @@ ext3_xattr_ibody_set(handle_t *handle, struct inode *inode,
925/* 925/*
926 * ext3_xattr_set_handle() 926 * ext3_xattr_set_handle()
927 * 927 *
928 * Create, replace or remove an extended attribute for this inode. Buffer 928 * Create, replace or remove an extended attribute for this inode. Value
929 * is NULL to remove an existing extended attribute, and non-NULL to 929 * is NULL to remove an existing extended attribute, and non-NULL to
930 * either replace an existing extended attribute, or create a new extended 930 * either replace an existing extended attribute, or create a new extended
931 * attribute. The flags XATTR_REPLACE and XATTR_CREATE 931 * attribute. The flags XATTR_REPLACE and XATTR_CREATE
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 14c3af26c671..adf96b822781 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -592,7 +592,8 @@ ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
592 * Account for the allocated meta blocks. We will never 592 * Account for the allocated meta blocks. We will never
 593 * fail EDQUOT for metadata, but we do account for it. 593 * fail EDQUOT for metadata, but we do account for it.
594 */ 594 */
595 if (!(*errp) && EXT4_I(inode)->i_delalloc_reserved_flag) { 595 if (!(*errp) &&
596 ext4_test_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED)) {
596 spin_lock(&EXT4_I(inode)->i_block_reservation_lock); 597 spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
597 EXT4_I(inode)->i_allocated_meta_blocks += ar.len; 598 EXT4_I(inode)->i_allocated_meta_blocks += ar.len;
598 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 599 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index ece76fb6a40c..164c56092e58 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -60,9 +60,13 @@ static unsigned char get_dtype(struct super_block *sb, int filetype)
60 return (ext4_filetype_table[filetype]); 60 return (ext4_filetype_table[filetype]);
61} 61}
62 62
63 63/*
64 * Return 0 if the directory entry is OK, and 1 if there is a problem
65 *
66 * Note: this is the opposite of what ext2 and ext3 historically returned...
67 */
64int __ext4_check_dir_entry(const char *function, unsigned int line, 68int __ext4_check_dir_entry(const char *function, unsigned int line,
65 struct inode *dir, 69 struct inode *dir, struct file *filp,
66 struct ext4_dir_entry_2 *de, 70 struct ext4_dir_entry_2 *de,
67 struct buffer_head *bh, 71 struct buffer_head *bh,
68 unsigned int offset) 72 unsigned int offset)
@@ -71,26 +75,37 @@ int __ext4_check_dir_entry(const char *function, unsigned int line,
 	const int rlen = ext4_rec_len_from_disk(de->rec_len,
 						dir->i_sb->s_blocksize);
 
-	if (rlen < EXT4_DIR_REC_LEN(1))
+	if (unlikely(rlen < EXT4_DIR_REC_LEN(1)))
 		error_msg = "rec_len is smaller than minimal";
-	else if (rlen % 4 != 0)
+	else if (unlikely(rlen % 4 != 0))
 		error_msg = "rec_len % 4 != 0";
-	else if (rlen < EXT4_DIR_REC_LEN(de->name_len))
+	else if (unlikely(rlen < EXT4_DIR_REC_LEN(de->name_len)))
 		error_msg = "rec_len is too small for name_len";
-	else if (((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize)
+	else if (unlikely(((char *) de - bh->b_data) + rlen >
+			  dir->i_sb->s_blocksize))
 		error_msg = "directory entry across blocks";
-	else if (le32_to_cpu(de->inode) >
-		 le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count))
+	else if (unlikely(le32_to_cpu(de->inode) >
+			  le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count)))
 		error_msg = "inode out of bounds";
+	else
+		return 0;
 
-	if (error_msg != NULL)
-		ext4_error_inode(dir, function, line, bh->b_blocknr,
-			"bad entry in directory: %s - "
-			"offset=%u(%u), inode=%u, rec_len=%d, name_len=%d",
-			error_msg, (unsigned) (offset%bh->b_size), offset,
-			le32_to_cpu(de->inode),
-			rlen, de->name_len);
-	return error_msg == NULL ? 1 : 0;
+	if (filp)
+		ext4_error_file(filp, function, line, bh ? bh->b_blocknr : 0,
+				"bad entry in directory: %s - offset=%u(%u), "
+				"inode=%u, rec_len=%d, name_len=%d",
+				error_msg, (unsigned) (offset%bh->b_size),
+				offset, le32_to_cpu(de->inode),
+				rlen, de->name_len);
+	else
+		ext4_error_inode(dir, function, line, bh ? bh->b_blocknr : 0,
+				"bad entry in directory: %s - offset=%u(%u), "
+				"inode=%u, rec_len=%d, name_len=%d",
+				error_msg, (unsigned) (offset%bh->b_size),
+				offset, le32_to_cpu(de->inode),
+				rlen, de->name_len);
+
+	return 1;
 }
 
 static int ext4_readdir(struct file *filp,
@@ -152,8 +167,9 @@ static int ext4_readdir(struct file *filp,
 		 */
 		if (!bh) {
 			if (!dir_has_error) {
-				EXT4_ERROR_INODE(inode, "directory "
-						 "contains a hole at offset %Lu",
+				EXT4_ERROR_FILE(filp, 0,
+						"directory contains a "
+						"hole at offset %llu",
 					(unsigned long long) filp->f_pos);
 				dir_has_error = 1;
 			}
@@ -194,8 +210,8 @@ revalidate:
 		while (!error && filp->f_pos < inode->i_size
 		       && offset < sb->s_blocksize) {
 			de = (struct ext4_dir_entry_2 *) (bh->b_data + offset);
-			if (!ext4_check_dir_entry(inode, de,
-						  bh, offset)) {
+			if (ext4_check_dir_entry(inode, filp, de,
+						 bh, offset)) {
 				/*
 				 * On error, skip the f_pos to the next block
 				 */
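
The rewritten __ext4_check_dir_entry() above inverts the historical convention: it now returns 0 when the entry is OK and 1 on a problem, takes the success path early, and funnels every failure through one reporting block. A minimal userspace sketch of that shape, with simplified stand-in fields rather than the real on-disk layout:

    /* Compile with: cc -o check check.c */
    #include <stdio.h>

    struct dirent_sketch {
        unsigned int inode;
        unsigned short rec_len;   /* on-disk record length (simplified) */
        unsigned char name_len;
    };

    static int check_entry(const struct dirent_sketch *de,
                           unsigned int blocksize, unsigned int offset)
    {
        const char *error_msg;

        if (de->rec_len < 12)                       /* below minimal record */
            error_msg = "rec_len is smaller than minimal";
        else if (de->rec_len % 4 != 0)              /* must stay 4-byte aligned */
            error_msg = "rec_len % 4 != 0";
        else if (offset + de->rec_len > blocksize)  /* would cross the block */
            error_msg = "directory entry across blocks";
        else
            return 0;                               /* entry is OK */

        /* One reporting block for every failure, as in the patch. */
        fprintf(stderr, "bad entry in directory: %s - offset=%u, "
                "inode=%u, rec_len=%d, name_len=%d\n",
                error_msg, offset, de->inode, de->rec_len, de->name_len);
        return 1;
    }

    int main(void)
    {
        struct dirent_sketch good = { 11, 16, 4 }, bad = { 11, 10, 4 };

        printf("good -> %d\n", check_entry(&good, 4096, 0));  /* prints 0 */
        printf("bad  -> %d\n", check_entry(&bad, 4096, 0));   /* prints 1 */
        return 0;
    }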
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 94ce3d7a1c4b..bab2387fba43 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -62,8 +62,8 @@
 #define EXT4_ERROR_INODE_BLOCK(inode, block, fmt, a...) \
 	ext4_error_inode((inode), __func__, __LINE__, (block), (fmt), ## a)
 
-#define EXT4_ERROR_FILE(file, fmt, a...)	\
-	ext4_error_file(__func__, __LINE__, (file), (fmt), ## a)
+#define EXT4_ERROR_FILE(file, block, fmt, a...)				\
+	ext4_error_file((file), __func__, __LINE__, (block), (fmt), ## a)
 
 /* data type for block offset of block group */
 typedef int ext4_grpblk_t;
@@ -561,22 +561,6 @@ struct ext4_new_group_data {
 #define EXT4_IOC32_SETVERSION_OLD	FS_IOC32_SETVERSION
 #endif
 
-
-/*
- * Mount options
- */
-struct ext4_mount_options {
-	unsigned long s_mount_opt;
-	uid_t s_resuid;
-	gid_t s_resgid;
-	unsigned long s_commit_interval;
-	u32 s_min_batch_time, s_max_batch_time;
-#ifdef CONFIG_QUOTA
-	int s_jquota_fmt;
-	char *s_qf_names[MAXQUOTAS];
-#endif
-};
-
 /* Max physical block we can addres w/o extents */
 #define EXT4_MAX_BLOCK_FILE_PHYS	0xFFFFFFFF
 
@@ -709,6 +693,8 @@ do { \
 	if (EXT4_FITS_IN_INODE(raw_inode, EXT4_I(inode), xtime ## _extra)) \
 		ext4_decode_extra_time(&(inode)->xtime,			\
 				       raw_inode->xtime ## _extra);	\
+	else								\
+		(inode)->xtime.tv_nsec = 0;				\
 } while (0)
 
 #define EXT4_EINODE_GET_XTIME(xtime, einode, raw_inode)		\
@@ -719,6 +705,8 @@ do { \
 	if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime ## _extra))	\
 		ext4_decode_extra_time(&(einode)->xtime,		\
 				       raw_inode->xtime ## _extra);	\
+	else								\
+		(einode)->xtime.tv_nsec = 0;				\
 } while (0)
 
 #define i_disk_version osd1.linux1.l_i_version
@@ -750,12 +738,13 @@ do { \
 
 /*
  * storage for cached extent
+ * If ec_len == 0, then the cache is invalid.
+ * If ec_start == 0, then the cache represents a gap (null mapping)
  */
 struct ext4_ext_cache {
 	ext4_fsblk_t	ec_start;
 	ext4_lblk_t	ec_block;
 	__u32		ec_len; /* must be 32bit to return holes */
-	__u32		ec_type;
 };
 
 /*
@@ -774,10 +763,12 @@ struct ext4_inode_info {
 	 * near to their parent directory's inode.
 	 */
 	ext4_group_t	i_block_group;
+	ext4_lblk_t	i_dir_start_lookup;
+#if (BITS_PER_LONG < 64)
 	unsigned long	i_state_flags;		/* Dynamic state flags */
+#endif
 	unsigned long	i_flags;
 
-	ext4_lblk_t		i_dir_start_lookup;
 #ifdef CONFIG_EXT4_FS_XATTR
 	/*
 	 * Extended attributes can be read independently of the main file
@@ -820,7 +811,7 @@ struct ext4_inode_info {
 	 */
 	struct rw_semaphore i_data_sem;
 	struct inode vfs_inode;
-	struct jbd2_inode jinode;
+	struct jbd2_inode *jinode;
 
 	struct ext4_ext_cache i_cached_extent;
 	/*
@@ -840,14 +831,12 @@ struct ext4_inode_info {
 	unsigned int i_reserved_data_blocks;
 	unsigned int i_reserved_meta_blocks;
 	unsigned int i_allocated_meta_blocks;
-	unsigned short i_delalloc_reserved_flag;
-	sector_t i_da_metadata_calc_last_lblock;
+	ext4_lblk_t i_da_metadata_calc_last_lblock;
 	int i_da_metadata_calc_len;
 
 	/* on-disk additional length */
 	__u16 i_extra_isize;
 
-	spinlock_t i_block_reservation_lock;
 #ifdef CONFIG_QUOTA
 	/* quota space reservation, managed internally by quota code */
 	qsize_t i_reserved_quota;
@@ -856,9 +845,11 @@ struct ext4_inode_info {
 	/* completed IOs that might need unwritten extents handling */
 	struct list_head i_completed_io_list;
 	spinlock_t i_completed_io_lock;
+	atomic_t i_ioend_count;	/* Number of outstanding io_end structs */
 	/* current io_end structure for async DIO write*/
 	ext4_io_end_t *cur_aio_dio;
-	atomic_t i_ioend_count;	/* Number of outstanding io_end structs */
+
+	spinlock_t i_block_reservation_lock;
 
 	/*
 	 * Transactions that contain inode's metadata needed to complete
@@ -917,11 +908,20 @@ struct ext4_inode_info {
 #define EXT4_MOUNT_DISCARD		0x40000000 /* Issue DISCARD requests */
 #define EXT4_MOUNT_INIT_INODE_TABLE	0x80000000 /* Initialize uninitialized itables */
 
-#define clear_opt(o, opt)		o &= ~EXT4_MOUNT_##opt
-#define set_opt(o, opt)			o |= EXT4_MOUNT_##opt
+#define clear_opt(sb, opt)		EXT4_SB(sb)->s_mount_opt &= \
+						~EXT4_MOUNT_##opt
+#define set_opt(sb, opt)		EXT4_SB(sb)->s_mount_opt |= \
+						EXT4_MOUNT_##opt
 #define test_opt(sb, opt)		(EXT4_SB(sb)->s_mount_opt & \
 					 EXT4_MOUNT_##opt)
 
+#define clear_opt2(sb, opt)		EXT4_SB(sb)->s_mount_opt2 &= \
+						~EXT4_MOUNT2_##opt
+#define set_opt2(sb, opt)		EXT4_SB(sb)->s_mount_opt2 |= \
+						EXT4_MOUNT2_##opt
+#define test_opt2(sb, opt)		(EXT4_SB(sb)->s_mount_opt2 & \
+					 EXT4_MOUNT2_##opt)
+
 #define ext4_set_bit			ext2_set_bit
 #define ext4_set_bit_atomic		ext2_set_bit_atomic
 #define ext4_clear_bit			ext2_clear_bit
@@ -1087,6 +1087,7 @@ struct ext4_sb_info {
 	struct ext4_super_block *s_es;	/* Pointer to the super block in the buffer */
 	struct buffer_head **s_group_desc;
 	unsigned int s_mount_opt;
+	unsigned int s_mount_opt2;
 	unsigned int s_mount_flags;
 	ext4_fsblk_t s_sb_block;
 	uid_t s_resuid;
@@ -1237,24 +1238,39 @@ enum {
 	EXT4_STATE_EXT_MIGRATE,		/* Inode is migrating */
 	EXT4_STATE_DIO_UNWRITTEN,	/* need convert on dio done*/
 	EXT4_STATE_NEWENTRY,		/* File just added to dir */
+	EXT4_STATE_DELALLOC_RESERVED,	/* blks already reserved for delalloc */
 };
 
-#define EXT4_INODE_BIT_FNS(name, field)			\
+#define EXT4_INODE_BIT_FNS(name, field, offset)			\
 static inline int ext4_test_inode_##name(struct inode *inode, int bit) \
 {							\
-	return test_bit(bit, &EXT4_I(inode)->i_##field);	\
+	return test_bit(bit + (offset), &EXT4_I(inode)->i_##field);	\
 }							\
 static inline void ext4_set_inode_##name(struct inode *inode, int bit) \
 {							\
-	set_bit(bit, &EXT4_I(inode)->i_##field);		\
+	set_bit(bit + (offset), &EXT4_I(inode)->i_##field);	\
 }							\
 static inline void ext4_clear_inode_##name(struct inode *inode, int bit) \
 {							\
-	clear_bit(bit, &EXT4_I(inode)->i_##field);	\
+	clear_bit(bit + (offset), &EXT4_I(inode)->i_##field);	\
 }
 
-EXT4_INODE_BIT_FNS(flag, flags)
-EXT4_INODE_BIT_FNS(state, state_flags)
+EXT4_INODE_BIT_FNS(flag, flags, 0)
+#if (BITS_PER_LONG < 64)
+EXT4_INODE_BIT_FNS(state, state_flags, 0)
+
+static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
+{
+	(ei)->i_state_flags = 0;
+}
+#else
+EXT4_INODE_BIT_FNS(state, flags, 32)
+
+static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
+{
+	/* We depend on the fact that callers will set i_flags */
+}
+#endif
 #else
 /* Assume that user mode programs are passing in an ext4fs superblock, not
  * a kernel struct super_block.  This will allow us to call the feature-test
@@ -1642,10 +1658,12 @@ extern unsigned ext4_init_block_bitmap(struct super_block *sb,
 
 /* dir.c */
 extern int __ext4_check_dir_entry(const char *, unsigned int, struct inode *,
+				  struct file *,
 				  struct ext4_dir_entry_2 *,
 				  struct buffer_head *, unsigned int);
-#define ext4_check_dir_entry(dir, de, bh, offset) \
-	__ext4_check_dir_entry(__func__, __LINE__, (dir), (de), (bh), (offset))
+#define ext4_check_dir_entry(dir, filp, de, bh, offset)			\
+	unlikely(__ext4_check_dir_entry(__func__, __LINE__, (dir), (filp), \
+					(de), (bh), (offset)))
 extern int ext4_htree_store_dirent(struct file *dir_file, __u32 hash,
 				    __u32 minor_hash,
 				    struct ext4_dir_entry_2 *dirent);
@@ -1653,6 +1671,7 @@ extern void ext4_htree_free_dir_info(struct dir_private_info *p);
 
 /* fsync.c */
 extern int ext4_sync_file(struct file *, int);
+extern int ext4_flush_completed_IO(struct inode *);
 
 /* hash.c */
 extern int ext4fs_dirhash(const char *name, int len, struct
@@ -1752,8 +1771,8 @@ extern void ext4_error_inode(struct inode *, const char *, unsigned int,
 			ext4_fsblk_t, const char *, ...)
 	__attribute__ ((format (printf, 5, 6)));
 extern void ext4_error_file(struct file *, const char *, unsigned int,
-			    const char *, ...)
-	__attribute__ ((format (printf, 4, 5)));
+			    ext4_fsblk_t, const char *, ...)
+	__attribute__ ((format (printf, 5, 6)));
 extern void __ext4_std_error(struct super_block *, const char *,
 			     unsigned int, int);
 extern void __ext4_abort(struct super_block *, const char *, unsigned int,
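
The EXT4_INODE_BIT_FNS change above is worth pausing on: on 64-bit builds the dynamic state bits now live in the upper 32 bits of the same unsigned long that holds the persistent flags, selected by the new offset argument, so ext4_clear_state_flags() can be a no-op there. A compilable userspace model of the offset trick (plain loads/stores stand in for the kernel's set_bit()/test_bit(); names are illustrative, and the model assumes a 64-bit long):

    #include <assert.h>
    #include <limits.h>
    #include <stdio.h>

    #define FLAG_OFFSET  0   /* persistent flags: bits 0..31  */
    #define STATE_OFFSET 32  /* dynamic state:    bits 32..63 */

    static void set_flag(unsigned long *w, int bit, int offset)
    {
        *w |= 1UL << (bit + offset);
    }

    static int test_flag(const unsigned long *w, int bit, int offset)
    {
        return (*w >> (bit + offset)) & 1;
    }

    int main(void)
    {
        unsigned long word = 0;

        /* The sketch only works where unsigned long is 64 bits wide. */
        assert(sizeof(unsigned long) * CHAR_BIT == 64);

        set_flag(&word, 3, FLAG_OFFSET);   /* a persistent flag        */
        set_flag(&word, 3, STATE_OFFSET);  /* same bit number, state half */

        /* The two "bit 3"s land in different halves of one word. */
        printf("flag3=%d state3=%d word=%#lx\n",
               test_flag(&word, 3, FLAG_OFFSET),
               test_flag(&word, 3, STATE_OFFSET), word);
        return 0;
    }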
diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h
index 28ce70fd9cd0..2e29abb30f76 100644
--- a/fs/ext4/ext4_extents.h
+++ b/fs/ext4/ext4_extents.h
@@ -119,10 +119,6 @@ struct ext4_ext_path {
  * structure for external API
  */
 
-#define EXT4_EXT_CACHE_NO	0
-#define EXT4_EXT_CACHE_GAP	1
-#define EXT4_EXT_CACHE_EXTENT	2
-
 /*
  * to be called by ext4_ext_walk_space()
  * negative retcode - error
@@ -197,7 +193,7 @@ static inline unsigned short ext_depth(struct inode *inode)
 static inline void
 ext4_ext_invalidate_cache(struct inode *inode)
 {
-	EXT4_I(inode)->i_cached_extent.ec_type = EXT4_EXT_CACHE_NO;
+	EXT4_I(inode)->i_cached_extent.ec_len = 0;
 }
 
 static inline void ext4_ext_mark_uninitialized(struct ext4_extent *ext)
@@ -278,7 +274,7 @@ static inline void ext4_idx_store_pblock(struct ext4_extent_idx *ix,
 }
 
 extern int ext4_ext_calc_metadata_amount(struct inode *inode,
-					 sector_t lblocks);
+					 ext4_lblk_t lblocks);
 extern int ext4_extent_tree_init(handle_t *, struct inode *);
 extern int ext4_ext_calc_credits_for_single_extent(struct inode *inode,
 						   int num,
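
With the EXT4_EXT_CACHE_* constants gone, validity and gap-ness of the single-extent cache are encoded in fields the cache already had, as the new comment in ext4.h spells out: ec_len == 0 means invalid, ec_start == 0 means a gap. A compact model of that encoding (field names follow the struct; the predicate helpers are illustrative, not kernel API):

    #include <stdio.h>

    struct ext_cache_sketch {
        unsigned long long ec_start; /* 0 means "gap" (hole, no mapping) */
        unsigned int ec_block;
        unsigned int ec_len;         /* 0 means "cache is invalid"       */
    };

    static int cache_valid(const struct ext_cache_sketch *c)
    {
        return c->ec_len != 0;
    }

    static int cache_is_gap(const struct ext_cache_sketch *c)
    {
        return c->ec_start == 0;
    }

    int main(void)
    {
        struct ext_cache_sketch invalid = { 0, 0, 0 };
        struct ext_cache_sketch gap     = { 0, 100, 8 };
        struct ext_cache_sketch mapped  = { 5000, 100, 8 };

        printf("invalid: valid=%d\n", cache_valid(&invalid));
        printf("gap:     valid=%d gap=%d\n",
               cache_valid(&gap), cache_is_gap(&gap));
        printf("mapped:  valid=%d gap=%d\n",
               cache_valid(&mapped), cache_is_gap(&mapped));
        return 0;
    }

The two predicates never need a third field, which is why ec_type could be dropped without losing information.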
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index b0bd792c58c5..d8b992e658c1 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -253,7 +253,7 @@ static inline int ext4_journal_force_commit(journal_t *journal)
 static inline int ext4_jbd2_file_inode(handle_t *handle, struct inode *inode)
 {
 	if (ext4_handle_valid(handle))
-		return jbd2_journal_file_inode(handle, &EXT4_I(inode)->jinode);
+		return jbd2_journal_file_inode(handle, EXT4_I(inode)->jinode);
 	return 0;
 }
 
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 0554c48cb1fd..e910720e8bb8 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -117,11 +117,33 @@ static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
 	struct ext4_extent *ex;
 	depth = path->p_depth;
 
-	/* try to predict block placement */
+	/*
+	 * Try to predict block placement assuming that we are
+	 * filling in a file which will eventually be
+	 * non-sparse --- i.e., in the case of libbfd writing
+	 * an ELF object sections out-of-order but in a way
+	 * the eventually results in a contiguous object or
+	 * executable file, or some database extending a table
+	 * space file.  However, this is actually somewhat
+	 * non-ideal if we are writing a sparse file such as
+	 * qemu or KVM writing a raw image file that is going
+	 * to stay fairly sparse, since it will end up
+	 * fragmenting the file system's free space.  Maybe we
+	 * should have some hueristics or some way to allow
+	 * userspace to pass a hint to file system,
+	 * especiially if the latter case turns out to be
+	 * common.
+	 */
 	ex = path[depth].p_ext;
-	if (ex)
-		return (ext4_ext_pblock(ex) +
-			(block - le32_to_cpu(ex->ee_block)));
+	if (ex) {
+		ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
+		ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);
+
+		if (block > ext_block)
+			return ext_pblk + (block - ext_block);
+		else
+			return ext_pblk - (ext_block - block);
+	}
 
 	/* it looks like index is empty;
 	 * try to find starting block from index itself */
@@ -244,7 +266,7 @@ static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
  * to allocate @blocks
  * Worse case is one block per extent
  */
-int ext4_ext_calc_metadata_amount(struct inode *inode, sector_t lblock)
+int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
 {
 	struct ext4_inode_info *ei = EXT4_I(inode);
 	int idxs, num = 0;
@@ -1872,12 +1894,10 @@ static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
 			cbex.ec_block = start;
 			cbex.ec_len = end - start;
 			cbex.ec_start = 0;
-			cbex.ec_type = EXT4_EXT_CACHE_GAP;
 		} else {
 			cbex.ec_block = le32_to_cpu(ex->ee_block);
 			cbex.ec_len = ext4_ext_get_actual_len(ex);
 			cbex.ec_start = ext4_ext_pblock(ex);
-			cbex.ec_type = EXT4_EXT_CACHE_EXTENT;
 		}
 
 		if (unlikely(cbex.ec_len == 0)) {
@@ -1917,13 +1937,12 @@ static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
 
 static void
 ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
-			__u32 len, ext4_fsblk_t start, int type)
+			__u32 len, ext4_fsblk_t start)
 {
 	struct ext4_ext_cache *cex;
 	BUG_ON(len == 0);
 	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
 	cex = &EXT4_I(inode)->i_cached_extent;
-	cex->ec_type = type;
 	cex->ec_block = block;
 	cex->ec_len = len;
 	cex->ec_start = start;
@@ -1976,15 +1995,18 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
 	}
 
 	ext_debug(" -> %u:%lu\n", lblock, len);
-	ext4_ext_put_in_cache(inode, lblock, len, 0, EXT4_EXT_CACHE_GAP);
+	ext4_ext_put_in_cache(inode, lblock, len, 0);
 }
 
+/*
+ * Return 0 if cache is invalid; 1 if the cache is valid
+ */
 static int
 ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
 			struct ext4_extent *ex)
 {
 	struct ext4_ext_cache *cex;
-	int ret = EXT4_EXT_CACHE_NO;
+	int ret = 0;
 
 	/*
 	 * We borrow i_block_reservation_lock to protect i_cached_extent
@@ -1993,11 +2015,9 @@ ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
 	cex = &EXT4_I(inode)->i_cached_extent;
 
 	/* has cache valid data? */
-	if (cex->ec_type == EXT4_EXT_CACHE_NO)
+	if (cex->ec_len == 0)
 		goto errout;
 
-	BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP &&
-			cex->ec_type != EXT4_EXT_CACHE_EXTENT);
 	if (in_range(block, cex->ec_block, cex->ec_len)) {
 		ex->ee_block = cpu_to_le32(cex->ec_block);
 		ext4_ext_store_pblock(ex, cex->ec_start);
@@ -2005,7 +2025,7 @@ ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
 		ext_debug("%u cached by %u:%u:%llu\n",
 				block,
 				cex->ec_block, cex->ec_len, cex->ec_start);
-		ret = cex->ec_type;
+		ret = 1;
 	}
 errout:
 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
@@ -3082,7 +3102,7 @@ static void unmap_underlying_metadata_blocks(struct block_device *bdev,
  * Handle EOFBLOCKS_FL flag, clearing it if necessary
  */
 static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
-			      struct ext4_map_blocks *map,
+			      ext4_lblk_t lblk,
 			      struct ext4_ext_path *path,
 			      unsigned int len)
 {
@@ -3112,7 +3132,7 @@ static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
 	 * this turns out to be false, we can bail out from this
 	 * function immediately.
 	 */
-	if (map->m_lblk + len < le32_to_cpu(last_ex->ee_block) +
+	if (lblk + len < le32_to_cpu(last_ex->ee_block) +
 	    ext4_ext_get_actual_len(last_ex))
 		return 0;
 	/*
@@ -3168,8 +3188,8 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
 							path);
 		if (ret >= 0) {
 			ext4_update_inode_fsync_trans(handle, inode, 1);
-			err = check_eofblocks_fl(handle, inode, map, path,
-						 map->m_len);
+			err = check_eofblocks_fl(handle, inode, map->m_lblk,
+						 path, map->m_len);
 		} else
 			err = ret;
 		goto out2;
@@ -3199,7 +3219,8 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
 		ret = ext4_ext_convert_to_initialized(handle, inode, map, path);
 		if (ret >= 0) {
 			ext4_update_inode_fsync_trans(handle, inode, 1);
-			err = check_eofblocks_fl(handle, inode, map, path, map->m_len);
+			err = check_eofblocks_fl(handle, inode, map->m_lblk, path,
+						 map->m_len);
 			if (err < 0)
 				goto out2;
 		}
@@ -3276,7 +3297,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 	struct ext4_extent_header *eh;
 	struct ext4_extent newex, *ex;
 	ext4_fsblk_t newblock;
-	int err = 0, depth, ret, cache_type;
+	int err = 0, depth, ret;
 	unsigned int allocated = 0;
 	struct ext4_allocation_request ar;
 	ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
@@ -3285,9 +3306,8 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 		  map->m_lblk, map->m_len, inode->i_ino);
 
 	/* check in cache */
-	cache_type = ext4_ext_in_cache(inode, map->m_lblk, &newex);
-	if (cache_type) {
-		if (cache_type == EXT4_EXT_CACHE_GAP) {
+	if (ext4_ext_in_cache(inode, map->m_lblk, &newex)) {
+		if (!newex.ee_start_lo && !newex.ee_start_hi) {
 			if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
 				/*
 				 * block isn't allocated yet and
@@ -3296,7 +3316,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 				goto out2;
 			}
 			/* we should allocate requested block */
-		} else if (cache_type == EXT4_EXT_CACHE_EXTENT) {
+		} else {
 			/* block is already allocated */
 			newblock = map->m_lblk
 				   - le32_to_cpu(newex.ee_block)
@@ -3305,8 +3325,6 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 			allocated = ext4_ext_get_actual_len(&newex) -
 				(map->m_lblk - le32_to_cpu(newex.ee_block));
 			goto out;
-		} else {
-			BUG();
 		}
 	}
 
@@ -3357,8 +3375,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 			/* Do not put uninitialized extent in the cache */
 			if (!ext4_ext_is_uninitialized(ex)) {
 				ext4_ext_put_in_cache(inode, ee_block,
-							ee_len, ee_start,
-							EXT4_EXT_CACHE_EXTENT);
+						      ee_len, ee_start);
 				goto out;
 			}
 			ret = ext4_ext_handle_uninitialized_extents(handle,
@@ -3456,7 +3473,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 		map->m_flags |= EXT4_MAP_UNINIT;
 	}
 
-	err = check_eofblocks_fl(handle, inode, map, path, ar.len);
+	err = check_eofblocks_fl(handle, inode, map->m_lblk, path, ar.len);
 	if (err)
 		goto out2;
 
@@ -3490,8 +3507,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
 	 * when it is _not_ an uninitialized extent.
 	 */
 	if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) {
-		ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock,
-						EXT4_EXT_CACHE_EXTENT);
+		ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock);
 		ext4_update_inode_fsync_trans(handle, inode, 1);
 	} else
 		ext4_update_inode_fsync_trans(handle, inode, 0);
@@ -3519,6 +3535,12 @@ void ext4_ext_truncate(struct inode *inode)
 	int err = 0;
 
 	/*
+	 * finish any pending end_io work so we won't run the risk of
+	 * converting any truncated blocks to initialized later
+	 */
+	ext4_flush_completed_IO(inode);
+
+	/*
 	 * probably first extent we're gonna free will be last in block
 	 */
 	err = ext4_writepage_trans_blocks(inode);
@@ -3767,7 +3789,7 @@ static int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path,
 
 	logical = (__u64)newex->ec_block << blksize_bits;
 
-	if (newex->ec_type == EXT4_EXT_CACHE_GAP) {
+	if (newex->ec_start == 0) {
 		pgoff_t offset;
 		struct page *page;
 		struct buffer_head *bh = NULL;
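
The ext4_ext_find_goal() rewrite at the top of this file deserves a note: the old one-liner computed pblock + (block - ext_block) even when the target block sat before the nearest extent, and with unsigned 32-bit logical block numbers that subtraction wraps. The new code branches so the subtraction always runs in the safe direction. A small standalone demonstration with stand-in values:

    #include <stdint.h>
    #include <stdio.h>

    /* Branch so the subtraction never underflows, as in the patch. */
    static uint64_t goal(uint64_t ext_pblk, uint32_t ext_block, uint32_t block)
    {
        if (block > ext_block)
            return ext_pblk + (block - ext_block);
        else
            return ext_pblk - (ext_block - block);
    }

    int main(void)
    {
        uint64_t pblk = 10000;   /* physical start of the nearby extent */
        uint32_t eblk = 200;     /* its logical start                   */

        printf("after extent:  %llu\n",
               (unsigned long long)goal(pblk, eblk, 250));   /* 10050 */
        printf("before extent: %llu\n",
               (unsigned long long)goal(pblk, eblk, 150));   /* 9950  */

        /* Naive version for block=150: the 32-bit difference wraps. */
        printf("naive wrap:    %llu\n",
               (unsigned long long)(pblk + (uint32_t)(150u - 200u)));
        return 0;
    }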
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 5a5c55ddceef..bb003dc9ffff 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -104,6 +104,7 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
 {
 	struct super_block *sb = inode->i_sb;
 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+	struct ext4_inode_info *ei = EXT4_I(inode);
 	struct vfsmount *mnt = filp->f_path.mnt;
 	struct path path;
 	char buf[64], *cp;
@@ -127,6 +128,27 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
 			ext4_mark_super_dirty(sb);
 		}
 	}
+	/*
+	 * Set up the jbd2_inode if we are opening the inode for
+	 * writing and the journal is present
+	 */
+	if (sbi->s_journal && !ei->jinode && (filp->f_mode & FMODE_WRITE)) {
+		struct jbd2_inode *jinode = jbd2_alloc_inode(GFP_KERNEL);
+
+		spin_lock(&inode->i_lock);
+		if (!ei->jinode) {
+			if (!jinode) {
+				spin_unlock(&inode->i_lock);
+				return -ENOMEM;
+			}
+			ei->jinode = jinode;
+			jbd2_journal_init_jbd_inode(ei->jinode, inode);
+			jinode = NULL;
+		}
+		spin_unlock(&inode->i_lock);
+		if (unlikely(jinode != NULL))
+			jbd2_free_inode(jinode);
+	}
 	return dquot_file_open(inode, filp);
 }
 
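
The open path above is a textbook lazy-allocation race pattern: allocate the jbd2_inode outside the lock, recheck under the lock, and free the spare if another opener won. A userspace model of the same pattern, with a pthread spinlock standing in for inode->i_lock (all names here are illustrative, not kernel API):

    /* Compile with: cc -o lazy lazy.c -lpthread */
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_spinlock_t lock;
    static int *shared_obj;          /* stands in for ei->jinode */

    static int attach_lazily(void)
    {
        /* Allocate before taking the lock; it may turn out to be a spare. */
        int *obj = malloc(sizeof(*obj));

        pthread_spin_lock(&lock);
        if (!shared_obj) {
            if (!obj) {
                pthread_spin_unlock(&lock);
                return -1;           /* -ENOMEM in the kernel version */
            }
            *obj = 42;
            shared_obj = obj;
            obj = NULL;              /* ownership transferred */
        }
        pthread_spin_unlock(&lock);

        free(obj);                   /* lost the race: drop the spare */
        return 0;
    }

    int main(void)
    {
        pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
        attach_lazily();
        attach_lazily();             /* second call finds it already set */
        printf("shared_obj=%d\n", *shared_obj);
        free(shared_obj);
        pthread_spin_destroy(&lock);
        return 0;
    }

Allocating outside the lock keeps a potentially sleeping allocation off the spinlock's critical section; the recheck under the lock is what makes the pattern race-free.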
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index c1a7bc923cf6..7829b287822a 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -75,7 +75,7 @@ static void dump_completed_IO(struct inode * inode)
  * to written.
  * The function return the number of pending IOs on success.
  */
-static int flush_completed_IO(struct inode *inode)
+extern int ext4_flush_completed_IO(struct inode *inode)
 {
 	ext4_io_end_t *io;
 	struct ext4_inode_info *ei = EXT4_I(inode);
@@ -169,7 +169,7 @@ int ext4_sync_file(struct file *file, int datasync)
 	if (inode->i_sb->s_flags & MS_RDONLY)
 		return 0;
 
-	ret = flush_completed_IO(inode);
+	ret = ext4_flush_completed_IO(inode);
 	if (ret < 0)
 		return ret;
 
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 1ce240a23ebb..eb9097aec6f0 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -1027,7 +1027,7 @@ got:
 	inode->i_generation = sbi->s_next_generation++;
 	spin_unlock(&sbi->s_next_gen_lock);
 
-	ei->i_state_flags = 0;
+	ext4_clear_state_flags(ei);	/* Only relevant on 32-bit archs */
 	ext4_set_inode_state(inode, EXT4_STATE_NEW);
 
 	ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index e659597b690b..e80fc513eacc 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -40,6 +40,7 @@
 #include <linux/workqueue.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
+#include <linux/ratelimit.h>
 
 #include "ext4_jbd2.h"
 #include "xattr.h"
@@ -54,10 +55,17 @@ static inline int ext4_begin_ordered_truncate(struct inode *inode,
 						   loff_t new_size)
 {
 	trace_ext4_begin_ordered_truncate(inode, new_size);
-	return jbd2_journal_begin_ordered_truncate(
-			EXT4_SB(inode->i_sb)->s_journal,
-			&EXT4_I(inode)->jinode,
-			new_size);
+	/*
+	 * If jinode is zero, then we never opened the file for
+	 * writing, so there's no need to call
+	 * jbd2_journal_begin_ordered_truncate() since there's no
+	 * outstanding writes we need to flush.
+	 */
+	if (!EXT4_I(inode)->jinode)
+		return 0;
+	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
+						   EXT4_I(inode)->jinode,
+						   new_size);
 }
 
 static void ext4_invalidatepage(struct page *page, unsigned long offset);
@@ -552,7 +560,7 @@ static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
 }
 
 /**
- *	ext4_blks_to_allocate: Look up the block map and count the number
+ *	ext4_blks_to_allocate - Look up the block map and count the number
  *	of direct blocks need to be allocated for the given branch.
  *
  *	@branch: chain of indirect blocks
@@ -591,13 +599,19 @@ static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
 
 /**
  *	ext4_alloc_blocks: multiple allocate blocks needed for a branch
+ *	@handle: handle for this transaction
+ *	@inode: inode which needs allocated blocks
+ *	@iblock: the logical block to start allocated at
+ *	@goal: preferred physical block of allocation
  *	@indirect_blks: the number of blocks need to allocate for indirect
  *			blocks
- *
+ *	@blks: number of desired blocks
  *	@new_blocks: on return it will store the new block numbers for
  *	the indirect blocks(if needed) and the first direct block,
- *	@blks:	on return it will store the total number of allocated
- *		direct blocks
+ *	@err: on return it will store the error code
+ *
+ *	This function will return the number of blocks allocated as
+ *	requested by the passed-in parameters.
  */
 static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
 			     ext4_lblk_t iblock, ext4_fsblk_t goal,
@@ -711,9 +725,11 @@ failed_out:
 
 /**
  *	ext4_alloc_branch - allocate and set up a chain of blocks.
+ *	@handle: handle for this transaction
  *	@inode: owner
  *	@indirect_blks: number of allocated indirect blocks
  *	@blks: number of allocated direct blocks
+ *	@goal: preferred place for allocation
  *	@offsets: offsets (in the blocks) to store the pointers to next.
  *	@branch: place to store the chain in.
  *
@@ -826,6 +842,7 @@ failed:
 
 /**
  * ext4_splice_branch - splice the allocated branch onto inode.
+ * @handle: handle for this transaction
  * @inode: owner
  * @block: (logical) number of block we are adding
  * @chain: chain of indirect blocks (with a missing link - see
@@ -1081,7 +1098,7 @@ static int ext4_indirect_calc_metadata_amount(struct inode *inode,
  * Calculate the number of metadata blocks need to reserve
  * to allocate a block located at @lblock
  */
-static int ext4_calc_metadata_amount(struct inode *inode, sector_t lblock)
+static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
 {
 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
 		return ext4_ext_calc_metadata_amount(inode, lblock);
@@ -1320,7 +1337,7 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
 	 * avoid double accounting
 	 */
 	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
-		EXT4_I(inode)->i_delalloc_reserved_flag = 1;
+		ext4_set_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
 	/*
 	 * We need to check for EXT4 here because migrate
 	 * could have changed the inode type in between
@@ -1350,7 +1367,7 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
 			ext4_da_update_reserve_space(inode, retval, 1);
 	}
 	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
-		EXT4_I(inode)->i_delalloc_reserved_flag = 0;
+		ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
 
 	up_write((&EXT4_I(inode)->i_data_sem));
 	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
@@ -1878,7 +1895,7 @@ static int ext4_journalled_write_end(struct file *file,
 /*
  * Reserve a single block located at lblock
  */
-static int ext4_da_reserve_space(struct inode *inode, sector_t lblock)
+static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
 {
 	int retries = 0;
 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
@@ -2239,7 +2256,7 @@ static void mpage_da_map_and_submit(struct mpage_da_data *mpd)
 	 * affects functions in many different parts of the allocation
 	 * call path.  This flag exists primarily because we don't
 	 * want to change *many* call functions, so ext4_map_blocks()
-	 * will set the magic i_delalloc_reserved_flag once the
+	 * will set the EXT4_STATE_DELALLOC_RESERVED flag once the
 	 * inode's allocation semaphore is taken.
 	 *
 	 * If the blocks in questions were delalloc blocks, set
@@ -3720,8 +3737,7 @@ static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode)
 retry:
 	io_end = ext4_init_io_end(inode, GFP_ATOMIC);
 	if (!io_end) {
-		if (printk_ratelimit())
-			printk(KERN_WARNING "%s: allocation fail\n", __func__);
+		pr_warning_ratelimited("%s: allocation fail\n", __func__);
 		schedule();
 		goto retry;
 	}
@@ -4045,7 +4061,7 @@ int ext4_block_truncate_page(handle_t *handle,
 	if (ext4_should_journal_data(inode)) {
 		err = ext4_handle_dirty_metadata(handle, inode, bh);
 	} else {
-		if (ext4_should_order_data(inode))
+		if (ext4_should_order_data(inode) && EXT4_I(inode)->jinode)
 			err = ext4_jbd2_file_inode(handle, inode);
 		mark_buffer_dirty(bh);
 	}
@@ -4169,6 +4185,7 @@ static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
 {
 	__le32 *p;
 	int	flags = EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_VALIDATED;
+	int	err;
 
 	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
 		flags |= EXT4_FREE_BLOCKS_METADATA;
@@ -4184,11 +4201,23 @@ static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
 	if (try_to_extend_transaction(handle, inode)) {
 		if (bh) {
 			BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
-			ext4_handle_dirty_metadata(handle, inode, bh);
+			err = ext4_handle_dirty_metadata(handle, inode, bh);
+			if (unlikely(err)) {
+				ext4_std_error(inode->i_sb, err);
+				return 1;
+			}
+		}
+		err = ext4_mark_inode_dirty(handle, inode);
+		if (unlikely(err)) {
+			ext4_std_error(inode->i_sb, err);
+			return 1;
+		}
+		err = ext4_truncate_restart_trans(handle, inode,
+					blocks_for_truncate(inode));
+		if (unlikely(err)) {
+			ext4_std_error(inode->i_sb, err);
+			return 1;
 		}
-		ext4_mark_inode_dirty(handle, inode);
-		ext4_truncate_restart_trans(handle, inode,
-			    blocks_for_truncate(inode));
 		if (bh) {
 			BUFFER_TRACE(bh, "retaking write access");
 			ext4_journal_get_write_access(handle, bh);
@@ -4349,6 +4378,7 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode,
 					   (__le32 *) bh->b_data,
 					   (__le32 *) bh->b_data + addr_per_block,
 					   depth);
+			brelse(bh);
 
 			/*
 			 * Everything below this this pointer has been
@@ -4859,7 +4889,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
 	}
 	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
 
-	ei->i_state_flags = 0;
+	ext4_clear_state_flags(ei);	/* Only relevant on 32-bit archs */
 	ei->i_dir_start_lookup = 0;
 	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
 	/* We now have enough fields to check if the inode was active or not.
@@ -5118,7 +5148,7 @@ static int ext4_do_update_inode(handle_t *handle,
 	if (ext4_inode_blocks_set(handle, raw_inode, ei))
 		goto out_brelse;
 	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
-	raw_inode->i_flags = cpu_to_le32(ei->i_flags);
+	raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
 	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
 	    cpu_to_le32(EXT4_OS_HURD))
 		raw_inode->i_file_acl_high =
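
A theme running through the inode.c and namei.c hunks is that journal helpers whose return values used to be silently ignored are now checked, with the error reported and the operation unwound. The shape of that fix, reduced to a userspace sketch (the step functions and error values are placeholders standing in for ext4_handle_dirty_metadata() and friends):

    #include <stdio.h>

    static int step_dirty(void)   { return 0; }
    static int step_mark(void)    { return 0; }
    static int step_restart(void) { return -5; /* simulated I/O error */ }

    /* Check every step; stop and report at the first failure. */
    static int clear_blocks_sketch(void)
    {
        int err;

        if ((err = step_dirty())) {
            fprintf(stderr, "dirty metadata failed: %d\n", err);
            return 1;
        }
        if ((err = step_mark())) {
            fprintf(stderr, "mark inode dirty failed: %d\n", err);
            return 1;
        }
        if ((err = step_restart())) {
            fprintf(stderr, "restart transaction failed: %d\n", err);
            return 1;
        }
        return 0;
    }

    int main(void)
    {
        printf("result=%d\n", clear_blocks_sketch()); /* fails on step 3 */
        return 0;
    }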
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 5b4d4e3a4d58..851f49b2f9d2 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2608,18 +2608,12 @@ int ext4_mb_release(struct super_block *sb)
 static inline int ext4_issue_discard(struct super_block *sb,
 		ext4_group_t block_group, ext4_grpblk_t block, int count)
 {
-	int ret;
 	ext4_fsblk_t discard_block;
 
 	discard_block = block + ext4_group_first_block_no(sb, block_group);
 	trace_ext4_discard_blocks(sb,
 			(unsigned long long) discard_block, count);
-	ret = sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
-	if (ret == -EOPNOTSUPP) {
-		ext4_warning(sb, "discard not supported, disabling");
-		clear_opt(EXT4_SB(sb)->s_mount_opt, DISCARD);
-	}
-	return ret;
+	return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
 }
 
 /*
@@ -2631,7 +2625,7 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
 	struct super_block *sb = journal->j_private;
 	struct ext4_buddy e4b;
 	struct ext4_group_info *db;
-	int err, count = 0, count2 = 0;
+	int err, ret, count = 0, count2 = 0;
 	struct ext4_free_data *entry;
 	struct list_head *l, *ltmp;
 
@@ -2641,9 +2635,15 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
 		mb_debug(1, "gonna free %u blocks in group %u (0x%p):",
 			 entry->count, entry->group, entry);
 
-		if (test_opt(sb, DISCARD))
-			ext4_issue_discard(sb, entry->group,
-					entry->start_blk, entry->count);
+		if (test_opt(sb, DISCARD)) {
+			ret = ext4_issue_discard(sb, entry->group,
+					entry->start_blk, entry->count);
+			if (unlikely(ret == -EOPNOTSUPP)) {
+				ext4_warning(sb, "discard not supported, "
+						 "disabling");
+				clear_opt(sb, DISCARD);
+			}
+		}
 
 		err = ext4_mb_load_buddy(sb, entry->group, &e4b);
 		/* we expect to find existing buddy because it's pinned */
@@ -3881,19 +3881,6 @@ repeat:
 	}
 }
 
-/*
- * finds all preallocated spaces and return blocks being freed to them
- * if preallocated space becomes full (no block is used from the space)
- * then the function frees space in buddy
- * XXX: at the moment, truncate (which is the only way to free blocks)
- * discards all preallocations
- */
-static void ext4_mb_return_to_preallocation(struct inode *inode,
-					struct ext4_buddy *e4b,
-					sector_t block, int count)
-{
-	BUG_ON(!list_empty(&EXT4_I(inode)->i_prealloc_list));
-}
 #ifdef CONFIG_EXT4_DEBUG
 static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
 {
@@ -4283,7 +4270,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
 	 * EDQUOT check, as blocks and quotas have been already
 	 * reserved when data being copied into pagecache.
 	 */
-	if (EXT4_I(ar->inode)->i_delalloc_reserved_flag)
+	if (ext4_test_inode_state(ar->inode, EXT4_STATE_DELALLOC_RESERVED))
 		ar->flags |= EXT4_MB_DELALLOC_RESERVED;
 	else {
 		/* Without delayed allocation we need to verify
@@ -4380,7 +4367,8 @@ out:
 	if (inquota && ar->len < inquota)
 		dquot_free_block(ar->inode, inquota - ar->len);
 	if (!ar->len) {
-		if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag)
+		if (!ext4_test_inode_state(ar->inode,
+					   EXT4_STATE_DELALLOC_RESERVED))
 			/* release all the reserved blocks if non delalloc */
 			percpu_counter_sub(&sbi->s_dirtyblocks_counter,
 						reserv_blks);
@@ -4626,7 +4614,11 @@ do_more:
 	 * blocks being freed are metadata. these blocks shouldn't
 	 * be used until this transaction is committed
 	 */
 	new_entry = kmem_cache_alloc(ext4_free_ext_cachep, GFP_NOFS);
+	if (!new_entry) {
+		err = -ENOMEM;
+		goto error_return;
+	}
 	new_entry->start_blk = bit;
 	new_entry->group  = block_group;
 	new_entry->count = count;
@@ -4643,7 +4635,6 @@ do_more:
 		ext4_lock_group(sb, block_group);
 		mb_clear_bits(bitmap_bh->b_data, bit, count);
 		mb_free_blocks(inode, &e4b, bit, count);
-		ext4_mb_return_to_preallocation(inode, &e4b, block, count);
 	}
 
 	ret = ext4_free_blks_count(sb, gdp) + count;
@@ -4718,8 +4709,6 @@ static int ext4_trim_extent(struct super_block *sb, int start, int count,
 	ext4_unlock_group(sb, group);
 
 	ret = ext4_issue_discard(sb, group, start, count);
-	if (ret)
-		ext4_std_error(sb, ret);
 
 	ext4_lock_group(sb, group);
 	mb_free_blocks(NULL, e4b, start, ex.fe_len);
@@ -4819,6 +4808,8 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
 	ext4_group_t group, ngroups = ext4_get_groups_count(sb);
 	ext4_grpblk_t cnt = 0, first_block, last_block;
 	uint64_t start, len, minlen, trimmed;
+	ext4_fsblk_t first_data_blk =
+			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
 	int ret = 0;
 
 	start = range->start >> sb->s_blocksize_bits;
@@ -4828,6 +4819,10 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
 
 	if (unlikely(minlen > EXT4_BLOCKS_PER_GROUP(sb)))
 		return -EINVAL;
+	if (start < first_data_blk) {
+		len -= first_data_blk - start;
+		start = first_data_blk;
+	}
 
 	/* Determine first and last group to examine based on start and len */
 	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start,
@@ -4851,7 +4846,7 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
 		if (len >= EXT4_BLOCKS_PER_GROUP(sb))
 			len -= (EXT4_BLOCKS_PER_GROUP(sb) - first_block);
 		else
-			last_block = len;
+			last_block = first_block + len;
 
 		if (e4b.bd_info->bb_free >= minlen) {
 			cnt = ext4_trim_all_free(sb, &e4b, first_block,
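
The FITRIM fixes above clamp a requested trim range so it starts no earlier than s_first_data_block (which, if I read the on-disk format right, is 1 on a 1K-blocksize filesystem, where block 0 precedes group 0). The clamp arithmetic on its own, with illustrative numbers:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long first_data_blk = 1;  /* s_first_data_block */
        unsigned long long start = 0;           /* requested start    */
        unsigned long long len = 1000;          /* requested length   */

        if (start < first_data_blk) {
            len -= first_data_blk - start;  /* shrink by the skipped span */
            start = first_data_blk;
        }
        printf("trim [%llu, +%llu)\n", start, len);  /* trim [1, +999) */
        return 0;
    }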
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index 25f3a974b725..b0a126f23c20 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -496,7 +496,7 @@ int ext4_ext_migrate(struct inode *inode)
 	goal = (((inode->i_ino - 1) / EXT4_INODES_PER_GROUP(inode->i_sb)) *
 		EXT4_INODES_PER_GROUP(inode->i_sb)) + 1;
 	tmp_inode = ext4_new_inode(handle, inode->i_sb->s_root->d_inode,
-				   S_IFREG, 0, goal);
+				   S_IFREG, NULL, goal);
 	if (IS_ERR(tmp_inode)) {
 		retval = -ENOMEM;
 		ext4_journal_stop(handle);
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index dc40e75cba88..5485390d32c5 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -581,9 +581,9 @@ static int htree_dirblock_to_tree(struct file *dir_file,
 			   dir->i_sb->s_blocksize -
 			   EXT4_DIR_REC_LEN(0));
 	for (; de < top; de = ext4_next_entry(de, dir->i_sb->s_blocksize)) {
-		if (!ext4_check_dir_entry(dir, de, bh,
-					(block<<EXT4_BLOCK_SIZE_BITS(dir->i_sb))
-						+((char *)de - bh->b_data))) {
+		if (ext4_check_dir_entry(dir, NULL, de, bh,
+				(block<<EXT4_BLOCK_SIZE_BITS(dir->i_sb))
+					+ ((char *)de - bh->b_data))) {
 			/* On error, skip the f_pos to the next block. */
 			dir_file->f_pos = (dir_file->f_pos |
 					(dir->i_sb->s_blocksize - 1)) + 1;
@@ -820,7 +820,7 @@ static inline int search_dirblock(struct buffer_head *bh,
 		if ((char *) de + namelen <= dlimit &&
 		    ext4_match (namelen, name, de)) {
 			/* found a match - just to be sure, do a full check */
-			if (!ext4_check_dir_entry(dir, de, bh, offset))
+			if (ext4_check_dir_entry(dir, NULL, de, bh, offset))
 				return -1;
 			*res_dir = de;
 			return 1;
@@ -1036,7 +1036,7 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, stru
 		return ERR_PTR(-EIO);
 	}
 	inode = ext4_iget(dir->i_sb, ino);
-	if (unlikely(IS_ERR(inode))) {
+	if (IS_ERR(inode)) {
 		if (PTR_ERR(inode) == -ESTALE) {
 			EXT4_ERROR_INODE(dir,
 					 "deleted inode referenced: %u",
@@ -1269,7 +1269,7 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
 	de = (struct ext4_dir_entry_2 *)bh->b_data;
 	top = bh->b_data + blocksize - reclen;
 	while ((char *) de <= top) {
-		if (!ext4_check_dir_entry(dir, de, bh, offset))
+		if (ext4_check_dir_entry(dir, NULL, de, bh, offset))
 			return -EIO;
 		if (ext4_match(namelen, name, de))
 			return -EEXIST;
@@ -1602,7 +1602,11 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
1602 if (err) 1602 if (err)
1603 goto journal_error; 1603 goto journal_error;
1604 } 1604 }
1605 ext4_handle_dirty_metadata(handle, inode, frames[0].bh); 1605 err = ext4_handle_dirty_metadata(handle, inode, frames[0].bh);
1606 if (err) {
1607 ext4_std_error(inode->i_sb, err);
1608 goto cleanup;
1609 }
1606 } 1610 }
1607 de = do_split(handle, dir, &bh, frame, &hinfo, &err); 1611 de = do_split(handle, dir, &bh, frame, &hinfo, &err);
1608 if (!de) 1612 if (!de)
@@ -1630,17 +1634,21 @@ static int ext4_delete_entry(handle_t *handle,
1630{ 1634{
1631 struct ext4_dir_entry_2 *de, *pde; 1635 struct ext4_dir_entry_2 *de, *pde;
1632 unsigned int blocksize = dir->i_sb->s_blocksize; 1636 unsigned int blocksize = dir->i_sb->s_blocksize;
1633 int i; 1637 int i, err;
1634 1638
1635 i = 0; 1639 i = 0;
1636 pde = NULL; 1640 pde = NULL;
1637 de = (struct ext4_dir_entry_2 *) bh->b_data; 1641 de = (struct ext4_dir_entry_2 *) bh->b_data;
1638 while (i < bh->b_size) { 1642 while (i < bh->b_size) {
1639 if (!ext4_check_dir_entry(dir, de, bh, i)) 1643 if (ext4_check_dir_entry(dir, NULL, de, bh, i))
1640 return -EIO; 1644 return -EIO;
1641 if (de == de_del) { 1645 if (de == de_del) {
1642 BUFFER_TRACE(bh, "get_write_access"); 1646 BUFFER_TRACE(bh, "get_write_access");
1643 ext4_journal_get_write_access(handle, bh); 1647 err = ext4_journal_get_write_access(handle, bh);
1648 if (unlikely(err)) {
1649 ext4_std_error(dir->i_sb, err);
1650 return err;
1651 }
1644 if (pde) 1652 if (pde)
1645 pde->rec_len = ext4_rec_len_to_disk( 1653 pde->rec_len = ext4_rec_len_to_disk(
1646 ext4_rec_len_from_disk(pde->rec_len, 1654 ext4_rec_len_from_disk(pde->rec_len,
@@ -1652,7 +1660,11 @@ static int ext4_delete_entry(handle_t *handle,
1652 de->inode = 0; 1660 de->inode = 0;
1653 dir->i_version++; 1661 dir->i_version++;
1654 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); 1662 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
1655 ext4_handle_dirty_metadata(handle, dir, bh); 1663 err = ext4_handle_dirty_metadata(handle, dir, bh);
1664 if (unlikely(err)) {
1665 ext4_std_error(dir->i_sb, err);
1666 return err;
1667 }
1656 return 0; 1668 return 0;
1657 } 1669 }
1658 i += ext4_rec_len_from_disk(de->rec_len, blocksize); 1670 i += ext4_rec_len_from_disk(de->rec_len, blocksize);
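
The error handling added here and in the hunks below follows one pattern: ext4_journal_get_write_access() and ext4_handle_dirty_metadata() return an error code that the old code silently dropped; each call site now captures it, reports it through ext4_std_error() (which applies the filesystem's errors= policy), and unwinds. Condensed from these hunks:

    err = ext4_journal_get_write_access(handle, bh);
    if (unlikely(err)) {
            ext4_std_error(sb, err);        /* log + errors= policy */
            goto cleanup;                   /* drop what we hold */
    }
    /* ... modify bh->b_data under the handle ... */
    err = ext4_handle_dirty_metadata(handle, inode, bh);
    if (unlikely(err)) {
            ext4_std_error(sb, err);
            goto cleanup;
    }
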
@@ -1789,7 +1801,7 @@ static int ext4_mkdir(struct inode *dir, struct dentry *dentry, int mode)
1789{ 1801{
1790 handle_t *handle; 1802 handle_t *handle;
1791 struct inode *inode; 1803 struct inode *inode;
1792 struct buffer_head *dir_block; 1804 struct buffer_head *dir_block = NULL;
1793 struct ext4_dir_entry_2 *de; 1805 struct ext4_dir_entry_2 *de;
1794 unsigned int blocksize = dir->i_sb->s_blocksize; 1806 unsigned int blocksize = dir->i_sb->s_blocksize;
1795 int err, retries = 0; 1807 int err, retries = 0;
@@ -1822,7 +1834,9 @@ retry:
1822 if (!dir_block) 1834 if (!dir_block)
1823 goto out_clear_inode; 1835 goto out_clear_inode;
1824 BUFFER_TRACE(dir_block, "get_write_access"); 1836 BUFFER_TRACE(dir_block, "get_write_access");
1825 ext4_journal_get_write_access(handle, dir_block); 1837 err = ext4_journal_get_write_access(handle, dir_block);
1838 if (err)
1839 goto out_clear_inode;
1826 de = (struct ext4_dir_entry_2 *) dir_block->b_data; 1840 de = (struct ext4_dir_entry_2 *) dir_block->b_data;
1827 de->inode = cpu_to_le32(inode->i_ino); 1841 de->inode = cpu_to_le32(inode->i_ino);
1828 de->name_len = 1; 1842 de->name_len = 1;
@@ -1839,10 +1853,12 @@ retry:
1839 ext4_set_de_type(dir->i_sb, de, S_IFDIR); 1853 ext4_set_de_type(dir->i_sb, de, S_IFDIR);
1840 inode->i_nlink = 2; 1854 inode->i_nlink = 2;
1841 BUFFER_TRACE(dir_block, "call ext4_handle_dirty_metadata"); 1855 BUFFER_TRACE(dir_block, "call ext4_handle_dirty_metadata");
1842 ext4_handle_dirty_metadata(handle, dir, dir_block); 1856 err = ext4_handle_dirty_metadata(handle, dir, dir_block);
1843 brelse(dir_block); 1857 if (err)
1844 ext4_mark_inode_dirty(handle, inode); 1858 goto out_clear_inode;
1845 err = ext4_add_entry(handle, dentry, inode); 1859 err = ext4_mark_inode_dirty(handle, inode);
1860 if (!err)
1861 err = ext4_add_entry(handle, dentry, inode);
1846 if (err) { 1862 if (err) {
1847out_clear_inode: 1863out_clear_inode:
1848 clear_nlink(inode); 1864 clear_nlink(inode);
@@ -1853,10 +1869,13 @@ out_clear_inode:
1853 } 1869 }
1854 ext4_inc_count(handle, dir); 1870 ext4_inc_count(handle, dir);
1855 ext4_update_dx_flag(dir); 1871 ext4_update_dx_flag(dir);
1856 ext4_mark_inode_dirty(handle, dir); 1872 err = ext4_mark_inode_dirty(handle, dir);
1873 if (err)
1874 goto out_clear_inode;
1857 d_instantiate(dentry, inode); 1875 d_instantiate(dentry, inode);
1858 unlock_new_inode(inode); 1876 unlock_new_inode(inode);
1859out_stop: 1877out_stop:
1878 brelse(dir_block);
1860 ext4_journal_stop(handle); 1879 ext4_journal_stop(handle);
1861 if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries)) 1880 if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
1862 goto retry; 1881 goto retry;
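
The ext4_mkdir() hunks also restructure cleanup: dir_block now starts as NULL and is released exactly once at the common exit, so the new error paths can jump out without tracking whether the buffer was ever read. brelse(NULL) is a no-op, which is what makes the single release point safe. Condensed:

    struct buffer_head *dir_block = NULL;
    ...
    dir_block = ext4_bread(handle, inode, 0, 1, &err);
    if (!dir_block)
            goto out_clear_inode;           /* falls through to out_stop */
    /* ... set up "." and ".." ... */
    out_stop:
            brelse(dir_block);              /* no-op while still NULL */
            ext4_journal_stop(handle);
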
@@ -1919,7 +1938,7 @@ static int empty_dir(struct inode *inode)
1919 } 1938 }
1920 de = (struct ext4_dir_entry_2 *) bh->b_data; 1939 de = (struct ext4_dir_entry_2 *) bh->b_data;
1921 } 1940 }
1922 if (!ext4_check_dir_entry(inode, de, bh, offset)) { 1941 if (ext4_check_dir_entry(inode, NULL, de, bh, offset)) {
1923 de = (struct ext4_dir_entry_2 *)(bh->b_data + 1942 de = (struct ext4_dir_entry_2 *)(bh->b_data +
1924 sb->s_blocksize); 1943 sb->s_blocksize);
1925 offset = (offset | (sb->s_blocksize - 1)) + 1; 1944 offset = (offset | (sb->s_blocksize - 1)) + 1;
@@ -2407,7 +2426,11 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
2407 ext4_current_time(new_dir); 2426 ext4_current_time(new_dir);
2408 ext4_mark_inode_dirty(handle, new_dir); 2427 ext4_mark_inode_dirty(handle, new_dir);
2409 BUFFER_TRACE(new_bh, "call ext4_handle_dirty_metadata"); 2428 BUFFER_TRACE(new_bh, "call ext4_handle_dirty_metadata");
2410 ext4_handle_dirty_metadata(handle, new_dir, new_bh); 2429 retval = ext4_handle_dirty_metadata(handle, new_dir, new_bh);
2430 if (unlikely(retval)) {
2431 ext4_std_error(new_dir->i_sb, retval);
2432 goto end_rename;
2433 }
2411 brelse(new_bh); 2434 brelse(new_bh);
2412 new_bh = NULL; 2435 new_bh = NULL;
2413 } 2436 }
@@ -2459,7 +2482,11 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
2459 PARENT_INO(dir_bh->b_data, new_dir->i_sb->s_blocksize) = 2482 PARENT_INO(dir_bh->b_data, new_dir->i_sb->s_blocksize) =
2460 cpu_to_le32(new_dir->i_ino); 2483 cpu_to_le32(new_dir->i_ino);
2461 BUFFER_TRACE(dir_bh, "call ext4_handle_dirty_metadata"); 2484 BUFFER_TRACE(dir_bh, "call ext4_handle_dirty_metadata");
2462 ext4_handle_dirty_metadata(handle, old_dir, dir_bh); 2485 retval = ext4_handle_dirty_metadata(handle, old_dir, dir_bh);
2486 if (retval) {
2487 ext4_std_error(old_dir->i_sb, retval);
2488 goto end_rename;
2489 }
2463 ext4_dec_count(handle, old_dir); 2490 ext4_dec_count(handle, old_dir);
2464 if (new_inode) { 2491 if (new_inode) {
2465 /* checked empty_dir above, can't have another parent, 2492 /* checked empty_dir above, can't have another parent,
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index beacce11ac50..7270dcfca92a 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -44,7 +44,7 @@ int __init ext4_init_pageio(void)
44 if (io_page_cachep == NULL) 44 if (io_page_cachep == NULL)
45 return -ENOMEM; 45 return -ENOMEM;
46 io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT); 46 io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
47 if (io_page_cachep == NULL) { 47 if (io_end_cachep == NULL) {
48 kmem_cache_destroy(io_page_cachep); 48 kmem_cache_destroy(io_page_cachep);
49 return -ENOMEM; 49 return -ENOMEM;
50 } 50 }
@@ -158,11 +158,8 @@ static void ext4_end_io_work(struct work_struct *work)
158 158
159ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags) 159ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
160{ 160{
161 ext4_io_end_t *io = NULL; 161 ext4_io_end_t *io = kmem_cache_zalloc(io_end_cachep, flags);
162
163 io = kmem_cache_alloc(io_end_cachep, flags);
164 if (io) { 162 if (io) {
165 memset(io, 0, sizeof(*io));
166 atomic_inc(&EXT4_I(inode)->i_ioend_count); 163 atomic_inc(&EXT4_I(inode)->i_ioend_count);
167 io->inode = inode; 164 io->inode = inode;
168 INIT_WORK(&io->work, ext4_end_io_work); 165 INIT_WORK(&io->work, ext4_end_io_work);
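
Two separate fixes in page-io.c. The first is a plain typo: the failure check after creating io_end_cachep tested io_page_cachep, so an allocation failure of the second cache went undetected. The second folds an open-coded allocate-then-zero into kmem_cache_zalloc(), which applies __GFP_ZERO for the caller:

    /* Before: zero the object by hand. */
    io = kmem_cache_alloc(io_end_cachep, flags);
    if (io)
            memset(io, 0, sizeof(*io));

    /* After: one call allocates and zeroes. */
    io = kmem_cache_zalloc(io_end_cachep, flags);
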
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 981c8477adab..3ecc6e45d2f9 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -220,7 +220,11 @@ static int setup_new_group_blocks(struct super_block *sb,
220 memcpy(gdb->b_data, sbi->s_group_desc[i]->b_data, gdb->b_size); 220 memcpy(gdb->b_data, sbi->s_group_desc[i]->b_data, gdb->b_size);
221 set_buffer_uptodate(gdb); 221 set_buffer_uptodate(gdb);
222 unlock_buffer(gdb); 222 unlock_buffer(gdb);
223 ext4_handle_dirty_metadata(handle, NULL, gdb); 223 err = ext4_handle_dirty_metadata(handle, NULL, gdb);
224 if (unlikely(err)) {
225 brelse(gdb);
226 goto exit_bh;
227 }
224 ext4_set_bit(bit, bh->b_data); 228 ext4_set_bit(bit, bh->b_data);
225 brelse(gdb); 229 brelse(gdb);
226 } 230 }
@@ -258,7 +262,11 @@ static int setup_new_group_blocks(struct super_block *sb,
258 262
259 ext4_mark_bitmap_end(input->blocks_count, sb->s_blocksize * 8, 263 ext4_mark_bitmap_end(input->blocks_count, sb->s_blocksize * 8,
260 bh->b_data); 264 bh->b_data);
261 ext4_handle_dirty_metadata(handle, NULL, bh); 265 err = ext4_handle_dirty_metadata(handle, NULL, bh);
266 if (unlikely(err)) {
267 ext4_std_error(sb, err);
268 goto exit_bh;
269 }
262 brelse(bh); 270 brelse(bh);
263 /* Mark unused entries in inode bitmap used */ 271 /* Mark unused entries in inode bitmap used */
264 ext4_debug("clear inode bitmap %#04llx (+%llu)\n", 272 ext4_debug("clear inode bitmap %#04llx (+%llu)\n",
@@ -270,7 +278,9 @@ static int setup_new_group_blocks(struct super_block *sb,
270 278
271 ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8, 279 ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
272 bh->b_data); 280 bh->b_data);
273 ext4_handle_dirty_metadata(handle, NULL, bh); 281 err = ext4_handle_dirty_metadata(handle, NULL, bh);
282 if (unlikely(err))
283 ext4_std_error(sb, err);
274exit_bh: 284exit_bh:
275 brelse(bh); 285 brelse(bh);
276 286
@@ -422,17 +432,21 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
422 goto exit_dind; 432 goto exit_dind;
423 } 433 }
424 434
425 if ((err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh))) 435 err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
436 if (unlikely(err))
426 goto exit_dind; 437 goto exit_dind;
427 438
428 if ((err = ext4_journal_get_write_access(handle, *primary))) 439 err = ext4_journal_get_write_access(handle, *primary);
440 if (unlikely(err))
429 goto exit_sbh; 441 goto exit_sbh;
430 442
431 if ((err = ext4_journal_get_write_access(handle, dind))) 443 err = ext4_journal_get_write_access(handle, dind);
432 goto exit_primary; 444 if (unlikely(err))
445 ext4_std_error(sb, err);
433 446
434 /* ext4_reserve_inode_write() gets a reference on the iloc */ 447 /* ext4_reserve_inode_write() gets a reference on the iloc */
435 if ((err = ext4_reserve_inode_write(handle, inode, &iloc))) 448 err = ext4_reserve_inode_write(handle, inode, &iloc);
449 if (unlikely(err))
436 goto exit_dindj; 450 goto exit_dindj;
437 451
438 n_group_desc = kmalloc((gdb_num + 1) * sizeof(struct buffer_head *), 452 n_group_desc = kmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
@@ -454,12 +468,20 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
454 * reserved inode, and will become GDT blocks (primary and backup). 468 * reserved inode, and will become GDT blocks (primary and backup).
455 */ 469 */
456 data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)] = 0; 470 data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)] = 0;
457 ext4_handle_dirty_metadata(handle, NULL, dind); 471 err = ext4_handle_dirty_metadata(handle, NULL, dind);
458 brelse(dind); 472 if (unlikely(err)) {
473 ext4_std_error(sb, err);
474 goto exit_inode;
475 }
459 inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >> 9; 476 inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >> 9;
460 ext4_mark_iloc_dirty(handle, inode, &iloc); 477 ext4_mark_iloc_dirty(handle, inode, &iloc);
461 memset((*primary)->b_data, 0, sb->s_blocksize); 478 memset((*primary)->b_data, 0, sb->s_blocksize);
462 ext4_handle_dirty_metadata(handle, NULL, *primary); 479 err = ext4_handle_dirty_metadata(handle, NULL, *primary);
480 if (unlikely(err)) {
481 ext4_std_error(sb, err);
482 goto exit_inode;
483 }
484 brelse(dind);
463 485
464 o_group_desc = EXT4_SB(sb)->s_group_desc; 486 o_group_desc = EXT4_SB(sb)->s_group_desc;
465 memcpy(n_group_desc, o_group_desc, 487 memcpy(n_group_desc, o_group_desc,
@@ -470,19 +492,19 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
470 kfree(o_group_desc); 492 kfree(o_group_desc);
471 493
472 le16_add_cpu(&es->s_reserved_gdt_blocks, -1); 494 le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
473 ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh); 495 err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
496 if (err)
497 ext4_std_error(sb, err);
474 498
475 return 0; 499 return err;
476 500
477exit_inode: 501exit_inode:
478 /* ext4_journal_release_buffer(handle, iloc.bh); */ 502 /* ext4_journal_release_buffer(handle, iloc.bh); */
479 brelse(iloc.bh); 503 brelse(iloc.bh);
480exit_dindj: 504exit_dindj:
481 /* ext4_journal_release_buffer(handle, dind); */ 505 /* ext4_journal_release_buffer(handle, dind); */
482exit_primary:
483 /* ext4_journal_release_buffer(handle, *primary); */
484exit_sbh: 506exit_sbh:
485 /* ext4_journal_release_buffer(handle, *primary); */ 507 /* ext4_journal_release_buffer(handle, EXT4_SB(sb)->s_sbh); */
486exit_dind: 508exit_dind:
487 brelse(dind); 509 brelse(dind);
488exit_bh: 510exit_bh:
@@ -665,7 +687,9 @@ static void update_backups(struct super_block *sb,
665 memset(bh->b_data + size, 0, rest); 687 memset(bh->b_data + size, 0, rest);
666 set_buffer_uptodate(bh); 688 set_buffer_uptodate(bh);
667 unlock_buffer(bh); 689 unlock_buffer(bh);
668 ext4_handle_dirty_metadata(handle, NULL, bh); 690 err = ext4_handle_dirty_metadata(handle, NULL, bh);
691 if (unlikely(err))
692 ext4_std_error(sb, err);
669 brelse(bh); 693 brelse(bh);
670 } 694 }
671 if ((err2 = ext4_journal_stop(handle)) && !err) 695 if ((err2 = ext4_journal_stop(handle)) && !err)
@@ -883,7 +907,11 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
883 /* Update the global fs size fields */ 907 /* Update the global fs size fields */
884 sbi->s_groups_count++; 908 sbi->s_groups_count++;
885 909
886 ext4_handle_dirty_metadata(handle, NULL, primary); 910 err = ext4_handle_dirty_metadata(handle, NULL, primary);
911 if (unlikely(err)) {
912 ext4_std_error(sb, err);
913 goto exit_journal;
914 }
887 915
888 /* Update the reserved block counts only once the new group is 916 /* Update the reserved block counts only once the new group is
889 * active. */ 917 * active. */
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index cd37f9d5e447..29c80f6d8b27 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -388,13 +388,14 @@ static void ext4_handle_error(struct super_block *sb)
388void __ext4_error(struct super_block *sb, const char *function, 388void __ext4_error(struct super_block *sb, const char *function,
389 unsigned int line, const char *fmt, ...) 389 unsigned int line, const char *fmt, ...)
390{ 390{
391 struct va_format vaf;
391 va_list args; 392 va_list args;
392 393
393 va_start(args, fmt); 394 va_start(args, fmt);
394 printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: comm %s: ", 395 vaf.fmt = fmt;
395 sb->s_id, function, line, current->comm); 396 vaf.va = &args;
396 vprintk(fmt, args); 397 printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
397 printk("\n"); 398 sb->s_id, function, line, current->comm, &vaf);
398 va_end(args); 399 va_end(args);
399 400
400 ext4_handle_error(sb); 401 ext4_handle_error(sb);
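
All of the super.c message helpers converted in the following hunks move from a printk/vprintk/printk("\n") triple to the kernel's %pV extension: the format string and va_list are wrapped in a struct va_format and emitted by a single printk(), so the whole message reaches the log atomically instead of risking interleaving with output from other CPUs. A minimal sketch of the idiom:

    /* %pV sketch -- a kernel-only printf extension. */
    static void my_error(const char *fmt, ...)
    {
            struct va_format vaf;
            va_list args;

            va_start(args, fmt);
            vaf.fmt = fmt;
            vaf.va = &args;         /* consumed when %pV is formatted */
            printk(KERN_CRIT "my-fs error: %pV\n", &vaf);
            va_end(args);
    }
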
@@ -405,28 +406,31 @@ void ext4_error_inode(struct inode *inode, const char *function,
405 const char *fmt, ...) 406 const char *fmt, ...)
406{ 407{
407 va_list args; 408 va_list args;
409 struct va_format vaf;
408 struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es; 410 struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
409 411
410 es->s_last_error_ino = cpu_to_le32(inode->i_ino); 412 es->s_last_error_ino = cpu_to_le32(inode->i_ino);
411 es->s_last_error_block = cpu_to_le64(block); 413 es->s_last_error_block = cpu_to_le64(block);
412 save_error_info(inode->i_sb, function, line); 414 save_error_info(inode->i_sb, function, line);
413 va_start(args, fmt); 415 va_start(args, fmt);
416 vaf.fmt = fmt;
417 vaf.va = &args;
414 printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: inode #%lu: ", 418 printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: inode #%lu: ",
415 inode->i_sb->s_id, function, line, inode->i_ino); 419 inode->i_sb->s_id, function, line, inode->i_ino);
416 if (block) 420 if (block)
417 printk("block %llu: ", block); 421 printk(KERN_CONT "block %llu: ", block);
418 printk("comm %s: ", current->comm); 422 printk(KERN_CONT "comm %s: %pV\n", current->comm, &vaf);
419 vprintk(fmt, args);
420 printk("\n");
421 va_end(args); 423 va_end(args);
422 424
423 ext4_handle_error(inode->i_sb); 425 ext4_handle_error(inode->i_sb);
424} 426}
425 427
426void ext4_error_file(struct file *file, const char *function, 428void ext4_error_file(struct file *file, const char *function,
427 unsigned int line, const char *fmt, ...) 429 unsigned int line, ext4_fsblk_t block,
430 const char *fmt, ...)
428{ 431{
429 va_list args; 432 va_list args;
433 struct va_format vaf;
430 struct ext4_super_block *es; 434 struct ext4_super_block *es;
431 struct inode *inode = file->f_dentry->d_inode; 435 struct inode *inode = file->f_dentry->d_inode;
432 char pathname[80], *path; 436 char pathname[80], *path;
@@ -434,17 +438,18 @@ void ext4_error_file(struct file *file, const char *function,
434 es = EXT4_SB(inode->i_sb)->s_es; 438 es = EXT4_SB(inode->i_sb)->s_es;
435 es->s_last_error_ino = cpu_to_le32(inode->i_ino); 439 es->s_last_error_ino = cpu_to_le32(inode->i_ino);
436 save_error_info(inode->i_sb, function, line); 440 save_error_info(inode->i_sb, function, line);
437 va_start(args, fmt);
438 path = d_path(&(file->f_path), pathname, sizeof(pathname)); 441 path = d_path(&(file->f_path), pathname, sizeof(pathname));
439 if (!path) 442 if (IS_ERR(path))
440 path = "(unknown)"; 443 path = "(unknown)";
441 printk(KERN_CRIT 444 printk(KERN_CRIT
442 "EXT4-fs error (device %s): %s:%d: inode #%lu " 445 "EXT4-fs error (device %s): %s:%d: inode #%lu: ",
443 "(comm %s path %s): ", 446 inode->i_sb->s_id, function, line, inode->i_ino);
444 inode->i_sb->s_id, function, line, inode->i_ino, 447 if (block)
445 current->comm, path); 448 printk(KERN_CONT "block %llu: ", block);
446 vprintk(fmt, args); 449 va_start(args, fmt);
447 printk("\n"); 450 vaf.fmt = fmt;
451 vaf.va = &args;
452 printk(KERN_CONT "comm %s: path %s: %pV\n", current->comm, path, &vaf);
448 va_end(args); 453 va_end(args);
449 454
450 ext4_handle_error(inode->i_sb); 455 ext4_handle_error(inode->i_sb);
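
ext4_error_file() collects three fixes at once: it gains a block argument so it matches ext4_error_inode(), va_start() moves down next to the only use of the va_list, and the d_path() failure test becomes IS_ERR(path). The last one is a real bug fix, since d_path() reports failure with an ERR_PTR, never with NULL, so the old !path test could not fire:

    path = d_path(&file->f_path, pathname, sizeof(pathname));
    if (IS_ERR(path))               /* d_path() returns ERR_PTR(-errno) */
            path = "(unknown)";
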
@@ -543,28 +548,29 @@ void __ext4_abort(struct super_block *sb, const char *function,
543 panic("EXT4-fs panic from previous error\n"); 548 panic("EXT4-fs panic from previous error\n");
544} 549}
545 550
546void ext4_msg (struct super_block * sb, const char *prefix, 551void ext4_msg(struct super_block *sb, const char *prefix, const char *fmt, ...)
547 const char *fmt, ...)
548{ 552{
553 struct va_format vaf;
549 va_list args; 554 va_list args;
550 555
551 va_start(args, fmt); 556 va_start(args, fmt);
552 printk("%sEXT4-fs (%s): ", prefix, sb->s_id); 557 vaf.fmt = fmt;
553 vprintk(fmt, args); 558 vaf.va = &args;
554 printk("\n"); 559 printk("%sEXT4-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
555 va_end(args); 560 va_end(args);
556} 561}
557 562
558void __ext4_warning(struct super_block *sb, const char *function, 563void __ext4_warning(struct super_block *sb, const char *function,
559 unsigned int line, const char *fmt, ...) 564 unsigned int line, const char *fmt, ...)
560{ 565{
566 struct va_format vaf;
561 va_list args; 567 va_list args;
562 568
563 va_start(args, fmt); 569 va_start(args, fmt);
564 printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: ", 570 vaf.fmt = fmt;
565 sb->s_id, function, line); 571 vaf.va = &args;
566 vprintk(fmt, args); 572 printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: %pV\n",
567 printk("\n"); 573 sb->s_id, function, line, &vaf);
568 va_end(args); 574 va_end(args);
569} 575}
570 576
@@ -575,21 +581,25 @@ void __ext4_grp_locked_error(const char *function, unsigned int line,
575__releases(bitlock) 581__releases(bitlock)
576__acquires(bitlock) 582__acquires(bitlock)
577{ 583{
584 struct va_format vaf;
578 va_list args; 585 va_list args;
579 struct ext4_super_block *es = EXT4_SB(sb)->s_es; 586 struct ext4_super_block *es = EXT4_SB(sb)->s_es;
580 587
581 es->s_last_error_ino = cpu_to_le32(ino); 588 es->s_last_error_ino = cpu_to_le32(ino);
582 es->s_last_error_block = cpu_to_le64(block); 589 es->s_last_error_block = cpu_to_le64(block);
583 __save_error_info(sb, function, line); 590 __save_error_info(sb, function, line);
591
584 va_start(args, fmt); 592 va_start(args, fmt);
593
594 vaf.fmt = fmt;
595 vaf.va = &args;
585 printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u", 596 printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u",
586 sb->s_id, function, line, grp); 597 sb->s_id, function, line, grp);
587 if (ino) 598 if (ino)
588 printk("inode %lu: ", ino); 599 printk(KERN_CONT "inode %lu: ", ino);
589 if (block) 600 if (block)
590 printk("block %llu:", (unsigned long long) block); 601 printk(KERN_CONT "block %llu:", (unsigned long long) block);
591 vprintk(fmt, args); 602 printk(KERN_CONT "%pV\n", &vaf);
592 printk("\n");
593 va_end(args); 603 va_end(args);
594 604
595 if (test_opt(sb, ERRORS_CONT)) { 605 if (test_opt(sb, ERRORS_CONT)) {
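
__ext4_grp_locked_error() keeps assembling its message piecewise because the inode and block parts are optional, but each continuation fragment now carries KERN_CONT so the log layer appends it to the open line rather than starting a new record; only the final fragment formats the caller's arguments via %pV:

    printk(KERN_CRIT "EXT4-fs error ... group %u", grp);
    if (ino)
            printk(KERN_CONT "inode %lu: ", ino);   /* same log line */
    printk(KERN_CONT "%pV\n", &vaf);                /* close the line */
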
@@ -808,21 +818,15 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
808 memset(&ei->i_cached_extent, 0, sizeof(struct ext4_ext_cache)); 818 memset(&ei->i_cached_extent, 0, sizeof(struct ext4_ext_cache));
809 INIT_LIST_HEAD(&ei->i_prealloc_list); 819 INIT_LIST_HEAD(&ei->i_prealloc_list);
810 spin_lock_init(&ei->i_prealloc_lock); 820 spin_lock_init(&ei->i_prealloc_lock);
811 /*
812 * Note: We can be called before EXT4_SB(sb)->s_journal is set,
813 * therefore it can be null here. Don't check it, just initialize
814 * jinode.
815 */
816 jbd2_journal_init_jbd_inode(&ei->jinode, &ei->vfs_inode);
817 ei->i_reserved_data_blocks = 0; 821 ei->i_reserved_data_blocks = 0;
818 ei->i_reserved_meta_blocks = 0; 822 ei->i_reserved_meta_blocks = 0;
819 ei->i_allocated_meta_blocks = 0; 823 ei->i_allocated_meta_blocks = 0;
820 ei->i_da_metadata_calc_len = 0; 824 ei->i_da_metadata_calc_len = 0;
821 ei->i_delalloc_reserved_flag = 0;
822 spin_lock_init(&(ei->i_block_reservation_lock)); 825 spin_lock_init(&(ei->i_block_reservation_lock));
823#ifdef CONFIG_QUOTA 826#ifdef CONFIG_QUOTA
824 ei->i_reserved_quota = 0; 827 ei->i_reserved_quota = 0;
825#endif 828#endif
829 ei->jinode = NULL;
826 INIT_LIST_HEAD(&ei->i_completed_io_list); 830 INIT_LIST_HEAD(&ei->i_completed_io_list);
827 spin_lock_init(&ei->i_completed_io_lock); 831 spin_lock_init(&ei->i_completed_io_lock);
828 ei->cur_aio_dio = NULL; 832 ei->cur_aio_dio = NULL;
@@ -898,9 +902,12 @@ void ext4_clear_inode(struct inode *inode)
898 end_writeback(inode); 902 end_writeback(inode);
899 dquot_drop(inode); 903 dquot_drop(inode);
900 ext4_discard_preallocations(inode); 904 ext4_discard_preallocations(inode);
901 if (EXT4_JOURNAL(inode)) 905 if (EXT4_I(inode)->jinode) {
902 jbd2_journal_release_jbd_inode(EXT4_SB(inode->i_sb)->s_journal, 906 jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode),
903 &EXT4_I(inode)->jinode); 907 EXT4_I(inode)->jinode);
908 jbd2_free_inode(EXT4_I(inode)->jinode);
909 EXT4_I(inode)->jinode = NULL;
910 }
904} 911}
905 912
906static inline void ext4_show_quota_options(struct seq_file *seq, 913static inline void ext4_show_quota_options(struct seq_file *seq,
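
The ext4_alloc_inode()/ext4_clear_inode() pair stops embedding a jbd2 inode in every ext4 inode: jinode becomes a pointer that stays NULL until the inode is actually journalled, and eviction releases and frees it only if it was ever allocated (the backing jbd2_inode_cache appears in the jbd2 hunks further down). The allocation side lives elsewhere in this series and is not shown here; a hedged sketch of its likely shape, with jbd2_alloc_inode() assumed as the counterpart of the jbd2_free_inode() call visible above:

    /* Hedged sketch -- allocation on first use (not in this diff). */
    if (!EXT4_I(inode)->jinode) {
            struct jbd2_inode *ji = jbd2_alloc_inode(GFP_KERNEL);
            if (!ji)
                    return -ENOMEM;
            jbd2_journal_init_jbd_inode(ji, inode);
            EXT4_I(inode)->jinode = ji;
    }
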
@@ -1393,7 +1400,7 @@ static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
1393 sbi->s_qf_names[qtype] = NULL; 1400 sbi->s_qf_names[qtype] = NULL;
1394 return 0; 1401 return 0;
1395 } 1402 }
1396 set_opt(sbi->s_mount_opt, QUOTA); 1403 set_opt(sb, QUOTA);
1397 return 1; 1404 return 1;
1398} 1405}
1399 1406
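
From here to the end of super.c, every set_opt()/clear_opt() call swaps its first argument from sbi->s_mount_opt to the superblock itself. The point becomes visible in the remount hunks below: there is now a second option word, s_mount_opt2, and passing the superblock lets the macros (and a matching set_opt2()/clear_opt2() pair) reach whichever word an option lives in. A plausible shape for the reworked macros (sketch, not the exact ext4 definitions):

    #define set_opt(sb, opt)                                        \
            (EXT4_SB(sb)->s_mount_opt |= EXT4_MOUNT_##opt)
    #define clear_opt(sb, opt)                                      \
            (EXT4_SB(sb)->s_mount_opt &= ~EXT4_MOUNT_##opt)
    #define set_opt2(sb, opt)                                       \
            (EXT4_SB(sb)->s_mount_opt2 |= EXT4_MOUNT2_##opt)
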
@@ -1448,21 +1455,21 @@ static int parse_options(char *options, struct super_block *sb,
1448 switch (token) { 1455 switch (token) {
1449 case Opt_bsd_df: 1456 case Opt_bsd_df:
1450 ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38"); 1457 ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
1451 clear_opt(sbi->s_mount_opt, MINIX_DF); 1458 clear_opt(sb, MINIX_DF);
1452 break; 1459 break;
1453 case Opt_minix_df: 1460 case Opt_minix_df:
1454 ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38"); 1461 ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
1455 set_opt(sbi->s_mount_opt, MINIX_DF); 1462 set_opt(sb, MINIX_DF);
1456 1463
1457 break; 1464 break;
1458 case Opt_grpid: 1465 case Opt_grpid:
1459 ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38"); 1466 ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
1460 set_opt(sbi->s_mount_opt, GRPID); 1467 set_opt(sb, GRPID);
1461 1468
1462 break; 1469 break;
1463 case Opt_nogrpid: 1470 case Opt_nogrpid:
1464 ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38"); 1471 ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
1465 clear_opt(sbi->s_mount_opt, GRPID); 1472 clear_opt(sb, GRPID);
1466 1473
1467 break; 1474 break;
1468 case Opt_resuid: 1475 case Opt_resuid:
@@ -1480,38 +1487,38 @@ static int parse_options(char *options, struct super_block *sb,
1480 /* *sb_block = match_int(&args[0]); */ 1487 /* *sb_block = match_int(&args[0]); */
1481 break; 1488 break;
1482 case Opt_err_panic: 1489 case Opt_err_panic:
1483 clear_opt(sbi->s_mount_opt, ERRORS_CONT); 1490 clear_opt(sb, ERRORS_CONT);
1484 clear_opt(sbi->s_mount_opt, ERRORS_RO); 1491 clear_opt(sb, ERRORS_RO);
1485 set_opt(sbi->s_mount_opt, ERRORS_PANIC); 1492 set_opt(sb, ERRORS_PANIC);
1486 break; 1493 break;
1487 case Opt_err_ro: 1494 case Opt_err_ro:
1488 clear_opt(sbi->s_mount_opt, ERRORS_CONT); 1495 clear_opt(sb, ERRORS_CONT);
1489 clear_opt(sbi->s_mount_opt, ERRORS_PANIC); 1496 clear_opt(sb, ERRORS_PANIC);
1490 set_opt(sbi->s_mount_opt, ERRORS_RO); 1497 set_opt(sb, ERRORS_RO);
1491 break; 1498 break;
1492 case Opt_err_cont: 1499 case Opt_err_cont:
1493 clear_opt(sbi->s_mount_opt, ERRORS_RO); 1500 clear_opt(sb, ERRORS_RO);
1494 clear_opt(sbi->s_mount_opt, ERRORS_PANIC); 1501 clear_opt(sb, ERRORS_PANIC);
1495 set_opt(sbi->s_mount_opt, ERRORS_CONT); 1502 set_opt(sb, ERRORS_CONT);
1496 break; 1503 break;
1497 case Opt_nouid32: 1504 case Opt_nouid32:
1498 set_opt(sbi->s_mount_opt, NO_UID32); 1505 set_opt(sb, NO_UID32);
1499 break; 1506 break;
1500 case Opt_debug: 1507 case Opt_debug:
1501 set_opt(sbi->s_mount_opt, DEBUG); 1508 set_opt(sb, DEBUG);
1502 break; 1509 break;
1503 case Opt_oldalloc: 1510 case Opt_oldalloc:
1504 set_opt(sbi->s_mount_opt, OLDALLOC); 1511 set_opt(sb, OLDALLOC);
1505 break; 1512 break;
1506 case Opt_orlov: 1513 case Opt_orlov:
1507 clear_opt(sbi->s_mount_opt, OLDALLOC); 1514 clear_opt(sb, OLDALLOC);
1508 break; 1515 break;
1509#ifdef CONFIG_EXT4_FS_XATTR 1516#ifdef CONFIG_EXT4_FS_XATTR
1510 case Opt_user_xattr: 1517 case Opt_user_xattr:
1511 set_opt(sbi->s_mount_opt, XATTR_USER); 1518 set_opt(sb, XATTR_USER);
1512 break; 1519 break;
1513 case Opt_nouser_xattr: 1520 case Opt_nouser_xattr:
1514 clear_opt(sbi->s_mount_opt, XATTR_USER); 1521 clear_opt(sb, XATTR_USER);
1515 break; 1522 break;
1516#else 1523#else
1517 case Opt_user_xattr: 1524 case Opt_user_xattr:
@@ -1521,10 +1528,10 @@ static int parse_options(char *options, struct super_block *sb,
1521#endif 1528#endif
1522#ifdef CONFIG_EXT4_FS_POSIX_ACL 1529#ifdef CONFIG_EXT4_FS_POSIX_ACL
1523 case Opt_acl: 1530 case Opt_acl:
1524 set_opt(sbi->s_mount_opt, POSIX_ACL); 1531 set_opt(sb, POSIX_ACL);
1525 break; 1532 break;
1526 case Opt_noacl: 1533 case Opt_noacl:
1527 clear_opt(sbi->s_mount_opt, POSIX_ACL); 1534 clear_opt(sb, POSIX_ACL);
1528 break; 1535 break;
1529#else 1536#else
1530 case Opt_acl: 1537 case Opt_acl:
@@ -1543,7 +1550,7 @@ static int parse_options(char *options, struct super_block *sb,
1543 "Cannot specify journal on remount"); 1550 "Cannot specify journal on remount");
1544 return 0; 1551 return 0;
1545 } 1552 }
1546 set_opt(sbi->s_mount_opt, UPDATE_JOURNAL); 1553 set_opt(sb, UPDATE_JOURNAL);
1547 break; 1554 break;
1548 case Opt_journal_dev: 1555 case Opt_journal_dev:
1549 if (is_remount) { 1556 if (is_remount) {
@@ -1556,14 +1563,14 @@ static int parse_options(char *options, struct super_block *sb,
1556 *journal_devnum = option; 1563 *journal_devnum = option;
1557 break; 1564 break;
1558 case Opt_journal_checksum: 1565 case Opt_journal_checksum:
1559 set_opt(sbi->s_mount_opt, JOURNAL_CHECKSUM); 1566 set_opt(sb, JOURNAL_CHECKSUM);
1560 break; 1567 break;
1561 case Opt_journal_async_commit: 1568 case Opt_journal_async_commit:
1562 set_opt(sbi->s_mount_opt, JOURNAL_ASYNC_COMMIT); 1569 set_opt(sb, JOURNAL_ASYNC_COMMIT);
1563 set_opt(sbi->s_mount_opt, JOURNAL_CHECKSUM); 1570 set_opt(sb, JOURNAL_CHECKSUM);
1564 break; 1571 break;
1565 case Opt_noload: 1572 case Opt_noload:
1566 set_opt(sbi->s_mount_opt, NOLOAD); 1573 set_opt(sb, NOLOAD);
1567 break; 1574 break;
1568 case Opt_commit: 1575 case Opt_commit:
1569 if (match_int(&args[0], &option)) 1576 if (match_int(&args[0], &option))
@@ -1606,15 +1613,15 @@ static int parse_options(char *options, struct super_block *sb,
1606 return 0; 1613 return 0;
1607 } 1614 }
1608 } else { 1615 } else {
1609 clear_opt(sbi->s_mount_opt, DATA_FLAGS); 1616 clear_opt(sb, DATA_FLAGS);
1610 sbi->s_mount_opt |= data_opt; 1617 sbi->s_mount_opt |= data_opt;
1611 } 1618 }
1612 break; 1619 break;
1613 case Opt_data_err_abort: 1620 case Opt_data_err_abort:
1614 set_opt(sbi->s_mount_opt, DATA_ERR_ABORT); 1621 set_opt(sb, DATA_ERR_ABORT);
1615 break; 1622 break;
1616 case Opt_data_err_ignore: 1623 case Opt_data_err_ignore:
1617 clear_opt(sbi->s_mount_opt, DATA_ERR_ABORT); 1624 clear_opt(sb, DATA_ERR_ABORT);
1618 break; 1625 break;
1619#ifdef CONFIG_QUOTA 1626#ifdef CONFIG_QUOTA
1620 case Opt_usrjquota: 1627 case Opt_usrjquota:
@@ -1654,12 +1661,12 @@ set_qf_format:
1654 break; 1661 break;
1655 case Opt_quota: 1662 case Opt_quota:
1656 case Opt_usrquota: 1663 case Opt_usrquota:
1657 set_opt(sbi->s_mount_opt, QUOTA); 1664 set_opt(sb, QUOTA);
1658 set_opt(sbi->s_mount_opt, USRQUOTA); 1665 set_opt(sb, USRQUOTA);
1659 break; 1666 break;
1660 case Opt_grpquota: 1667 case Opt_grpquota:
1661 set_opt(sbi->s_mount_opt, QUOTA); 1668 set_opt(sb, QUOTA);
1662 set_opt(sbi->s_mount_opt, GRPQUOTA); 1669 set_opt(sb, GRPQUOTA);
1663 break; 1670 break;
1664 case Opt_noquota: 1671 case Opt_noquota:
1665 if (sb_any_quota_loaded(sb)) { 1672 if (sb_any_quota_loaded(sb)) {
@@ -1667,9 +1674,9 @@ set_qf_format:
1667 "options when quota turned on"); 1674 "options when quota turned on");
1668 return 0; 1675 return 0;
1669 } 1676 }
1670 clear_opt(sbi->s_mount_opt, QUOTA); 1677 clear_opt(sb, QUOTA);
1671 clear_opt(sbi->s_mount_opt, USRQUOTA); 1678 clear_opt(sb, USRQUOTA);
1672 clear_opt(sbi->s_mount_opt, GRPQUOTA); 1679 clear_opt(sb, GRPQUOTA);
1673 break; 1680 break;
1674#else 1681#else
1675 case Opt_quota: 1682 case Opt_quota:
@@ -1695,7 +1702,7 @@ set_qf_format:
1695 sbi->s_mount_flags |= EXT4_MF_FS_ABORTED; 1702 sbi->s_mount_flags |= EXT4_MF_FS_ABORTED;
1696 break; 1703 break;
1697 case Opt_nobarrier: 1704 case Opt_nobarrier:
1698 clear_opt(sbi->s_mount_opt, BARRIER); 1705 clear_opt(sb, BARRIER);
1699 break; 1706 break;
1700 case Opt_barrier: 1707 case Opt_barrier:
1701 if (args[0].from) { 1708 if (args[0].from) {
@@ -1704,9 +1711,9 @@ set_qf_format:
1704 } else 1711 } else
1705 option = 1; /* No argument, default to 1 */ 1712 option = 1; /* No argument, default to 1 */
1706 if (option) 1713 if (option)
1707 set_opt(sbi->s_mount_opt, BARRIER); 1714 set_opt(sb, BARRIER);
1708 else 1715 else
1709 clear_opt(sbi->s_mount_opt, BARRIER); 1716 clear_opt(sb, BARRIER);
1710 break; 1717 break;
1711 case Opt_ignore: 1718 case Opt_ignore:
1712 break; 1719 break;
@@ -1730,17 +1737,17 @@ set_qf_format:
1730 "Ignoring deprecated bh option"); 1737 "Ignoring deprecated bh option");
1731 break; 1738 break;
1732 case Opt_i_version: 1739 case Opt_i_version:
1733 set_opt(sbi->s_mount_opt, I_VERSION); 1740 set_opt(sb, I_VERSION);
1734 sb->s_flags |= MS_I_VERSION; 1741 sb->s_flags |= MS_I_VERSION;
1735 break; 1742 break;
1736 case Opt_nodelalloc: 1743 case Opt_nodelalloc:
1737 clear_opt(sbi->s_mount_opt, DELALLOC); 1744 clear_opt(sb, DELALLOC);
1738 break; 1745 break;
1739 case Opt_mblk_io_submit: 1746 case Opt_mblk_io_submit:
1740 set_opt(sbi->s_mount_opt, MBLK_IO_SUBMIT); 1747 set_opt(sb, MBLK_IO_SUBMIT);
1741 break; 1748 break;
1742 case Opt_nomblk_io_submit: 1749 case Opt_nomblk_io_submit:
1743 clear_opt(sbi->s_mount_opt, MBLK_IO_SUBMIT); 1750 clear_opt(sb, MBLK_IO_SUBMIT);
1744 break; 1751 break;
1745 case Opt_stripe: 1752 case Opt_stripe:
1746 if (match_int(&args[0], &option)) 1753 if (match_int(&args[0], &option))
@@ -1750,13 +1757,13 @@ set_qf_format:
1750 sbi->s_stripe = option; 1757 sbi->s_stripe = option;
1751 break; 1758 break;
1752 case Opt_delalloc: 1759 case Opt_delalloc:
1753 set_opt(sbi->s_mount_opt, DELALLOC); 1760 set_opt(sb, DELALLOC);
1754 break; 1761 break;
1755 case Opt_block_validity: 1762 case Opt_block_validity:
1756 set_opt(sbi->s_mount_opt, BLOCK_VALIDITY); 1763 set_opt(sb, BLOCK_VALIDITY);
1757 break; 1764 break;
1758 case Opt_noblock_validity: 1765 case Opt_noblock_validity:
1759 clear_opt(sbi->s_mount_opt, BLOCK_VALIDITY); 1766 clear_opt(sb, BLOCK_VALIDITY);
1760 break; 1767 break;
1761 case Opt_inode_readahead_blks: 1768 case Opt_inode_readahead_blks:
1762 if (match_int(&args[0], &option)) 1769 if (match_int(&args[0], &option))
@@ -1780,7 +1787,7 @@ set_qf_format:
1780 option); 1787 option);
1781 break; 1788 break;
1782 case Opt_noauto_da_alloc: 1789 case Opt_noauto_da_alloc:
1783 set_opt(sbi->s_mount_opt,NO_AUTO_DA_ALLOC); 1790 set_opt(sb, NO_AUTO_DA_ALLOC);
1784 break; 1791 break;
1785 case Opt_auto_da_alloc: 1792 case Opt_auto_da_alloc:
1786 if (args[0].from) { 1793 if (args[0].from) {
@@ -1789,24 +1796,24 @@ set_qf_format:
1789 } else 1796 } else
1790 option = 1; /* No argument, default to 1 */ 1797 option = 1; /* No argument, default to 1 */
1791 if (option) 1798 if (option)
1792 clear_opt(sbi->s_mount_opt, NO_AUTO_DA_ALLOC); 1799 clear_opt(sb, NO_AUTO_DA_ALLOC);
1793 else 1800 else
1794 set_opt(sbi->s_mount_opt,NO_AUTO_DA_ALLOC); 1801 set_opt(sb,NO_AUTO_DA_ALLOC);
1795 break; 1802 break;
1796 case Opt_discard: 1803 case Opt_discard:
1797 set_opt(sbi->s_mount_opt, DISCARD); 1804 set_opt(sb, DISCARD);
1798 break; 1805 break;
1799 case Opt_nodiscard: 1806 case Opt_nodiscard:
1800 clear_opt(sbi->s_mount_opt, DISCARD); 1807 clear_opt(sb, DISCARD);
1801 break; 1808 break;
1802 case Opt_dioread_nolock: 1809 case Opt_dioread_nolock:
1803 set_opt(sbi->s_mount_opt, DIOREAD_NOLOCK); 1810 set_opt(sb, DIOREAD_NOLOCK);
1804 break; 1811 break;
1805 case Opt_dioread_lock: 1812 case Opt_dioread_lock:
1806 clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK); 1813 clear_opt(sb, DIOREAD_NOLOCK);
1807 break; 1814 break;
1808 case Opt_init_inode_table: 1815 case Opt_init_inode_table:
1809 set_opt(sbi->s_mount_opt, INIT_INODE_TABLE); 1816 set_opt(sb, INIT_INODE_TABLE);
1810 if (args[0].from) { 1817 if (args[0].from) {
1811 if (match_int(&args[0], &option)) 1818 if (match_int(&args[0], &option))
1812 return 0; 1819 return 0;
@@ -1817,7 +1824,7 @@ set_qf_format:
1817 sbi->s_li_wait_mult = option; 1824 sbi->s_li_wait_mult = option;
1818 break; 1825 break;
1819 case Opt_noinit_inode_table: 1826 case Opt_noinit_inode_table:
1820 clear_opt(sbi->s_mount_opt, INIT_INODE_TABLE); 1827 clear_opt(sb, INIT_INODE_TABLE);
1821 break; 1828 break;
1822 default: 1829 default:
1823 ext4_msg(sb, KERN_ERR, 1830 ext4_msg(sb, KERN_ERR,
@@ -1829,10 +1836,10 @@ set_qf_format:
1829#ifdef CONFIG_QUOTA 1836#ifdef CONFIG_QUOTA
1830 if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) { 1837 if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
1831 if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA]) 1838 if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
1832 clear_opt(sbi->s_mount_opt, USRQUOTA); 1839 clear_opt(sb, USRQUOTA);
1833 1840
1834 if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA]) 1841 if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
1835 clear_opt(sbi->s_mount_opt, GRPQUOTA); 1842 clear_opt(sb, GRPQUOTA);
1836 1843
1837 if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) { 1844 if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) {
1838 ext4_msg(sb, KERN_ERR, "old and new quota " 1845 ext4_msg(sb, KERN_ERR, "old and new quota "
@@ -1902,12 +1909,12 @@ static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
1902 ext4_commit_super(sb, 1); 1909 ext4_commit_super(sb, 1);
1903 if (test_opt(sb, DEBUG)) 1910 if (test_opt(sb, DEBUG))
1904 printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, " 1911 printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, "
1905 "bpg=%lu, ipg=%lu, mo=%04x]\n", 1912 "bpg=%lu, ipg=%lu, mo=%04x, mo2=%04x]\n",
1906 sb->s_blocksize, 1913 sb->s_blocksize,
1907 sbi->s_groups_count, 1914 sbi->s_groups_count,
1908 EXT4_BLOCKS_PER_GROUP(sb), 1915 EXT4_BLOCKS_PER_GROUP(sb),
1909 EXT4_INODES_PER_GROUP(sb), 1916 EXT4_INODES_PER_GROUP(sb),
1910 sbi->s_mount_opt); 1917 sbi->s_mount_opt, sbi->s_mount_opt2);
1911 1918
1912 return res; 1919 return res;
1913} 1920}
@@ -1937,14 +1944,13 @@ static int ext4_fill_flex_info(struct super_block *sb)
1937 size = flex_group_count * sizeof(struct flex_groups); 1944 size = flex_group_count * sizeof(struct flex_groups);
1938 sbi->s_flex_groups = kzalloc(size, GFP_KERNEL); 1945 sbi->s_flex_groups = kzalloc(size, GFP_KERNEL);
1939 if (sbi->s_flex_groups == NULL) { 1946 if (sbi->s_flex_groups == NULL) {
1940 sbi->s_flex_groups = vmalloc(size); 1947 sbi->s_flex_groups = vzalloc(size);
1941 if (sbi->s_flex_groups) 1948 if (sbi->s_flex_groups == NULL) {
1942 memset(sbi->s_flex_groups, 0, size); 1949 ext4_msg(sb, KERN_ERR,
1943 } 1950 "not enough memory for %u flex groups",
1944 if (sbi->s_flex_groups == NULL) { 1951 flex_group_count);
1945 ext4_msg(sb, KERN_ERR, "not enough memory for " 1952 goto failed;
1946 "%u flex groups", flex_group_count); 1953 }
1947 goto failed;
1948 } 1954 }
1949 1955
1950 for (i = 0; i < sbi->s_groups_count; i++) { 1956 for (i = 0; i < sbi->s_groups_count; i++) {
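
ext4_fill_flex_info() used to fall back from kzalloc() to vmalloc() followed by a manual memset(); vzalloc() zeroes the mapping itself, so the fallback collapses to two symmetric attempts. Note that the eventual free must match the winning allocator (kfree() vs vfree()); that logic is not part of this hunk.

    sbi->s_flex_groups = kzalloc(size, GFP_KERNEL);
    if (sbi->s_flex_groups == NULL)
            sbi->s_flex_groups = vzalloc(size);     /* zeroed too */
    if (sbi->s_flex_groups == NULL)
            goto failed;
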
@@ -2923,7 +2929,7 @@ static int ext4_register_li_request(struct super_block *sb,
2923 struct ext4_sb_info *sbi = EXT4_SB(sb); 2929 struct ext4_sb_info *sbi = EXT4_SB(sb);
2924 struct ext4_li_request *elr; 2930 struct ext4_li_request *elr;
2925 ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count; 2931 ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
2926 int ret; 2932 int ret = 0;
2927 2933
2928 if (sbi->s_li_request != NULL) 2934 if (sbi->s_li_request != NULL)
2929 return 0; 2935 return 0;
@@ -3078,41 +3084,41 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3078 3084
3079 /* Set defaults before we parse the mount options */ 3085 /* Set defaults before we parse the mount options */
3080 def_mount_opts = le32_to_cpu(es->s_default_mount_opts); 3086 def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
3081 set_opt(sbi->s_mount_opt, INIT_INODE_TABLE); 3087 set_opt(sb, INIT_INODE_TABLE);
3082 if (def_mount_opts & EXT4_DEFM_DEBUG) 3088 if (def_mount_opts & EXT4_DEFM_DEBUG)
3083 set_opt(sbi->s_mount_opt, DEBUG); 3089 set_opt(sb, DEBUG);
3084 if (def_mount_opts & EXT4_DEFM_BSDGROUPS) { 3090 if (def_mount_opts & EXT4_DEFM_BSDGROUPS) {
3085 ext4_msg(sb, KERN_WARNING, deprecated_msg, "bsdgroups", 3091 ext4_msg(sb, KERN_WARNING, deprecated_msg, "bsdgroups",
3086 "2.6.38"); 3092 "2.6.38");
3087 set_opt(sbi->s_mount_opt, GRPID); 3093 set_opt(sb, GRPID);
3088 } 3094 }
3089 if (def_mount_opts & EXT4_DEFM_UID16) 3095 if (def_mount_opts & EXT4_DEFM_UID16)
3090 set_opt(sbi->s_mount_opt, NO_UID32); 3096 set_opt(sb, NO_UID32);
3091#ifdef CONFIG_EXT4_FS_XATTR 3097#ifdef CONFIG_EXT4_FS_XATTR
3092 if (def_mount_opts & EXT4_DEFM_XATTR_USER) 3098 if (def_mount_opts & EXT4_DEFM_XATTR_USER)
3093 set_opt(sbi->s_mount_opt, XATTR_USER); 3099 set_opt(sb, XATTR_USER);
3094#endif 3100#endif
3095#ifdef CONFIG_EXT4_FS_POSIX_ACL 3101#ifdef CONFIG_EXT4_FS_POSIX_ACL
3096 if (def_mount_opts & EXT4_DEFM_ACL) 3102 if (def_mount_opts & EXT4_DEFM_ACL)
3097 set_opt(sbi->s_mount_opt, POSIX_ACL); 3103 set_opt(sb, POSIX_ACL);
3098#endif 3104#endif
3099 if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA) 3105 if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
3100 set_opt(sbi->s_mount_opt, JOURNAL_DATA); 3106 set_opt(sb, JOURNAL_DATA);
3101 else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED) 3107 else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
3102 set_opt(sbi->s_mount_opt, ORDERED_DATA); 3108 set_opt(sb, ORDERED_DATA);
3103 else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK) 3109 else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK)
3104 set_opt(sbi->s_mount_opt, WRITEBACK_DATA); 3110 set_opt(sb, WRITEBACK_DATA);
3105 3111
3106 if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC) 3112 if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC)
3107 set_opt(sbi->s_mount_opt, ERRORS_PANIC); 3113 set_opt(sb, ERRORS_PANIC);
3108 else if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_CONTINUE) 3114 else if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_CONTINUE)
3109 set_opt(sbi->s_mount_opt, ERRORS_CONT); 3115 set_opt(sb, ERRORS_CONT);
3110 else 3116 else
3111 set_opt(sbi->s_mount_opt, ERRORS_RO); 3117 set_opt(sb, ERRORS_RO);
3112 if (def_mount_opts & EXT4_DEFM_BLOCK_VALIDITY) 3118 if (def_mount_opts & EXT4_DEFM_BLOCK_VALIDITY)
3113 set_opt(sbi->s_mount_opt, BLOCK_VALIDITY); 3119 set_opt(sb, BLOCK_VALIDITY);
3114 if (def_mount_opts & EXT4_DEFM_DISCARD) 3120 if (def_mount_opts & EXT4_DEFM_DISCARD)
3115 set_opt(sbi->s_mount_opt, DISCARD); 3121 set_opt(sb, DISCARD);
3116 3122
3117 sbi->s_resuid = le16_to_cpu(es->s_def_resuid); 3123 sbi->s_resuid = le16_to_cpu(es->s_def_resuid);
3118 sbi->s_resgid = le16_to_cpu(es->s_def_resgid); 3124 sbi->s_resgid = le16_to_cpu(es->s_def_resgid);
@@ -3121,7 +3127,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3121 sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME; 3127 sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME;
3122 3128
3123 if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0) 3129 if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0)
3124 set_opt(sbi->s_mount_opt, BARRIER); 3130 set_opt(sb, BARRIER);
3125 3131
3126 /* 3132 /*
3127 * enable delayed allocation by default 3133 * enable delayed allocation by default
@@ -3129,7 +3135,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3129 */ 3135 */
3130 if (!IS_EXT3_SB(sb) && 3136 if (!IS_EXT3_SB(sb) &&
3131 ((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0)) 3137 ((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0))
3132 set_opt(sbi->s_mount_opt, DELALLOC); 3138 set_opt(sb, DELALLOC);
3133 3139
3134 if (!parse_options((char *) sbi->s_es->s_mount_opts, sb, 3140 if (!parse_options((char *) sbi->s_es->s_mount_opts, sb,
3135 &journal_devnum, &journal_ioprio, NULL, 0)) { 3141 &journal_devnum, &journal_ioprio, NULL, 0)) {
@@ -3432,8 +3438,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3432 "suppressed and not mounted read-only"); 3438 "suppressed and not mounted read-only");
3433 goto failed_mount_wq; 3439 goto failed_mount_wq;
3434 } else { 3440 } else {
3435 clear_opt(sbi->s_mount_opt, DATA_FLAGS); 3441 clear_opt(sb, DATA_FLAGS);
3436 set_opt(sbi->s_mount_opt, WRITEBACK_DATA); 3442 set_opt(sb, WRITEBACK_DATA);
3437 sbi->s_journal = NULL; 3443 sbi->s_journal = NULL;
3438 needs_recovery = 0; 3444 needs_recovery = 0;
3439 goto no_journal; 3445 goto no_journal;
@@ -3471,9 +3477,9 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3471 */ 3477 */
3472 if (jbd2_journal_check_available_features 3478 if (jbd2_journal_check_available_features
3473 (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) 3479 (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE))
3474 set_opt(sbi->s_mount_opt, ORDERED_DATA); 3480 set_opt(sb, ORDERED_DATA);
3475 else 3481 else
3476 set_opt(sbi->s_mount_opt, JOURNAL_DATA); 3482 set_opt(sb, JOURNAL_DATA);
3477 break; 3483 break;
3478 3484
3479 case EXT4_MOUNT_ORDERED_DATA: 3485 case EXT4_MOUNT_ORDERED_DATA:
@@ -3563,18 +3569,18 @@ no_journal:
3563 (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)) { 3569 (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)) {
3564 ext4_msg(sb, KERN_WARNING, "Ignoring delalloc option - " 3570 ext4_msg(sb, KERN_WARNING, "Ignoring delalloc option - "
3565 "requested data journaling mode"); 3571 "requested data journaling mode");
3566 clear_opt(sbi->s_mount_opt, DELALLOC); 3572 clear_opt(sb, DELALLOC);
3567 } 3573 }
3568 if (test_opt(sb, DIOREAD_NOLOCK)) { 3574 if (test_opt(sb, DIOREAD_NOLOCK)) {
3569 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) { 3575 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
3570 ext4_msg(sb, KERN_WARNING, "Ignoring dioread_nolock " 3576 ext4_msg(sb, KERN_WARNING, "Ignoring dioread_nolock "
3571 "option - requested data journaling mode"); 3577 "option - requested data journaling mode");
3572 clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK); 3578 clear_opt(sb, DIOREAD_NOLOCK);
3573 } 3579 }
3574 if (sb->s_blocksize < PAGE_SIZE) { 3580 if (sb->s_blocksize < PAGE_SIZE) {
3575 ext4_msg(sb, KERN_WARNING, "Ignoring dioread_nolock " 3581 ext4_msg(sb, KERN_WARNING, "Ignoring dioread_nolock "
3576 "option - block size is too small"); 3582 "option - block size is too small");
3577 clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK); 3583 clear_opt(sb, DIOREAD_NOLOCK);
3578 } 3584 }
3579 } 3585 }
3580 3586
@@ -4173,6 +4179,22 @@ static int ext4_unfreeze(struct super_block *sb)
4173 return 0; 4179 return 0;
4174} 4180}
4175 4181
4182/*
4183 * Structure to save mount options for ext4_remount's benefit
4184 */
4185struct ext4_mount_options {
4186 unsigned long s_mount_opt;
4187 unsigned long s_mount_opt2;
4188 uid_t s_resuid;
4189 gid_t s_resgid;
4190 unsigned long s_commit_interval;
4191 u32 s_min_batch_time, s_max_batch_time;
4192#ifdef CONFIG_QUOTA
4193 int s_jquota_fmt;
4194 char *s_qf_names[MAXQUOTAS];
4195#endif
4196};
4197
4176static int ext4_remount(struct super_block *sb, int *flags, char *data) 4198static int ext4_remount(struct super_block *sb, int *flags, char *data)
4177{ 4199{
4178 struct ext4_super_block *es; 4200 struct ext4_super_block *es;
@@ -4193,6 +4215,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
4193 lock_super(sb); 4215 lock_super(sb);
4194 old_sb_flags = sb->s_flags; 4216 old_sb_flags = sb->s_flags;
4195 old_opts.s_mount_opt = sbi->s_mount_opt; 4217 old_opts.s_mount_opt = sbi->s_mount_opt;
4218 old_opts.s_mount_opt2 = sbi->s_mount_opt2;
4196 old_opts.s_resuid = sbi->s_resuid; 4219 old_opts.s_resuid = sbi->s_resuid;
4197 old_opts.s_resgid = sbi->s_resgid; 4220 old_opts.s_resgid = sbi->s_resgid;
4198 old_opts.s_commit_interval = sbi->s_commit_interval; 4221 old_opts.s_commit_interval = sbi->s_commit_interval;
@@ -4346,6 +4369,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
4346restore_opts: 4369restore_opts:
4347 sb->s_flags = old_sb_flags; 4370 sb->s_flags = old_sb_flags;
4348 sbi->s_mount_opt = old_opts.s_mount_opt; 4371 sbi->s_mount_opt = old_opts.s_mount_opt;
4372 sbi->s_mount_opt2 = old_opts.s_mount_opt2;
4349 sbi->s_resuid = old_opts.s_resuid; 4373 sbi->s_resuid = old_opts.s_resuid;
4350 sbi->s_resgid = old_opts.s_resgid; 4374 sbi->s_resgid = old_opts.s_resgid;
4351 sbi->s_commit_interval = old_opts.s_commit_interval; 4375 sbi->s_commit_interval = old_opts.s_commit_interval;
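
The remount path snapshots every tunable before parsing the new option string so that a failed remount can roll everything back; this hunk adds the new s_mount_opt2 word to the snapshot and to the restore path. Condensed shape of the save/parse/restore dance:

    old_opts.s_mount_opt  = sbi->s_mount_opt;
    old_opts.s_mount_opt2 = sbi->s_mount_opt2;
    /* ... parse_options() and apply; on any failure: */
    restore_opts:
            sbi->s_mount_opt  = old_opts.s_mount_opt;
            sbi->s_mount_opt2 = old_opts.s_mount_opt2;
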
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index fa4b899da4b3..fc32176eee39 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -427,23 +427,23 @@ cleanup:
427static int 427static int
428ext4_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size) 428ext4_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
429{ 429{
430 int i_error, b_error; 430 int ret, ret2;
431 431
432 down_read(&EXT4_I(dentry->d_inode)->xattr_sem); 432 down_read(&EXT4_I(dentry->d_inode)->xattr_sem);
433 i_error = ext4_xattr_ibody_list(dentry, buffer, buffer_size); 433 ret = ret2 = ext4_xattr_ibody_list(dentry, buffer, buffer_size);
434 if (i_error < 0) { 434 if (ret < 0)
435 b_error = 0; 435 goto errout;
436 } else { 436 if (buffer) {
437 if (buffer) { 437 buffer += ret;
438 buffer += i_error; 438 buffer_size -= ret;
439 buffer_size -= i_error;
440 }
441 b_error = ext4_xattr_block_list(dentry, buffer, buffer_size);
442 if (b_error < 0)
443 i_error = 0;
444 } 439 }
440 ret = ext4_xattr_block_list(dentry, buffer, buffer_size);
441 if (ret < 0)
442 goto errout;
443 ret += ret2;
444errout:
445 up_read(&EXT4_I(dentry->d_inode)->xattr_sem); 445 up_read(&EXT4_I(dentry->d_inode)->xattr_sem);
446 return i_error + b_error; 446 return ret;
447} 447}
448 448
449/* 449/*
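
ext4_xattr_list() drops the tangled i_error/b_error pair for a single running total and one unlock point: ret2 remembers how many bytes the in-inode listing produced, the buffer window is advanced past them, and the block listing's count is added on top. With hypothetical helper names standing in for the two ext4 listing functions:

    ret = ret2 = list_in_inode(dentry, buffer, buffer_size);
    if (ret < 0)
            goto errout;
    if (buffer) {
            buffer += ret;          /* append after in-inode names */
            buffer_size -= ret;
    }
    ret = list_in_block(dentry, buffer, buffer_size);
    if (ret < 0)
            goto errout;
    ret += ret2;                    /* total bytes produced */
    errout:
            up_read(&xattr_sem);
            return ret;
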
@@ -947,7 +947,7 @@ ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
947/* 947/*
948 * ext4_xattr_set_handle() 948 * ext4_xattr_set_handle()
949 * 949 *
950 * Create, replace or remove an extended attribute for this inode. Buffer 950 * Create, replace or remove an extended attribute for this inode. Value
951 * is NULL to remove an existing extended attribute, and non-NULL to 951 * is NULL to remove an existing extended attribute, and non-NULL to
952 * either replace an existing extended attribute, or create a new extended 952 * either replace an existing extended attribute, or create a new extended
953 * attribute. The flags XATTR_REPLACE and XATTR_CREATE 953 * attribute. The flags XATTR_REPLACE and XATTR_CREATE
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index f837ba953529..9e4686900f18 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -43,6 +43,7 @@
43#include <linux/vmalloc.h> 43#include <linux/vmalloc.h>
44#include <linux/backing-dev.h> 44#include <linux/backing-dev.h>
45#include <linux/bitops.h> 45#include <linux/bitops.h>
46#include <linux/ratelimit.h>
46 47
47#define CREATE_TRACE_POINTS 48#define CREATE_TRACE_POINTS
48#include <trace/events/jbd2.h> 49#include <trace/events/jbd2.h>
@@ -93,6 +94,7 @@ EXPORT_SYMBOL(jbd2_journal_file_inode);
93EXPORT_SYMBOL(jbd2_journal_init_jbd_inode); 94EXPORT_SYMBOL(jbd2_journal_init_jbd_inode);
94EXPORT_SYMBOL(jbd2_journal_release_jbd_inode); 95EXPORT_SYMBOL(jbd2_journal_release_jbd_inode);
95EXPORT_SYMBOL(jbd2_journal_begin_ordered_truncate); 96EXPORT_SYMBOL(jbd2_journal_begin_ordered_truncate);
97EXPORT_SYMBOL(jbd2_inode_cache);
96 98
97static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *); 99static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *);
98static void __journal_abort_soft (journal_t *journal, int errno); 100static void __journal_abort_soft (journal_t *journal, int errno);
@@ -827,7 +829,7 @@ static journal_t * journal_init_common (void)
827 829
828 journal = kzalloc(sizeof(*journal), GFP_KERNEL); 830 journal = kzalloc(sizeof(*journal), GFP_KERNEL);
829 if (!journal) 831 if (!journal)
830 goto fail; 832 return NULL;
831 833
832 init_waitqueue_head(&journal->j_wait_transaction_locked); 834 init_waitqueue_head(&journal->j_wait_transaction_locked);
833 init_waitqueue_head(&journal->j_wait_logspace); 835 init_waitqueue_head(&journal->j_wait_logspace);
@@ -852,14 +854,12 @@ static journal_t * journal_init_common (void)
852 err = jbd2_journal_init_revoke(journal, JOURNAL_REVOKE_DEFAULT_HASH); 854 err = jbd2_journal_init_revoke(journal, JOURNAL_REVOKE_DEFAULT_HASH);
853 if (err) { 855 if (err) {
854 kfree(journal); 856 kfree(journal);
855 goto fail; 857 return NULL;
856 } 858 }
857 859
858 spin_lock_init(&journal->j_history_lock); 860 spin_lock_init(&journal->j_history_lock);
859 861
860 return journal; 862 return journal;
861fail:
862 return NULL;
863} 863}
864 864
865/* jbd2_journal_init_dev and jbd2_journal_init_inode: 865/* jbd2_journal_init_dev and jbd2_journal_init_inode:
@@ -1982,7 +1982,6 @@ static void jbd2_journal_destroy_jbd2_journal_head_cache(void)
1982static struct journal_head *journal_alloc_journal_head(void) 1982static struct journal_head *journal_alloc_journal_head(void)
1983{ 1983{
1984 struct journal_head *ret; 1984 struct journal_head *ret;
1985 static unsigned long last_warning;
1986 1985
1987#ifdef CONFIG_JBD2_DEBUG 1986#ifdef CONFIG_JBD2_DEBUG
1988 atomic_inc(&nr_journal_heads); 1987 atomic_inc(&nr_journal_heads);
@@ -1990,11 +1989,7 @@ static struct journal_head *journal_alloc_journal_head(void)
1990 ret = kmem_cache_alloc(jbd2_journal_head_cache, GFP_NOFS); 1989 ret = kmem_cache_alloc(jbd2_journal_head_cache, GFP_NOFS);
1991 if (!ret) { 1990 if (!ret) {
1992 jbd_debug(1, "out of memory for journal_head\n"); 1991 jbd_debug(1, "out of memory for journal_head\n");
1993 if (time_after(jiffies, last_warning + 5*HZ)) { 1992 pr_notice_ratelimited("ENOMEM in %s, retrying.\n", __func__);
1994 printk(KERN_NOTICE "ENOMEM in %s, retrying.\n",
1995 __func__);
1996 last_warning = jiffies;
1997 }
1998 while (!ret) { 1993 while (!ret) {
1999 yield(); 1994 yield();
2000 ret = kmem_cache_alloc(jbd2_journal_head_cache, GFP_NOFS); 1995 ret = kmem_cache_alloc(jbd2_journal_head_cache, GFP_NOFS);
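
journal_alloc_journal_head() replaces a hand-rolled once-per-five-seconds jiffies check with pr_notice_ratelimited() from <linux/ratelimit.h> (hence the new include at the top of the file); the helper keeps its own per-callsite ratelimit state. Before and after:

    /* Before: manual ratelimiting with static state. */
    static unsigned long last_warning;
    if (time_after(jiffies, last_warning + 5*HZ)) {
            printk(KERN_NOTICE "ENOMEM in %s, retrying.\n", __func__);
            last_warning = jiffies;
    }

    /* After: one line, state kept inside the macro. */
    pr_notice_ratelimited("ENOMEM in %s, retrying.\n", __func__);
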
@@ -2292,17 +2287,19 @@ static void __exit jbd2_remove_jbd_stats_proc_entry(void)
2292 2287
2293#endif 2288#endif
2294 2289
2295struct kmem_cache *jbd2_handle_cache; 2290struct kmem_cache *jbd2_handle_cache, *jbd2_inode_cache;
2296 2291
2297static int __init journal_init_handle_cache(void) 2292static int __init journal_init_handle_cache(void)
2298{ 2293{
2299 jbd2_handle_cache = kmem_cache_create("jbd2_journal_handle", 2294 jbd2_handle_cache = KMEM_CACHE(jbd2_journal_handle, SLAB_TEMPORARY);
2300 sizeof(handle_t),
2301 0, /* offset */
2302 SLAB_TEMPORARY, /* flags */
2303 NULL); /* ctor */
2304 if (jbd2_handle_cache == NULL) { 2295 if (jbd2_handle_cache == NULL) {
2305 printk(KERN_EMERG "JBD: failed to create handle cache\n"); 2296 printk(KERN_EMERG "JBD2: failed to create handle cache\n");
2297 return -ENOMEM;
2298 }
2299 jbd2_inode_cache = KMEM_CACHE(jbd2_inode, 0);
2300 if (jbd2_inode_cache == NULL) {
2301 printk(KERN_EMERG "JBD2: failed to create inode cache\n");
2302 kmem_cache_destroy(jbd2_handle_cache);
2306 return -ENOMEM; 2303 return -ENOMEM;
2307 } 2304 }
2308 return 0; 2305 return 0;
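
journal_init_handle_cache() now uses the KMEM_CACHE() convenience macro and creates the new jbd2_inode cache in the same function, tearing the handle cache back down if the second creation fails. KMEM_CACHE() derives the cache name, object size, and alignment from the struct type, avoiding the repetition of a raw kmem_cache_create() call; its stock definition is essentially:

    #define KMEM_CACHE(__struct, __flags)                           \
            kmem_cache_create(#__struct, sizeof(struct __struct),   \
                              __alignof__(struct __struct),         \
                              (__flags), NULL)
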
@@ -2312,6 +2309,9 @@ static void jbd2_journal_destroy_handle_cache(void)
2312{ 2309{
2313 if (jbd2_handle_cache) 2310 if (jbd2_handle_cache)
2314 kmem_cache_destroy(jbd2_handle_cache); 2311 kmem_cache_destroy(jbd2_handle_cache);
2312 if (jbd2_inode_cache)
2313 kmem_cache_destroy(jbd2_inode_cache);
2314
2315} 2315}
2316 2316
2317/* 2317/*
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
index 2bc4d5f116f1..1cad869494f0 100644
--- a/fs/jbd2/recovery.c
+++ b/fs/jbd2/recovery.c
@@ -299,10 +299,10 @@ int jbd2_journal_skip_recovery(journal_t *journal)
299#ifdef CONFIG_JBD2_DEBUG 299#ifdef CONFIG_JBD2_DEBUG
300 int dropped = info.end_transaction - 300 int dropped = info.end_transaction -
301 be32_to_cpu(journal->j_superblock->s_sequence); 301 be32_to_cpu(journal->j_superblock->s_sequence);
302#endif
303 jbd_debug(1, 302 jbd_debug(1,
304 "JBD: ignoring %d transaction%s from the journal.\n", 303 "JBD: ignoring %d transaction%s from the journal.\n",
305 dropped, (dropped == 1) ? "" : "s"); 304 dropped, (dropped == 1) ? "" : "s");
305#endif
306 journal->j_transaction_sequence = ++info.end_transaction; 306 journal->j_transaction_sequence = ++info.end_transaction;
307 } 307 }
308 308
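
The recovery.c change is pure hygiene: dropped is declared only under CONFIG_JBD2_DEBUG, and moving the #endif below the jbd_debug() call puts the variable's only user under the same guard, so the code no longer depends on jbd_debug() compiling its arguments away in non-debug builds. The resulting block:

    #ifdef CONFIG_JBD2_DEBUG
            int dropped = info.end_transaction -
                    be32_to_cpu(journal->j_superblock->s_sequence);
            jbd_debug(1,
                    "JBD: ignoring %d transaction%s from the journal.\n",
                    dropped, (dropped == 1) ? "" : "s");
    #endif
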
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 6bf0a242613e..394893242ae3 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -340,9 +340,7 @@ handle_t *jbd2__journal_start(journal_t *journal, int nblocks, int gfp_mask)
 		jbd2_free_handle(handle);
 		current->journal_info = NULL;
 		handle = ERR_PTR(err);
-		goto out;
 	}
-out:
 	return handle;
 }
 EXPORT_SYMBOL(jbd2__journal_start);
@@ -589,7 +587,7 @@ do_get_write_access(handle_t *handle, struct journal_head *jh,
 	transaction = handle->h_transaction;
 	journal = transaction->t_journal;
 
-	jbd_debug(5, "buffer_head %p, force_copy %d\n", jh, force_copy);
+	jbd_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy);
 
 	JBUFFER_TRACE(jh, "entry");
 repeat:
@@ -774,7 +772,7 @@ done:
 	J_EXPECT_JH(jh, buffer_uptodate(jh2bh(jh)),
 		    "Possible IO failure.\n");
 	page = jh2bh(jh)->b_page;
-	offset = ((unsigned long) jh2bh(jh)->b_data) & ~PAGE_MASK;
+	offset = offset_in_page(jh2bh(jh)->b_data);
 	source = kmap_atomic(page, KM_USER0);
 	/* Fire data frozen trigger just before we copy the data */
 	jbd2_buffer_frozen_trigger(jh, source + offset,
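
The offset_in_page() substitution is behavior-preserving; the helper is the same masking expression the old code open-coded, per the mm headers:

	#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)
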
diff --git a/fs/lockd/Makefile b/fs/lockd/Makefile
index 97f6073ab339..ca58d64374ca 100644
--- a/fs/lockd/Makefile
+++ b/fs/lockd/Makefile
@@ -4,7 +4,7 @@
 
 obj-$(CONFIG_LOCKD) += lockd.o
 
-lockd-objs-y := clntlock.o clntproc.o host.o svc.o svclock.o svcshare.o \
-	   svcproc.o svcsubs.o mon.o xdr.o grace.o
-lockd-objs-$(CONFIG_LOCKD_V4) += xdr4.o svc4proc.o
+lockd-objs-y := clntlock.o clntproc.o clntxdr.o host.o svc.o svclock.o \
+	   svcshare.o svcproc.o svcsubs.o mon.o xdr.o grace.o
+lockd-objs-$(CONFIG_LOCKD_V4) += clnt4xdr.o xdr4.o svc4proc.o
 lockd-objs := $(lockd-objs-y)
diff --git a/fs/lockd/clnt4xdr.c b/fs/lockd/clnt4xdr.c
new file mode 100644
index 000000000000..f848b52c67b1
--- /dev/null
+++ b/fs/lockd/clnt4xdr.c
@@ -0,0 +1,605 @@
1/*
2 * linux/fs/lockd/clnt4xdr.c
3 *
4 * XDR functions to encode/decode NLM version 4 RPC arguments and results.
5 *
6 * NLM client-side only.
7 *
8 * Copyright (C) 2010, Oracle. All rights reserved.
9 */
10
11#include <linux/types.h>
12#include <linux/sunrpc/xdr.h>
13#include <linux/sunrpc/clnt.h>
14#include <linux/sunrpc/stats.h>
15#include <linux/lockd/lockd.h>
16
17#define NLMDBG_FACILITY NLMDBG_XDR
18
19#if (NLMCLNT_OHSIZE > XDR_MAX_NETOBJ)
20# error "NLM host name cannot be larger than XDR_MAX_NETOBJ!"
21#endif
22
23#if (NLMCLNT_OHSIZE > NLM_MAXSTRLEN)
24# error "NLM host name cannot be larger than NLM's maximum string length!"
25#endif
26
27/*
28 * Declare the space requirements for NLM arguments and replies as
29 * number of 32bit-words
30 */
31#define NLM4_void_sz (0)
32#define NLM4_cookie_sz (1+(NLM_MAXCOOKIELEN>>2))
33#define NLM4_caller_sz (1+(NLMCLNT_OHSIZE>>2))
34#define NLM4_owner_sz (1+(NLMCLNT_OHSIZE>>2))
35#define NLM4_fhandle_sz (1+(NFS3_FHSIZE>>2))
36#define NLM4_lock_sz (5+NLM4_caller_sz+NLM4_owner_sz+NLM4_fhandle_sz)
37#define NLM4_holder_sz (6+NLM4_owner_sz)
38
39#define NLM4_testargs_sz (NLM4_cookie_sz+1+NLM4_lock_sz)
40#define NLM4_lockargs_sz (NLM4_cookie_sz+4+NLM4_lock_sz)
41#define NLM4_cancargs_sz (NLM4_cookie_sz+2+NLM4_lock_sz)
42#define NLM4_unlockargs_sz (NLM4_cookie_sz+NLM4_lock_sz)
43
44#define NLM4_testres_sz (NLM4_cookie_sz+1+NLM4_holder_sz)
45#define NLM4_res_sz (NLM4_cookie_sz+1)
46#define NLM4_norep_sz (0)
47
48
49static s64 loff_t_to_s64(loff_t offset)
50{
51 s64 res;
52
53 if (offset >= NLM4_OFFSET_MAX)
54 res = NLM4_OFFSET_MAX;
55 else if (offset <= -NLM4_OFFSET_MAX)
56 res = -NLM4_OFFSET_MAX;
57 else
58 res = offset;
59 return res;
60}
61
62static void nlm4_compute_offsets(const struct nlm_lock *lock,
63 u64 *l_offset, u64 *l_len)
64{
65 const struct file_lock *fl = &lock->fl;
66
67 BUG_ON(fl->fl_start > NLM4_OFFSET_MAX);
68 BUG_ON(fl->fl_end > NLM4_OFFSET_MAX &&
69 fl->fl_end != OFFSET_MAX);
70
71 *l_offset = loff_t_to_s64(fl->fl_start);
72 if (fl->fl_end == OFFSET_MAX)
73 *l_len = 0;
74 else
75 *l_len = loff_t_to_s64(fl->fl_end - fl->fl_start + 1);
76}
77
78/*
79 * Handle decode buffer overflows out-of-line.
80 */
81static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
82{
83 dprintk("lockd: %s prematurely hit the end of our receive buffer. "
84 "Remaining buffer length is %tu words.\n",
85 func, xdr->end - xdr->p);
86}
87
88
89/*
90 * Encode/decode NLMv4 basic data types
91 *
92 * Basic NLMv4 data types are defined in Appendix II, section 6.1.4
93 * of RFC 1813: "NFS Version 3 Protocol Specification" and in Chapter
94 * 10 of X/Open's "Protocols for Interworking: XNFS, Version 3W".
95 *
96 * Not all basic data types have their own encoding and decoding
97 * functions. For run-time efficiency, some data types are encoded
98 * or decoded inline.
99 */
100
101static void encode_bool(struct xdr_stream *xdr, const int value)
102{
103 __be32 *p;
104
105 p = xdr_reserve_space(xdr, 4);
106 *p = value ? xdr_one : xdr_zero;
107}
108
109static void encode_int32(struct xdr_stream *xdr, const s32 value)
110{
111 __be32 *p;
112
113 p = xdr_reserve_space(xdr, 4);
114 *p = cpu_to_be32(value);
115}
116
117/*
118 * typedef opaque netobj<MAXNETOBJ_SZ>
119 */
120static void encode_netobj(struct xdr_stream *xdr,
121 const u8 *data, const unsigned int length)
122{
123 __be32 *p;
124
125 BUG_ON(length > XDR_MAX_NETOBJ);
126 p = xdr_reserve_space(xdr, 4 + length);
127 xdr_encode_opaque(p, data, length);
128}
129
130static int decode_netobj(struct xdr_stream *xdr,
131 struct xdr_netobj *obj)
132{
133 u32 length;
134 __be32 *p;
135
136 p = xdr_inline_decode(xdr, 4);
137 if (unlikely(p == NULL))
138 goto out_overflow;
139 length = be32_to_cpup(p++);
140 if (unlikely(length > XDR_MAX_NETOBJ))
141 goto out_size;
142 obj->len = length;
143 obj->data = (u8 *)p;
144 return 0;
145out_size:
146 dprintk("NFS: returned netobj was too long: %u\n", length);
147 return -EIO;
148out_overflow:
149 print_overflow_msg(__func__, xdr);
150 return -EIO;
151}
152
153/*
154 * netobj cookie;
155 */
156static void encode_cookie(struct xdr_stream *xdr,
157 const struct nlm_cookie *cookie)
158{
159 BUG_ON(cookie->len > NLM_MAXCOOKIELEN);
160 encode_netobj(xdr, (u8 *)&cookie->data, cookie->len);
161}
162
163static int decode_cookie(struct xdr_stream *xdr,
164 struct nlm_cookie *cookie)
165{
166 u32 length;
167 __be32 *p;
168
169 p = xdr_inline_decode(xdr, 4);
170 if (unlikely(p == NULL))
171 goto out_overflow;
172 length = be32_to_cpup(p++);
173 /* apparently HPUX can return empty cookies */
174 if (length == 0)
175 goto out_hpux;
176 if (length > NLM_MAXCOOKIELEN)
177 goto out_size;
178 p = xdr_inline_decode(xdr, length);
179 if (unlikely(p == NULL))
180 goto out_overflow;
181 cookie->len = length;
182 memcpy(cookie->data, p, length);
183 return 0;
184out_hpux:
185 cookie->len = 4;
186 memset(cookie->data, 0, 4);
187 return 0;
188out_size:
189 dprintk("NFS: returned cookie was too long: %u\n", length);
190 return -EIO;
191out_overflow:
192 print_overflow_msg(__func__, xdr);
193 return -EIO;
194}
195
196/*
197 * netobj fh;
198 */
199static void encode_fh(struct xdr_stream *xdr, const struct nfs_fh *fh)
200{
201 BUG_ON(fh->size > NFS3_FHSIZE);
202 encode_netobj(xdr, (u8 *)&fh->data, fh->size);
203}
204
205/*
206 * enum nlm4_stats {
207 * NLM4_GRANTED = 0,
208 * NLM4_DENIED = 1,
209 * NLM4_DENIED_NOLOCKS = 2,
210 * NLM4_BLOCKED = 3,
211 * NLM4_DENIED_GRACE_PERIOD = 4,
212 * NLM4_DEADLCK = 5,
213 * NLM4_ROFS = 6,
214 * NLM4_STALE_FH = 7,
215 * NLM4_FBIG = 8,
216 * NLM4_FAILED = 9
217 * };
218 *
219 * struct nlm4_stat {
220 * nlm4_stats stat;
221 * };
222 *
223 * NB: we don't swap bytes for the NLM status values. The upper
224 * layers deal directly with the status value in network byte
225 * order.
226 */
227static void encode_nlm4_stat(struct xdr_stream *xdr,
228 const __be32 stat)
229{
230 __be32 *p;
231
232 BUG_ON(be32_to_cpu(stat) > NLM_FAILED);
233 p = xdr_reserve_space(xdr, 4);
234 *p = stat;
235}
236
237static int decode_nlm4_stat(struct xdr_stream *xdr, __be32 *stat)
238{
239 __be32 *p;
240
241 p = xdr_inline_decode(xdr, 4);
242 if (unlikely(p == NULL))
243 goto out_overflow;
244 if (unlikely(*p > nlm4_failed))
245 goto out_bad_xdr;
246 *stat = *p;
247 return 0;
248out_bad_xdr:
249 dprintk("%s: server returned invalid nlm4_stats value: %u\n",
250 __func__, be32_to_cpup(p));
251 return -EIO;
252out_overflow:
253 print_overflow_msg(__func__, xdr);
254 return -EIO;
255}
256
257/*
258 * struct nlm4_holder {
259 * bool exclusive;
260 * int32 svid;
261 * netobj oh;
262 * uint64 l_offset;
263 * uint64 l_len;
264 * };
265 */
266static void encode_nlm4_holder(struct xdr_stream *xdr,
267 const struct nlm_res *result)
268{
269 const struct nlm_lock *lock = &result->lock;
270 u64 l_offset, l_len;
271 __be32 *p;
272
273 encode_bool(xdr, lock->fl.fl_type == F_RDLCK);
274 encode_int32(xdr, lock->svid);
275 encode_netobj(xdr, lock->oh.data, lock->oh.len);
276
277 p = xdr_reserve_space(xdr, 4 + 4);
278 nlm4_compute_offsets(lock, &l_offset, &l_len);
279 p = xdr_encode_hyper(p, l_offset);
280 xdr_encode_hyper(p, l_len);
281}
282
283static int decode_nlm4_holder(struct xdr_stream *xdr, struct nlm_res *result)
284{
285 struct nlm_lock *lock = &result->lock;
286 struct file_lock *fl = &lock->fl;
287 u64 l_offset, l_len;
288 u32 exclusive;
289 int error;
290 __be32 *p;
291 s32 end;
292
293 memset(lock, 0, sizeof(*lock));
294 locks_init_lock(fl);
295
296 p = xdr_inline_decode(xdr, 4 + 4);
297 if (unlikely(p == NULL))
298 goto out_overflow;
299 exclusive = be32_to_cpup(p++);
300 lock->svid = be32_to_cpup(p);
301 fl->fl_pid = (pid_t)lock->svid;
302
303 error = decode_netobj(xdr, &lock->oh);
304 if (unlikely(error))
305 goto out;
306
307 p = xdr_inline_decode(xdr, 8 + 8);
308 if (unlikely(p == NULL))
309 goto out_overflow;
310
311 fl->fl_flags = FL_POSIX;
312 fl->fl_type = exclusive != 0 ? F_WRLCK : F_RDLCK;
313 p = xdr_decode_hyper(p, &l_offset);
314 xdr_decode_hyper(p, &l_len);
315 end = l_offset + l_len - 1;
316
317 fl->fl_start = (loff_t)l_offset;
318 if (l_len == 0 || end < 0)
319 fl->fl_end = OFFSET_MAX;
320 else
321 fl->fl_end = (loff_t)end;
322 error = 0;
323out:
324 return error;
325out_overflow:
326 print_overflow_msg(__func__, xdr);
327 return -EIO;
328}
329
330/*
331 * string caller_name<LM_MAXSTRLEN>;
332 */
333static void encode_caller_name(struct xdr_stream *xdr, const char *name)
334{
335 /* NB: client-side does not set lock->len */
336 u32 length = strlen(name);
337 __be32 *p;
338
339 BUG_ON(length > NLM_MAXSTRLEN);
340 p = xdr_reserve_space(xdr, 4 + length);
341 xdr_encode_opaque(p, name, length);
342}
343
344/*
345 * struct nlm4_lock {
346 * string caller_name<LM_MAXSTRLEN>;
347 * netobj fh;
348 * netobj oh;
349 * int32 svid;
350 * uint64 l_offset;
351 * uint64 l_len;
352 * };
353 */
354static void encode_nlm4_lock(struct xdr_stream *xdr,
355 const struct nlm_lock *lock)
356{
357 u64 l_offset, l_len;
358 __be32 *p;
359
360 encode_caller_name(xdr, lock->caller);
361 encode_fh(xdr, &lock->fh);
362 encode_netobj(xdr, lock->oh.data, lock->oh.len);
363
364 p = xdr_reserve_space(xdr, 4 + 8 + 8);
365 *p++ = cpu_to_be32(lock->svid);
366
367 nlm4_compute_offsets(lock, &l_offset, &l_len);
368 p = xdr_encode_hyper(p, l_offset);
369 xdr_encode_hyper(p, l_len);
370}
371
372
373/*
374 * NLMv4 XDR encode functions
375 *
376 * NLMv4 argument types are defined in Appendix II of RFC 1813:
377 * "NFS Version 3 Protocol Specification" and Chapter 10 of X/Open's
378 * "Protocols for Interworking: XNFS, Version 3W".
379 */
380
381/*
382 * struct nlm4_testargs {
383 * netobj cookie;
384 * bool exclusive;
385 * struct nlm4_lock alock;
386 * };
387 */
388static void nlm4_xdr_enc_testargs(struct rpc_rqst *req,
389 struct xdr_stream *xdr,
390 const struct nlm_args *args)
391{
392 const struct nlm_lock *lock = &args->lock;
393
394 encode_cookie(xdr, &args->cookie);
395 encode_bool(xdr, lock->fl.fl_type == F_WRLCK);
396 encode_nlm4_lock(xdr, lock);
397}
398
399/*
400 * struct nlm4_lockargs {
401 * netobj cookie;
402 * bool block;
403 * bool exclusive;
404 * struct nlm4_lock alock;
405 * bool reclaim;
406 * int state;
407 * };
408 */
409static void nlm4_xdr_enc_lockargs(struct rpc_rqst *req,
410 struct xdr_stream *xdr,
411 const struct nlm_args *args)
412{
413 const struct nlm_lock *lock = &args->lock;
414
415 encode_cookie(xdr, &args->cookie);
416 encode_bool(xdr, args->block);
417 encode_bool(xdr, lock->fl.fl_type == F_WRLCK);
418 encode_nlm4_lock(xdr, lock);
419 encode_bool(xdr, args->reclaim);
420 encode_int32(xdr, args->state);
421}
422
423/*
424 * struct nlm4_cancargs {
425 * netobj cookie;
426 * bool block;
427 * bool exclusive;
428 * struct nlm4_lock alock;
429 * };
430 */
431static void nlm4_xdr_enc_cancargs(struct rpc_rqst *req,
432 struct xdr_stream *xdr,
433 const struct nlm_args *args)
434{
435 const struct nlm_lock *lock = &args->lock;
436
437 encode_cookie(xdr, &args->cookie);
438 encode_bool(xdr, args->block);
439 encode_bool(xdr, lock->fl.fl_type == F_WRLCK);
440 encode_nlm4_lock(xdr, lock);
441}
442
443/*
444 * struct nlm4_unlockargs {
445 * netobj cookie;
446 * struct nlm4_lock alock;
447 * };
448 */
449static void nlm4_xdr_enc_unlockargs(struct rpc_rqst *req,
450 struct xdr_stream *xdr,
451 const struct nlm_args *args)
452{
453 const struct nlm_lock *lock = &args->lock;
454
455 encode_cookie(xdr, &args->cookie);
456 encode_nlm4_lock(xdr, lock);
457}
458
459/*
460 * struct nlm4_res {
461 * netobj cookie;
462 * nlm4_stat stat;
463 * };
464 */
465static void nlm4_xdr_enc_res(struct rpc_rqst *req,
466 struct xdr_stream *xdr,
467 const struct nlm_res *result)
468{
469 encode_cookie(xdr, &result->cookie);
470 encode_nlm4_stat(xdr, result->status);
471}
472
473/*
474 * union nlm4_testrply switch (nlm4_stats stat) {
475 * case NLM4_DENIED:
476 * struct nlm4_holder holder;
477 * default:
478 * void;
479 * };
480 *
481 * struct nlm4_testres {
482 * netobj cookie;
483 * nlm4_testrply test_stat;
484 * };
485 */
486static void nlm4_xdr_enc_testres(struct rpc_rqst *req,
487 struct xdr_stream *xdr,
488 const struct nlm_res *result)
489{
490 encode_cookie(xdr, &result->cookie);
491 encode_nlm4_stat(xdr, result->status);
492 if (result->status == nlm_lck_denied)
493 encode_nlm4_holder(xdr, result);
494}
495
496
497/*
498 * NLMv4 XDR decode functions
499 *
500 * NLMv4 argument types are defined in Appendix II of RFC 1813:
501 * "NFS Version 3 Protocol Specification" and Chapter 10 of X/Open's
502 * "Protocols for Interworking: XNFS, Version 3W".
503 */
504
505/*
506 * union nlm4_testrply switch (nlm4_stats stat) {
507 * case NLM4_DENIED:
508 * struct nlm4_holder holder;
509 * default:
510 * void;
511 * };
512 *
513 * struct nlm4_testres {
514 * netobj cookie;
515 * nlm4_testrply test_stat;
516 * };
517 */
518static int decode_nlm4_testrply(struct xdr_stream *xdr,
519 struct nlm_res *result)
520{
521 int error;
522
523 error = decode_nlm4_stat(xdr, &result->status);
524 if (unlikely(error))
525 goto out;
526 if (result->status == nlm_lck_denied)
527 error = decode_nlm4_holder(xdr, result);
528out:
529 return error;
530}
531
532static int nlm4_xdr_dec_testres(struct rpc_rqst *req,
533 struct xdr_stream *xdr,
534 struct nlm_res *result)
535{
536 int error;
537
538 error = decode_cookie(xdr, &result->cookie);
539 if (unlikely(error))
540 goto out;
541 error = decode_nlm4_testrply(xdr, result);
542out:
543 return error;
544}
545
546/*
547 * struct nlm4_res {
548 * netobj cookie;
549 * nlm4_stat stat;
550 * };
551 */
552static int nlm4_xdr_dec_res(struct rpc_rqst *req,
553 struct xdr_stream *xdr,
554 struct nlm_res *result)
555{
556 int error;
557
558 error = decode_cookie(xdr, &result->cookie);
559 if (unlikely(error))
560 goto out;
561 error = decode_nlm4_stat(xdr, &result->status);
562out:
563 return error;
564}
565
566
567/*
568 * For NLM, a void procedure really returns nothing
569 */
570#define nlm4_xdr_dec_norep NULL
571
572#define PROC(proc, argtype, restype) \
573[NLMPROC_##proc] = { \
574 .p_proc = NLMPROC_##proc, \
575 .p_encode = (kxdreproc_t)nlm4_xdr_enc_##argtype, \
576 .p_decode = (kxdrdproc_t)nlm4_xdr_dec_##restype, \
577 .p_arglen = NLM4_##argtype##_sz, \
578 .p_replen = NLM4_##restype##_sz, \
579 .p_statidx = NLMPROC_##proc, \
580 .p_name = #proc, \
581 }
582
583static struct rpc_procinfo nlm4_procedures[] = {
584 PROC(TEST, testargs, testres),
585 PROC(LOCK, lockargs, res),
586 PROC(CANCEL, cancargs, res),
587 PROC(UNLOCK, unlockargs, res),
588 PROC(GRANTED, testargs, res),
589 PROC(TEST_MSG, testargs, norep),
590 PROC(LOCK_MSG, lockargs, norep),
591 PROC(CANCEL_MSG, cancargs, norep),
592 PROC(UNLOCK_MSG, unlockargs, norep),
593 PROC(GRANTED_MSG, testargs, norep),
594 PROC(TEST_RES, testres, norep),
595 PROC(LOCK_RES, res, norep),
596 PROC(CANCEL_RES, res, norep),
597 PROC(UNLOCK_RES, res, norep),
598 PROC(GRANTED_RES, res, norep),
599};
600
601struct rpc_version nlm_version4 = {
602 .number = 4,
603 .nrprocs = ARRAY_SIZE(nlm4_procedures),
604 .procs = nlm4_procedures,
605};
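
A rough sketch (hypothetical, not a call site from this patch) of how the SUNRPC client consumes one of the nlm4_procedures[] slots above; the kxdreproc_t/kxdrdproc_t hooks do all of the marshalling, so a caller only points rpc_proc at a table entry:

	static int example_nlm4_test(struct rpc_clnt *clnt,
				     struct nlm_args *args,
				     struct nlm_res *res)
	{
		struct rpc_message msg = {
			.rpc_proc = &nlm4_procedures[NLMPROC_TEST],
			.rpc_argp = args,  /* fed to nlm4_xdr_enc_testargs() */
			.rpc_resp = res,   /* filled by nlm4_xdr_dec_testres() */
		};

		return rpc_call_sync(clnt, &msg, 0);	/* 0: no special task flags */
	}
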
diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c
index 25509eb28fd7..8d4ea8351e3d 100644
--- a/fs/lockd/clntlock.c
+++ b/fs/lockd/clntlock.c
@@ -79,7 +79,7 @@ EXPORT_SYMBOL_GPL(nlmclnt_init);
  */
 void nlmclnt_done(struct nlm_host *host)
 {
-	nlm_release_host(host);
+	nlmclnt_release_host(host);
 	lockd_down();
 }
 EXPORT_SYMBOL_GPL(nlmclnt_done);
@@ -273,7 +273,7 @@ restart:
 	spin_unlock(&nlm_blocked_lock);
 
 	/* Release host handle after use */
-	nlm_release_host(host);
+	nlmclnt_release_host(host);
 	lockd_down();
 	return 0;
 }
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index 332c54cf75e0..adb45ec9038c 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -58,7 +58,7 @@ static void nlm_put_lockowner(struct nlm_lockowner *lockowner)
 		return;
 	list_del(&lockowner->list);
 	spin_unlock(&lockowner->host->h_lock);
-	nlm_release_host(lockowner->host);
+	nlmclnt_release_host(lockowner->host);
 	kfree(lockowner);
 }
 
@@ -207,22 +207,22 @@ struct nlm_rqst *nlm_alloc_call(struct nlm_host *host)
 		printk("nlm_alloc_call: failed, waiting for memory\n");
 		schedule_timeout_interruptible(5*HZ);
 	}
-	nlm_release_host(host);
+	nlmclnt_release_host(host);
 	return NULL;
 }
 
-void nlm_release_call(struct nlm_rqst *call)
+void nlmclnt_release_call(struct nlm_rqst *call)
 {
 	if (!atomic_dec_and_test(&call->a_count))
 		return;
-	nlm_release_host(call->a_host);
+	nlmclnt_release_host(call->a_host);
 	nlmclnt_release_lockargs(call);
 	kfree(call);
 }
 
 static void nlmclnt_rpc_release(void *data)
 {
-	nlm_release_call(data);
+	nlmclnt_release_call(data);
 }
 
 static int nlm_wait_on_grace(wait_queue_head_t *queue)
@@ -436,7 +436,7 @@ nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl)
 		status = nlm_stat_to_errno(req->a_res.status);
 	}
 out:
-	nlm_release_call(req);
+	nlmclnt_release_call(req);
 	return status;
 }
 
@@ -593,7 +593,7 @@ again:
 out_unblock:
 	nlmclnt_finish_block(block);
 out:
-	nlm_release_call(req);
+	nlmclnt_release_call(req);
 	return status;
 out_unlock:
 	/* Fatal error: ensure that we remove the lock altogether */
@@ -694,7 +694,7 @@ nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
 	/* What to do now? I'm out of my depth... */
 	status = -ENOLCK;
 out:
-	nlm_release_call(req);
+	nlmclnt_release_call(req);
 	return status;
 }
 
@@ -755,7 +755,7 @@ static int nlmclnt_cancel(struct nlm_host *host, int block, struct file_lock *fl
 			NLMPROC_CANCEL, &nlmclnt_cancel_ops);
 	if (status == 0 && req->a_res.status == nlm_lck_denied)
 		status = -ENOLCK;
-	nlm_release_call(req);
+	nlmclnt_release_call(req);
 	return status;
 }
 
diff --git a/fs/lockd/clntxdr.c b/fs/lockd/clntxdr.c
new file mode 100644
index 000000000000..180ac34feb9a
--- /dev/null
+++ b/fs/lockd/clntxdr.c
@@ -0,0 +1,627 @@
1/*
2 * linux/fs/lockd/clntxdr.c
3 *
4 * XDR functions to encode/decode NLM version 3 RPC arguments and results.
5 * NLM version 3 is backwards compatible with NLM versions 1 and 2.
6 *
7 * NLM client-side only.
8 *
9 * Copyright (C) 2010, Oracle. All rights reserved.
10 */
11
12#include <linux/types.h>
13#include <linux/sunrpc/xdr.h>
14#include <linux/sunrpc/clnt.h>
15#include <linux/sunrpc/stats.h>
16#include <linux/lockd/lockd.h>
17
18#define NLMDBG_FACILITY NLMDBG_XDR
19
20#if (NLMCLNT_OHSIZE > XDR_MAX_NETOBJ)
21# error "NLM host name cannot be larger than XDR_MAX_NETOBJ!"
22#endif
23
24/*
25 * Declare the space requirements for NLM arguments and replies as
26 * number of 32bit-words
27 */
28#define NLM_cookie_sz (1+(NLM_MAXCOOKIELEN>>2))
29#define NLM_caller_sz (1+(NLMCLNT_OHSIZE>>2))
30#define NLM_owner_sz (1+(NLMCLNT_OHSIZE>>2))
31#define NLM_fhandle_sz (1+(NFS2_FHSIZE>>2))
32#define NLM_lock_sz (3+NLM_caller_sz+NLM_owner_sz+NLM_fhandle_sz)
33#define NLM_holder_sz (4+NLM_owner_sz)
34
35#define NLM_testargs_sz (NLM_cookie_sz+1+NLM_lock_sz)
36#define NLM_lockargs_sz (NLM_cookie_sz+4+NLM_lock_sz)
37#define NLM_cancargs_sz (NLM_cookie_sz+2+NLM_lock_sz)
38#define NLM_unlockargs_sz (NLM_cookie_sz+NLM_lock_sz)
39
40#define NLM_testres_sz (NLM_cookie_sz+1+NLM_holder_sz)
41#define NLM_res_sz (NLM_cookie_sz+1)
42#define NLM_norep_sz (0)
43
44
45static s32 loff_t_to_s32(loff_t offset)
46{
47 s32 res;
48
49 if (offset >= NLM_OFFSET_MAX)
50 res = NLM_OFFSET_MAX;
51 else if (offset <= -NLM_OFFSET_MAX)
52 res = -NLM_OFFSET_MAX;
53 else
54 res = offset;
55 return res;
56}
57
58static void nlm_compute_offsets(const struct nlm_lock *lock,
59 u32 *l_offset, u32 *l_len)
60{
61 const struct file_lock *fl = &lock->fl;
62
63 BUG_ON(fl->fl_start > NLM_OFFSET_MAX);
64 BUG_ON(fl->fl_end > NLM_OFFSET_MAX &&
65 fl->fl_end != OFFSET_MAX);
66
67 *l_offset = loff_t_to_s32(fl->fl_start);
68 if (fl->fl_end == OFFSET_MAX)
69 *l_len = 0;
70 else
71 *l_len = loff_t_to_s32(fl->fl_end - fl->fl_start + 1);
72}
73
74/*
75 * Handle decode buffer overflows out-of-line.
76 */
77static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
78{
79 dprintk("lockd: %s prematurely hit the end of our receive buffer. "
80 "Remaining buffer length is %tu words.\n",
81 func, xdr->end - xdr->p);
82}
83
84
85/*
86 * Encode/decode NLMv3 basic data types
87 *
88 * Basic NLMv3 data types are not defined in an IETF standards
89 * document. X/Open has a description of these data types that
90 * is useful. See Chapter 10 of "Protocols for Interworking:
91 * XNFS, Version 3W".
92 *
93 * Not all basic data types have their own encoding and decoding
94 * functions. For run-time efficiency, some data types are encoded
95 * or decoded inline.
96 */
97
98static void encode_bool(struct xdr_stream *xdr, const int value)
99{
100 __be32 *p;
101
102 p = xdr_reserve_space(xdr, 4);
103 *p = value ? xdr_one : xdr_zero;
104}
105
106static void encode_int32(struct xdr_stream *xdr, const s32 value)
107{
108 __be32 *p;
109
110 p = xdr_reserve_space(xdr, 4);
111 *p = cpu_to_be32(value);
112}
113
114/*
115 * typedef opaque netobj<MAXNETOBJ_SZ>
116 */
117static void encode_netobj(struct xdr_stream *xdr,
118 const u8 *data, const unsigned int length)
119{
120 __be32 *p;
121
122 BUG_ON(length > XDR_MAX_NETOBJ);
123 p = xdr_reserve_space(xdr, 4 + length);
124 xdr_encode_opaque(p, data, length);
125}
126
127static int decode_netobj(struct xdr_stream *xdr,
128 struct xdr_netobj *obj)
129{
130 u32 length;
131 __be32 *p;
132
133 p = xdr_inline_decode(xdr, 4);
134 if (unlikely(p == NULL))
135 goto out_overflow;
136 length = be32_to_cpup(p++);
137 if (unlikely(length > XDR_MAX_NETOBJ))
138 goto out_size;
139 obj->len = length;
140 obj->data = (u8 *)p;
141 return 0;
142out_size:
143 dprintk("NFS: returned netobj was too long: %u\n", length);
144 return -EIO;
145out_overflow:
146 print_overflow_msg(__func__, xdr);
147 return -EIO;
148}
149
150/*
151 * netobj cookie;
152 */
153static void encode_cookie(struct xdr_stream *xdr,
154 const struct nlm_cookie *cookie)
155{
156 BUG_ON(cookie->len > NLM_MAXCOOKIELEN);
157 encode_netobj(xdr, (u8 *)&cookie->data, cookie->len);
158}
159
160static int decode_cookie(struct xdr_stream *xdr,
161 struct nlm_cookie *cookie)
162{
163 u32 length;
164 __be32 *p;
165
166 p = xdr_inline_decode(xdr, 4);
167 if (unlikely(p == NULL))
168 goto out_overflow;
169 length = be32_to_cpup(p++);
170 /* apparently HPUX can return empty cookies */
171 if (length == 0)
172 goto out_hpux;
173 if (length > NLM_MAXCOOKIELEN)
174 goto out_size;
175 p = xdr_inline_decode(xdr, length);
176 if (unlikely(p == NULL))
177 goto out_overflow;
178 cookie->len = length;
179 memcpy(cookie->data, p, length);
180 return 0;
181out_hpux:
182 cookie->len = 4;
183 memset(cookie->data, 0, 4);
184 return 0;
185out_size:
186 dprintk("NFS: returned cookie was too long: %u\n", length);
187 return -EIO;
188out_overflow:
189 print_overflow_msg(__func__, xdr);
190 return -EIO;
191}
192
193/*
194 * netobj fh;
195 */
196static void encode_fh(struct xdr_stream *xdr, const struct nfs_fh *fh)
197{
198 BUG_ON(fh->size != NFS2_FHSIZE);
199 encode_netobj(xdr, (u8 *)&fh->data, NFS2_FHSIZE);
200}
201
202/*
203 * enum nlm_stats {
204 * LCK_GRANTED = 0,
205 * LCK_DENIED = 1,
206 * LCK_DENIED_NOLOCKS = 2,
207 * LCK_BLOCKED = 3,
208 * LCK_DENIED_GRACE_PERIOD = 4
209 * };
210 *
211 *
212 * struct nlm_stat {
213 * nlm_stats stat;
214 * };
215 *
216 * NB: we don't swap bytes for the NLM status values. The upper
217 * layers deal directly with the status value in network byte
218 * order.
219 */
220
221static void encode_nlm_stat(struct xdr_stream *xdr,
222 const __be32 stat)
223{
224 __be32 *p;
225
226 BUG_ON(be32_to_cpu(stat) > NLM_LCK_DENIED_GRACE_PERIOD);
227 p = xdr_reserve_space(xdr, 4);
228 *p = stat;
229}
230
231static int decode_nlm_stat(struct xdr_stream *xdr,
232 __be32 *stat)
233{
234 __be32 *p;
235
236 p = xdr_inline_decode(xdr, 4);
237 if (unlikely(p == NULL))
238 goto out_overflow;
239 if (unlikely(*p > nlm_lck_denied_grace_period))
240 goto out_enum;
241 *stat = *p;
242 return 0;
243out_enum:
244 dprintk("%s: server returned invalid nlm_stats value: %u\n",
245 __func__, be32_to_cpup(p));
246 return -EIO;
247out_overflow:
248 print_overflow_msg(__func__, xdr);
249 return -EIO;
250}
251
252/*
253 * struct nlm_holder {
254 * bool exclusive;
255 * int uppid;
256 * netobj oh;
257 * unsigned l_offset;
258 * unsigned l_len;
259 * };
260 */
261static void encode_nlm_holder(struct xdr_stream *xdr,
262 const struct nlm_res *result)
263{
264 const struct nlm_lock *lock = &result->lock;
265 u32 l_offset, l_len;
266 __be32 *p;
267
268 encode_bool(xdr, lock->fl.fl_type == F_RDLCK);
269 encode_int32(xdr, lock->svid);
270 encode_netobj(xdr, lock->oh.data, lock->oh.len);
271
272 p = xdr_reserve_space(xdr, 4 + 4);
273 nlm_compute_offsets(lock, &l_offset, &l_len);
274 *p++ = cpu_to_be32(l_offset);
275 *p = cpu_to_be32(l_len);
276}
277
278static int decode_nlm_holder(struct xdr_stream *xdr, struct nlm_res *result)
279{
280 struct nlm_lock *lock = &result->lock;
281 struct file_lock *fl = &lock->fl;
282 u32 exclusive, l_offset, l_len;
283 int error;
284 __be32 *p;
285 s32 end;
286
287 memset(lock, 0, sizeof(*lock));
288 locks_init_lock(fl);
289
290 p = xdr_inline_decode(xdr, 4 + 4);
291 if (unlikely(p == NULL))
292 goto out_overflow;
293 exclusive = be32_to_cpup(p++);
294 lock->svid = be32_to_cpup(p);
295 fl->fl_pid = (pid_t)lock->svid;
296
297 error = decode_netobj(xdr, &lock->oh);
298 if (unlikely(error))
299 goto out;
300
301 p = xdr_inline_decode(xdr, 4 + 4);
302 if (unlikely(p == NULL))
303 goto out_overflow;
304
305 fl->fl_flags = FL_POSIX;
306 fl->fl_type = exclusive != 0 ? F_WRLCK : F_RDLCK;
307 l_offset = be32_to_cpup(p++);
308 l_len = be32_to_cpup(p);
309 end = l_offset + l_len - 1;
310
311 fl->fl_start = (loff_t)l_offset;
312 if (l_len == 0 || end < 0)
313 fl->fl_end = OFFSET_MAX;
314 else
315 fl->fl_end = (loff_t)end;
316 error = 0;
317out:
318 return error;
319out_overflow:
320 print_overflow_msg(__func__, xdr);
321 return -EIO;
322}
323
324/*
325 * string caller_name<LM_MAXSTRLEN>;
326 */
327static void encode_caller_name(struct xdr_stream *xdr, const char *name)
328{
329 /* NB: client-side does not set lock->len */
330 u32 length = strlen(name);
331 __be32 *p;
332
333 BUG_ON(length > NLM_MAXSTRLEN);
334 p = xdr_reserve_space(xdr, 4 + length);
335 xdr_encode_opaque(p, name, length);
336}
337
338/*
339 * struct nlm_lock {
340 * string caller_name<LM_MAXSTRLEN>;
341 * netobj fh;
342 * netobj oh;
343 * int uppid;
344 * unsigned l_offset;
345 * unsigned l_len;
346 * };
347 */
348static void encode_nlm_lock(struct xdr_stream *xdr,
349 const struct nlm_lock *lock)
350{
351 u32 l_offset, l_len;
352 __be32 *p;
353
354 encode_caller_name(xdr, lock->caller);
355 encode_fh(xdr, &lock->fh);
356 encode_netobj(xdr, lock->oh.data, lock->oh.len);
357
358 p = xdr_reserve_space(xdr, 4 + 4 + 4);
359 *p++ = cpu_to_be32(lock->svid);
360
361 nlm_compute_offsets(lock, &l_offset, &l_len);
362 *p++ = cpu_to_be32(l_offset);
363 *p = cpu_to_be32(l_len);
364}
365
366
367/*
368 * NLMv3 XDR encode functions
369 *
370 * NLMv3 argument types are defined in Chapter 10 of The Open Group's
371 * "Protocols for Interworking: XNFS, Version 3W".
372 */
373
374/*
375 * struct nlm_testargs {
376 * netobj cookie;
377 * bool exclusive;
378 * struct nlm_lock alock;
379 * };
380 */
381static void nlm_xdr_enc_testargs(struct rpc_rqst *req,
382 struct xdr_stream *xdr,
383 const struct nlm_args *args)
384{
385 const struct nlm_lock *lock = &args->lock;
386
387 encode_cookie(xdr, &args->cookie);
388 encode_bool(xdr, lock->fl.fl_type == F_WRLCK);
389 encode_nlm_lock(xdr, lock);
390}
391
392/*
393 * struct nlm_lockargs {
394 * netobj cookie;
395 * bool block;
396 * bool exclusive;
397 * struct nlm_lock alock;
398 * bool reclaim;
399 * int state;
400 * };
401 */
402static void nlm_xdr_enc_lockargs(struct rpc_rqst *req,
403 struct xdr_stream *xdr,
404 const struct nlm_args *args)
405{
406 const struct nlm_lock *lock = &args->lock;
407
408 encode_cookie(xdr, &args->cookie);
409 encode_bool(xdr, args->block);
410 encode_bool(xdr, lock->fl.fl_type == F_WRLCK);
411 encode_nlm_lock(xdr, lock);
412 encode_bool(xdr, args->reclaim);
413 encode_int32(xdr, args->state);
414}
415
416/*
417 * struct nlm_cancargs {
418 * netobj cookie;
419 * bool block;
420 * bool exclusive;
421 * struct nlm_lock alock;
422 * };
423 */
424static void nlm_xdr_enc_cancargs(struct rpc_rqst *req,
425 struct xdr_stream *xdr,
426 const struct nlm_args *args)
427{
428 const struct nlm_lock *lock = &args->lock;
429
430 encode_cookie(xdr, &args->cookie);
431 encode_bool(xdr, args->block);
432 encode_bool(xdr, lock->fl.fl_type == F_WRLCK);
433 encode_nlm_lock(xdr, lock);
434}
435
436/*
437 * struct nlm_unlockargs {
438 * netobj cookie;
439 * struct nlm_lock alock;
440 * };
441 */
442static void nlm_xdr_enc_unlockargs(struct rpc_rqst *req,
443 struct xdr_stream *xdr,
444 const struct nlm_args *args)
445{
446 const struct nlm_lock *lock = &args->lock;
447
448 encode_cookie(xdr, &args->cookie);
449 encode_nlm_lock(xdr, lock);
450}
451
452/*
453 * struct nlm_res {
454 * netobj cookie;
455 * nlm_stat stat;
456 * };
457 */
458static void nlm_xdr_enc_res(struct rpc_rqst *req,
459 struct xdr_stream *xdr,
460 const struct nlm_res *result)
461{
462 encode_cookie(xdr, &result->cookie);
463 encode_nlm_stat(xdr, result->status);
464}
465
466/*
467 * union nlm_testrply switch (nlm_stats stat) {
468 * case LCK_DENIED:
469 * struct nlm_holder holder;
470 * default:
471 * void;
472 * };
473 *
474 * struct nlm_testres {
475 * netobj cookie;
476 * nlm_testrply test_stat;
477 * };
478 */
479static void encode_nlm_testrply(struct xdr_stream *xdr,
480 const struct nlm_res *result)
481{
482 if (result->status == nlm_lck_denied)
483 encode_nlm_holder(xdr, result);
484}
485
486static void nlm_xdr_enc_testres(struct rpc_rqst *req,
487 struct xdr_stream *xdr,
488 const struct nlm_res *result)
489{
490 encode_cookie(xdr, &result->cookie);
491 encode_nlm_stat(xdr, result->status);
492 encode_nlm_testrply(xdr, result);
493}
494
495
496/*
497 * NLMv3 XDR decode functions
498 *
499 * NLMv3 result types are defined in Chapter 10 of The Open Group's
500 * "Protocols for Interworking: XNFS, Version 3W".
501 */
502
503/*
504 * union nlm_testrply switch (nlm_stats stat) {
505 * case LCK_DENIED:
506 * struct nlm_holder holder;
507 * default:
508 * void;
509 * };
510 *
511 * struct nlm_testres {
512 * netobj cookie;
513 * nlm_testrply test_stat;
514 * };
515 */
516static int decode_nlm_testrply(struct xdr_stream *xdr,
517 struct nlm_res *result)
518{
519 int error;
520
521 error = decode_nlm_stat(xdr, &result->status);
522 if (unlikely(error))
523 goto out;
524 if (result->status == nlm_lck_denied)
525 error = decode_nlm_holder(xdr, result);
526out:
527 return error;
528}
529
530static int nlm_xdr_dec_testres(struct rpc_rqst *req,
531 struct xdr_stream *xdr,
532 struct nlm_res *result)
533{
534 int error;
535
536 error = decode_cookie(xdr, &result->cookie);
537 if (unlikely(error))
538 goto out;
539 error = decode_nlm_testrply(xdr, result);
540out:
541 return error;
542}
543
544/*
545 * struct nlm_res {
546 * netobj cookie;
547 * nlm_stat stat;
548 * };
549 */
550static int nlm_xdr_dec_res(struct rpc_rqst *req,
551 struct xdr_stream *xdr,
552 struct nlm_res *result)
553{
554 int error;
555
556 error = decode_cookie(xdr, &result->cookie);
557 if (unlikely(error))
558 goto out;
559 error = decode_nlm_stat(xdr, &result->status);
560out:
561 return error;
562}
563
564
565/*
566 * For NLM, a void procedure really returns nothing
567 */
568#define nlm_xdr_dec_norep NULL
569
570#define PROC(proc, argtype, restype) \
571[NLMPROC_##proc] = { \
572 .p_proc = NLMPROC_##proc, \
573 .p_encode = (kxdreproc_t)nlm_xdr_enc_##argtype, \
574 .p_decode = (kxdrdproc_t)nlm_xdr_dec_##restype, \
575 .p_arglen = NLM_##argtype##_sz, \
576 .p_replen = NLM_##restype##_sz, \
577 .p_statidx = NLMPROC_##proc, \
578 .p_name = #proc, \
579 }
580
581static struct rpc_procinfo nlm_procedures[] = {
582 PROC(TEST, testargs, testres),
583 PROC(LOCK, lockargs, res),
584 PROC(CANCEL, cancargs, res),
585 PROC(UNLOCK, unlockargs, res),
586 PROC(GRANTED, testargs, res),
587 PROC(TEST_MSG, testargs, norep),
588 PROC(LOCK_MSG, lockargs, norep),
589 PROC(CANCEL_MSG, cancargs, norep),
590 PROC(UNLOCK_MSG, unlockargs, norep),
591 PROC(GRANTED_MSG, testargs, norep),
592 PROC(TEST_RES, testres, norep),
593 PROC(LOCK_RES, res, norep),
594 PROC(CANCEL_RES, res, norep),
595 PROC(UNLOCK_RES, res, norep),
596 PROC(GRANTED_RES, res, norep),
597};
598
599static struct rpc_version nlm_version1 = {
600 .number = 1,
601 .nrprocs = ARRAY_SIZE(nlm_procedures),
602 .procs = nlm_procedures,
603};
604
605static struct rpc_version nlm_version3 = {
606 .number = 3,
607 .nrprocs = ARRAY_SIZE(nlm_procedures),
608 .procs = nlm_procedures,
609};
610
611static struct rpc_version *nlm_versions[] = {
612 [1] = &nlm_version1,
613 [3] = &nlm_version3,
614#ifdef CONFIG_LOCKD_V4
615 [4] = &nlm_version4,
616#endif
617};
618
619static struct rpc_stat nlm_rpc_stats;
620
621struct rpc_program nlm_program = {
622 .name = "lockd",
623 .number = NLM_PROGRAM,
624 .nrvers = ARRAY_SIZE(nlm_versions),
625 .version = nlm_versions,
626 .stats = &nlm_rpc_stats,
627};
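
The NLMv3 encoder above clamps byte ranges because the v3 protocol carries only 32-bit offsets, while the NLMv4 file earlier in this patch uses 64-bit hypers. A hypothetical stand-alone demo of the clamp (plain userspace C, not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	#define NLM_OFFSET_MAX ((int32_t)0x7fffffff)

	static int32_t loff_t_to_s32(int64_t offset)
	{
		if (offset >= NLM_OFFSET_MAX)
			return NLM_OFFSET_MAX;
		if (offset <= -NLM_OFFSET_MAX)
			return -NLM_OFFSET_MAX;
		return (int32_t)offset;
	}

	int main(void)
	{
		/* a lock starting past 2^31 - 1 cannot be expressed in NLMv3 */
		printf("%d\n", loff_t_to_s32((int64_t)1 << 40)); /* 2147483647 */
		return 0;
	}
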
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index ed0c59fe23ce..5f1bcb2f06f3 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -25,9 +25,22 @@
 #define NLM_HOST_EXPIRE		(300 * HZ)
 #define NLM_HOST_COLLECT	(120 * HZ)
 
-static struct hlist_head	nlm_hosts[NLM_HOST_NRHASH];
+static struct hlist_head	nlm_server_hosts[NLM_HOST_NRHASH];
+static struct hlist_head	nlm_client_hosts[NLM_HOST_NRHASH];
+
+#define for_each_host(host, pos, chain, table) \
+	for ((chain) = (table); \
+	     (chain) < (table) + NLM_HOST_NRHASH; ++(chain)) \
+		hlist_for_each_entry((host), (pos), (chain), h_hash)
+
+#define for_each_host_safe(host, pos, next, chain, table) \
+	for ((chain) = (table); \
+	     (chain) < (table) + NLM_HOST_NRHASH; ++(chain)) \
+		hlist_for_each_entry_safe((host), (pos), (next), \
+						(chain), h_hash)
+
 static unsigned long		next_gc;
-static int			nrhosts;
+static unsigned long		nrhosts;
 static DEFINE_MUTEX(nlm_host_mutex);
 
 static void			nlm_gc_hosts(void);
@@ -40,8 +53,6 @@ struct nlm_lookup_host_info {
 	const u32		version;	/* NLM version to search for */
 	const char		*hostname;	/* remote's hostname */
 	const size_t		hostname_len;	/* it's length */
-	const struct sockaddr	*src_sap;	/* our address (optional) */
-	const size_t		src_len;	/* it's length */
 	const int		noresvport;	/* use non-priv port */
 };
 
47 58
@@ -88,127 +99,83 @@ static unsigned int nlm_hash_address(const struct sockaddr *sap)
 }
 
 /*
- * Common host lookup routine for server & client
+ * Allocate and initialize an nlm_host.  Common to both client and server.
  */
-static struct nlm_host *nlm_lookup_host(struct nlm_lookup_host_info *ni)
+static struct nlm_host *nlm_alloc_host(struct nlm_lookup_host_info *ni,
+				       struct nsm_handle *nsm)
 {
-	struct hlist_head *chain;
-	struct hlist_node *pos;
-	struct nlm_host	*host;
-	struct nsm_handle *nsm = NULL;
-
-	mutex_lock(&nlm_host_mutex);
+	struct nlm_host *host = NULL;
+	unsigned long now = jiffies;
 
-	if (time_after_eq(jiffies, next_gc))
-		nlm_gc_hosts();
-
-	/* We may keep several nlm_host objects for a peer, because each
-	 * nlm_host is identified by
-	 * (address, protocol, version, server/client)
-	 * We could probably simplify this a little by putting all those
-	 * different NLM rpc_clients into one single nlm_host object.
-	 * This would allow us to have one nlm_host per address.
-	 */
-	chain = &nlm_hosts[nlm_hash_address(ni->sap)];
-	hlist_for_each_entry(host, pos, chain, h_hash) {
-		if (!rpc_cmp_addr(nlm_addr(host), ni->sap))
-			continue;
-
-		/* See if we have an NSM handle for this client */
-		if (!nsm)
-			nsm = host->h_nsmhandle;
-
-		if (host->h_proto != ni->protocol)
-			continue;
-		if (host->h_version != ni->version)
-			continue;
-		if (host->h_server != ni->server)
-			continue;
-		if (ni->server && ni->src_len != 0 &&
-		    !rpc_cmp_addr(nlm_srcaddr(host), ni->src_sap))
-			continue;
-
-		/* Move to head of hash chain. */
-		hlist_del(&host->h_hash);
-		hlist_add_head(&host->h_hash, chain);
-
-		nlm_get_host(host);
-		dprintk("lockd: nlm_lookup_host found host %s (%s)\n",
-				host->h_name, host->h_addrbuf);
-		goto out;
-	}
-
-	/*
-	 * The host wasn't in our hash table.  If we don't
-	 * have an NSM handle for it yet, create one.
-	 */
-	if (nsm)
+	if (nsm != NULL)
 		atomic_inc(&nsm->sm_count);
 	else {
 		host = NULL;
 		nsm = nsm_get_handle(ni->sap, ni->salen,
 					ni->hostname, ni->hostname_len);
-		if (!nsm) {
-			dprintk("lockd: nlm_lookup_host failed; "
-				"no nsm handle\n");
+		if (unlikely(nsm == NULL)) {
+			dprintk("lockd: %s failed; no nsm handle\n",
+				__func__);
 			goto out;
 		}
 	}
 
-	host = kzalloc(sizeof(*host), GFP_KERNEL);
-	if (!host) {
+	host = kmalloc(sizeof(*host), GFP_KERNEL);
+	if (unlikely(host == NULL)) {
+		dprintk("lockd: %s failed; no memory\n", __func__);
 		nsm_release(nsm);
-		dprintk("lockd: nlm_lookup_host failed; no memory\n");
 		goto out;
 	}
-	host->h_name	   = nsm->sm_name;
-	host->h_addrbuf    = nsm->sm_addrbuf;
+
 	memcpy(nlm_addr(host), ni->sap, ni->salen);
 	host->h_addrlen = ni->salen;
 	rpc_set_port(nlm_addr(host), 0);
-	memcpy(nlm_srcaddr(host), ni->src_sap, ni->src_len);
-	host->h_srcaddrlen = ni->src_len;
+	host->h_srcaddrlen = 0;
+
+	host->h_rpcclnt = NULL;
+	host->h_name	= nsm->sm_name;
 	host->h_version	= ni->version;
 	host->h_proto	= ni->protocol;
-	host->h_rpcclnt	= NULL;
-	mutex_init(&host->h_mutex);
-	host->h_nextrebind = jiffies + NLM_HOST_REBIND;
-	host->h_expires	= jiffies + NLM_HOST_EXPIRE;
-	atomic_set(&host->h_count, 1);
+	host->h_reclaiming = 0;
+	host->h_server	= ni->server;
+	host->h_noresvport = ni->noresvport;
+	host->h_inuse	= 0;
 	init_waitqueue_head(&host->h_gracewait);
 	init_rwsem(&host->h_rwsem);
-	host->h_state	= 0;			/* pseudo NSM state */
-	host->h_nsmstate = 0;			/* real NSM state */
-	host->h_nsmhandle = nsm;
-	host->h_server	= ni->server;
-	host->h_noresvport = ni->noresvport;
-	hlist_add_head(&host->h_hash, chain);
+	host->h_state	= 0;
+	host->h_nsmstate = 0;
+	host->h_pidcount = 0;
+	atomic_set(&host->h_count, 1);
+	mutex_init(&host->h_mutex);
+	host->h_nextrebind = now + NLM_HOST_REBIND;
+	host->h_expires	= now + NLM_HOST_EXPIRE;
 	INIT_LIST_HEAD(&host->h_lockowners);
 	spin_lock_init(&host->h_lock);
 	INIT_LIST_HEAD(&host->h_granted);
 	INIT_LIST_HEAD(&host->h_reclaim);
-
-	nrhosts++;
-
-	dprintk("lockd: nlm_lookup_host created host %s\n",
-			host->h_name);
+	host->h_nsmhandle = nsm;
+	host->h_addrbuf = nsm->sm_addrbuf;
 
 out:
-	mutex_unlock(&nlm_host_mutex);
 	return host;
 }
 
 /*
- * Destroy a host
+ * Destroy an nlm_host and free associated resources
+ *
+ * Caller must hold nlm_host_mutex.
  */
-static void
-nlm_destroy_host(struct nlm_host *host)
+static void nlm_destroy_host_locked(struct nlm_host *host)
 {
 	struct rpc_clnt	*clnt;
 
+	dprintk("lockd: destroy host %s\n", host->h_name);
+
 	BUG_ON(!list_empty(&host->h_lockowners));
 	BUG_ON(atomic_read(&host->h_count));
 
+	hlist_del_init(&host->h_hash);
+
 	nsm_unmonitor(host);
 	nsm_release(host->h_nsmhandle);
 
@@ -216,6 +183,8 @@ nlm_destroy_host(struct nlm_host *host)
 	if (clnt != NULL)
 		rpc_shutdown_client(clnt);
 	kfree(host);
+
+	nrhosts--;
 }
 
 /**
@@ -249,12 +218,76 @@ struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap,
 		.hostname_len	= strlen(hostname),
 		.noresvport	= noresvport,
 	};
+	struct hlist_head *chain;
+	struct hlist_node *pos;
+	struct nlm_host	*host;
+	struct nsm_handle *nsm = NULL;
 
 	dprintk("lockd: %s(host='%s', vers=%u, proto=%s)\n", __func__,
 			(hostname ? hostname : "<none>"), version,
 			(protocol == IPPROTO_UDP ? "udp" : "tcp"));
 
-	return nlm_lookup_host(&ni);
+	mutex_lock(&nlm_host_mutex);
+
+	chain = &nlm_client_hosts[nlm_hash_address(sap)];
+	hlist_for_each_entry(host, pos, chain, h_hash) {
+		if (!rpc_cmp_addr(nlm_addr(host), sap))
+			continue;
+
+		/* Same address. Share an NSM handle if we already have one */
+		if (nsm == NULL)
+			nsm = host->h_nsmhandle;
+
+		if (host->h_proto != protocol)
+			continue;
+		if (host->h_version != version)
+			continue;
+
+		nlm_get_host(host);
+		dprintk("lockd: %s found host %s (%s)\n", __func__,
+			host->h_name, host->h_addrbuf);
+		goto out;
+	}
+
+	host = nlm_alloc_host(&ni, nsm);
+	if (unlikely(host == NULL))
+		goto out;
+
+	hlist_add_head(&host->h_hash, chain);
+	nrhosts++;
+
+	dprintk("lockd: %s created host %s (%s)\n", __func__,
+		host->h_name, host->h_addrbuf);
+
+out:
+	mutex_unlock(&nlm_host_mutex);
+	return host;
+}
+
+/**
+ * nlmclnt_release_host - release client nlm_host
+ * @host: nlm_host to release
+ *
+ */
+void nlmclnt_release_host(struct nlm_host *host)
+{
+	if (host == NULL)
+		return;
+
+	dprintk("lockd: release client host %s\n", host->h_name);
+
+	BUG_ON(atomic_read(&host->h_count) < 0);
+	BUG_ON(host->h_server);
+
+	if (atomic_dec_and_test(&host->h_count)) {
+		BUG_ON(!list_empty(&host->h_lockowners));
+		BUG_ON(!list_empty(&host->h_granted));
+		BUG_ON(!list_empty(&host->h_reclaim));
+
+		mutex_lock(&nlm_host_mutex);
+		nlm_destroy_host_locked(host);
+		mutex_unlock(&nlm_host_mutex);
+	}
 }
 
 /**
@@ -279,12 +312,18 @@ struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp,
 				       const char *hostname,
 				       const size_t hostname_len)
 {
+	struct hlist_head *chain;
+	struct hlist_node *pos;
+	struct nlm_host	*host = NULL;
+	struct nsm_handle *nsm = NULL;
 	struct sockaddr_in sin = {
 		.sin_family	= AF_INET,
 	};
 	struct sockaddr_in6 sin6 = {
 		.sin6_family	= AF_INET6,
 	};
+	struct sockaddr *src_sap;
+	size_t src_len = rqstp->rq_addrlen;
 	struct nlm_lookup_host_info ni = {
 		.server		= 1,
 		.sap		= svc_addr(rqstp),
@@ -293,27 +332,91 @@ struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp,
 		.version	= rqstp->rq_vers,
 		.hostname	= hostname,
 		.hostname_len	= hostname_len,
-		.src_len	= rqstp->rq_addrlen,
 	};
 
 	dprintk("lockd: %s(host='%*s', vers=%u, proto=%s)\n", __func__,
 			(int)hostname_len, hostname, rqstp->rq_vers,
 			(rqstp->rq_prot == IPPROTO_UDP ? "udp" : "tcp"));
 
+	mutex_lock(&nlm_host_mutex);
+
 	switch (ni.sap->sa_family) {
 	case AF_INET:
 		sin.sin_addr.s_addr = rqstp->rq_daddr.addr.s_addr;
-		ni.src_sap = (struct sockaddr *)&sin;
+		src_sap = (struct sockaddr *)&sin;
 		break;
 	case AF_INET6:
 		ipv6_addr_copy(&sin6.sin6_addr, &rqstp->rq_daddr.addr6);
-		ni.src_sap = (struct sockaddr *)&sin6;
+		src_sap = (struct sockaddr *)&sin6;
 		break;
 	default:
-		return NULL;
+		dprintk("lockd: %s failed; unrecognized address family\n",
+			__func__);
+		goto out;
+	}
+
+	if (time_after_eq(jiffies, next_gc))
+		nlm_gc_hosts();
+
+	chain = &nlm_server_hosts[nlm_hash_address(ni.sap)];
+	hlist_for_each_entry(host, pos, chain, h_hash) {
+		if (!rpc_cmp_addr(nlm_addr(host), ni.sap))
+			continue;
+
+		/* Same address. Share an NSM handle if we already have one */
+		if (nsm == NULL)
+			nsm = host->h_nsmhandle;
+
+		if (host->h_proto != ni.protocol)
+			continue;
+		if (host->h_version != ni.version)
+			continue;
+		if (!rpc_cmp_addr(nlm_srcaddr(host), src_sap))
+			continue;
+
+		/* Move to head of hash chain. */
+		hlist_del(&host->h_hash);
+		hlist_add_head(&host->h_hash, chain);
+
+		nlm_get_host(host);
+		dprintk("lockd: %s found host %s (%s)\n",
+			__func__, host->h_name, host->h_addrbuf);
+		goto out;
 	}
 
-	return nlm_lookup_host(&ni);
+	host = nlm_alloc_host(&ni, nsm);
+	if (unlikely(host == NULL))
+		goto out;
+
+	memcpy(nlm_srcaddr(host), src_sap, src_len);
+	host->h_srcaddrlen = src_len;
+	hlist_add_head(&host->h_hash, chain);
+	nrhosts++;
+
+	dprintk("lockd: %s created host %s (%s)\n",
+		__func__, host->h_name, host->h_addrbuf);
+
+out:
+	mutex_unlock(&nlm_host_mutex);
+	return host;
+}
+
+/**
+ * nlmsvc_release_host - release server nlm_host
+ * @host: nlm_host to release
+ *
+ * Host is destroyed later in nlm_gc_host().
+ */
+void nlmsvc_release_host(struct nlm_host *host)
+{
+	if (host == NULL)
+		return;
+
+	dprintk("lockd: release server host %s\n", host->h_name);
+
+	BUG_ON(atomic_read(&host->h_count) < 0);
+	BUG_ON(!host->h_server);
+	atomic_dec(&host->h_count);
 }
 
 /*
@@ -413,20 +516,28 @@ struct nlm_host * nlm_get_host(struct nlm_host *host)
 	return host;
 }
 
-/*
- * Release NLM host after use
- */
-void nlm_release_host(struct nlm_host *host)
+static struct nlm_host *next_host_state(struct hlist_head *cache,
+					struct nsm_handle *nsm,
+					const struct nlm_reboot *info)
 {
-	if (host != NULL) {
-		dprintk("lockd: release host %s\n", host->h_name);
-		BUG_ON(atomic_read(&host->h_count) < 0);
-		if (atomic_dec_and_test(&host->h_count)) {
-			BUG_ON(!list_empty(&host->h_lockowners));
-			BUG_ON(!list_empty(&host->h_granted));
-			BUG_ON(!list_empty(&host->h_reclaim));
+	struct nlm_host *host = NULL;
+	struct hlist_head *chain;
+	struct hlist_node *pos;
+
+	mutex_lock(&nlm_host_mutex);
+	for_each_host(host, pos, chain, cache) {
+		if (host->h_nsmhandle == nsm
+		    && host->h_nsmstate != info->state) {
+			host->h_nsmstate = info->state;
+			host->h_state++;
+
+			nlm_get_host(host);
+			goto out;
 		}
 	}
+out:
+	mutex_unlock(&nlm_host_mutex);
+	return host;
 }
 
 /**
@@ -438,8 +549,6 @@ void nlm_release_host(struct nlm_host *host)
  */
 void nlm_host_rebooted(const struct nlm_reboot *info)
 {
-	struct hlist_head *chain;
-	struct hlist_node *pos;
 	struct nsm_handle *nsm;
 	struct nlm_host	*host;
 
@@ -452,32 +561,15 @@ void nlm_host_rebooted(const struct nlm_reboot *info)
 	 * lock for this.
 	 * To avoid processing a host several times, we match the nsmstate.
 	 */
-again:	mutex_lock(&nlm_host_mutex);
-	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
-		hlist_for_each_entry(host, pos, chain, h_hash) {
-			if (host->h_nsmhandle == nsm
-			 && host->h_nsmstate != info->state) {
-				host->h_nsmstate = info->state;
-				host->h_state++;
-
-				nlm_get_host(host);
-				mutex_unlock(&nlm_host_mutex);
-
-				if (host->h_server) {
-					/* We're server for this guy, just ditch
-					 * all the locks he held. */
-					nlmsvc_free_host_resources(host);
-				} else {
-					/* He's the server, initiate lock recovery. */
-					nlmclnt_recovery(host);
-				}
-
-				nlm_release_host(host);
-				goto again;
-			}
-		}
-	}
-	mutex_unlock(&nlm_host_mutex);
+	while ((host = next_host_state(nlm_server_hosts, nsm, info)) != NULL) {
+		nlmsvc_free_host_resources(host);
+		nlmsvc_release_host(host);
+	}
+	while ((host = next_host_state(nlm_client_hosts, nsm, info)) != NULL) {
+		nlmclnt_recovery(host);
+		nlmclnt_release_host(host);
+	}
+
 	nsm_release(nsm);
 }
 
@@ -497,13 +589,11 @@ nlm_shutdown_hosts(void)
 
 	/* First, make all hosts eligible for gc */
 	dprintk("lockd: nuking all hosts...\n");
-	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
-		hlist_for_each_entry(host, pos, chain, h_hash) {
-			host->h_expires = jiffies - 1;
-			if (host->h_rpcclnt) {
-				rpc_shutdown_client(host->h_rpcclnt);
-				host->h_rpcclnt = NULL;
-			}
+	for_each_host(host, pos, chain, nlm_server_hosts) {
+		host->h_expires = jiffies - 1;
+		if (host->h_rpcclnt) {
+			rpc_shutdown_client(host->h_rpcclnt);
+			host->h_rpcclnt = NULL;
 		}
 	}
 
@@ -512,15 +602,13 @@ nlm_shutdown_hosts(void)
 	mutex_unlock(&nlm_host_mutex);
 
 	/* complain if any hosts are left */
-	if (nrhosts) {
+	if (nrhosts != 0) {
 		printk(KERN_WARNING "lockd: couldn't shutdown host module!\n");
-		dprintk("lockd: %d hosts left:\n", nrhosts);
-		for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
-			hlist_for_each_entry(host, pos, chain, h_hash) {
-				dprintk("       %s (cnt %d use %d exp %ld)\n",
-					host->h_name, atomic_read(&host->h_count),
-					host->h_inuse, host->h_expires);
-			}
+		dprintk("lockd: %lu hosts left:\n", nrhosts);
+		for_each_host(host, pos, chain, nlm_server_hosts) {
+			dprintk("       %s (cnt %d use %d exp %ld)\n",
+				host->h_name, atomic_read(&host->h_count),
+				host->h_inuse, host->h_expires);
 		}
 	}
 }
@@ -538,29 +626,22 @@ nlm_gc_hosts(void)
538 struct nlm_host *host; 626 struct nlm_host *host;
539 627
540 dprintk("lockd: host garbage collection\n"); 628 dprintk("lockd: host garbage collection\n");
541 for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) { 629 for_each_host(host, pos, chain, nlm_server_hosts)
542 hlist_for_each_entry(host, pos, chain, h_hash) 630 host->h_inuse = 0;
543 host->h_inuse = 0;
544 }
545 631
546 /* Mark all hosts that hold locks, blocks or shares */ 632 /* Mark all hosts that hold locks, blocks or shares */
547 nlmsvc_mark_resources(); 633 nlmsvc_mark_resources();
548 634
549 for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) { 635 for_each_host_safe(host, pos, next, chain, nlm_server_hosts) {
550 hlist_for_each_entry_safe(host, pos, next, chain, h_hash) { 636 if (atomic_read(&host->h_count) || host->h_inuse
551 if (atomic_read(&host->h_count) || host->h_inuse 637 || time_before(jiffies, host->h_expires)) {
552 || time_before(jiffies, host->h_expires)) { 638 dprintk("nlm_gc_hosts skipping %s "
553 dprintk("nlm_gc_hosts skipping %s (cnt %d use %d exp %ld)\n", 639 "(cnt %d use %d exp %ld)\n",
554 host->h_name, atomic_read(&host->h_count), 640 host->h_name, atomic_read(&host->h_count),
555 host->h_inuse, host->h_expires); 641 host->h_inuse, host->h_expires);
556 continue; 642 continue;
557 }
558 dprintk("lockd: delete host %s\n", host->h_name);
559 hlist_del_init(&host->h_hash);
560
561 nlm_destroy_host(host);
562 nrhosts--;
563 } 643 }
644 nlm_destroy_host_locked(host);
564 } 645 }
565 646
566 next_gc = jiffies + NLM_HOST_COLLECT; 647 next_gc = jiffies + NLM_HOST_COLLECT;
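The shutdown and gc loops above collapse the open-coded chain walk into for_each_host()/for_each_host_safe(), and nlm_destroy_host_locked() absorbs the hlist_del_init() and nrhosts-- that the old loop did by hand. Presumed shape of the iterators (they are defined earlier in host.c, outside this excerpt):

#define for_each_host(host, pos, chain, table) \
	for ((chain) = (table); \
	     (chain) < (table) + NLM_HOST_NRHASH; ++(chain)) \
		hlist_for_each_entry((host), (pos), (chain), h_hash)

#define for_each_host_safe(host, pos, next, chain, table) \
	for ((chain) = (table); \
	     (chain) < (table) + NLM_HOST_NRHASH; ++(chain)) \
		hlist_for_each_entry_safe((host), (pos), (next), \
						(chain), h_hash)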
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
index e0c918949644..23d7451b2938 100644
--- a/fs/lockd/mon.c
+++ b/fs/lockd/mon.c
@@ -401,26 +401,22 @@ void nsm_release(struct nsm_handle *nsm)
401 * Status Monitor wire protocol. 401 * Status Monitor wire protocol.
402 */ 402 */
403 403
404static int encode_nsm_string(struct xdr_stream *xdr, const char *string) 404static void encode_nsm_string(struct xdr_stream *xdr, const char *string)
405{ 405{
406 const u32 len = strlen(string); 406 const u32 len = strlen(string);
407 __be32 *p; 407 __be32 *p;
408 408
409 if (unlikely(len > SM_MAXSTRLEN)) 409 BUG_ON(len > SM_MAXSTRLEN);
410 return -EIO; 410 p = xdr_reserve_space(xdr, 4 + len);
411 p = xdr_reserve_space(xdr, sizeof(u32) + len);
412 if (unlikely(p == NULL))
413 return -EIO;
414 xdr_encode_opaque(p, string, len); 411 xdr_encode_opaque(p, string, len);
415 return 0;
416} 412}
417 413
418/* 414/*
419 * "mon_name" specifies the host to be monitored. 415 * "mon_name" specifies the host to be monitored.
420 */ 416 */
421static int encode_mon_name(struct xdr_stream *xdr, const struct nsm_args *argp) 417static void encode_mon_name(struct xdr_stream *xdr, const struct nsm_args *argp)
422{ 418{
423 return encode_nsm_string(xdr, argp->mon_name); 419 encode_nsm_string(xdr, argp->mon_name);
424} 420}
425 421
426/* 422/*
@@ -429,35 +425,25 @@ static int encode_mon_name(struct xdr_stream *xdr, const struct nsm_args *argp)
429 * (via the NLMPROC_SM_NOTIFY call) that the state of host "mon_name" 425 * (via the NLMPROC_SM_NOTIFY call) that the state of host "mon_name"
430 * has changed. 426 * has changed.
431 */ 427 */
432static int encode_my_id(struct xdr_stream *xdr, const struct nsm_args *argp) 428static void encode_my_id(struct xdr_stream *xdr, const struct nsm_args *argp)
433{ 429{
434 int status;
435 __be32 *p; 430 __be32 *p;
436 431
437 status = encode_nsm_string(xdr, utsname()->nodename); 432 encode_nsm_string(xdr, utsname()->nodename);
438 if (unlikely(status != 0)) 433 p = xdr_reserve_space(xdr, 4 + 4 + 4);
439 return status; 434 *p++ = cpu_to_be32(argp->prog);
440 p = xdr_reserve_space(xdr, 3 * sizeof(u32)); 435 *p++ = cpu_to_be32(argp->vers);
441 if (unlikely(p == NULL)) 436 *p = cpu_to_be32(argp->proc);
442 return -EIO;
443 *p++ = htonl(argp->prog);
444 *p++ = htonl(argp->vers);
445 *p++ = htonl(argp->proc);
446 return 0;
447} 437}
448 438
449/* 439/*
450 * The "mon_id" argument specifies the non-private arguments 440 * The "mon_id" argument specifies the non-private arguments
451 * of an NSMPROC_MON or NSMPROC_UNMON call. 441 * of an NSMPROC_MON or NSMPROC_UNMON call.
452 */ 442 */
453static int encode_mon_id(struct xdr_stream *xdr, const struct nsm_args *argp) 443static void encode_mon_id(struct xdr_stream *xdr, const struct nsm_args *argp)
454{ 444{
455 int status; 445 encode_mon_name(xdr, argp);
456 446 encode_my_id(xdr, argp);
457 status = encode_mon_name(xdr, argp);
458 if (unlikely(status != 0))
459 return status;
460 return encode_my_id(xdr, argp);
461} 447}
462 448
463/* 449/*
@@ -465,68 +451,56 @@ static int encode_mon_id(struct xdr_stream *xdr, const struct nsm_args *argp)
465 * by the NSMPROC_MON call. This information will be supplied in the 451 * by the NSMPROC_MON call. This information will be supplied in the
466 * NLMPROC_SM_NOTIFY call. 452 * NLMPROC_SM_NOTIFY call.
467 */ 453 */
468static int encode_priv(struct xdr_stream *xdr, const struct nsm_args *argp) 454static void encode_priv(struct xdr_stream *xdr, const struct nsm_args *argp)
469{ 455{
470 __be32 *p; 456 __be32 *p;
471 457
472 p = xdr_reserve_space(xdr, SM_PRIV_SIZE); 458 p = xdr_reserve_space(xdr, SM_PRIV_SIZE);
473 if (unlikely(p == NULL))
474 return -EIO;
475 xdr_encode_opaque_fixed(p, argp->priv->data, SM_PRIV_SIZE); 459 xdr_encode_opaque_fixed(p, argp->priv->data, SM_PRIV_SIZE);
476 return 0;
477} 460}
478 461
479static int xdr_enc_mon(struct rpc_rqst *req, __be32 *p, 462static void nsm_xdr_enc_mon(struct rpc_rqst *req, struct xdr_stream *xdr,
480 const struct nsm_args *argp) 463 const struct nsm_args *argp)
481{ 464{
482 struct xdr_stream xdr; 465 encode_mon_id(xdr, argp);
483 int status; 466 encode_priv(xdr, argp);
484
485 xdr_init_encode(&xdr, &req->rq_snd_buf, p);
486 status = encode_mon_id(&xdr, argp);
487 if (unlikely(status))
488 return status;
489 return encode_priv(&xdr, argp);
490} 467}
491 468
492static int xdr_enc_unmon(struct rpc_rqst *req, __be32 *p, 469static void nsm_xdr_enc_unmon(struct rpc_rqst *req, struct xdr_stream *xdr,
493 const struct nsm_args *argp) 470 const struct nsm_args *argp)
494{ 471{
495 struct xdr_stream xdr; 472 encode_mon_id(xdr, argp);
496
497 xdr_init_encode(&xdr, &req->rq_snd_buf, p);
498 return encode_mon_id(&xdr, argp);
499} 473}
500 474
501static int xdr_dec_stat_res(struct rpc_rqst *rqstp, __be32 *p, 475static int nsm_xdr_dec_stat_res(struct rpc_rqst *rqstp,
502 struct nsm_res *resp) 476 struct xdr_stream *xdr,
477 struct nsm_res *resp)
503{ 478{
504 struct xdr_stream xdr; 479 __be32 *p;
505 480
506 xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); 481 p = xdr_inline_decode(xdr, 4 + 4);
507 p = xdr_inline_decode(&xdr, 2 * sizeof(u32));
508 if (unlikely(p == NULL)) 482 if (unlikely(p == NULL))
509 return -EIO; 483 return -EIO;
510 resp->status = ntohl(*p++); 484 resp->status = be32_to_cpup(p++);
511 resp->state = ntohl(*p); 485 resp->state = be32_to_cpup(p);
512 486
513 dprintk("lockd: xdr_dec_stat_res status %d state %d\n", 487 dprintk("lockd: %s status %d state %d\n",
514 resp->status, resp->state); 488 __func__, resp->status, resp->state);
515 return 0; 489 return 0;
516} 490}
517 491
518static int xdr_dec_stat(struct rpc_rqst *rqstp, __be32 *p, 492static int nsm_xdr_dec_stat(struct rpc_rqst *rqstp,
519 struct nsm_res *resp) 493 struct xdr_stream *xdr,
494 struct nsm_res *resp)
520{ 495{
521 struct xdr_stream xdr; 496 __be32 *p;
522 497
523 xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); 498 p = xdr_inline_decode(xdr, 4);
524 p = xdr_inline_decode(&xdr, sizeof(u32));
525 if (unlikely(p == NULL)) 499 if (unlikely(p == NULL))
526 return -EIO; 500 return -EIO;
527 resp->state = ntohl(*p); 501 resp->state = be32_to_cpup(p);
528 502
529 dprintk("lockd: xdr_dec_stat state %d\n", resp->state); 503 dprintk("lockd: %s state %d\n", __func__, resp->state);
530 return 0; 504 return 0;
531} 505}
532 506
@@ -542,8 +516,8 @@ static int xdr_dec_stat(struct rpc_rqst *rqstp, __be32 *p,
542static struct rpc_procinfo nsm_procedures[] = { 516static struct rpc_procinfo nsm_procedures[] = {
543[NSMPROC_MON] = { 517[NSMPROC_MON] = {
544 .p_proc = NSMPROC_MON, 518 .p_proc = NSMPROC_MON,
545 .p_encode = (kxdrproc_t)xdr_enc_mon, 519 .p_encode = (kxdreproc_t)nsm_xdr_enc_mon,
546 .p_decode = (kxdrproc_t)xdr_dec_stat_res, 520 .p_decode = (kxdrdproc_t)nsm_xdr_dec_stat_res,
547 .p_arglen = SM_mon_sz, 521 .p_arglen = SM_mon_sz,
548 .p_replen = SM_monres_sz, 522 .p_replen = SM_monres_sz,
549 .p_statidx = NSMPROC_MON, 523 .p_statidx = NSMPROC_MON,
@@ -551,8 +525,8 @@ static struct rpc_procinfo nsm_procedures[] = {
551 }, 525 },
552[NSMPROC_UNMON] = { 526[NSMPROC_UNMON] = {
553 .p_proc = NSMPROC_UNMON, 527 .p_proc = NSMPROC_UNMON,
554 .p_encode = (kxdrproc_t)xdr_enc_unmon, 528 .p_encode = (kxdreproc_t)nsm_xdr_enc_unmon,
555 .p_decode = (kxdrproc_t)xdr_dec_stat, 529 .p_decode = (kxdrdproc_t)nsm_xdr_dec_stat,
556 .p_arglen = SM_mon_id_sz, 530 .p_arglen = SM_mon_id_sz,
557 .p_replen = SM_unmonres_sz, 531 .p_replen = SM_unmonres_sz,
558 .p_statidx = NSMPROC_UNMON, 532 .p_statidx = NSMPROC_UNMON,
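The mon.c hunks port NSM to the split XDR calling convention: encoders become void (the sunrpc core initializes the xdr_stream, and a reservation failure is a programming error, hence the BUG_ON), while decoders keep returning -EIO for short replies. A minimal sketch of one such pair for a hypothetical one-word argument (demo_args and both function names are illustrative, not part of the patch):

struct demo_args { u32 value; };

static void demo_xdr_enc(struct rpc_rqst *req, struct xdr_stream *xdr,
			 const struct demo_args *argp)
{
	/* the send buffer was sized from p_arglen, so this succeeds */
	__be32 *p = xdr_reserve_space(xdr, 4);

	*p = cpu_to_be32(argp->value);
}

static int demo_xdr_dec(struct rpc_rqst *req, struct xdr_stream *xdr,
			struct demo_args *resp)
{
	__be32 *p = xdr_inline_decode(xdr, 4);

	if (unlikely(p == NULL))
		return -EIO;	/* truncated reply */
	resp->value = be32_to_cpup(p);
	return 0;
}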
diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c
index 38d261192453..9a41fdc19511 100644
--- a/fs/lockd/svc4proc.c
+++ b/fs/lockd/svc4proc.c
@@ -51,7 +51,7 @@ nlm4svc_retrieve_args(struct svc_rqst *rqstp, struct nlm_args *argp,
51 return 0; 51 return 0;
52 52
53no_locks: 53no_locks:
54 nlm_release_host(host); 54 nlmsvc_release_host(host);
55 if (error) 55 if (error)
56 return error; 56 return error;
57 return nlm_lck_denied_nolocks; 57 return nlm_lck_denied_nolocks;
@@ -92,7 +92,7 @@ nlm4svc_proc_test(struct svc_rqst *rqstp, struct nlm_args *argp,
92 else 92 else
93 dprintk("lockd: TEST4 status %d\n", ntohl(resp->status)); 93 dprintk("lockd: TEST4 status %d\n", ntohl(resp->status));
94 94
95 nlm_release_host(host); 95 nlmsvc_release_host(host);
96 nlm_release_file(file); 96 nlm_release_file(file);
97 return rc; 97 return rc;
98} 98}
@@ -134,7 +134,7 @@ nlm4svc_proc_lock(struct svc_rqst *rqstp, struct nlm_args *argp,
134 else 134 else
135 dprintk("lockd: LOCK status %d\n", ntohl(resp->status)); 135 dprintk("lockd: LOCK status %d\n", ntohl(resp->status));
136 136
137 nlm_release_host(host); 137 nlmsvc_release_host(host);
138 nlm_release_file(file); 138 nlm_release_file(file);
139 return rc; 139 return rc;
140} 140}
@@ -164,7 +164,7 @@ nlm4svc_proc_cancel(struct svc_rqst *rqstp, struct nlm_args *argp,
164 resp->status = nlmsvc_cancel_blocked(file, &argp->lock); 164 resp->status = nlmsvc_cancel_blocked(file, &argp->lock);
165 165
166 dprintk("lockd: CANCEL status %d\n", ntohl(resp->status)); 166 dprintk("lockd: CANCEL status %d\n", ntohl(resp->status));
167 nlm_release_host(host); 167 nlmsvc_release_host(host);
168 nlm_release_file(file); 168 nlm_release_file(file);
169 return rpc_success; 169 return rpc_success;
170} 170}
@@ -197,7 +197,7 @@ nlm4svc_proc_unlock(struct svc_rqst *rqstp, struct nlm_args *argp,
197 resp->status = nlmsvc_unlock(file, &argp->lock); 197 resp->status = nlmsvc_unlock(file, &argp->lock);
198 198
199 dprintk("lockd: UNLOCK status %d\n", ntohl(resp->status)); 199 dprintk("lockd: UNLOCK status %d\n", ntohl(resp->status));
200 nlm_release_host(host); 200 nlmsvc_release_host(host);
201 nlm_release_file(file); 201 nlm_release_file(file);
202 return rpc_success; 202 return rpc_success;
203} 203}
@@ -229,7 +229,7 @@ static void nlm4svc_callback_exit(struct rpc_task *task, void *data)
229 229
230static void nlm4svc_callback_release(void *data) 230static void nlm4svc_callback_release(void *data)
231{ 231{
232 nlm_release_call(data); 232 nlmsvc_release_call(data);
233} 233}
234 234
235static const struct rpc_call_ops nlm4svc_callback_ops = { 235static const struct rpc_call_ops nlm4svc_callback_ops = {
@@ -261,7 +261,7 @@ static __be32 nlm4svc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_args
261 261
262 stat = func(rqstp, argp, &call->a_res); 262 stat = func(rqstp, argp, &call->a_res);
263 if (stat != 0) { 263 if (stat != 0) {
264 nlm_release_call(call); 264 nlmsvc_release_call(call);
265 return stat; 265 return stat;
266 } 266 }
267 267
@@ -334,7 +334,7 @@ nlm4svc_proc_share(struct svc_rqst *rqstp, struct nlm_args *argp,
334 resp->status = nlmsvc_share_file(host, file, argp); 334 resp->status = nlmsvc_share_file(host, file, argp);
335 335
336 dprintk("lockd: SHARE status %d\n", ntohl(resp->status)); 336 dprintk("lockd: SHARE status %d\n", ntohl(resp->status));
337 nlm_release_host(host); 337 nlmsvc_release_host(host);
338 nlm_release_file(file); 338 nlm_release_file(file);
339 return rpc_success; 339 return rpc_success;
340} 340}
@@ -367,7 +367,7 @@ nlm4svc_proc_unshare(struct svc_rqst *rqstp, struct nlm_args *argp,
367 resp->status = nlmsvc_unshare_file(host, file, argp); 367 resp->status = nlmsvc_unshare_file(host, file, argp);
368 368
369 dprintk("lockd: UNSHARE status %d\n", ntohl(resp->status)); 369 dprintk("lockd: UNSHARE status %d\n", ntohl(resp->status));
370 nlm_release_host(host); 370 nlmsvc_release_host(host);
371 nlm_release_file(file); 371 nlm_release_file(file);
372 return rpc_success; 372 return rpc_success;
373} 373}
@@ -399,7 +399,7 @@ nlm4svc_proc_free_all(struct svc_rqst *rqstp, struct nlm_args *argp,
399 return rpc_success; 399 return rpc_success;
400 400
401 nlmsvc_free_host_resources(host); 401 nlmsvc_free_host_resources(host);
402 nlm_release_host(host); 402 nlmsvc_release_host(host);
403 return rpc_success; 403 return rpc_success;
404} 404}
405 405
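Every server-side procedure above swaps nlm_release_host() for nlmsvc_release_host(), keeping the server's host reference counting separate from the client's nlmclnt_release_host(). The common shape they all follow (a sketch only; demo_proc stands in for the per-procedure handlers):

static __be32 demo_proc(struct svc_rqst *rqstp, struct nlm_args *argp,
			struct nlm_res *resp)
{
	struct nlm_host *host;
	struct nlm_file *file;
	__be32 rc;

	rc = nlm4svc_retrieve_args(rqstp, argp, &host, &file);
	if (rc != 0)
		return rc;	/* no host/file reference was taken */

	resp->status = nlm_granted;	/* stand-in for the real work */

	nlmsvc_release_host(host);	/* always paired with the lookup */
	nlm_release_file(file);
	return rpc_success;
}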
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index ef5659b211e9..6e31695d046f 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -46,6 +46,7 @@ static void nlmsvc_remove_block(struct nlm_block *block);
46static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock); 46static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock);
47static void nlmsvc_freegrantargs(struct nlm_rqst *call); 47static void nlmsvc_freegrantargs(struct nlm_rqst *call);
48static const struct rpc_call_ops nlmsvc_grant_ops; 48static const struct rpc_call_ops nlmsvc_grant_ops;
49static const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie);
49 50
50/* 51/*
51 * The list of blocked locks to retry 52 * The list of blocked locks to retry
@@ -233,7 +234,7 @@ nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_host *host,
233failed_free: 234failed_free:
234 kfree(block); 235 kfree(block);
235failed: 236failed:
236 nlm_release_call(call); 237 nlmsvc_release_call(call);
237 return NULL; 238 return NULL;
238} 239}
239 240
@@ -266,7 +267,7 @@ static void nlmsvc_free_block(struct kref *kref)
266 mutex_unlock(&file->f_mutex); 267 mutex_unlock(&file->f_mutex);
267 268
268 nlmsvc_freegrantargs(block->b_call); 269 nlmsvc_freegrantargs(block->b_call);
269 nlm_release_call(block->b_call); 270 nlmsvc_release_call(block->b_call);
270 nlm_release_file(block->b_file); 271 nlm_release_file(block->b_file);
271 kfree(block->b_fl); 272 kfree(block->b_fl);
272 kfree(block); 273 kfree(block);
@@ -934,3 +935,32 @@ nlmsvc_retry_blocked(void)
934 935
935 return timeout; 936 return timeout;
936} 937}
938
939#ifdef RPC_DEBUG
940static const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie)
941{
942 /*
943 * We can get away with a static buffer because we're only
944 * called with BKL held.
945 */
946 static char buf[2*NLM_MAXCOOKIELEN+1];
947 unsigned int i, len = sizeof(buf);
948 char *p = buf;
949
950 len--; /* allow for trailing \0 */
951 if (len < 3)
952 return "???";
953 for (i = 0 ; i < cookie->len ; i++) {
954 if (len < 2) {
955 strcpy(p-3, "...");
956 break;
957 }
958 sprintf(p, "%02x", cookie->data[i]);
959 p += 2;
960 len -= 2;
961 }
962 *p = '\0';
963
964 return buf;
965}
966#endif
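nlmdbg_cookie2a() formats an opaque NLM cookie as lower-case hex for dprintk(); the copy added here is private to svclock.c (see the static forward declaration at the top of this file's hunk). A standalone user-space illustration of the same formatting loop:

#include <stdio.h>

int main(void)
{
	const unsigned char cookie[] = { 0xde, 0xad, 0xbe, 0xef };
	char buf[2 * sizeof(cookie) + 1], *p = buf;
	size_t i;

	for (i = 0; i < sizeof(cookie); i++)
		p += sprintf(p, "%02x", cookie[i]);	/* two digits per byte */
	printf("%s\n", buf);	/* prints: deadbeef */
	return 0;
}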
diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c
index 0caea5310ac3..d27aab11f324 100644
--- a/fs/lockd/svcproc.c
+++ b/fs/lockd/svcproc.c
@@ -80,7 +80,7 @@ nlmsvc_retrieve_args(struct svc_rqst *rqstp, struct nlm_args *argp,
80 return 0; 80 return 0;
81 81
82no_locks: 82no_locks:
83 nlm_release_host(host); 83 nlmsvc_release_host(host);
84 if (error) 84 if (error)
85 return error; 85 return error;
86 return nlm_lck_denied_nolocks; 86 return nlm_lck_denied_nolocks;
@@ -122,7 +122,7 @@ nlmsvc_proc_test(struct svc_rqst *rqstp, struct nlm_args *argp,
122 dprintk("lockd: TEST status %d vers %d\n", 122 dprintk("lockd: TEST status %d vers %d\n",
123 ntohl(resp->status), rqstp->rq_vers); 123 ntohl(resp->status), rqstp->rq_vers);
124 124
125 nlm_release_host(host); 125 nlmsvc_release_host(host);
126 nlm_release_file(file); 126 nlm_release_file(file);
127 return rc; 127 return rc;
128} 128}
@@ -164,7 +164,7 @@ nlmsvc_proc_lock(struct svc_rqst *rqstp, struct nlm_args *argp,
164 else 164 else
165 dprintk("lockd: LOCK status %d\n", ntohl(resp->status)); 165 dprintk("lockd: LOCK status %d\n", ntohl(resp->status));
166 166
167 nlm_release_host(host); 167 nlmsvc_release_host(host);
168 nlm_release_file(file); 168 nlm_release_file(file);
169 return rc; 169 return rc;
170} 170}
@@ -194,7 +194,7 @@ nlmsvc_proc_cancel(struct svc_rqst *rqstp, struct nlm_args *argp,
194 resp->status = cast_status(nlmsvc_cancel_blocked(file, &argp->lock)); 194 resp->status = cast_status(nlmsvc_cancel_blocked(file, &argp->lock));
195 195
196 dprintk("lockd: CANCEL status %d\n", ntohl(resp->status)); 196 dprintk("lockd: CANCEL status %d\n", ntohl(resp->status));
197 nlm_release_host(host); 197 nlmsvc_release_host(host);
198 nlm_release_file(file); 198 nlm_release_file(file);
199 return rpc_success; 199 return rpc_success;
200} 200}
@@ -227,7 +227,7 @@ nlmsvc_proc_unlock(struct svc_rqst *rqstp, struct nlm_args *argp,
227 resp->status = cast_status(nlmsvc_unlock(file, &argp->lock)); 227 resp->status = cast_status(nlmsvc_unlock(file, &argp->lock));
228 228
229 dprintk("lockd: UNLOCK status %d\n", ntohl(resp->status)); 229 dprintk("lockd: UNLOCK status %d\n", ntohl(resp->status));
230 nlm_release_host(host); 230 nlmsvc_release_host(host);
231 nlm_release_file(file); 231 nlm_release_file(file);
232 return rpc_success; 232 return rpc_success;
233} 233}
@@ -257,9 +257,17 @@ static void nlmsvc_callback_exit(struct rpc_task *task, void *data)
257 -task->tk_status); 257 -task->tk_status);
258} 258}
259 259
260void nlmsvc_release_call(struct nlm_rqst *call)
261{
262 if (!atomic_dec_and_test(&call->a_count))
263 return;
264 nlmsvc_release_host(call->a_host);
265 kfree(call);
266}
267
260static void nlmsvc_callback_release(void *data) 268static void nlmsvc_callback_release(void *data)
261{ 269{
262 nlm_release_call(data); 270 nlmsvc_release_call(data);
263} 271}
264 272
265static const struct rpc_call_ops nlmsvc_callback_ops = { 273static const struct rpc_call_ops nlmsvc_callback_ops = {
@@ -291,7 +299,7 @@ static __be32 nlmsvc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_args
291 299
292 stat = func(rqstp, argp, &call->a_res); 300 stat = func(rqstp, argp, &call->a_res);
293 if (stat != 0) { 301 if (stat != 0) {
294 nlm_release_call(call); 302 nlmsvc_release_call(call);
295 return stat; 303 return stat;
296 } 304 }
297 305
@@ -366,7 +374,7 @@ nlmsvc_proc_share(struct svc_rqst *rqstp, struct nlm_args *argp,
366 resp->status = cast_status(nlmsvc_share_file(host, file, argp)); 374 resp->status = cast_status(nlmsvc_share_file(host, file, argp));
367 375
368 dprintk("lockd: SHARE status %d\n", ntohl(resp->status)); 376 dprintk("lockd: SHARE status %d\n", ntohl(resp->status));
369 nlm_release_host(host); 377 nlmsvc_release_host(host);
370 nlm_release_file(file); 378 nlm_release_file(file);
371 return rpc_success; 379 return rpc_success;
372} 380}
@@ -399,7 +407,7 @@ nlmsvc_proc_unshare(struct svc_rqst *rqstp, struct nlm_args *argp,
399 resp->status = cast_status(nlmsvc_unshare_file(host, file, argp)); 407 resp->status = cast_status(nlmsvc_unshare_file(host, file, argp));
400 408
401 dprintk("lockd: UNSHARE status %d\n", ntohl(resp->status)); 409 dprintk("lockd: UNSHARE status %d\n", ntohl(resp->status));
402 nlm_release_host(host); 410 nlmsvc_release_host(host);
403 nlm_release_file(file); 411 nlm_release_file(file);
404 return rpc_success; 412 return rpc_success;
405} 413}
@@ -431,7 +439,7 @@ nlmsvc_proc_free_all(struct svc_rqst *rqstp, struct nlm_args *argp,
431 return rpc_success; 439 return rpc_success;
432 440
433 nlmsvc_free_host_resources(host); 441 nlmsvc_free_host_resources(host);
434 nlm_release_host(host); 442 nlmsvc_release_host(host);
435 return rpc_success; 443 return rpc_success;
436} 444}
437 445
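nlmsvc_release_call(), added in the hunk above, is a plain atomic put: the last reference drops the embedded host and frees the request. The intended pairing (a sketch, not code from the patch) is that whoever queues the call for an async RPC holds one reference for the task and one for itself:

static void demo_submit(struct nlm_rqst *call)
{
	atomic_inc(&call->a_count);	/* reference owned by the RPC task */
	/* ... start the async RPC; nlmsvc_callback_release() does the
	 * task's put via nlmsvc_release_call() on teardown ... */
	nlmsvc_release_call(call);	/* submitter's own put */
}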
diff --git a/fs/lockd/xdr.c b/fs/lockd/xdr.c
index b583ab0a4cbb..964666c68a86 100644
--- a/fs/lockd/xdr.c
+++ b/fs/lockd/xdr.c
@@ -149,37 +149,6 @@ nlm_decode_lock(__be32 *p, struct nlm_lock *lock)
149} 149}
150 150
151/* 151/*
152 * Encode a lock as part of an NLM call
153 */
154static __be32 *
155nlm_encode_lock(__be32 *p, struct nlm_lock *lock)
156{
157 struct file_lock *fl = &lock->fl;
158 __s32 start, len;
159
160 if (!(p = xdr_encode_string(p, lock->caller))
161 || !(p = nlm_encode_fh(p, &lock->fh))
162 || !(p = nlm_encode_oh(p, &lock->oh)))
163 return NULL;
164
165 if (fl->fl_start > NLM_OFFSET_MAX
166 || (fl->fl_end > NLM_OFFSET_MAX && fl->fl_end != OFFSET_MAX))
167 return NULL;
168
169 start = loff_t_to_s32(fl->fl_start);
170 if (fl->fl_end == OFFSET_MAX)
171 len = 0;
172 else
173 len = loff_t_to_s32(fl->fl_end - fl->fl_start + 1);
174
175 *p++ = htonl(lock->svid);
176 *p++ = htonl(start);
177 *p++ = htonl(len);
178
179 return p;
180}
181
182/*
183 * Encode result of a TEST/TEST_MSG call 152 * Encode result of a TEST/TEST_MSG call
184 */ 153 */
185static __be32 * 154static __be32 *
@@ -372,259 +341,3 @@ nlmsvc_encode_void(struct svc_rqst *rqstp, __be32 *p, void *dummy)
372{ 341{
373 return xdr_ressize_check(rqstp, p); 342 return xdr_ressize_check(rqstp, p);
374} 343}
375
376/*
377 * Now, the client side XDR functions
378 */
379#ifdef NLMCLNT_SUPPORT_SHARES
380static int
381nlmclt_decode_void(struct rpc_rqst *req, u32 *p, void *ptr)
382{
383 return 0;
384}
385#endif
386
387static int
388nlmclt_encode_testargs(struct rpc_rqst *req, __be32 *p, nlm_args *argp)
389{
390 struct nlm_lock *lock = &argp->lock;
391
392 if (!(p = nlm_encode_cookie(p, &argp->cookie)))
393 return -EIO;
394 *p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
395 if (!(p = nlm_encode_lock(p, lock)))
396 return -EIO;
397 req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
398 return 0;
399}
400
401static int
402nlmclt_decode_testres(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp)
403{
404 if (!(p = nlm_decode_cookie(p, &resp->cookie)))
405 return -EIO;
406 resp->status = *p++;
407 if (resp->status == nlm_lck_denied) {
408 struct file_lock *fl = &resp->lock.fl;
409 u32 excl;
410 s32 start, len, end;
411
412 memset(&resp->lock, 0, sizeof(resp->lock));
413 locks_init_lock(fl);
414 excl = ntohl(*p++);
415 resp->lock.svid = ntohl(*p++);
416 fl->fl_pid = (pid_t)resp->lock.svid;
417 if (!(p = nlm_decode_oh(p, &resp->lock.oh)))
418 return -EIO;
419
420 fl->fl_flags = FL_POSIX;
421 fl->fl_type = excl? F_WRLCK : F_RDLCK;
422 start = ntohl(*p++);
423 len = ntohl(*p++);
424 end = start + len - 1;
425
426 fl->fl_start = s32_to_loff_t(start);
427 if (len == 0 || end < 0)
428 fl->fl_end = OFFSET_MAX;
429 else
430 fl->fl_end = s32_to_loff_t(end);
431 }
432 return 0;
433}
434
435
436static int
437nlmclt_encode_lockargs(struct rpc_rqst *req, __be32 *p, nlm_args *argp)
438{
439 struct nlm_lock *lock = &argp->lock;
440
441 if (!(p = nlm_encode_cookie(p, &argp->cookie)))
442 return -EIO;
443 *p++ = argp->block? xdr_one : xdr_zero;
444 *p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
445 if (!(p = nlm_encode_lock(p, lock)))
446 return -EIO;
447 *p++ = argp->reclaim? xdr_one : xdr_zero;
448 *p++ = htonl(argp->state);
449 req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
450 return 0;
451}
452
453static int
454nlmclt_encode_cancargs(struct rpc_rqst *req, __be32 *p, nlm_args *argp)
455{
456 struct nlm_lock *lock = &argp->lock;
457
458 if (!(p = nlm_encode_cookie(p, &argp->cookie)))
459 return -EIO;
460 *p++ = argp->block? xdr_one : xdr_zero;
461 *p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
462 if (!(p = nlm_encode_lock(p, lock)))
463 return -EIO;
464 req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
465 return 0;
466}
467
468static int
469nlmclt_encode_unlockargs(struct rpc_rqst *req, __be32 *p, nlm_args *argp)
470{
471 struct nlm_lock *lock = &argp->lock;
472
473 if (!(p = nlm_encode_cookie(p, &argp->cookie)))
474 return -EIO;
475 if (!(p = nlm_encode_lock(p, lock)))
476 return -EIO;
477 req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
478 return 0;
479}
480
481static int
482nlmclt_encode_res(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp)
483{
484 if (!(p = nlm_encode_cookie(p, &resp->cookie)))
485 return -EIO;
486 *p++ = resp->status;
487 req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
488 return 0;
489}
490
491static int
492nlmclt_encode_testres(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp)
493{
494 if (!(p = nlm_encode_testres(p, resp)))
495 return -EIO;
496 req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
497 return 0;
498}
499
500static int
501nlmclt_decode_res(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp)
502{
503 if (!(p = nlm_decode_cookie(p, &resp->cookie)))
504 return -EIO;
505 resp->status = *p++;
506 return 0;
507}
508
509#if (NLMCLNT_OHSIZE > XDR_MAX_NETOBJ)
510# error "NLM host name cannot be larger than XDR_MAX_NETOBJ!"
511#endif
512
513/*
514 * Buffer requirements for NLM
515 */
516#define NLM_void_sz 0
517#define NLM_cookie_sz 1+XDR_QUADLEN(NLM_MAXCOOKIELEN)
518#define NLM_caller_sz 1+XDR_QUADLEN(NLMCLNT_OHSIZE)
519#define NLM_owner_sz 1+XDR_QUADLEN(NLMCLNT_OHSIZE)
520#define NLM_fhandle_sz 1+XDR_QUADLEN(NFS2_FHSIZE)
521#define NLM_lock_sz 3+NLM_caller_sz+NLM_owner_sz+NLM_fhandle_sz
522#define NLM_holder_sz 4+NLM_owner_sz
523
524#define NLM_testargs_sz NLM_cookie_sz+1+NLM_lock_sz
525#define NLM_lockargs_sz NLM_cookie_sz+4+NLM_lock_sz
526#define NLM_cancargs_sz NLM_cookie_sz+2+NLM_lock_sz
527#define NLM_unlockargs_sz NLM_cookie_sz+NLM_lock_sz
528
529#define NLM_testres_sz NLM_cookie_sz+1+NLM_holder_sz
530#define NLM_res_sz NLM_cookie_sz+1
531#define NLM_norep_sz 0
532
533/*
534 * For NLM, a void procedure really returns nothing
535 */
536#define nlmclt_decode_norep NULL
537
538#define PROC(proc, argtype, restype) \
539[NLMPROC_##proc] = { \
540 .p_proc = NLMPROC_##proc, \
541 .p_encode = (kxdrproc_t) nlmclt_encode_##argtype, \
542 .p_decode = (kxdrproc_t) nlmclt_decode_##restype, \
543 .p_arglen = NLM_##argtype##_sz, \
544 .p_replen = NLM_##restype##_sz, \
545 .p_statidx = NLMPROC_##proc, \
546 .p_name = #proc, \
547 }
548
549static struct rpc_procinfo nlm_procedures[] = {
550 PROC(TEST, testargs, testres),
551 PROC(LOCK, lockargs, res),
552 PROC(CANCEL, cancargs, res),
553 PROC(UNLOCK, unlockargs, res),
554 PROC(GRANTED, testargs, res),
555 PROC(TEST_MSG, testargs, norep),
556 PROC(LOCK_MSG, lockargs, norep),
557 PROC(CANCEL_MSG, cancargs, norep),
558 PROC(UNLOCK_MSG, unlockargs, norep),
559 PROC(GRANTED_MSG, testargs, norep),
560 PROC(TEST_RES, testres, norep),
561 PROC(LOCK_RES, res, norep),
562 PROC(CANCEL_RES, res, norep),
563 PROC(UNLOCK_RES, res, norep),
564 PROC(GRANTED_RES, res, norep),
565#ifdef NLMCLNT_SUPPORT_SHARES
566 PROC(SHARE, shareargs, shareres),
567 PROC(UNSHARE, shareargs, shareres),
568 PROC(NM_LOCK, lockargs, res),
569 PROC(FREE_ALL, notify, void),
570#endif
571};
572
573static struct rpc_version nlm_version1 = {
574 .number = 1,
575 .nrprocs = 16,
576 .procs = nlm_procedures,
577};
578
579static struct rpc_version nlm_version3 = {
580 .number = 3,
581 .nrprocs = 24,
582 .procs = nlm_procedures,
583};
584
585static struct rpc_version * nlm_versions[] = {
586 [1] = &nlm_version1,
587 [3] = &nlm_version3,
588#ifdef CONFIG_LOCKD_V4
589 [4] = &nlm_version4,
590#endif
591};
592
593static struct rpc_stat nlm_stats;
594
595struct rpc_program nlm_program = {
596 .name = "lockd",
597 .number = NLM_PROGRAM,
598 .nrvers = ARRAY_SIZE(nlm_versions),
599 .version = nlm_versions,
600 .stats = &nlm_stats,
601};
602
603#ifdef RPC_DEBUG
604const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie)
605{
606 /*
607 * We can get away with a static buffer because we're only
608 * called with BKL held.
609 */
610 static char buf[2*NLM_MAXCOOKIELEN+1];
611 unsigned int i, len = sizeof(buf);
612 char *p = buf;
613
614 len--; /* allow for trailing \0 */
615 if (len < 3)
616 return "???";
617 for (i = 0 ; i < cookie->len ; i++) {
618 if (len < 2) {
619 strcpy(p-3, "...");
620 break;
621 }
622 sprintf(p, "%02x", cookie->data[i]);
623 p += 2;
624 len -= 2;
625 }
626 *p = '\0';
627
628 return buf;
629}
630#endif
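The NLM_*_sz values removed above count 32-bit XDR words, not bytes. Worked example, assuming NLM_MAXCOOKIELEN is 32 as in the lockd headers:

/* XDR_QUADLEN(n) rounds n bytes up to 4-byte words */
#define XDR_QUADLEN_DEMO(n)	(((n) + 3) >> 2)

/*
 * NLM_cookie_sz = 1 + XDR_QUADLEN(32) = 1 + 8 = 9 words (36 bytes):
 * one word for the opaque length, eight for the padded payload.
 */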
diff --git a/fs/lockd/xdr4.c b/fs/lockd/xdr4.c
index ad9dbbc9145d..dfa4789cd460 100644
--- a/fs/lockd/xdr4.c
+++ b/fs/lockd/xdr4.c
@@ -93,15 +93,6 @@ nlm4_decode_fh(__be32 *p, struct nfs_fh *f)
93 return p + XDR_QUADLEN(f->size); 93 return p + XDR_QUADLEN(f->size);
94} 94}
95 95
96static __be32 *
97nlm4_encode_fh(__be32 *p, struct nfs_fh *f)
98{
99 *p++ = htonl(f->size);
100 if (f->size) p[XDR_QUADLEN(f->size)-1] = 0; /* don't leak anything */
101 memcpy(p, f->data, f->size);
102 return p + XDR_QUADLEN(f->size);
103}
104
105/* 96/*
106 * Encode and decode owner handle 97 * Encode and decode owner handle
107 */ 98 */
@@ -112,12 +103,6 @@ nlm4_decode_oh(__be32 *p, struct xdr_netobj *oh)
112} 103}
113 104
114static __be32 * 105static __be32 *
115nlm4_encode_oh(__be32 *p, struct xdr_netobj *oh)
116{
117 return xdr_encode_netobj(p, oh);
118}
119
120static __be32 *
121nlm4_decode_lock(__be32 *p, struct nlm_lock *lock) 106nlm4_decode_lock(__be32 *p, struct nlm_lock *lock)
122{ 107{
123 struct file_lock *fl = &lock->fl; 108 struct file_lock *fl = &lock->fl;
@@ -150,38 +135,6 @@ nlm4_decode_lock(__be32 *p, struct nlm_lock *lock)
150} 135}
151 136
152/* 137/*
153 * Encode a lock as part of an NLM call
154 */
155static __be32 *
156nlm4_encode_lock(__be32 *p, struct nlm_lock *lock)
157{
158 struct file_lock *fl = &lock->fl;
159 __s64 start, len;
160
161 if (!(p = xdr_encode_string(p, lock->caller))
162 || !(p = nlm4_encode_fh(p, &lock->fh))
163 || !(p = nlm4_encode_oh(p, &lock->oh)))
164 return NULL;
165
166 if (fl->fl_start > NLM4_OFFSET_MAX
167 || (fl->fl_end > NLM4_OFFSET_MAX && fl->fl_end != OFFSET_MAX))
168 return NULL;
169
170 *p++ = htonl(lock->svid);
171
172 start = loff_t_to_s64(fl->fl_start);
173 if (fl->fl_end == OFFSET_MAX)
174 len = 0;
175 else
176 len = loff_t_to_s64(fl->fl_end - fl->fl_start + 1);
177
178 p = xdr_encode_hyper(p, start);
179 p = xdr_encode_hyper(p, len);
180
181 return p;
182}
183
184/*
185 * Encode result of a TEST/TEST_MSG call 138 * Encode result of a TEST/TEST_MSG call
186 */ 139 */
187static __be32 * 140static __be32 *
@@ -379,211 +332,3 @@ nlm4svc_encode_void(struct svc_rqst *rqstp, __be32 *p, void *dummy)
379{ 332{
380 return xdr_ressize_check(rqstp, p); 333 return xdr_ressize_check(rqstp, p);
381} 334}
382
383/*
384 * Now, the client side XDR functions
385 */
386#ifdef NLMCLNT_SUPPORT_SHARES
387static int
388nlm4clt_decode_void(struct rpc_rqst *req, __be32 *p, void *ptr)
389{
390 return 0;
391}
392#endif
393
394static int
395nlm4clt_encode_testargs(struct rpc_rqst *req, __be32 *p, nlm_args *argp)
396{
397 struct nlm_lock *lock = &argp->lock;
398
399 if (!(p = nlm4_encode_cookie(p, &argp->cookie)))
400 return -EIO;
401 *p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
402 if (!(p = nlm4_encode_lock(p, lock)))
403 return -EIO;
404 req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
405 return 0;
406}
407
408static int
409nlm4clt_decode_testres(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp)
410{
411 if (!(p = nlm4_decode_cookie(p, &resp->cookie)))
412 return -EIO;
413 resp->status = *p++;
414 if (resp->status == nlm_lck_denied) {
415 struct file_lock *fl = &resp->lock.fl;
416 u32 excl;
417 __u64 start, len;
418 __s64 end;
419
420 memset(&resp->lock, 0, sizeof(resp->lock));
421 locks_init_lock(fl);
422 excl = ntohl(*p++);
423 resp->lock.svid = ntohl(*p++);
424 fl->fl_pid = (pid_t)resp->lock.svid;
425 if (!(p = nlm4_decode_oh(p, &resp->lock.oh)))
426 return -EIO;
427
428 fl->fl_flags = FL_POSIX;
429 fl->fl_type = excl? F_WRLCK : F_RDLCK;
430 p = xdr_decode_hyper(p, &start);
431 p = xdr_decode_hyper(p, &len);
432 end = start + len - 1;
433
434 fl->fl_start = s64_to_loff_t(start);
435 if (len == 0 || end < 0)
436 fl->fl_end = OFFSET_MAX;
437 else
438 fl->fl_end = s64_to_loff_t(end);
439 }
440 return 0;
441}
442
443
444static int
445nlm4clt_encode_lockargs(struct rpc_rqst *req, __be32 *p, nlm_args *argp)
446{
447 struct nlm_lock *lock = &argp->lock;
448
449 if (!(p = nlm4_encode_cookie(p, &argp->cookie)))
450 return -EIO;
451 *p++ = argp->block? xdr_one : xdr_zero;
452 *p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
453 if (!(p = nlm4_encode_lock(p, lock)))
454 return -EIO;
455 *p++ = argp->reclaim? xdr_one : xdr_zero;
456 *p++ = htonl(argp->state);
457 req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
458 return 0;
459}
460
461static int
462nlm4clt_encode_cancargs(struct rpc_rqst *req, __be32 *p, nlm_args *argp)
463{
464 struct nlm_lock *lock = &argp->lock;
465
466 if (!(p = nlm4_encode_cookie(p, &argp->cookie)))
467 return -EIO;
468 *p++ = argp->block? xdr_one : xdr_zero;
469 *p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
470 if (!(p = nlm4_encode_lock(p, lock)))
471 return -EIO;
472 req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
473 return 0;
474}
475
476static int
477nlm4clt_encode_unlockargs(struct rpc_rqst *req, __be32 *p, nlm_args *argp)
478{
479 struct nlm_lock *lock = &argp->lock;
480
481 if (!(p = nlm4_encode_cookie(p, &argp->cookie)))
482 return -EIO;
483 if (!(p = nlm4_encode_lock(p, lock)))
484 return -EIO;
485 req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
486 return 0;
487}
488
489static int
490nlm4clt_encode_res(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp)
491{
492 if (!(p = nlm4_encode_cookie(p, &resp->cookie)))
493 return -EIO;
494 *p++ = resp->status;
495 req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
496 return 0;
497}
498
499static int
500nlm4clt_encode_testres(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp)
501{
502 if (!(p = nlm4_encode_testres(p, resp)))
503 return -EIO;
504 req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
505 return 0;
506}
507
508static int
509nlm4clt_decode_res(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp)
510{
511 if (!(p = nlm4_decode_cookie(p, &resp->cookie)))
512 return -EIO;
513 resp->status = *p++;
514 return 0;
515}
516
517#if (NLMCLNT_OHSIZE > XDR_MAX_NETOBJ)
518# error "NLM host name cannot be larger than XDR_MAX_NETOBJ!"
519#endif
520
521#if (NLMCLNT_OHSIZE > NLM_MAXSTRLEN)
522# error "NLM host name cannot be larger than NLM's maximum string length!"
523#endif
524
525/*
526 * Buffer requirements for NLM
527 */
528#define NLM4_void_sz 0
529#define NLM4_cookie_sz 1+XDR_QUADLEN(NLM_MAXCOOKIELEN)
530#define NLM4_caller_sz 1+XDR_QUADLEN(NLMCLNT_OHSIZE)
531#define NLM4_owner_sz 1+XDR_QUADLEN(NLMCLNT_OHSIZE)
532#define NLM4_fhandle_sz 1+XDR_QUADLEN(NFS3_FHSIZE)
533#define NLM4_lock_sz 5+NLM4_caller_sz+NLM4_owner_sz+NLM4_fhandle_sz
534#define NLM4_holder_sz 6+NLM4_owner_sz
535
536#define NLM4_testargs_sz NLM4_cookie_sz+1+NLM4_lock_sz
537#define NLM4_lockargs_sz NLM4_cookie_sz+4+NLM4_lock_sz
538#define NLM4_cancargs_sz NLM4_cookie_sz+2+NLM4_lock_sz
539#define NLM4_unlockargs_sz NLM4_cookie_sz+NLM4_lock_sz
540
541#define NLM4_testres_sz NLM4_cookie_sz+1+NLM4_holder_sz
542#define NLM4_res_sz NLM4_cookie_sz+1
543#define NLM4_norep_sz 0
544
545/*
546 * For NLM, a void procedure really returns nothing
547 */
548#define nlm4clt_decode_norep NULL
549
550#define PROC(proc, argtype, restype) \
551[NLMPROC_##proc] = { \
552 .p_proc = NLMPROC_##proc, \
553 .p_encode = (kxdrproc_t) nlm4clt_encode_##argtype, \
554 .p_decode = (kxdrproc_t) nlm4clt_decode_##restype, \
555 .p_arglen = NLM4_##argtype##_sz, \
556 .p_replen = NLM4_##restype##_sz, \
557 .p_statidx = NLMPROC_##proc, \
558 .p_name = #proc, \
559 }
560
561static struct rpc_procinfo nlm4_procedures[] = {
562 PROC(TEST, testargs, testres),
563 PROC(LOCK, lockargs, res),
564 PROC(CANCEL, cancargs, res),
565 PROC(UNLOCK, unlockargs, res),
566 PROC(GRANTED, testargs, res),
567 PROC(TEST_MSG, testargs, norep),
568 PROC(LOCK_MSG, lockargs, norep),
569 PROC(CANCEL_MSG, cancargs, norep),
570 PROC(UNLOCK_MSG, unlockargs, norep),
571 PROC(GRANTED_MSG, testargs, norep),
572 PROC(TEST_RES, testres, norep),
573 PROC(LOCK_RES, res, norep),
574 PROC(CANCEL_RES, res, norep),
575 PROC(UNLOCK_RES, res, norep),
576 PROC(GRANTED_RES, res, norep),
577#ifdef NLMCLNT_SUPPORT_SHARES
578 PROC(SHARE, shareargs, shareres),
579 PROC(UNSHARE, shareargs, shareres),
580 PROC(NM_LOCK, lockargs, res),
581 PROC(FREE_ALL, notify, void),
582#endif
583};
584
585struct rpc_version nlm_version4 = {
586 .number = 4,
587 .nrprocs = 24,
588 .procs = nlm4_procedures,
589};
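Unlike the v1/v3 encoders, the removed NLMv4 client code carried offsets and lengths as 64-bit quantities via xdr_encode_hyper()/xdr_decode_hyper(). What the hyper encoding amounts to (a sketch of the standard helper, not new code from this patch):

static __be32 *demo_encode_hyper(__be32 *p, __u64 val)
{
	*p++ = cpu_to_be32(val >> 32);		/* most significant word */
	*p++ = cpu_to_be32(val & 0xffffffff);	/* least significant word */
	return p;
}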
diff --git a/fs/mbcache.c b/fs/mbcache.c
index 93444747237b..a25444ab2baf 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -76,18 +76,6 @@ EXPORT_SYMBOL(mb_cache_entry_find_first);
76EXPORT_SYMBOL(mb_cache_entry_find_next); 76EXPORT_SYMBOL(mb_cache_entry_find_next);
77#endif 77#endif
78 78
79struct mb_cache {
80 struct list_head c_cache_list;
81 const char *c_name;
82 atomic_t c_entry_count;
83 int c_max_entries;
84 int c_bucket_bits;
85 struct kmem_cache *c_entry_cache;
86 struct list_head *c_block_hash;
87 struct list_head *c_index_hash;
88};
89
90
91/* 79/*
92 * Global data: list of all mbcache's, lru list, and a spinlock for 80 * Global data: list of all mbcache's, lru list, and a spinlock for
93 * accessing cache data structures on SMP machines. The lru list is 81 * accessing cache data structures on SMP machines. The lru list is
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 93a8b3bd69e3..199016528fcb 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -16,9 +16,7 @@
16#include <linux/freezer.h> 16#include <linux/freezer.h>
17#include <linux/kthread.h> 17#include <linux/kthread.h>
18#include <linux/sunrpc/svcauth_gss.h> 18#include <linux/sunrpc/svcauth_gss.h>
19#if defined(CONFIG_NFS_V4_1)
20#include <linux/sunrpc/bc_xprt.h> 19#include <linux/sunrpc/bc_xprt.h>
21#endif
22 20
23#include <net/inet_sock.h> 21#include <net/inet_sock.h>
24 22
@@ -137,6 +135,33 @@ out_err:
137 135
138#if defined(CONFIG_NFS_V4_1) 136#if defined(CONFIG_NFS_V4_1)
139/* 137/*
 138 * CB_SEQUENCE operations will fail until the callback sessionid is set.
 139 */
140int nfs4_set_callback_sessionid(struct nfs_client *clp)
141{
142 struct svc_serv *serv = clp->cl_rpcclient->cl_xprt->bc_serv;
143 struct nfs4_sessionid *bc_sid;
144
145 if (!serv->sv_bc_xprt)
146 return -EINVAL;
147
148 /* on success freed in xprt_free */
149 bc_sid = kmalloc(sizeof(struct nfs4_sessionid), GFP_KERNEL);
150 if (!bc_sid)
151 return -ENOMEM;
152 memcpy(bc_sid->data, &clp->cl_session->sess_id.data,
153 NFS4_MAX_SESSIONID_LEN);
154 spin_lock_bh(&serv->sv_cb_lock);
155 serv->sv_bc_xprt->xpt_bc_sid = bc_sid;
156 spin_unlock_bh(&serv->sv_cb_lock);
157 dprintk("%s set xpt_bc_sid=%u:%u:%u:%u for sv_bc_xprt %p\n", __func__,
158 ((u32 *)bc_sid->data)[0], ((u32 *)bc_sid->data)[1],
159 ((u32 *)bc_sid->data)[2], ((u32 *)bc_sid->data)[3],
160 serv->sv_bc_xprt);
161 return 0;
162}
163
164/*
140 * The callback service for NFSv4.1 callbacks 165 * The callback service for NFSv4.1 callbacks
141 */ 166 */
142static int 167static int
@@ -177,30 +202,38 @@ nfs41_callback_svc(void *vrqstp)
177struct svc_rqst * 202struct svc_rqst *
178nfs41_callback_up(struct svc_serv *serv, struct rpc_xprt *xprt) 203nfs41_callback_up(struct svc_serv *serv, struct rpc_xprt *xprt)
179{ 204{
180 struct svc_xprt *bc_xprt; 205 struct svc_rqst *rqstp;
181 struct svc_rqst *rqstp = ERR_PTR(-ENOMEM); 206 int ret;
182 207
183 dprintk("--> %s\n", __func__); 208 /*
184 /* Create a svc_sock for the service */ 209 * Create an svc_sock for the back channel service that shares the
185 bc_xprt = svc_sock_create(serv, xprt->prot); 210 * fore channel connection.
186 if (!bc_xprt) 211 * Returns the input port (0) and sets the svc_serv bc_xprt on success
212 */
213 ret = svc_create_xprt(serv, "tcp-bc", &init_net, PF_INET, 0,
214 SVC_SOCK_ANONYMOUS);
215 if (ret < 0) {
216 rqstp = ERR_PTR(ret);
187 goto out; 217 goto out;
218 }
188 219
189 /* 220 /*
190 * Save the svc_serv in the transport so that it can 221 * Save the svc_serv in the transport so that it can
191 * be referenced when the session backchannel is initialized 222 * be referenced when the session backchannel is initialized
192 */ 223 */
193 serv->bc_xprt = bc_xprt;
194 xprt->bc_serv = serv; 224 xprt->bc_serv = serv;
195 225
196 INIT_LIST_HEAD(&serv->sv_cb_list); 226 INIT_LIST_HEAD(&serv->sv_cb_list);
197 spin_lock_init(&serv->sv_cb_lock); 227 spin_lock_init(&serv->sv_cb_lock);
198 init_waitqueue_head(&serv->sv_cb_waitq); 228 init_waitqueue_head(&serv->sv_cb_waitq);
199 rqstp = svc_prepare_thread(serv, &serv->sv_pools[0]); 229 rqstp = svc_prepare_thread(serv, &serv->sv_pools[0]);
200 if (IS_ERR(rqstp)) 230 if (IS_ERR(rqstp)) {
201 svc_sock_destroy(bc_xprt); 231 svc_xprt_put(serv->sv_bc_xprt);
232 serv->sv_bc_xprt = NULL;
233 }
202out: 234out:
203 dprintk("--> %s return %p\n", __func__, rqstp); 235 dprintk("--> %s return %ld\n", __func__,
236 IS_ERR(rqstp) ? PTR_ERR(rqstp) : 0);
204 return rqstp; 237 return rqstp;
205} 238}
206 239
@@ -233,6 +266,10 @@ static inline void nfs_callback_bc_serv(u32 minorversion, struct rpc_xprt *xprt,
233 struct nfs_callback_data *cb_info) 266 struct nfs_callback_data *cb_info)
234{ 267{
235} 268}
269int nfs4_set_callback_sessionid(struct nfs_client *clp)
270{
271 return 0;
272}
236#endif /* CONFIG_NFS_V4_1 */ 273#endif /* CONFIG_NFS_V4_1 */
237 274
238/* 275/*
@@ -328,6 +365,9 @@ static int check_gss_callback_principal(struct nfs_client *clp,
328 struct rpc_clnt *r = clp->cl_rpcclient; 365 struct rpc_clnt *r = clp->cl_rpcclient;
329 char *p = svc_gss_principal(rqstp); 366 char *p = svc_gss_principal(rqstp);
330 367
368 /* No RPC_AUTH_GSS on NFSv4.1 back channel yet */
369 if (clp->cl_minorversion != 0)
370 return SVC_DROP;
331 /* 371 /*
332 * It might just be a normal user principal, in which case 372 * It might just be a normal user principal, in which case
333 * userspace won't bother to tell us the name at all. 373 * userspace won't bother to tell us the name at all.
@@ -345,6 +385,23 @@ static int check_gss_callback_principal(struct nfs_client *clp,
345 return SVC_OK; 385 return SVC_OK;
346} 386}
347 387
388/* pg_authenticate method helper */
389static struct nfs_client *nfs_cb_find_client(struct svc_rqst *rqstp)
390{
391 struct nfs4_sessionid *sessionid = bc_xprt_sid(rqstp);
392 int is_cb_compound = rqstp->rq_proc == CB_COMPOUND ? 1 : 0;
393
394 dprintk("--> %s rq_proc %d\n", __func__, rqstp->rq_proc);
395 if (svc_is_backchannel(rqstp))
396 /* Sessionid (usually) set after CB_NULL ping */
397 return nfs4_find_client_sessionid(svc_addr(rqstp), sessionid,
398 is_cb_compound);
399 else
400 /* No callback identifier in pg_authenticate */
401 return nfs4_find_client_no_ident(svc_addr(rqstp));
402}
403
404/* pg_authenticate method for nfsv4 callback threads. */
348static int nfs_callback_authenticate(struct svc_rqst *rqstp) 405static int nfs_callback_authenticate(struct svc_rqst *rqstp)
349{ 406{
350 struct nfs_client *clp; 407 struct nfs_client *clp;
@@ -352,7 +409,7 @@ static int nfs_callback_authenticate(struct svc_rqst *rqstp)
352 int ret = SVC_OK; 409 int ret = SVC_OK;
353 410
354 /* Don't talk to strangers */ 411 /* Don't talk to strangers */
355 clp = nfs_find_client(svc_addr(rqstp), 4); 412 clp = nfs_cb_find_client(rqstp);
356 if (clp == NULL) 413 if (clp == NULL)
357 return SVC_DROP; 414 return SVC_DROP;
358 415
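nfs4_set_callback_sessionid() stashes the session ID on the backchannel transport so that pg_authenticate can resolve the owning nfs_client before the compound is even decoded; nfs4_callback_sequence() later rejects any CB_SEQUENCE whose ID differs. The comparison being enforced (sketch; both IDs are NFS4_MAX_SESSIONID_LEN, i.e. 16, bytes of opaque data):

static bool demo_sessionid_match(const struct nfs4_sessionid *a,
				 const struct nfs4_sessionid *b)
{
	return memcmp(a->data, b->data, NFS4_MAX_SESSIONID_LEN) == 0;
}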
diff --git a/fs/nfs/callback.h b/fs/nfs/callback.h
index 85a7cfd1b8dd..d3b44f9bd747 100644
--- a/fs/nfs/callback.h
+++ b/fs/nfs/callback.h
@@ -34,10 +34,17 @@ enum nfs4_callback_opnum {
34 OP_CB_ILLEGAL = 10044, 34 OP_CB_ILLEGAL = 10044,
35}; 35};
36 36
37struct cb_process_state {
38 __be32 drc_status;
39 struct nfs_client *clp;
40 struct nfs4_sessionid *svc_sid; /* v4.1 callback service sessionid */
41};
42
37struct cb_compound_hdr_arg { 43struct cb_compound_hdr_arg {
38 unsigned int taglen; 44 unsigned int taglen;
39 const char *tag; 45 const char *tag;
40 unsigned int minorversion; 46 unsigned int minorversion;
47 unsigned int cb_ident; /* v4.0 callback identifier */
41 unsigned nops; 48 unsigned nops;
42}; 49};
43 50
@@ -103,14 +110,23 @@ struct cb_sequenceres {
103 uint32_t csr_target_highestslotid; 110 uint32_t csr_target_highestslotid;
104}; 111};
105 112
106extern unsigned nfs4_callback_sequence(struct cb_sequenceargs *args, 113extern __be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
107 struct cb_sequenceres *res); 114 struct cb_sequenceres *res,
115 struct cb_process_state *cps);
108 116
109extern int nfs41_validate_delegation_stateid(struct nfs_delegation *delegation, 117extern int nfs41_validate_delegation_stateid(struct nfs_delegation *delegation,
110 const nfs4_stateid *stateid); 118 const nfs4_stateid *stateid);
111 119
112#define RCA4_TYPE_MASK_RDATA_DLG 0 120#define RCA4_TYPE_MASK_RDATA_DLG 0
113#define RCA4_TYPE_MASK_WDATA_DLG 1 121#define RCA4_TYPE_MASK_WDATA_DLG 1
122#define RCA4_TYPE_MASK_DIR_DLG 2
123#define RCA4_TYPE_MASK_FILE_LAYOUT 3
124#define RCA4_TYPE_MASK_BLK_LAYOUT 4
125#define RCA4_TYPE_MASK_OBJ_LAYOUT_MIN 8
126#define RCA4_TYPE_MASK_OBJ_LAYOUT_MAX 9
127#define RCA4_TYPE_MASK_OTHER_LAYOUT_MIN 12
128#define RCA4_TYPE_MASK_OTHER_LAYOUT_MAX 15
129#define RCA4_TYPE_MASK_ALL 0xf31f
114 130
115struct cb_recallanyargs { 131struct cb_recallanyargs {
116 struct sockaddr *craa_addr; 132 struct sockaddr *craa_addr;
@@ -118,25 +134,52 @@ struct cb_recallanyargs {
118 uint32_t craa_type_mask; 134 uint32_t craa_type_mask;
119}; 135};
120 136
121extern unsigned nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy); 137extern __be32 nfs4_callback_recallany(struct cb_recallanyargs *args,
138 void *dummy,
139 struct cb_process_state *cps);
122 140
123struct cb_recallslotargs { 141struct cb_recallslotargs {
124 struct sockaddr *crsa_addr; 142 struct sockaddr *crsa_addr;
125 uint32_t crsa_target_max_slots; 143 uint32_t crsa_target_max_slots;
126}; 144};
127extern unsigned nfs4_callback_recallslot(struct cb_recallslotargs *args, 145extern __be32 nfs4_callback_recallslot(struct cb_recallslotargs *args,
128 void *dummy); 146 void *dummy,
147 struct cb_process_state *cps);
148
149struct cb_layoutrecallargs {
150 struct sockaddr *cbl_addr;
151 uint32_t cbl_recall_type;
152 uint32_t cbl_layout_type;
153 uint32_t cbl_layoutchanged;
154 union {
155 struct {
156 struct nfs_fh cbl_fh;
157 struct pnfs_layout_range cbl_range;
158 nfs4_stateid cbl_stateid;
159 };
160 struct nfs_fsid cbl_fsid;
161 };
162};
129 163
130#endif /* CONFIG_NFS_V4_1 */ 164extern unsigned nfs4_callback_layoutrecall(
165 struct cb_layoutrecallargs *args,
166 void *dummy, struct cb_process_state *cps);
131 167
132extern __be32 nfs4_callback_getattr(struct cb_getattrargs *args, struct cb_getattrres *res); 168extern void nfs4_check_drain_bc_complete(struct nfs4_session *ses);
133extern __be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy); 169extern void nfs4_cb_take_slot(struct nfs_client *clp);
170#endif /* CONFIG_NFS_V4_1 */
134 171
172extern __be32 nfs4_callback_getattr(struct cb_getattrargs *args,
173 struct cb_getattrres *res,
174 struct cb_process_state *cps);
175extern __be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy,
176 struct cb_process_state *cps);
135#ifdef CONFIG_NFS_V4 177#ifdef CONFIG_NFS_V4
136extern int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt); 178extern int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt);
137extern void nfs_callback_down(int minorversion); 179extern void nfs_callback_down(int minorversion);
138extern int nfs4_validate_delegation_stateid(struct nfs_delegation *delegation, 180extern int nfs4_validate_delegation_stateid(struct nfs_delegation *delegation,
139 const nfs4_stateid *stateid); 181 const nfs4_stateid *stateid);
182extern int nfs4_set_callback_sessionid(struct nfs_client *clp);
140#endif /* CONFIG_NFS_V4 */ 183#endif /* CONFIG_NFS_V4 */
141/* 184/*
142 * nfs41: Callbacks are expected to not cause substantial latency, 185 * nfs41: Callbacks are expected to not cause substantial latency,
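RCA4_TYPE_MASK_ALL, added in the callback.h hunk above, is simply the OR of every defined bit position; spelling the arithmetic out:

/*
 * bits 0-4   -> 0x001f  (RDATA/WDATA/DIR delegations, FILE/BLK layouts)
 * bits 8-9   -> 0x0300  (object-layout range)
 * bits 12-15 -> 0xf000  ("other layout" range)
 *
 * 0x001f | 0x0300 | 0xf000 == 0xf31f == RCA4_TYPE_MASK_ALL
 */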
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index 2950fca0c61b..4bb91cb2620d 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -12,30 +12,33 @@
12#include "callback.h" 12#include "callback.h"
13#include "delegation.h" 13#include "delegation.h"
14#include "internal.h" 14#include "internal.h"
15#include "pnfs.h"
15 16
16#ifdef NFS_DEBUG 17#ifdef NFS_DEBUG
17#define NFSDBG_FACILITY NFSDBG_CALLBACK 18#define NFSDBG_FACILITY NFSDBG_CALLBACK
18#endif 19#endif
19 20
20__be32 nfs4_callback_getattr(struct cb_getattrargs *args, struct cb_getattrres *res) 21__be32 nfs4_callback_getattr(struct cb_getattrargs *args,
22 struct cb_getattrres *res,
23 struct cb_process_state *cps)
21{ 24{
22 struct nfs_client *clp;
23 struct nfs_delegation *delegation; 25 struct nfs_delegation *delegation;
24 struct nfs_inode *nfsi; 26 struct nfs_inode *nfsi;
25 struct inode *inode; 27 struct inode *inode;
26 28
29 res->status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
30 if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
31 goto out;
32
27 res->bitmap[0] = res->bitmap[1] = 0; 33 res->bitmap[0] = res->bitmap[1] = 0;
28 res->status = htonl(NFS4ERR_BADHANDLE); 34 res->status = htonl(NFS4ERR_BADHANDLE);
29 clp = nfs_find_client(args->addr, 4);
30 if (clp == NULL)
31 goto out;
32 35
33 dprintk("NFS: GETATTR callback request from %s\n", 36 dprintk("NFS: GETATTR callback request from %s\n",
34 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)); 37 rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));
35 38
36 inode = nfs_delegation_find_inode(clp, &args->fh); 39 inode = nfs_delegation_find_inode(cps->clp, &args->fh);
37 if (inode == NULL) 40 if (inode == NULL)
38 goto out_putclient; 41 goto out;
39 nfsi = NFS_I(inode); 42 nfsi = NFS_I(inode);
40 rcu_read_lock(); 43 rcu_read_lock();
41 delegation = rcu_dereference(nfsi->delegation); 44 delegation = rcu_dereference(nfsi->delegation);
@@ -55,49 +58,41 @@ __be32 nfs4_callback_getattr(struct cb_getattrargs *args, struct cb_getattrres *
55out_iput: 58out_iput:
56 rcu_read_unlock(); 59 rcu_read_unlock();
57 iput(inode); 60 iput(inode);
58out_putclient:
59 nfs_put_client(clp);
60out: 61out:
61 dprintk("%s: exit with status = %d\n", __func__, ntohl(res->status)); 62 dprintk("%s: exit with status = %d\n", __func__, ntohl(res->status));
62 return res->status; 63 return res->status;
63} 64}
64 65
65__be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy) 66__be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy,
67 struct cb_process_state *cps)
66{ 68{
67 struct nfs_client *clp;
68 struct inode *inode; 69 struct inode *inode;
69 __be32 res; 70 __be32 res;
70 71
71 res = htonl(NFS4ERR_BADHANDLE); 72 res = htonl(NFS4ERR_OP_NOT_IN_SESSION);
72 clp = nfs_find_client(args->addr, 4); 73 if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
73 if (clp == NULL)
74 goto out; 74 goto out;
75 75
76 dprintk("NFS: RECALL callback request from %s\n", 76 dprintk("NFS: RECALL callback request from %s\n",
77 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)); 77 rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));
78 78
79 do { 79 res = htonl(NFS4ERR_BADHANDLE);
80 struct nfs_client *prev = clp; 80 inode = nfs_delegation_find_inode(cps->clp, &args->fh);
81 81 if (inode == NULL)
82 inode = nfs_delegation_find_inode(clp, &args->fh); 82 goto out;
83 if (inode != NULL) { 83 /* Set up a helper thread to actually return the delegation */
84 /* Set up a helper thread to actually return the delegation */ 84 switch (nfs_async_inode_return_delegation(inode, &args->stateid)) {
85 switch (nfs_async_inode_return_delegation(inode, &args->stateid)) { 85 case 0:
86 case 0: 86 res = 0;
87 res = 0; 87 break;
88 break; 88 case -ENOENT:
89 case -ENOENT: 89 if (res != 0)
90 if (res != 0) 90 res = htonl(NFS4ERR_BAD_STATEID);
91 res = htonl(NFS4ERR_BAD_STATEID); 91 break;
92 break; 92 default:
93 default: 93 res = htonl(NFS4ERR_RESOURCE);
94 res = htonl(NFS4ERR_RESOURCE); 94 }
95 } 95 iput(inode);
96 iput(inode);
97 }
98 clp = nfs_find_client_next(prev);
99 nfs_put_client(prev);
100 } while (clp != NULL);
101out: 96out:
102 dprintk("%s: exit with status = %d\n", __func__, ntohl(res)); 97 dprintk("%s: exit with status = %d\n", __func__, ntohl(res));
103 return res; 98 return res;
@@ -113,6 +108,139 @@ int nfs4_validate_delegation_stateid(struct nfs_delegation *delegation, const nf
113 108
114#if defined(CONFIG_NFS_V4_1) 109#if defined(CONFIG_NFS_V4_1)
115 110
111static u32 initiate_file_draining(struct nfs_client *clp,
112 struct cb_layoutrecallargs *args)
113{
114 struct pnfs_layout_hdr *lo;
115 struct inode *ino;
116 bool found = false;
117 u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
118 LIST_HEAD(free_me_list);
119
120 spin_lock(&clp->cl_lock);
121 list_for_each_entry(lo, &clp->cl_layouts, plh_layouts) {
122 if (nfs_compare_fh(&args->cbl_fh,
123 &NFS_I(lo->plh_inode)->fh))
124 continue;
125 ino = igrab(lo->plh_inode);
126 if (!ino)
127 continue;
128 found = true;
129 /* Without this, layout can be freed as soon
130 * as we release cl_lock.
131 */
132 get_layout_hdr(lo);
133 break;
134 }
135 spin_unlock(&clp->cl_lock);
136 if (!found)
137 return NFS4ERR_NOMATCHING_LAYOUT;
138
139 spin_lock(&ino->i_lock);
140 if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
141 mark_matching_lsegs_invalid(lo, &free_me_list,
142 args->cbl_range.iomode))
143 rv = NFS4ERR_DELAY;
144 else
145 rv = NFS4ERR_NOMATCHING_LAYOUT;
146 pnfs_set_layout_stateid(lo, &args->cbl_stateid, true);
147 spin_unlock(&ino->i_lock);
148 pnfs_free_lseg_list(&free_me_list);
149 put_layout_hdr(lo);
150 iput(ino);
151 return rv;
152}
153
154static u32 initiate_bulk_draining(struct nfs_client *clp,
155 struct cb_layoutrecallargs *args)
156{
157 struct pnfs_layout_hdr *lo;
158 struct inode *ino;
159 u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
160 struct pnfs_layout_hdr *tmp;
161 LIST_HEAD(recall_list);
162 LIST_HEAD(free_me_list);
163 struct pnfs_layout_range range = {
164 .iomode = IOMODE_ANY,
165 .offset = 0,
166 .length = NFS4_MAX_UINT64,
167 };
168
169 spin_lock(&clp->cl_lock);
170 list_for_each_entry(lo, &clp->cl_layouts, plh_layouts) {
171 if ((args->cbl_recall_type == RETURN_FSID) &&
172 memcmp(&NFS_SERVER(lo->plh_inode)->fsid,
173 &args->cbl_fsid, sizeof(struct nfs_fsid)))
174 continue;
175 if (!igrab(lo->plh_inode))
176 continue;
177 get_layout_hdr(lo);
178 BUG_ON(!list_empty(&lo->plh_bulk_recall));
179 list_add(&lo->plh_bulk_recall, &recall_list);
180 }
181 spin_unlock(&clp->cl_lock);
182 list_for_each_entry_safe(lo, tmp,
183 &recall_list, plh_bulk_recall) {
184 ino = lo->plh_inode;
185 spin_lock(&ino->i_lock);
186 set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
187 if (mark_matching_lsegs_invalid(lo, &free_me_list, range.iomode))
188 rv = NFS4ERR_DELAY;
189 list_del_init(&lo->plh_bulk_recall);
190 spin_unlock(&ino->i_lock);
191 put_layout_hdr(lo);
192 iput(ino);
193 }
194 pnfs_free_lseg_list(&free_me_list);
195 return rv;
196}
197
198static u32 do_callback_layoutrecall(struct nfs_client *clp,
199 struct cb_layoutrecallargs *args)
200{
201 u32 res = NFS4ERR_DELAY;
202
203 dprintk("%s enter, type=%i\n", __func__, args->cbl_recall_type);
204 if (test_and_set_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state))
205 goto out;
206 if (args->cbl_recall_type == RETURN_FILE)
207 res = initiate_file_draining(clp, args);
208 else
209 res = initiate_bulk_draining(clp, args);
210 clear_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state);
211out:
212 dprintk("%s returning %i\n", __func__, res);
213 return res;
214
215}
216
217__be32 nfs4_callback_layoutrecall(struct cb_layoutrecallargs *args,
218 void *dummy, struct cb_process_state *cps)
219{
220 u32 res;
221
222 dprintk("%s: -->\n", __func__);
223
224 if (cps->clp)
225 res = do_callback_layoutrecall(cps->clp, args);
226 else
227 res = NFS4ERR_OP_NOT_IN_SESSION;
228
229 dprintk("%s: exit with status = %d\n", __func__, res);
230 return cpu_to_be32(res);
231}
232
233static void pnfs_recall_all_layouts(struct nfs_client *clp)
234{
235 struct cb_layoutrecallargs args;
236
237 /* Pretend we got a CB_LAYOUTRECALL(ALL) */
238 memset(&args, 0, sizeof(args));
239 args.cbl_recall_type = RETURN_ALL;
240 /* FIXME we ignore errors, what should we do? */
241 do_callback_layoutrecall(clp, &args);
242}
243
116int nfs41_validate_delegation_stateid(struct nfs_delegation *delegation, const nfs4_stateid *stateid) 244int nfs41_validate_delegation_stateid(struct nfs_delegation *delegation, const nfs4_stateid *stateid)
117{ 245{
118 if (delegation == NULL) 246 if (delegation == NULL)
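Both draining helpers above follow the same locking discipline: pin each candidate with igrab() plus get_layout_hdr() while clp->cl_lock is held, then drop the spinlock before taking inode locks or freeing segments. Distilled into a sketch:

static void demo_drain_one(struct nfs_client *clp,
			   struct pnfs_layout_hdr *lo)
{
	struct inode *ino;

	spin_lock(&clp->cl_lock);
	ino = igrab(lo->plh_inode);	/* pin the inode ... */
	if (ino)
		get_layout_hdr(lo);	/* ... and the layout header */
	spin_unlock(&clp->cl_lock);
	if (!ino)
		return;
	/* now safe to take ino->i_lock and invalidate matching segments */
	put_layout_hdr(lo);
	iput(ino);
}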
@@ -185,42 +313,6 @@ validate_seqid(struct nfs4_slot_table *tbl, struct cb_sequenceargs * args)
185} 313}
186 314
187/* 315/*
188 * Returns a pointer to a held 'struct nfs_client' that matches the server's
189 * address, major version number, and session ID. It is the caller's
190 * responsibility to release the returned reference.
191 *
192 * Returns NULL if there are no connections with sessions, or if no session
193 * matches the one of interest.
194 */
195 static struct nfs_client *find_client_with_session(
196 const struct sockaddr *addr, u32 nfsversion,
197 struct nfs4_sessionid *sessionid)
198{
199 struct nfs_client *clp;
200
201 clp = nfs_find_client(addr, 4);
202 if (clp == NULL)
203 return NULL;
204
205 do {
206 struct nfs_client *prev = clp;
207
208 if (clp->cl_session != NULL) {
209 if (memcmp(clp->cl_session->sess_id.data,
210 sessionid->data,
211 NFS4_MAX_SESSIONID_LEN) == 0) {
212 /* Returns a held reference to clp */
213 return clp;
214 }
215 }
216 clp = nfs_find_client_next(prev);
217 nfs_put_client(prev);
218 } while (clp != NULL);
219
220 return NULL;
221}
222
223/*
224 * For each referring call triple, check the session's slot table for 316 * For each referring call triple, check the session's slot table for
225 * a match. If the slot is in use and the sequence numbers match, the 317 * a match. If the slot is in use and the sequence numbers match, the
226 * client is still waiting for a response to the original request. 318 * client is still waiting for a response to the original request.
@@ -276,20 +368,34 @@ out:
276} 368}
277 369
278__be32 nfs4_callback_sequence(struct cb_sequenceargs *args, 370__be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
279 struct cb_sequenceres *res) 371 struct cb_sequenceres *res,
372 struct cb_process_state *cps)
280{ 373{
281 struct nfs_client *clp; 374 struct nfs_client *clp;
282 int i; 375 int i;
283 __be32 status; 376 __be32 status;
284 377
378 cps->clp = NULL;
379
285 status = htonl(NFS4ERR_BADSESSION); 380 status = htonl(NFS4ERR_BADSESSION);
286 clp = find_client_with_session(args->csa_addr, 4, &args->csa_sessionid); 381 /* Incoming session must match the callback session */
382 if (memcmp(&args->csa_sessionid, cps->svc_sid, NFS4_MAX_SESSIONID_LEN))
383 goto out;
384
385 clp = nfs4_find_client_sessionid(args->csa_addr,
386 &args->csa_sessionid, 1);
287 if (clp == NULL) 387 if (clp == NULL)
288 goto out; 388 goto out;
289 389
390 /* state manager is resetting the session */
391 if (test_bit(NFS4_SESSION_DRAINING, &clp->cl_session->session_state)) {
392 status = NFS4ERR_DELAY;
393 goto out;
394 }
395
290 status = validate_seqid(&clp->cl_session->bc_slot_table, args); 396 status = validate_seqid(&clp->cl_session->bc_slot_table, args);
291 if (status) 397 if (status)
292 goto out_putclient; 398 goto out;
293 399
294 /* 400 /*
295 * Check for pending referring calls. If a match is found, a 401 * Check for pending referring calls. If a match is found, a
@@ -298,7 +404,7 @@ __be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
298 */ 404 */
299 if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists)) { 405 if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists)) {
300 status = htonl(NFS4ERR_DELAY); 406 status = htonl(NFS4ERR_DELAY);
301 goto out_putclient; 407 goto out;
302 } 408 }
303 409
304 memcpy(&res->csr_sessionid, &args->csa_sessionid, 410 memcpy(&res->csr_sessionid, &args->csa_sessionid,
@@ -307,83 +413,93 @@ __be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
307 res->csr_slotid = args->csa_slotid; 413 res->csr_slotid = args->csa_slotid;
308 res->csr_highestslotid = NFS41_BC_MAX_CALLBACKS - 1; 414 res->csr_highestslotid = NFS41_BC_MAX_CALLBACKS - 1;
309 res->csr_target_highestslotid = NFS41_BC_MAX_CALLBACKS - 1; 415 res->csr_target_highestslotid = NFS41_BC_MAX_CALLBACKS - 1;
416 nfs4_cb_take_slot(clp);
417 cps->clp = clp; /* put in nfs4_callback_compound */
310 418
311out_putclient:
312 nfs_put_client(clp);
313out: 419out:
314 for (i = 0; i < args->csa_nrclists; i++) 420 for (i = 0; i < args->csa_nrclists; i++)
315 kfree(args->csa_rclists[i].rcl_refcalls); 421 kfree(args->csa_rclists[i].rcl_refcalls);
316 kfree(args->csa_rclists); 422 kfree(args->csa_rclists);
317 423
318 if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) 424 if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) {
319 res->csr_status = 0; 425 cps->drc_status = status;
320 else 426 status = 0;
427 } else
321 res->csr_status = status; 428 res->csr_status = status;
429
322 dprintk("%s: exit with status = %d res->csr_status %d\n", __func__, 430 dprintk("%s: exit with status = %d res->csr_status %d\n", __func__,
323 ntohl(status), ntohl(res->csr_status)); 431 ntohl(status), ntohl(res->csr_status));
324 return status; 432 return status;
325} 433}
326 434
327__be32 nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy) 435static bool
436validate_bitmap_values(unsigned long mask)
437{
438 return (mask & ~RCA4_TYPE_MASK_ALL) == 0;
439}
440
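validate_bitmap_values() rejects any CB_RECALL_ANY mask carrying bits outside the RCA4_TYPE_MASK_ALL set, so unknown recall types fail with NFS4ERR_INVAL before any work is done. A self-contained sketch of that check (the mask value is illustrative, not the kernel's definition):

#include <assert.h>

#define RCA4_TYPE_MASK_ALL 0x3fUL       /* illustrative value, not the kernel's */

static int bitmap_valid(unsigned long mask)
{
        return (mask & ~RCA4_TYPE_MASK_ALL) == 0;
}

int main(void)
{
        assert(bitmap_valid(0x01UL));   /* known bit: accepted */
        assert(!bitmap_valid(0x40UL));  /* unknown bit: NFS4ERR_INVAL */
        return 0;
}
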
441__be32 nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy,
442 struct cb_process_state *cps)
328{ 443{
329 struct nfs_client *clp;
330 __be32 status; 444 __be32 status;
331 fmode_t flags = 0; 445 fmode_t flags = 0;
332 446
333 status = htonl(NFS4ERR_OP_NOT_IN_SESSION); 447 status = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
334 clp = nfs_find_client(args->craa_addr, 4); 448 if (!cps->clp) /* set in cb_sequence */
335 if (clp == NULL)
336 goto out; 449 goto out;
337 450
338 dprintk("NFS: RECALL_ANY callback request from %s\n", 451 dprintk("NFS: RECALL_ANY callback request from %s\n",
339 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)); 452 rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));
453
454 status = cpu_to_be32(NFS4ERR_INVAL);
455 if (!validate_bitmap_values(args->craa_type_mask))
456 goto out;
340 457
458 status = cpu_to_be32(NFS4_OK);
341 if (test_bit(RCA4_TYPE_MASK_RDATA_DLG, (const unsigned long *) 459 if (test_bit(RCA4_TYPE_MASK_RDATA_DLG, (const unsigned long *)
342 &args->craa_type_mask)) 460 &args->craa_type_mask))
343 flags = FMODE_READ; 461 flags = FMODE_READ;
344 if (test_bit(RCA4_TYPE_MASK_WDATA_DLG, (const unsigned long *) 462 if (test_bit(RCA4_TYPE_MASK_WDATA_DLG, (const unsigned long *)
345 &args->craa_type_mask)) 463 &args->craa_type_mask))
346 flags |= FMODE_WRITE; 464 flags |= FMODE_WRITE;
347 465 if (test_bit(RCA4_TYPE_MASK_FILE_LAYOUT, (const unsigned long *)
466 &args->craa_type_mask))
467 pnfs_recall_all_layouts(cps->clp);
348 if (flags) 468 if (flags)
349 nfs_expire_all_delegation_types(clp, flags); 469 nfs_expire_all_delegation_types(cps->clp, flags);
350 status = htonl(NFS4_OK);
351out: 470out:
352 dprintk("%s: exit with status = %d\n", __func__, ntohl(status)); 471 dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
353 return status; 472 return status;
354} 473}
355 474
356/* Reduce the fore channel's max_slots to the target value */ 475/* Reduce the fore channel's max_slots to the target value */
357__be32 nfs4_callback_recallslot(struct cb_recallslotargs *args, void *dummy) 476__be32 nfs4_callback_recallslot(struct cb_recallslotargs *args, void *dummy,
477 struct cb_process_state *cps)
358{ 478{
359 struct nfs_client *clp;
360 struct nfs4_slot_table *fc_tbl; 479 struct nfs4_slot_table *fc_tbl;
361 __be32 status; 480 __be32 status;
362 481
363 status = htonl(NFS4ERR_OP_NOT_IN_SESSION); 482 status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
364 clp = nfs_find_client(args->crsa_addr, 4); 483 if (!cps->clp) /* set in cb_sequence */
365 if (clp == NULL)
366 goto out; 484 goto out;
367 485
368 dprintk("NFS: CB_RECALL_SLOT request from %s target max slots %d\n", 486 dprintk("NFS: CB_RECALL_SLOT request from %s target max slots %d\n",
369 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR), 487 rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR),
370 args->crsa_target_max_slots); 488 args->crsa_target_max_slots);
371 489
372 fc_tbl = &clp->cl_session->fc_slot_table; 490 fc_tbl = &cps->clp->cl_session->fc_slot_table;
373 491
374 status = htonl(NFS4ERR_BAD_HIGH_SLOT); 492 status = htonl(NFS4ERR_BAD_HIGH_SLOT);
375 if (args->crsa_target_max_slots > fc_tbl->max_slots || 493 if (args->crsa_target_max_slots > fc_tbl->max_slots ||
376 args->crsa_target_max_slots < 1) 494 args->crsa_target_max_slots < 1)
377 goto out_putclient; 495 goto out;
378 496
379 status = htonl(NFS4_OK); 497 status = htonl(NFS4_OK);
380 if (args->crsa_target_max_slots == fc_tbl->max_slots) 498 if (args->crsa_target_max_slots == fc_tbl->max_slots)
381 goto out_putclient; 499 goto out;
382 500
383 fc_tbl->target_max_slots = args->crsa_target_max_slots; 501 fc_tbl->target_max_slots = args->crsa_target_max_slots;
384 nfs41_handle_recall_slot(clp); 502 nfs41_handle_recall_slot(cps->clp);
385out_putclient:
386 nfs_put_client(clp); /* balance nfs_find_client */
387out: 503out:
388 dprintk("%s: exit with status = %d\n", __func__, ntohl(status)); 504 dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
389 return status; 505 return status;
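All three v4.1 callback operations above now take a struct cb_process_state instead of looking the client up themselves: the nfs_client reference is taken once (by CB_SEQUENCE, or from cb_ident for v4.0) and released once at the end of the compound. A toy refcount sketch of that take-once/put-once shape, with purely illustrative types:

#include <stdio.h>

/* Illustrative types only; not the kernel's layout. */
struct client { int refcount; };

struct process_state {
        int drc_status;         /* reply served from the duplicate reply cache */
        struct client *clp;     /* taken once, put once */
};

static void put_client(struct client *c)
{
        if (c)
                c->refcount--;
}

int main(void)
{
        struct client clnt = { .refcount = 1 };
        struct process_state cps = { 0 };

        clnt.refcount++;        /* CB_SEQUENCE: lookup takes a reference */
        cps.clp = &clnt;
        /* ...later ops (CB_RECALL_ANY, CB_RECALL_SLOT) just use cps.clp... */
        put_client(cps.clp);    /* end of compound: the single put */
        printf("refcount back to %d\n", clnt.refcount);         /* 1 */
        return 0;
}
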
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index 05af212f0edf..23112c263f81 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -10,8 +10,10 @@
10#include <linux/nfs4.h> 10#include <linux/nfs4.h>
11#include <linux/nfs_fs.h> 11#include <linux/nfs_fs.h>
12#include <linux/slab.h> 12#include <linux/slab.h>
13#include <linux/sunrpc/bc_xprt.h>
13#include "nfs4_fs.h" 14#include "nfs4_fs.h"
14#include "callback.h" 15#include "callback.h"
16#include "internal.h"
15 17
16#define CB_OP_TAGLEN_MAXSZ (512) 18#define CB_OP_TAGLEN_MAXSZ (512)
17#define CB_OP_HDR_RES_MAXSZ (2 + CB_OP_TAGLEN_MAXSZ) 19#define CB_OP_HDR_RES_MAXSZ (2 + CB_OP_TAGLEN_MAXSZ)
@@ -22,6 +24,7 @@
22#define CB_OP_RECALL_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ) 24#define CB_OP_RECALL_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ)
23 25
24#if defined(CONFIG_NFS_V4_1) 26#if defined(CONFIG_NFS_V4_1)
27#define CB_OP_LAYOUTRECALL_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ)
25#define CB_OP_SEQUENCE_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ + \ 28#define CB_OP_SEQUENCE_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ + \
26 4 + 1 + 3) 29 4 + 1 + 3)
27#define CB_OP_RECALLANY_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ) 30#define CB_OP_RECALLANY_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ)
@@ -33,7 +36,8 @@
33/* Internal error code */ 36/* Internal error code */
34#define NFS4ERR_RESOURCE_HDR 11050 37#define NFS4ERR_RESOURCE_HDR 11050
35 38
36typedef __be32 (*callback_process_op_t)(void *, void *); 39typedef __be32 (*callback_process_op_t)(void *, void *,
40 struct cb_process_state *);
37typedef __be32 (*callback_decode_arg_t)(struct svc_rqst *, struct xdr_stream *, void *); 41typedef __be32 (*callback_decode_arg_t)(struct svc_rqst *, struct xdr_stream *, void *);
38typedef __be32 (*callback_encode_res_t)(struct svc_rqst *, struct xdr_stream *, void *); 42typedef __be32 (*callback_encode_res_t)(struct svc_rqst *, struct xdr_stream *, void *);
39 43
@@ -160,7 +164,7 @@ static __be32 decode_compound_hdr_arg(struct xdr_stream *xdr, struct cb_compound
160 hdr->minorversion = ntohl(*p++); 164 hdr->minorversion = ntohl(*p++);
161 /* Check minor version is zero or one. */ 165 /* Check minor version is zero or one. */
162 if (hdr->minorversion <= 1) { 166 if (hdr->minorversion <= 1) {
163 p++; /* skip callback_ident */ 167 hdr->cb_ident = ntohl(*p++); /* ignored by v4.1 */
164 } else { 168 } else {
165 printk(KERN_WARNING "%s: NFSv4 server callback with " 169 printk(KERN_WARNING "%s: NFSv4 server callback with "
166 "illegal minor version %u!\n", 170 "illegal minor version %u!\n",
@@ -220,6 +224,66 @@ out:
220 224
221#if defined(CONFIG_NFS_V4_1) 225#if defined(CONFIG_NFS_V4_1)
222 226
227static __be32 decode_layoutrecall_args(struct svc_rqst *rqstp,
228 struct xdr_stream *xdr,
229 struct cb_layoutrecallargs *args)
230{
231 __be32 *p;
232 __be32 status = 0;
233 uint32_t iomode;
234
235 args->cbl_addr = svc_addr(rqstp);
236 p = read_buf(xdr, 4 * sizeof(uint32_t));
237 if (unlikely(p == NULL)) {
238 status = htonl(NFS4ERR_BADXDR);
239 goto out;
240 }
241
242 args->cbl_layout_type = ntohl(*p++);
243 /* Despite the spec's XDR, iomode really belongs in the RETURN_FILE case,
244 * as it is unusable and ignored with the other recall types.
245 */
246 iomode = ntohl(*p++);
247 args->cbl_layoutchanged = ntohl(*p++);
248 args->cbl_recall_type = ntohl(*p++);
249
250 if (args->cbl_recall_type == RETURN_FILE) {
251 args->cbl_range.iomode = iomode;
252 status = decode_fh(xdr, &args->cbl_fh);
253 if (unlikely(status != 0))
254 goto out;
255
256 p = read_buf(xdr, 2 * sizeof(uint64_t));
257 if (unlikely(p == NULL)) {
258 status = htonl(NFS4ERR_BADXDR);
259 goto out;
260 }
261 p = xdr_decode_hyper(p, &args->cbl_range.offset);
262 p = xdr_decode_hyper(p, &args->cbl_range.length);
263 status = decode_stateid(xdr, &args->cbl_stateid);
264 if (unlikely(status != 0))
265 goto out;
266 } else if (args->cbl_recall_type == RETURN_FSID) {
267 p = read_buf(xdr, 2 * sizeof(uint64_t));
268 if (unlikely(p == NULL)) {
269 status = htonl(NFS4ERR_BADXDR);
270 goto out;
271 }
272 p = xdr_decode_hyper(p, &args->cbl_fsid.major);
273 p = xdr_decode_hyper(p, &args->cbl_fsid.minor);
274 } else if (args->cbl_recall_type != RETURN_ALL) {
275 status = htonl(NFS4ERR_BADXDR);
276 goto out;
277 }
278 dprintk("%s: ltype 0x%x iomode %d changed %d recall_type %d\n",
279 __func__,
280 args->cbl_layout_type, iomode,
281 args->cbl_layoutchanged, args->cbl_recall_type);
282out:
283 dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
284 return status;
285}
286
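decode_layoutrecall_args() reads a fixed prefix of four big-endian 32-bit words (layout type, iomode, layoutchanged, recall type) before branching on the recall type. A standalone sketch of decoding such a prefix with ntohl(); the wire bytes are made up for illustration:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Wire bytes chosen for illustration: four big-endian 32-bit words. */
static const unsigned char wire[16] = {
        0, 0, 0, 1,     /* layout type */
        0, 0, 0, 2,     /* iomode */
        0, 0, 0, 0,     /* layoutchanged */
        0, 0, 0, 1,     /* recall type (1 = RETURN_FILE here) */
};

int main(void)
{
        uint32_t v[4];

        memcpy(v, wire, sizeof(v));
        for (int i = 0; i < 4; i++)
                v[i] = ntohl(v[i]);
        printf("ltype=%u iomode=%u changed=%u recall=%u\n",
               v[0], v[1], v[2], v[3]);
        return 0;
}
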
223static __be32 decode_sessionid(struct xdr_stream *xdr, 287static __be32 decode_sessionid(struct xdr_stream *xdr,
224 struct nfs4_sessionid *sid) 288 struct nfs4_sessionid *sid)
225{ 289{
@@ -574,10 +638,10 @@ preprocess_nfs41_op(int nop, unsigned int op_nr, struct callback_op **op)
574 case OP_CB_SEQUENCE: 638 case OP_CB_SEQUENCE:
575 case OP_CB_RECALL_ANY: 639 case OP_CB_RECALL_ANY:
576 case OP_CB_RECALL_SLOT: 640 case OP_CB_RECALL_SLOT:
641 case OP_CB_LAYOUTRECALL:
577 *op = &callback_ops[op_nr]; 642 *op = &callback_ops[op_nr];
578 break; 643 break;
579 644
580 case OP_CB_LAYOUTRECALL:
581 case OP_CB_NOTIFY_DEVICEID: 645 case OP_CB_NOTIFY_DEVICEID:
582 case OP_CB_NOTIFY: 646 case OP_CB_NOTIFY:
583 case OP_CB_PUSH_DELEG: 647 case OP_CB_PUSH_DELEG:
@@ -593,6 +657,37 @@ preprocess_nfs41_op(int nop, unsigned int op_nr, struct callback_op **op)
593 return htonl(NFS_OK); 657 return htonl(NFS_OK);
594} 658}
595 659
660static void nfs4_callback_free_slot(struct nfs4_session *session)
661{
662 struct nfs4_slot_table *tbl = &session->bc_slot_table;
663
664 spin_lock(&tbl->slot_tbl_lock);
665 /*
666 * Let the state manager know that callback processing is done.
667 * There is a single slot, so the highest used slotid is either 0 or -1.
668 */
669 tbl->highest_used_slotid--;
670 nfs4_check_drain_bc_complete(session);
671 spin_unlock(&tbl->slot_tbl_lock);
672}
673
674static void nfs4_cb_free_slot(struct nfs_client *clp)
675{
676 if (clp && clp->cl_session)
677 nfs4_callback_free_slot(clp->cl_session);
678}
679
680/* A single slot, so highest used slotid is either 0 or -1 */
681void nfs4_cb_take_slot(struct nfs_client *clp)
682{
683 struct nfs4_slot_table *tbl = &clp->cl_session->bc_slot_table;
684
685 spin_lock(&tbl->slot_tbl_lock);
686 tbl->highest_used_slotid++;
687 BUG_ON(tbl->highest_used_slotid != 0);
688 spin_unlock(&tbl->slot_tbl_lock);
689}
690
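The back channel has exactly one slot, so nfs4_cb_take_slot() and nfs4_callback_free_slot() just step highest_used_slotid between -1 (free) and 0 (busy), with the BUG_ON catching a double take. A tiny sketch of that invariant (assertions stand in for BUG_ON; locking omitted):

#include <assert.h>
#include <stdio.h>

/* One back-channel slot: -1 means free, 0 means in use. Locking omitted. */
static int highest_used_slotid = -1;

static void take_slot(void)
{
        highest_used_slotid++;
        assert(highest_used_slotid == 0);       /* the kernel uses BUG_ON here */
}

static void free_slot(void)
{
        highest_used_slotid--;
        assert(highest_used_slotid == -1);
}

int main(void)
{
        take_slot();
        free_slot();
        printf("slot cycled: %d\n", highest_used_slotid);
        return 0;
}
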
596#else /* CONFIG_NFS_V4_1 */ 691#else /* CONFIG_NFS_V4_1 */
597 692
598static __be32 693static __be32
@@ -601,6 +696,9 @@ preprocess_nfs41_op(int nop, unsigned int op_nr, struct callback_op **op)
601 return htonl(NFS4ERR_MINOR_VERS_MISMATCH); 696 return htonl(NFS4ERR_MINOR_VERS_MISMATCH);
602} 697}
603 698
699static void nfs4_cb_free_slot(struct nfs_client *clp)
700{
701}
604#endif /* CONFIG_NFS_V4_1 */ 702#endif /* CONFIG_NFS_V4_1 */
605 703
606static __be32 704static __be32
@@ -621,7 +719,8 @@ preprocess_nfs4_op(unsigned int op_nr, struct callback_op **op)
621static __be32 process_op(uint32_t minorversion, int nop, 719static __be32 process_op(uint32_t minorversion, int nop,
622 struct svc_rqst *rqstp, 720 struct svc_rqst *rqstp,
623 struct xdr_stream *xdr_in, void *argp, 721 struct xdr_stream *xdr_in, void *argp,
624 struct xdr_stream *xdr_out, void *resp, int* drc_status) 722 struct xdr_stream *xdr_out, void *resp,
723 struct cb_process_state *cps)
625{ 724{
626 struct callback_op *op = &callback_ops[0]; 725 struct callback_op *op = &callback_ops[0];
627 unsigned int op_nr; 726 unsigned int op_nr;
@@ -644,8 +743,8 @@ static __be32 process_op(uint32_t minorversion, int nop,
644 if (status) 743 if (status)
645 goto encode_hdr; 744 goto encode_hdr;
646 745
647 if (*drc_status) { 746 if (cps->drc_status) {
648 status = *drc_status; 747 status = cps->drc_status;
649 goto encode_hdr; 748 goto encode_hdr;
650 } 749 }
651 750
@@ -653,16 +752,10 @@ static __be32 process_op(uint32_t minorversion, int nop,
653 if (maxlen > 0 && maxlen < PAGE_SIZE) { 752 if (maxlen > 0 && maxlen < PAGE_SIZE) {
654 status = op->decode_args(rqstp, xdr_in, argp); 753 status = op->decode_args(rqstp, xdr_in, argp);
655 if (likely(status == 0)) 754 if (likely(status == 0))
656 status = op->process_op(argp, resp); 755 status = op->process_op(argp, resp, cps);
657 } else 756 } else
658 status = htonl(NFS4ERR_RESOURCE); 757 status = htonl(NFS4ERR_RESOURCE);
659 758
660 /* Only set by OP_CB_SEQUENCE processing */
661 if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) {
662 *drc_status = status;
663 status = 0;
664 }
665
666encode_hdr: 759encode_hdr:
667 res = encode_op_hdr(xdr_out, op_nr, status); 760 res = encode_op_hdr(xdr_out, op_nr, status);
668 if (unlikely(res)) 761 if (unlikely(res))
@@ -681,8 +774,11 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
681 struct cb_compound_hdr_arg hdr_arg = { 0 }; 774 struct cb_compound_hdr_arg hdr_arg = { 0 };
682 struct cb_compound_hdr_res hdr_res = { NULL }; 775 struct cb_compound_hdr_res hdr_res = { NULL };
683 struct xdr_stream xdr_in, xdr_out; 776 struct xdr_stream xdr_in, xdr_out;
684 __be32 *p; 777 __be32 *p, status;
685 __be32 status, drc_status = 0; 778 struct cb_process_state cps = {
779 .drc_status = 0,
780 .clp = NULL,
781 };
686 unsigned int nops = 0; 782 unsigned int nops = 0;
687 783
688 dprintk("%s: start\n", __func__); 784 dprintk("%s: start\n", __func__);
@@ -696,6 +792,13 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
696 if (status == __constant_htonl(NFS4ERR_RESOURCE)) 792 if (status == __constant_htonl(NFS4ERR_RESOURCE))
697 return rpc_garbage_args; 793 return rpc_garbage_args;
698 794
795 if (hdr_arg.minorversion == 0) {
796 cps.clp = nfs4_find_client_ident(hdr_arg.cb_ident);
797 if (!cps.clp)
798 return rpc_drop_reply;
799 } else
800 cps.svc_sid = bc_xprt_sid(rqstp);
801
699 hdr_res.taglen = hdr_arg.taglen; 802 hdr_res.taglen = hdr_arg.taglen;
700 hdr_res.tag = hdr_arg.tag; 803 hdr_res.tag = hdr_arg.tag;
701 if (encode_compound_hdr_res(&xdr_out, &hdr_res) != 0) 804 if (encode_compound_hdr_res(&xdr_out, &hdr_res) != 0)
@@ -703,7 +806,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
703 806
704 while (status == 0 && nops != hdr_arg.nops) { 807 while (status == 0 && nops != hdr_arg.nops) {
705 status = process_op(hdr_arg.minorversion, nops, rqstp, 808 status = process_op(hdr_arg.minorversion, nops, rqstp,
706 &xdr_in, argp, &xdr_out, resp, &drc_status); 809 &xdr_in, argp, &xdr_out, resp, &cps);
707 nops++; 810 nops++;
708 } 811 }
709 812
@@ -716,6 +819,8 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
716 819
717 *hdr_res.status = status; 820 *hdr_res.status = status;
718 *hdr_res.nops = htonl(nops); 821 *hdr_res.nops = htonl(nops);
822 nfs4_cb_free_slot(cps.clp);
823 nfs_put_client(cps.clp);
719 dprintk("%s: done, status = %u\n", __func__, ntohl(status)); 824 dprintk("%s: done, status = %u\n", __func__, ntohl(status));
720 return rpc_success; 825 return rpc_success;
721} 826}
@@ -739,6 +844,12 @@ static struct callback_op callback_ops[] = {
739 .res_maxsize = CB_OP_RECALL_RES_MAXSZ, 844 .res_maxsize = CB_OP_RECALL_RES_MAXSZ,
740 }, 845 },
741#if defined(CONFIG_NFS_V4_1) 846#if defined(CONFIG_NFS_V4_1)
847 [OP_CB_LAYOUTRECALL] = {
848 .process_op = (callback_process_op_t)nfs4_callback_layoutrecall,
849 .decode_args =
850 (callback_decode_arg_t)decode_layoutrecall_args,
851 .res_maxsize = CB_OP_LAYOUTRECALL_RES_MAXSZ,
852 },
742 [OP_CB_SEQUENCE] = { 853 [OP_CB_SEQUENCE] = {
743 .process_op = (callback_process_op_t)nfs4_callback_sequence, 854 .process_op = (callback_process_op_t)nfs4_callback_sequence,
744 .decode_args = (callback_decode_arg_t)decode_cb_sequence_args, 855 .decode_args = (callback_decode_arg_t)decode_cb_sequence_args,
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 0870d0d4efc0..192f2f860265 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -56,6 +56,30 @@ static DEFINE_SPINLOCK(nfs_client_lock);
56static LIST_HEAD(nfs_client_list); 56static LIST_HEAD(nfs_client_list);
57static LIST_HEAD(nfs_volume_list); 57static LIST_HEAD(nfs_volume_list);
58static DECLARE_WAIT_QUEUE_HEAD(nfs_client_active_wq); 58static DECLARE_WAIT_QUEUE_HEAD(nfs_client_active_wq);
59#ifdef CONFIG_NFS_V4
60static DEFINE_IDR(cb_ident_idr); /* Protected by nfs_client_lock */
61
62/*
63 * Get a unique NFSv4.0 callback identifier which will be used
64 * by the V4.0 callback service to lookup the nfs_client struct
65 */
66static int nfs_get_cb_ident_idr(struct nfs_client *clp, int minorversion)
67{
68 int ret = 0;
69
70 if (clp->rpc_ops->version != 4 || minorversion != 0)
71 return ret;
72retry:
73 if (!idr_pre_get(&cb_ident_idr, GFP_KERNEL))
74 return -ENOMEM;
75 spin_lock(&nfs_client_lock);
76 ret = idr_get_new(&cb_ident_idr, clp, &clp->cl_cb_ident);
77 spin_unlock(&nfs_client_lock);
78 if (ret == -EAGAIN)
79 goto retry;
80 return ret;
81}
82#endif /* CONFIG_NFS_V4 */
59 83
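nfs_get_cb_ident_idr() follows the two-phase IDR pattern of this kernel era: preallocate with idr_pre_get() outside the spinlock (it may sleep), insert with idr_get_new() under the lock, and retry on -EAGAIN if another thread consumed the preallocation. A toy, self-contained version of that loop; table_prealloc() and table_insert() are stand-ins, not kernel API:

#include <errno.h>
#include <stdio.h>

static int preallocated;

static int table_prealloc(void)         /* may sleep: called unlocked */
{
        preallocated = 1;
        return 1;
}

static int table_insert(int *id)        /* never sleeps: called under the lock */
{
        if (!preallocated)
                return -EAGAIN;         /* preallocation consumed elsewhere */
        preallocated = 0;
        *id = 42;
        return 0;
}

int main(void)
{
        int id, ret;

retry:
        if (!table_prealloc())
                return 1;               /* -ENOMEM in the kernel */
        ret = table_insert(&id);
        if (ret == -EAGAIN)
                goto retry;
        printf("cb_ident = %d\n", id);
        return 0;
}
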
60/* 84/*
61 * RPC cruft for NFS 85 * RPC cruft for NFS
@@ -144,7 +168,10 @@ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_
144 clp->cl_proto = cl_init->proto; 168 clp->cl_proto = cl_init->proto;
145 169
146#ifdef CONFIG_NFS_V4 170#ifdef CONFIG_NFS_V4
147 INIT_LIST_HEAD(&clp->cl_delegations); 171 err = nfs_get_cb_ident_idr(clp, cl_init->minorversion);
172 if (err)
173 goto error_cleanup;
174
148 spin_lock_init(&clp->cl_lock); 175 spin_lock_init(&clp->cl_lock);
149 INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state); 176 INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state);
150 rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client"); 177 rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client");
@@ -170,21 +197,17 @@ error_0:
170} 197}
171 198
172#ifdef CONFIG_NFS_V4 199#ifdef CONFIG_NFS_V4
173/*
174 * Clears/puts all minor version specific parts from an nfs_client struct
175 * reverting it to minorversion 0.
176 */
177static void nfs4_clear_client_minor_version(struct nfs_client *clp)
178{
179#ifdef CONFIG_NFS_V4_1 200#ifdef CONFIG_NFS_V4_1
180 if (nfs4_has_session(clp)) { 201static void nfs4_shutdown_session(struct nfs_client *clp)
202{
203 if (nfs4_has_session(clp))
181 nfs4_destroy_session(clp->cl_session); 204 nfs4_destroy_session(clp->cl_session);
182 clp->cl_session = NULL;
183 }
184
185 clp->cl_mvops = nfs_v4_minor_ops[0];
186#endif /* CONFIG_NFS_V4_1 */
187} 205}
206#else /* CONFIG_NFS_V4_1 */
207static void nfs4_shutdown_session(struct nfs_client *clp)
208{
209}
210#endif /* CONFIG_NFS_V4_1 */
188 211
189/* 212/*
190 * Destroy the NFS4 callback service 213 * Destroy the NFS4 callback service
@@ -199,17 +222,49 @@ static void nfs4_shutdown_client(struct nfs_client *clp)
199{ 222{
200 if (__test_and_clear_bit(NFS_CS_RENEWD, &clp->cl_res_state)) 223 if (__test_and_clear_bit(NFS_CS_RENEWD, &clp->cl_res_state))
201 nfs4_kill_renewd(clp); 224 nfs4_kill_renewd(clp);
202 nfs4_clear_client_minor_version(clp); 225 nfs4_shutdown_session(clp);
203 nfs4_destroy_callback(clp); 226 nfs4_destroy_callback(clp);
204 if (__test_and_clear_bit(NFS_CS_IDMAP, &clp->cl_res_state)) 227 if (__test_and_clear_bit(NFS_CS_IDMAP, &clp->cl_res_state))
205 nfs_idmap_delete(clp); 228 nfs_idmap_delete(clp);
206 229
207 rpc_destroy_wait_queue(&clp->cl_rpcwaitq); 230 rpc_destroy_wait_queue(&clp->cl_rpcwaitq);
208} 231}
232
233/* idr_remove_all is not needed, as all ids are removed by nfs_put_client */
234void nfs_cleanup_cb_ident_idr(void)
235{
236 idr_destroy(&cb_ident_idr);
237}
238
239/* nfs_client_lock held */
240static void nfs_cb_idr_remove_locked(struct nfs_client *clp)
241{
242 if (clp->cl_cb_ident)
243 idr_remove(&cb_ident_idr, clp->cl_cb_ident);
244}
245
246static void pnfs_init_server(struct nfs_server *server)
247{
248 rpc_init_wait_queue(&server->roc_rpcwaitq, "pNFS ROC");
249}
250
209#else 251#else
210static void nfs4_shutdown_client(struct nfs_client *clp) 252static void nfs4_shutdown_client(struct nfs_client *clp)
211{ 253{
212} 254}
255
256void nfs_cleanup_cb_ident_idr(void)
257{
258}
259
260static void nfs_cb_idr_remove_locked(struct nfs_client *clp)
261{
262}
263
264static void pnfs_init_server(struct nfs_server *server)
265{
266}
267
213#endif /* CONFIG_NFS_V4 */ 268#endif /* CONFIG_NFS_V4 */
214 269
215/* 270/*
@@ -248,6 +303,7 @@ void nfs_put_client(struct nfs_client *clp)
248 303
249 if (atomic_dec_and_lock(&clp->cl_count, &nfs_client_lock)) { 304 if (atomic_dec_and_lock(&clp->cl_count, &nfs_client_lock)) {
250 list_del(&clp->cl_share_link); 305 list_del(&clp->cl_share_link);
306 nfs_cb_idr_remove_locked(clp);
251 spin_unlock(&nfs_client_lock); 307 spin_unlock(&nfs_client_lock);
252 308
253 BUG_ON(!list_empty(&clp->cl_superblocks)); 309 BUG_ON(!list_empty(&clp->cl_superblocks));
@@ -363,70 +419,28 @@ static int nfs_sockaddr_cmp(const struct sockaddr *sa1,
363 return 0; 419 return 0;
364} 420}
365 421
366/* 422/* Common match routine for v4.0 and v4.1 callback services */
367 * Find a client by IP address and protocol version 423bool
368 * - returns NULL if no such client 424nfs4_cb_match_client(const struct sockaddr *addr, struct nfs_client *clp,
369 */ 425 u32 minorversion)
370struct nfs_client *nfs_find_client(const struct sockaddr *addr, u32 nfsversion)
371{
372 struct nfs_client *clp;
373
374 spin_lock(&nfs_client_lock);
375 list_for_each_entry(clp, &nfs_client_list, cl_share_link) {
376 struct sockaddr *clap = (struct sockaddr *)&clp->cl_addr;
377
378 /* Don't match clients that failed to initialise properly */
379 if (!(clp->cl_cons_state == NFS_CS_READY ||
380 clp->cl_cons_state == NFS_CS_SESSION_INITING))
381 continue;
382
383 /* Different NFS versions cannot share the same nfs_client */
384 if (clp->rpc_ops->version != nfsversion)
385 continue;
386
387 /* Match only the IP address, not the port number */
388 if (!nfs_sockaddr_match_ipaddr(addr, clap))
389 continue;
390
391 atomic_inc(&clp->cl_count);
392 spin_unlock(&nfs_client_lock);
393 return clp;
394 }
395 spin_unlock(&nfs_client_lock);
396 return NULL;
397}
398
399/*
400 * Find a client by IP address and protocol version
401 * - returns NULL if no such client
402 */
403struct nfs_client *nfs_find_client_next(struct nfs_client *clp)
404{ 426{
405 struct sockaddr *sap = (struct sockaddr *)&clp->cl_addr; 427 struct sockaddr *clap = (struct sockaddr *)&clp->cl_addr;
406 u32 nfsvers = clp->rpc_ops->version;
407 428
408 spin_lock(&nfs_client_lock); 429 /* Don't match clients that failed to initialise */
409 list_for_each_entry_continue(clp, &nfs_client_list, cl_share_link) { 430 if (!(clp->cl_cons_state == NFS_CS_READY ||
410 struct sockaddr *clap = (struct sockaddr *)&clp->cl_addr; 431 clp->cl_cons_state == NFS_CS_SESSION_INITING))
432 return false;
411 433
412 /* Don't match clients that failed to initialise properly */ 434 /* Match the version and minorversion */
413 if (clp->cl_cons_state != NFS_CS_READY) 435 if (clp->rpc_ops->version != 4 ||
414 continue; 436 clp->cl_minorversion != minorversion)
437 return false;
415 438
416 /* Different NFS versions cannot share the same nfs_client */ 439 /* Match only the IP address, not the port number */
417 if (clp->rpc_ops->version != nfsvers) 440 if (!nfs_sockaddr_match_ipaddr(addr, clap))
418 continue; 441 return false;
419 442
420 /* Match only the IP address, not the port number */ 443 return true;
421 if (!nfs_sockaddr_match_ipaddr(sap, clap))
422 continue;
423
424 atomic_inc(&clp->cl_count);
425 spin_unlock(&nfs_client_lock);
426 return clp;
427 }
428 spin_unlock(&nfs_client_lock);
429 return NULL;
430} 444}
431 445
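nfs4_cb_match_client() deliberately compares only the IP address, never the port, since the server's callback connection may originate from an arbitrary source port. A minimal IPv4-only sketch of port-insensitive matching (IPv6 omitted for brevity):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>

/* Compare only the IPv4 address; the port is deliberately ignored. */
static int same_ipaddr(const struct sockaddr_in *a,
                       const struct sockaddr_in *b)
{
        return a->sin_addr.s_addr == b->sin_addr.s_addr;
}

int main(void)
{
        struct sockaddr_in a = { .sin_family = AF_INET, .sin_port = htons(2049) };
        struct sockaddr_in b = { .sin_family = AF_INET, .sin_port = htons(40001) };

        inet_pton(AF_INET, "192.0.2.7", &a.sin_addr);
        inet_pton(AF_INET, "192.0.2.7", &b.sin_addr);
        printf("match=%d\n", same_ipaddr(&a, &b));      /* 1: ports differ, still a match */
        return 0;
}
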
432/* 446/*
@@ -988,6 +1002,27 @@ static void nfs_server_copy_userdata(struct nfs_server *target, struct nfs_serve
988 target->options = source->options; 1002 target->options = source->options;
989} 1003}
990 1004
1005static void nfs_server_insert_lists(struct nfs_server *server)
1006{
1007 struct nfs_client *clp = server->nfs_client;
1008
1009 spin_lock(&nfs_client_lock);
1010 list_add_tail_rcu(&server->client_link, &clp->cl_superblocks);
1011 list_add_tail(&server->master_link, &nfs_volume_list);
1012 spin_unlock(&nfs_client_lock);
1013
1014}
1015
1016static void nfs_server_remove_lists(struct nfs_server *server)
1017{
1018 spin_lock(&nfs_client_lock);
1019 list_del_rcu(&server->client_link);
1020 list_del(&server->master_link);
1021 spin_unlock(&nfs_client_lock);
1022
1023 synchronize_rcu();
1024}
1025
991/* 1026/*
992 * Allocate and initialise a server record 1027 * Allocate and initialise a server record
993 */ 1028 */
@@ -1004,6 +1039,7 @@ static struct nfs_server *nfs_alloc_server(void)
1004 /* Zero out the NFS state stuff */ 1039 /* Zero out the NFS state stuff */
1005 INIT_LIST_HEAD(&server->client_link); 1040 INIT_LIST_HEAD(&server->client_link);
1006 INIT_LIST_HEAD(&server->master_link); 1041 INIT_LIST_HEAD(&server->master_link);
1042 INIT_LIST_HEAD(&server->delegations);
1007 1043
1008 atomic_set(&server->active, 0); 1044 atomic_set(&server->active, 0);
1009 1045
@@ -1019,6 +1055,8 @@ static struct nfs_server *nfs_alloc_server(void)
1019 return NULL; 1055 return NULL;
1020 } 1056 }
1021 1057
1058 pnfs_init_server(server);
1059
1022 return server; 1060 return server;
1023} 1061}
1024 1062
@@ -1029,11 +1067,8 @@ void nfs_free_server(struct nfs_server *server)
1029{ 1067{
1030 dprintk("--> nfs_free_server()\n"); 1068 dprintk("--> nfs_free_server()\n");
1031 1069
1070 nfs_server_remove_lists(server);
1032 unset_pnfs_layoutdriver(server); 1071 unset_pnfs_layoutdriver(server);
1033 spin_lock(&nfs_client_lock);
1034 list_del(&server->client_link);
1035 list_del(&server->master_link);
1036 spin_unlock(&nfs_client_lock);
1037 1072
1038 if (server->destroy != NULL) 1073 if (server->destroy != NULL)
1039 server->destroy(server); 1074 server->destroy(server);
@@ -1108,11 +1143,7 @@ struct nfs_server *nfs_create_server(const struct nfs_parsed_mount_data *data,
1108 (unsigned long long) server->fsid.major, 1143 (unsigned long long) server->fsid.major,
1109 (unsigned long long) server->fsid.minor); 1144 (unsigned long long) server->fsid.minor);
1110 1145
1111 spin_lock(&nfs_client_lock); 1146 nfs_server_insert_lists(server);
1112 list_add_tail(&server->client_link, &server->nfs_client->cl_superblocks);
1113 list_add_tail(&server->master_link, &nfs_volume_list);
1114 spin_unlock(&nfs_client_lock);
1115
1116 server->mount_time = jiffies; 1147 server->mount_time = jiffies;
1117 nfs_free_fattr(fattr); 1148 nfs_free_fattr(fattr);
1118 return server; 1149 return server;
@@ -1125,6 +1156,101 @@ error:
1125 1156
1126#ifdef CONFIG_NFS_V4 1157#ifdef CONFIG_NFS_V4
1127/* 1158/*
1159 * NFSv4.0 callback thread helper
1160 *
1161 * Find a client by IP address, protocol version, and minorversion
1162 *
1163 * Called from the pg_authenticate method. The callback identifier
1164 * is not used, as it has not been decoded yet.
1165 *
1166 * Returns NULL if no such client
1167 */
1168struct nfs_client *
1169nfs4_find_client_no_ident(const struct sockaddr *addr)
1170{
1171 struct nfs_client *clp;
1172
1173 spin_lock(&nfs_client_lock);
1174 list_for_each_entry(clp, &nfs_client_list, cl_share_link) {
1175 if (nfs4_cb_match_client(addr, clp, 0) == false)
1176 continue;
1177 atomic_inc(&clp->cl_count);
1178 spin_unlock(&nfs_client_lock);
1179 return clp;
1180 }
1181 spin_unlock(&nfs_client_lock);
1182 return NULL;
1183}
1184
1185/*
1186 * NFSv4.0 callback thread helper
1187 *
1188 * Find a client by callback identifier
1189 */
1190struct nfs_client *
1191nfs4_find_client_ident(int cb_ident)
1192{
1193 struct nfs_client *clp;
1194
1195 spin_lock(&nfs_client_lock);
1196 clp = idr_find(&cb_ident_idr, cb_ident);
1197 if (clp)
1198 atomic_inc(&clp->cl_count);
1199 spin_unlock(&nfs_client_lock);
1200 return clp;
1201}
1202
1203#if defined(CONFIG_NFS_V4_1)
1204/*
1205 * NFSv4.1 callback thread helper
1206 * For CB_COMPOUND calls, find a client by IP address, protocol version,
1207 * minorversion, and sessionID
1208 *
1209 * CREATE_SESSION triggers a CB_NULL ping from servers. The callback service
1210 * sessionid can only be set after the CREATE_SESSION return, so a CB_NULL
1211 * can arrive before the callback sessionid is set. For CB_NULL calls,
1212 * find a client by IP address, protocol version, and minorversion.
1213 *
1214 * Returns NULL if no such client
1215 */
1216struct nfs_client *
1217nfs4_find_client_sessionid(const struct sockaddr *addr,
1218 struct nfs4_sessionid *sid, int is_cb_compound)
1219{
1220 struct nfs_client *clp;
1221
1222 spin_lock(&nfs_client_lock);
1223 list_for_each_entry(clp, &nfs_client_list, cl_share_link) {
1224 if (nfs4_cb_match_client(addr, clp, 1) == false)
1225 continue;
1226
1227 if (!nfs4_has_session(clp))
1228 continue;
1229
1230 /* Match the sessionid unless this is a cb_null call */
1231 if (is_cb_compound && (memcmp(clp->cl_session->sess_id.data,
1232 sid->data, NFS4_MAX_SESSIONID_LEN) != 0))
1233 continue;
1234
1235 atomic_inc(&clp->cl_count);
1236 spin_unlock(&nfs_client_lock);
1237 return clp;
1238 }
1239 spin_unlock(&nfs_client_lock);
1240 return NULL;
1241}
1242
1243#else /* CONFIG_NFS_V4_1 */
1244
1245struct nfs_client *
1246nfs4_find_client_sessionid(const struct sockaddr *addr,
1247 struct nfs4_sessionid *sid, int is_cb_compound)
1248{
1249 return NULL;
1250}
1251#endif /* CONFIG_NFS_V4_1 */
1252
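Session IDs are opaque fixed-size blobs, so the sessionid match in nfs4_find_client_sessionid() reduces to a memcmp over NFS4_MAX_SESSIONID_LEN bytes, skipped for CB_NULL pings that can arrive before the callback sessionid is recorded. A standalone sketch of the comparison (the 16-byte length and contents are illustrative):

#include <stdio.h>
#include <string.h>

#define SESSIONID_LEN 16        /* stands in for NFS4_MAX_SESSIONID_LEN */

int main(void)
{
        unsigned char ours[SESSIONID_LEN]   = "abcdefghijklmno";
        unsigned char theirs[SESSIONID_LEN] = "abcdefghijklmno";

        /* Opaque fixed-size blobs: equality is a straight memcmp. */
        printf("match=%d\n", memcmp(ours, theirs, SESSIONID_LEN) == 0);
        return 0;
}
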
1253/*
1128 * Initialize the NFS4 callback service 1254 * Initialize the NFS4 callback service
1129 */ 1255 */
1130static int nfs4_init_callback(struct nfs_client *clp) 1256static int nfs4_init_callback(struct nfs_client *clp)
@@ -1342,11 +1468,7 @@ static int nfs4_server_common_setup(struct nfs_server *server,
1342 if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN) 1468 if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN)
1343 server->namelen = NFS4_MAXNAMLEN; 1469 server->namelen = NFS4_MAXNAMLEN;
1344 1470
1345 spin_lock(&nfs_client_lock); 1471 nfs_server_insert_lists(server);
1346 list_add_tail(&server->client_link, &server->nfs_client->cl_superblocks);
1347 list_add_tail(&server->master_link, &nfs_volume_list);
1348 spin_unlock(&nfs_client_lock);
1349
1350 server->mount_time = jiffies; 1472 server->mount_time = jiffies;
1351out: 1473out:
1352 nfs_free_fattr(fattr); 1474 nfs_free_fattr(fattr);
@@ -1551,11 +1673,7 @@ struct nfs_server *nfs_clone_server(struct nfs_server *source,
1551 if (error < 0) 1673 if (error < 0)
1552 goto out_free_server; 1674 goto out_free_server;
1553 1675
1554 spin_lock(&nfs_client_lock); 1676 nfs_server_insert_lists(server);
1555 list_add_tail(&server->client_link, &server->nfs_client->cl_superblocks);
1556 list_add_tail(&server->master_link, &nfs_volume_list);
1557 spin_unlock(&nfs_client_lock);
1558
1559 server->mount_time = jiffies; 1677 server->mount_time = jiffies;
1560 1678
1561 nfs_free_fattr(fattr_fsinfo); 1679 nfs_free_fattr(fattr_fsinfo);
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 1fd62fc49be3..364e4328f392 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -40,11 +40,23 @@ static void nfs_free_delegation(struct nfs_delegation *delegation)
40 call_rcu(&delegation->rcu, nfs_free_delegation_callback); 40 call_rcu(&delegation->rcu, nfs_free_delegation_callback);
41} 41}
42 42
43/**
44 * nfs_mark_delegation_referenced - set delegation's REFERENCED flag
45 * @delegation: delegation to process
46 *
47 */
43void nfs_mark_delegation_referenced(struct nfs_delegation *delegation) 48void nfs_mark_delegation_referenced(struct nfs_delegation *delegation)
44{ 49{
45 set_bit(NFS_DELEGATION_REFERENCED, &delegation->flags); 50 set_bit(NFS_DELEGATION_REFERENCED, &delegation->flags);
46} 51}
47 52
53/**
54 * nfs_have_delegation - check if inode has a delegation
55 * @inode: inode to check
56 * @flags: delegation types to check for
57 *
58 * Returns one if inode has the indicated delegation, otherwise zero.
59 */
48int nfs_have_delegation(struct inode *inode, fmode_t flags) 60int nfs_have_delegation(struct inode *inode, fmode_t flags)
49{ 61{
50 struct nfs_delegation *delegation; 62 struct nfs_delegation *delegation;
@@ -119,10 +131,15 @@ again:
119 return 0; 131 return 0;
120} 132}
121 133
122/* 134/**
123 * Set up a delegation on an inode 135 * nfs_inode_reclaim_delegation - process a delegation reclaim request
136 * @inode: inode to process
137 * @cred: credential to use for request
138 * @res: new delegation state from server
139 *
124 */ 140 */
125void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res) 141void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred,
142 struct nfs_openres *res)
126{ 143{
127 struct nfs_delegation *delegation; 144 struct nfs_delegation *delegation;
128 struct rpc_cred *oldcred = NULL; 145 struct rpc_cred *oldcred = NULL;
@@ -175,38 +192,52 @@ static struct inode *nfs_delegation_grab_inode(struct nfs_delegation *delegation
175 return inode; 192 return inode;
176} 193}
177 194
178static struct nfs_delegation *nfs_detach_delegation_locked(struct nfs_inode *nfsi, 195static struct nfs_delegation *
179 const nfs4_stateid *stateid, 196nfs_detach_delegation_locked(struct nfs_inode *nfsi,
180 struct nfs_client *clp) 197 struct nfs_server *server)
181{ 198{
182 struct nfs_delegation *delegation = 199 struct nfs_delegation *delegation =
183 rcu_dereference_protected(nfsi->delegation, 200 rcu_dereference_protected(nfsi->delegation,
184 lockdep_is_held(&clp->cl_lock)); 201 lockdep_is_held(&server->nfs_client->cl_lock));
185 202
186 if (delegation == NULL) 203 if (delegation == NULL)
187 goto nomatch; 204 goto nomatch;
205
188 spin_lock(&delegation->lock); 206 spin_lock(&delegation->lock);
189 if (stateid != NULL && memcmp(delegation->stateid.data, stateid->data,
190 sizeof(delegation->stateid.data)) != 0)
191 goto nomatch_unlock;
192 list_del_rcu(&delegation->super_list); 207 list_del_rcu(&delegation->super_list);
193 delegation->inode = NULL; 208 delegation->inode = NULL;
194 nfsi->delegation_state = 0; 209 nfsi->delegation_state = 0;
195 rcu_assign_pointer(nfsi->delegation, NULL); 210 rcu_assign_pointer(nfsi->delegation, NULL);
196 spin_unlock(&delegation->lock); 211 spin_unlock(&delegation->lock);
197 return delegation; 212 return delegation;
198nomatch_unlock:
199 spin_unlock(&delegation->lock);
200nomatch: 213nomatch:
201 return NULL; 214 return NULL;
202} 215}
203 216
204/* 217static struct nfs_delegation *nfs_detach_delegation(struct nfs_inode *nfsi,
205 * Set up a delegation on an inode 218 struct nfs_server *server)
219{
220 struct nfs_client *clp = server->nfs_client;
221 struct nfs_delegation *delegation;
222
223 spin_lock(&clp->cl_lock);
224 delegation = nfs_detach_delegation_locked(nfsi, server);
225 spin_unlock(&clp->cl_lock);
226 return delegation;
227}
228
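nfs_detach_delegation() is the usual lock-wrapper idiom: the _locked variant assumes the caller holds cl_lock, and the wrapper takes and drops the lock around a single call. A toy pthread version of the same split, with illustrative types:

#include <pthread.h>
#include <stdio.h>

/* Toy state; the real code detaches an nfs_delegation under cl_lock. */
static pthread_mutex_t cl_lock = PTHREAD_MUTEX_INITIALIZER;
static int delegation = 1;

static int detach_locked(void)          /* caller must hold cl_lock */
{
        int d = delegation;

        delegation = 0;
        return d;
}

static int detach(void)                 /* wrapper takes and drops the lock */
{
        int d;

        pthread_mutex_lock(&cl_lock);
        d = detach_locked();
        pthread_mutex_unlock(&cl_lock);
        return d;
}

int main(void)
{
        printf("detached=%d\n", detach());
        return 0;
}
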
229/**
230 * nfs_inode_set_delegation - set up a delegation on an inode
231 * @inode: inode to which delegation applies
232 * @cred: cred to use for subsequent delegation processing
233 * @res: new delegation state from server
234 *
235 * Returns zero on success, or a negative errno value.
206 */ 236 */
207int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res) 237int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
208{ 238{
209 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 239 struct nfs_server *server = NFS_SERVER(inode);
240 struct nfs_client *clp = server->nfs_client;
210 struct nfs_inode *nfsi = NFS_I(inode); 241 struct nfs_inode *nfsi = NFS_I(inode);
211 struct nfs_delegation *delegation, *old_delegation; 242 struct nfs_delegation *delegation, *old_delegation;
212 struct nfs_delegation *freeme = NULL; 243 struct nfs_delegation *freeme = NULL;
@@ -227,7 +258,7 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct
227 258
228 spin_lock(&clp->cl_lock); 259 spin_lock(&clp->cl_lock);
229 old_delegation = rcu_dereference_protected(nfsi->delegation, 260 old_delegation = rcu_dereference_protected(nfsi->delegation,
230 lockdep_is_held(&clp->cl_lock)); 261 lockdep_is_held(&clp->cl_lock));
231 if (old_delegation != NULL) { 262 if (old_delegation != NULL) {
232 if (memcmp(&delegation->stateid, &old_delegation->stateid, 263 if (memcmp(&delegation->stateid, &old_delegation->stateid,
233 sizeof(old_delegation->stateid)) == 0 && 264 sizeof(old_delegation->stateid)) == 0 &&
@@ -246,9 +277,9 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct
246 delegation = NULL; 277 delegation = NULL;
247 goto out; 278 goto out;
248 } 279 }
249 freeme = nfs_detach_delegation_locked(nfsi, NULL, clp); 280 freeme = nfs_detach_delegation_locked(nfsi, server);
250 } 281 }
251 list_add_rcu(&delegation->super_list, &clp->cl_delegations); 282 list_add_rcu(&delegation->super_list, &server->delegations);
252 nfsi->delegation_state = delegation->type; 283 nfsi->delegation_state = delegation->type;
253 rcu_assign_pointer(nfsi->delegation, delegation); 284 rcu_assign_pointer(nfsi->delegation, delegation);
254 delegation = NULL; 285 delegation = NULL;
@@ -290,73 +321,85 @@ out:
290 return err; 321 return err;
291} 322}
292 323
293/* 324/**
294 * Return all delegations that have been marked for return 325 * nfs_client_return_marked_delegations - return previously marked delegations
326 * @clp: nfs_client to process
327 *
328 * Returns zero on success, or a negative errno value.
295 */ 329 */
296int nfs_client_return_marked_delegations(struct nfs_client *clp) 330int nfs_client_return_marked_delegations(struct nfs_client *clp)
297{ 331{
298 struct nfs_delegation *delegation; 332 struct nfs_delegation *delegation;
333 struct nfs_server *server;
299 struct inode *inode; 334 struct inode *inode;
300 int err = 0; 335 int err = 0;
301 336
302restart: 337restart:
303 rcu_read_lock(); 338 rcu_read_lock();
304 list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) { 339 list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
305 if (!test_and_clear_bit(NFS_DELEGATION_RETURN, &delegation->flags)) 340 list_for_each_entry_rcu(delegation, &server->delegations,
306 continue; 341 super_list) {
307 inode = nfs_delegation_grab_inode(delegation); 342 if (!test_and_clear_bit(NFS_DELEGATION_RETURN,
308 if (inode == NULL) 343 &delegation->flags))
309 continue; 344 continue;
310 spin_lock(&clp->cl_lock); 345 inode = nfs_delegation_grab_inode(delegation);
311 delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL, clp); 346 if (inode == NULL)
312 spin_unlock(&clp->cl_lock); 347 continue;
313 rcu_read_unlock(); 348 delegation = nfs_detach_delegation(NFS_I(inode),
314 if (delegation != NULL) { 349 server);
315 filemap_flush(inode->i_mapping); 350 rcu_read_unlock();
316 err = __nfs_inode_return_delegation(inode, delegation, 0); 351
352 if (delegation != NULL) {
353 filemap_flush(inode->i_mapping);
354 err = __nfs_inode_return_delegation(inode,
355 delegation, 0);
356 }
357 iput(inode);
358 if (!err)
359 goto restart;
360 set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
361 return err;
317 } 362 }
318 iput(inode);
319 if (!err)
320 goto restart;
321 set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
322 return err;
323 } 363 }
324 rcu_read_unlock(); 364 rcu_read_unlock();
325 return 0; 365 return 0;
326} 366}
327 367
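nfs_client_return_marked_delegations() drops the RCU read lock to do blocking work (filemap_flush and the delegreturn RPC), after which the per-server lists may have changed, so it restarts the whole walk from the top rather than resuming mid-list. The mark is cleared first, which guarantees forward progress. The same drop-and-restart shape in miniature (a plain array stands in for the RCU lists):

#include <stdio.h>

static int marked[4] = { 0, 1, 0, 1 };

int main(void)
{
        int processed = 0;

restart:
        for (int i = 0; i < 4; i++) {
                if (!marked[i])
                        continue;
                marked[i] = 0;          /* test_and_clear before dropping the lock */
                processed++;            /* blocking work happens here, unlocked */
                goto restart;           /* the list may have changed: rescan */
        }
        printf("processed %d delegations\n", processed);
        return 0;
}
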
328/* 368/**
329 * This function returns the delegation without reclaiming opens 369 * nfs_inode_return_delegation_noreclaim - return delegation, don't reclaim opens
330 * or protecting against delegation reclaims. 370 * @inode: inode to process
331 * It is therefore really only safe to be called from 371 *
332 * nfs4_clear_inode() 372 * Does not protect against delegation reclaims, therefore really only safe
373 * to be called from nfs4_clear_inode().
333 */ 374 */
334void nfs_inode_return_delegation_noreclaim(struct inode *inode) 375void nfs_inode_return_delegation_noreclaim(struct inode *inode)
335{ 376{
336 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 377 struct nfs_server *server = NFS_SERVER(inode);
337 struct nfs_inode *nfsi = NFS_I(inode); 378 struct nfs_inode *nfsi = NFS_I(inode);
338 struct nfs_delegation *delegation; 379 struct nfs_delegation *delegation;
339 380
340 if (rcu_access_pointer(nfsi->delegation) != NULL) { 381 if (rcu_access_pointer(nfsi->delegation) != NULL) {
341 spin_lock(&clp->cl_lock); 382 delegation = nfs_detach_delegation(nfsi, server);
342 delegation = nfs_detach_delegation_locked(nfsi, NULL, clp);
343 spin_unlock(&clp->cl_lock);
344 if (delegation != NULL) 383 if (delegation != NULL)
345 nfs_do_return_delegation(inode, delegation, 0); 384 nfs_do_return_delegation(inode, delegation, 0);
346 } 385 }
347} 386}
348 387
388/**
389 * nfs_inode_return_delegation - synchronously return a delegation
390 * @inode: inode to process
391 *
392 * Returns zero on success, or a negative errno value.
393 */
349int nfs_inode_return_delegation(struct inode *inode) 394int nfs_inode_return_delegation(struct inode *inode)
350{ 395{
351 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 396 struct nfs_server *server = NFS_SERVER(inode);
352 struct nfs_inode *nfsi = NFS_I(inode); 397 struct nfs_inode *nfsi = NFS_I(inode);
353 struct nfs_delegation *delegation; 398 struct nfs_delegation *delegation;
354 int err = 0; 399 int err = 0;
355 400
356 if (rcu_access_pointer(nfsi->delegation) != NULL) { 401 if (rcu_access_pointer(nfsi->delegation) != NULL) {
357 spin_lock(&clp->cl_lock); 402 delegation = nfs_detach_delegation(nfsi, server);
358 delegation = nfs_detach_delegation_locked(nfsi, NULL, clp);
359 spin_unlock(&clp->cl_lock);
360 if (delegation != NULL) { 403 if (delegation != NULL) {
361 nfs_wb_all(inode); 404 nfs_wb_all(inode);
362 err = __nfs_inode_return_delegation(inode, delegation, 1); 405 err = __nfs_inode_return_delegation(inode, delegation, 1);
@@ -365,46 +408,61 @@ int nfs_inode_return_delegation(struct inode *inode)
365 return err; 408 return err;
366} 409}
367 410
368static void nfs_mark_return_delegation(struct nfs_client *clp, struct nfs_delegation *delegation) 411static void nfs_mark_return_delegation(struct nfs_delegation *delegation)
369{ 412{
413 struct nfs_client *clp = NFS_SERVER(delegation->inode)->nfs_client;
414
370 set_bit(NFS_DELEGATION_RETURN, &delegation->flags); 415 set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
371 set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state); 416 set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
372} 417}
373 418
374/* 419/**
375 * Return all delegations associated to a super block 420 * nfs_super_return_all_delegations - return delegations for one superblock
421 * @sb: sb to process
422 *
376 */ 423 */
377void nfs_super_return_all_delegations(struct super_block *sb) 424void nfs_super_return_all_delegations(struct super_block *sb)
378{ 425{
379 struct nfs_client *clp = NFS_SB(sb)->nfs_client; 426 struct nfs_server *server = NFS_SB(sb);
427 struct nfs_client *clp = server->nfs_client;
380 struct nfs_delegation *delegation; 428 struct nfs_delegation *delegation;
381 429
382 if (clp == NULL) 430 if (clp == NULL)
383 return; 431 return;
432
384 rcu_read_lock(); 433 rcu_read_lock();
385 list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) { 434 list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
386 spin_lock(&delegation->lock); 435 spin_lock(&delegation->lock);
387 if (delegation->inode != NULL && delegation->inode->i_sb == sb) 436 set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
388 set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
389 spin_unlock(&delegation->lock); 437 spin_unlock(&delegation->lock);
390 } 438 }
391 rcu_read_unlock(); 439 rcu_read_unlock();
440
392 if (nfs_client_return_marked_delegations(clp) != 0) 441 if (nfs_client_return_marked_delegations(clp) != 0)
393 nfs4_schedule_state_manager(clp); 442 nfs4_schedule_state_manager(clp);
394} 443}
395 444
396static 445static void nfs_mark_return_all_delegation_types(struct nfs_server *server,
397void nfs_client_mark_return_all_delegation_types(struct nfs_client *clp, fmode_t flags) 446 fmode_t flags)
398{ 447{
399 struct nfs_delegation *delegation; 448 struct nfs_delegation *delegation;
400 449
401 rcu_read_lock(); 450 list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
402 list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
403 if ((delegation->type == (FMODE_READ|FMODE_WRITE)) && !(flags & FMODE_WRITE)) 451 if ((delegation->type == (FMODE_READ|FMODE_WRITE)) && !(flags & FMODE_WRITE))
404 continue; 452 continue;
405 if (delegation->type & flags) 453 if (delegation->type & flags)
406 nfs_mark_return_delegation(clp, delegation); 454 nfs_mark_return_delegation(delegation);
407 } 455 }
456}
457
458static void nfs_client_mark_return_all_delegation_types(struct nfs_client *clp,
459 fmode_t flags)
460{
461 struct nfs_server *server;
462
463 rcu_read_lock();
464 list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
465 nfs_mark_return_all_delegation_types(server, flags);
408 rcu_read_unlock(); 466 rcu_read_unlock();
409} 467}
410 468
@@ -419,19 +477,32 @@ static void nfs_delegation_run_state_manager(struct nfs_client *clp)
419 nfs4_schedule_state_manager(clp); 477 nfs4_schedule_state_manager(clp);
420} 478}
421 479
480/**
481 * nfs_expire_all_delegation_types
482 * @clp: client to process
483 * @flags: delegation types to expire
484 *
485 */
422void nfs_expire_all_delegation_types(struct nfs_client *clp, fmode_t flags) 486void nfs_expire_all_delegation_types(struct nfs_client *clp, fmode_t flags)
423{ 487{
424 nfs_client_mark_return_all_delegation_types(clp, flags); 488 nfs_client_mark_return_all_delegation_types(clp, flags);
425 nfs_delegation_run_state_manager(clp); 489 nfs_delegation_run_state_manager(clp);
426} 490}
427 491
492/**
493 * nfs_expire_all_delegations
494 * @clp: client to process
495 *
496 */
428void nfs_expire_all_delegations(struct nfs_client *clp) 497void nfs_expire_all_delegations(struct nfs_client *clp)
429{ 498{
430 nfs_expire_all_delegation_types(clp, FMODE_READ|FMODE_WRITE); 499 nfs_expire_all_delegation_types(clp, FMODE_READ|FMODE_WRITE);
431} 500}
432 501
433/* 502/**
434 * Return all delegations following an NFS4ERR_CB_PATH_DOWN error. 503 * nfs_handle_cb_pathdown - return all delegations after NFS4ERR_CB_PATH_DOWN
504 * @clp: client to process
505 *
435 */ 506 */
436void nfs_handle_cb_pathdown(struct nfs_client *clp) 507void nfs_handle_cb_pathdown(struct nfs_client *clp)
437{ 508{
@@ -440,29 +511,43 @@ void nfs_handle_cb_pathdown(struct nfs_client *clp)
440 nfs_client_mark_return_all_delegations(clp); 511 nfs_client_mark_return_all_delegations(clp);
441} 512}
442 513
443static void nfs_client_mark_return_unreferenced_delegations(struct nfs_client *clp) 514static void nfs_mark_return_unreferenced_delegations(struct nfs_server *server)
444{ 515{
445 struct nfs_delegation *delegation; 516 struct nfs_delegation *delegation;
446 517
447 rcu_read_lock(); 518 list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
448 list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
449 if (test_and_clear_bit(NFS_DELEGATION_REFERENCED, &delegation->flags)) 519 if (test_and_clear_bit(NFS_DELEGATION_REFERENCED, &delegation->flags))
450 continue; 520 continue;
451 nfs_mark_return_delegation(clp, delegation); 521 nfs_mark_return_delegation(delegation);
452 } 522 }
453 rcu_read_unlock();
454} 523}
455 524
525/**
526 * nfs_expire_unreferenced_delegations - Eliminate unused delegations
527 * @clp: nfs_client to process
528 *
529 */
456void nfs_expire_unreferenced_delegations(struct nfs_client *clp) 530void nfs_expire_unreferenced_delegations(struct nfs_client *clp)
457{ 531{
458 nfs_client_mark_return_unreferenced_delegations(clp); 532 struct nfs_server *server;
533
534 rcu_read_lock();
535 list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
536 nfs_mark_return_unreferenced_delegations(server);
537 rcu_read_unlock();
538
459 nfs_delegation_run_state_manager(clp); 539 nfs_delegation_run_state_manager(clp);
460} 540}
461 541
462/* 542/**
463 * Asynchronous delegation recall! 543 * nfs_async_inode_return_delegation - asynchronously return a delegation
544 * @inode: inode to process
545 * @stateid: state ID information from CB_RECALL arguments
546 *
547 * Returns zero on success, or a negative errno value.
464 */ 548 */
465int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid) 549int nfs_async_inode_return_delegation(struct inode *inode,
550 const nfs4_stateid *stateid)
466{ 551{
467 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 552 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
468 struct nfs_delegation *delegation; 553 struct nfs_delegation *delegation;
@@ -474,22 +559,21 @@ int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *s
474 rcu_read_unlock(); 559 rcu_read_unlock();
475 return -ENOENT; 560 return -ENOENT;
476 } 561 }
477 562 nfs_mark_return_delegation(delegation);
478 nfs_mark_return_delegation(clp, delegation);
479 rcu_read_unlock(); 563 rcu_read_unlock();
564
480 nfs_delegation_run_state_manager(clp); 565 nfs_delegation_run_state_manager(clp);
481 return 0; 566 return 0;
482} 567}
483 568
484/* 569static struct inode *
485 * Retrieve the inode associated with a delegation 570nfs_delegation_find_inode_server(struct nfs_server *server,
486 */ 571 const struct nfs_fh *fhandle)
487struct inode *nfs_delegation_find_inode(struct nfs_client *clp, const struct nfs_fh *fhandle)
488{ 572{
489 struct nfs_delegation *delegation; 573 struct nfs_delegation *delegation;
490 struct inode *res = NULL; 574 struct inode *res = NULL;
491 rcu_read_lock(); 575
492 list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) { 576 list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
493 spin_lock(&delegation->lock); 577 spin_lock(&delegation->lock);
494 if (delegation->inode != NULL && 578 if (delegation->inode != NULL &&
495 nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) { 579 nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
@@ -499,49 +583,121 @@ struct inode *nfs_delegation_find_inode(struct nfs_client *clp, const struct nfs
499 if (res != NULL) 583 if (res != NULL)
500 break; 584 break;
501 } 585 }
586 return res;
587}
588
589/**
590 * nfs_delegation_find_inode - retrieve the inode associated with a delegation
591 * @clp: client state handle
592 * @fhandle: filehandle from a delegation recall
593 *
594 * Returns a pointer to the inode matching "fhandle", or NULL if a matching inode
595 * cannot be found.
596 */
597struct inode *nfs_delegation_find_inode(struct nfs_client *clp,
598 const struct nfs_fh *fhandle)
599{
600 struct nfs_server *server;
601 struct inode *res = NULL;
602
603 rcu_read_lock();
604 list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
605 res = nfs_delegation_find_inode_server(server, fhandle);
606 if (res != NULL)
607 break;
608 }
502 rcu_read_unlock(); 609 rcu_read_unlock();
503 return res; 610 return res;
504} 611}
505 612
506/* 613static void nfs_delegation_mark_reclaim_server(struct nfs_server *server)
507 * Mark all delegations as needing to be reclaimed 614{
615 struct nfs_delegation *delegation;
616
617 list_for_each_entry_rcu(delegation, &server->delegations, super_list)
618 set_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
619}
620
621/**
622 * nfs_delegation_mark_reclaim - mark all delegations as needing to be reclaimed
623 * @clp: nfs_client to process
624 *
508 */ 625 */
509void nfs_delegation_mark_reclaim(struct nfs_client *clp) 626void nfs_delegation_mark_reclaim(struct nfs_client *clp)
510{ 627{
511 struct nfs_delegation *delegation; 628 struct nfs_server *server;
629
512 rcu_read_lock(); 630 rcu_read_lock();
513 list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) 631 list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
514 set_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags); 632 nfs_delegation_mark_reclaim_server(server);
515 rcu_read_unlock(); 633 rcu_read_unlock();
516} 634}
517 635
518/* 636/**
519 * Reap all unclaimed delegations after reboot recovery is done 637 * nfs_delegation_reap_unclaimed - reap unclaimed delegations after reboot recovery is done
638 * @clp: nfs_client to process
639 *
520 */ 640 */
521void nfs_delegation_reap_unclaimed(struct nfs_client *clp) 641void nfs_delegation_reap_unclaimed(struct nfs_client *clp)
522{ 642{
523 struct nfs_delegation *delegation; 643 struct nfs_delegation *delegation;
644 struct nfs_server *server;
524 struct inode *inode; 645 struct inode *inode;
646
525restart: 647restart:
526 rcu_read_lock(); 648 rcu_read_lock();
527 list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) { 649 list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
528 if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) == 0) 650 list_for_each_entry_rcu(delegation, &server->delegations,
529 continue; 651 super_list) {
530 inode = nfs_delegation_grab_inode(delegation); 652 if (test_bit(NFS_DELEGATION_NEED_RECLAIM,
531 if (inode == NULL) 653 &delegation->flags) == 0)
532 continue; 654 continue;
533 spin_lock(&clp->cl_lock); 655 inode = nfs_delegation_grab_inode(delegation);
534 delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL, clp); 656 if (inode == NULL)
535 spin_unlock(&clp->cl_lock); 657 continue;
536 rcu_read_unlock(); 658 delegation = nfs_detach_delegation(NFS_I(inode),
537 if (delegation != NULL) 659 server);
538 nfs_free_delegation(delegation); 660 rcu_read_unlock();
539 iput(inode); 661
540 goto restart; 662 if (delegation != NULL)
663 nfs_free_delegation(delegation);
664 iput(inode);
665 goto restart;
666 }
541 } 667 }
542 rcu_read_unlock(); 668 rcu_read_unlock();
543} 669}
544 670
671/**
672 * nfs_delegations_present - check for existence of delegations
673 * @clp: client state handle
674 *
675 * Returns one if there are any nfs_delegation structures attached
676 * to this nfs_client.
677 */
678int nfs_delegations_present(struct nfs_client *clp)
679{
680 struct nfs_server *server;
681 int ret = 0;
682
683 rcu_read_lock();
684 list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
685 if (!list_empty(&server->delegations)) {
686 ret = 1;
687 break;
688 }
689 rcu_read_unlock();
690 return ret;
691}
692
693/**
694 * nfs4_copy_delegation_stateid - Copy inode's state ID information
695 * @dst: stateid data structure to fill in
696 * @inode: inode to check
697 *
 698 * Returns one and fills in "dst->data" if the inode had a delegation,
699 * otherwise zero is returned.
700 */
545int nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode) 701int nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode)
546{ 702{
547 struct nfs_inode *nfsi = NFS_I(inode); 703 struct nfs_inode *nfsi = NFS_I(inode);
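
The delegation.c changes above replace the single per-client list clp->cl_delegations with per-server lists, so every client-wide operation becomes a two-level RCU walk: the client's superblocks first, then each server's delegations. A minimal sketch of that pattern, assuming only the list heads visible in this diff (the callback is hypothetical):

	/* Sketch: visit every delegation held by a client under RCU,
	 * using the per-server lists introduced by this patch. */
	static void nfs_for_each_delegation(struct nfs_client *clp,
					    void (*fn)(struct nfs_delegation *))
	{
		struct nfs_server *server;
		struct nfs_delegation *delegation;

		rcu_read_lock();
		list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
			list_for_each_entry_rcu(delegation, &server->delegations,
						super_list)
				fn(delegation);
		rcu_read_unlock();
	}

nfs_delegation_mark_reclaim() above is exactly this shape with set_bit() as the loop body; nfs_delegation_reap_unclaimed() is the same walk with a detach-and-restart body.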
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
index 2026304bda19..d9322e490c56 100644
--- a/fs/nfs/delegation.h
+++ b/fs/nfs/delegation.h
@@ -44,6 +44,7 @@ void nfs_expire_all_delegation_types(struct nfs_client *clp, fmode_t flags);
44void nfs_expire_unreferenced_delegations(struct nfs_client *clp); 44void nfs_expire_unreferenced_delegations(struct nfs_client *clp);
45void nfs_handle_cb_pathdown(struct nfs_client *clp); 45void nfs_handle_cb_pathdown(struct nfs_client *clp);
46int nfs_client_return_marked_delegations(struct nfs_client *clp); 46int nfs_client_return_marked_delegations(struct nfs_client *clp);
47int nfs_delegations_present(struct nfs_client *clp);
47 48
48void nfs_delegation_mark_reclaim(struct nfs_client *clp); 49void nfs_delegation_mark_reclaim(struct nfs_client *clp);
49void nfs_delegation_reap_unclaimed(struct nfs_client *clp); 50void nfs_delegation_reap_unclaimed(struct nfs_client *clp);
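
The newly exported nfs_delegations_present() gives callers a cheap test for whether any delegation work is needed at all. A hypothetical caller (sketch only, not from this patch) might look like:

	/* Hypothetical caller: only schedule reclaim work when the
	 * client actually holds delegations. */
	static void maybe_mark_reclaim(struct nfs_client *clp)
	{
		if (!nfs_delegations_present(clp))
			return;
		nfs_delegation_mark_reclaim(clp);
	}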
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index d33da530097a..abe4f0c8dc5f 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -33,8 +33,8 @@
33#include <linux/namei.h> 33#include <linux/namei.h>
34#include <linux/mount.h> 34#include <linux/mount.h>
35#include <linux/sched.h> 35#include <linux/sched.h>
36#include <linux/vmalloc.h>
37#include <linux/kmemleak.h> 36#include <linux/kmemleak.h>
37#include <linux/xattr.h>
38 38
39#include "delegation.h" 39#include "delegation.h"
40#include "iostat.h" 40#include "iostat.h"
@@ -125,9 +125,10 @@ const struct inode_operations nfs4_dir_inode_operations = {
125 .permission = nfs_permission, 125 .permission = nfs_permission,
126 .getattr = nfs_getattr, 126 .getattr = nfs_getattr,
127 .setattr = nfs_setattr, 127 .setattr = nfs_setattr,
128 .getxattr = nfs4_getxattr, 128 .getxattr = generic_getxattr,
129 .setxattr = nfs4_setxattr, 129 .setxattr = generic_setxattr,
130 .listxattr = nfs4_listxattr, 130 .listxattr = generic_listxattr,
131 .removexattr = generic_removexattr,
131}; 132};
132 133
133#endif /* CONFIG_NFS_V4 */ 134#endif /* CONFIG_NFS_V4 */
@@ -172,7 +173,7 @@ struct nfs_cache_array {
172 struct nfs_cache_array_entry array[0]; 173 struct nfs_cache_array_entry array[0];
173}; 174};
174 175
175typedef __be32 * (*decode_dirent_t)(struct xdr_stream *, struct nfs_entry *, struct nfs_server *, int); 176typedef int (*decode_dirent_t)(struct xdr_stream *, struct nfs_entry *, int);
176typedef struct { 177typedef struct {
177 struct file *file; 178 struct file *file;
178 struct page *page; 179 struct page *page;
@@ -378,14 +379,14 @@ error:
378 return error; 379 return error;
379} 380}
380 381
381/* Fill in an entry based on the xdr code stored in desc->page */ 382static int xdr_decode(nfs_readdir_descriptor_t *desc,
382static 383 struct nfs_entry *entry, struct xdr_stream *xdr)
383int xdr_decode(nfs_readdir_descriptor_t *desc, struct nfs_entry *entry, struct xdr_stream *stream)
384{ 384{
385 __be32 *p = desc->decode(stream, entry, NFS_SERVER(desc->file->f_path.dentry->d_inode), desc->plus); 385 int error;
386 if (IS_ERR(p))
387 return PTR_ERR(p);
388 386
387 error = desc->decode(xdr, entry, desc->plus);
388 if (error)
389 return error;
389 entry->fattr->time_start = desc->timestamp; 390 entry->fattr->time_start = desc->timestamp;
390 entry->fattr->gencount = desc->gencount; 391 entry->fattr->gencount = desc->gencount;
391 return 0; 392 return 0;
@@ -459,25 +460,26 @@ out:
459/* Perform conversion from xdr to cache array */ 460/* Perform conversion from xdr to cache array */
460static 461static
461int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *entry, 462int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *entry,
462 void *xdr_page, struct page *page, unsigned int buflen) 463 struct page **xdr_pages, struct page *page, unsigned int buflen)
463{ 464{
464 struct xdr_stream stream; 465 struct xdr_stream stream;
465 struct xdr_buf buf; 466 struct xdr_buf buf = {
466 __be32 *ptr = xdr_page; 467 .pages = xdr_pages,
468 .page_len = buflen,
469 .buflen = buflen,
470 .len = buflen,
471 };
472 struct page *scratch;
467 struct nfs_cache_array *array; 473 struct nfs_cache_array *array;
468 unsigned int count = 0; 474 unsigned int count = 0;
469 int status; 475 int status;
470 476
471 buf.head->iov_base = xdr_page; 477 scratch = alloc_page(GFP_KERNEL);
472 buf.head->iov_len = buflen; 478 if (scratch == NULL)
473 buf.tail->iov_len = 0; 479 return -ENOMEM;
474 buf.page_base = 0;
475 buf.page_len = 0;
476 buf.buflen = buf.head->iov_len;
477 buf.len = buf.head->iov_len;
478
479 xdr_init_decode(&stream, &buf, ptr);
480 480
481 xdr_init_decode(&stream, &buf, NULL);
482 xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
481 483
482 do { 484 do {
483 status = xdr_decode(desc, entry, &stream); 485 status = xdr_decode(desc, entry, &stream);
@@ -506,6 +508,8 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en
506 } else 508 } else
507 status = PTR_ERR(array); 509 status = PTR_ERR(array);
508 } 510 }
511
512 put_page(scratch);
509 return status; 513 return status;
510} 514}
511 515
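
The filler hunk above is the heart of the dir.c conversion: instead of mapping the page array into one contiguous buffer with vm_map_ram(), the xdr_stream now decodes directly from the pages, and a single scratch page lets xdr_inline_decode() hand back items that straddle a page boundary. Reduced to its essentials (identifiers as in the hunk, error paths trimmed), the setup is:

	struct xdr_buf buf = {
		.pages    = xdr_pages,	/* data stays in the page array */
		.page_len = buflen,
		.buflen   = buflen,
		.len      = buflen,
	};
	struct xdr_stream stream;
	struct page *scratch = alloc_page(GFP_KERNEL);

	if (scratch == NULL)
		return -ENOMEM;
	xdr_init_decode(&stream, &buf, NULL);	/* no inline head data */
	/* Objects crossing a page boundary are copied here first: */
	xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);

This removes the dependency on vmalloc address space that vm_map_ram() imposed on every readdir, which is also why the vm_unmap_ram() call disappears from nfs_readdir_free_large_page() below.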
@@ -521,7 +525,6 @@ static
521void nfs_readdir_free_large_page(void *ptr, struct page **pages, 525void nfs_readdir_free_large_page(void *ptr, struct page **pages,
522 unsigned int npages) 526 unsigned int npages)
523{ 527{
524 vm_unmap_ram(ptr, npages);
525 nfs_readdir_free_pagearray(pages, npages); 528 nfs_readdir_free_pagearray(pages, npages);
526} 529}
527 530
@@ -530,9 +533,8 @@ void nfs_readdir_free_large_page(void *ptr, struct page **pages,
530 * to nfs_readdir_free_large_page 533 * to nfs_readdir_free_large_page
531 */ 534 */
532static 535static
533void *nfs_readdir_large_page(struct page **pages, unsigned int npages) 536int nfs_readdir_large_page(struct page **pages, unsigned int npages)
534{ 537{
535 void *ptr;
536 unsigned int i; 538 unsigned int i;
537 539
538 for (i = 0; i < npages; i++) { 540 for (i = 0; i < npages; i++) {
@@ -541,13 +543,11 @@ void *nfs_readdir_large_page(struct page **pages, unsigned int npages)
541 goto out_freepages; 543 goto out_freepages;
542 pages[i] = page; 544 pages[i] = page;
543 } 545 }
546 return 0;
544 547
545 ptr = vm_map_ram(pages, npages, 0, PAGE_KERNEL);
546 if (!IS_ERR_OR_NULL(ptr))
547 return ptr;
548out_freepages: 548out_freepages:
549 nfs_readdir_free_pagearray(pages, i); 549 nfs_readdir_free_pagearray(pages, i);
550 return NULL; 550 return -ENOMEM;
551} 551}
552 552
553static 553static
@@ -566,6 +566,7 @@ int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page,
566 entry.eof = 0; 566 entry.eof = 0;
567 entry.fh = nfs_alloc_fhandle(); 567 entry.fh = nfs_alloc_fhandle();
568 entry.fattr = nfs_alloc_fattr(); 568 entry.fattr = nfs_alloc_fattr();
569 entry.server = NFS_SERVER(inode);
569 if (entry.fh == NULL || entry.fattr == NULL) 570 if (entry.fh == NULL || entry.fattr == NULL)
570 goto out; 571 goto out;
571 572
@@ -577,8 +578,8 @@ int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page,
577 memset(array, 0, sizeof(struct nfs_cache_array)); 578 memset(array, 0, sizeof(struct nfs_cache_array));
578 array->eof_index = -1; 579 array->eof_index = -1;
579 580
580 pages_ptr = nfs_readdir_large_page(pages, array_size); 581 status = nfs_readdir_large_page(pages, array_size);
581 if (!pages_ptr) 582 if (status < 0)
582 goto out_release_array; 583 goto out_release_array;
583 do { 584 do {
584 unsigned int pglen; 585 unsigned int pglen;
@@ -587,7 +588,7 @@ int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page,
587 if (status < 0) 588 if (status < 0)
588 break; 589 break;
589 pglen = status; 590 pglen = status;
590 status = nfs_readdir_page_filler(desc, &entry, pages_ptr, page, pglen); 591 status = nfs_readdir_page_filler(desc, &entry, pages, page, pglen);
591 if (status < 0) { 592 if (status < 0) {
592 if (status == -ENOSPC) 593 if (status == -ENOSPC)
593 status = 0; 594 status = 0;
@@ -1221,7 +1222,7 @@ static struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, stru
1221 goto out_unblock_sillyrename; 1222 goto out_unblock_sillyrename;
1222 } 1223 }
1223 inode = nfs_fhget(dentry->d_sb, fhandle, fattr); 1224 inode = nfs_fhget(dentry->d_sb, fhandle, fattr);
1224 res = (struct dentry *)inode; 1225 res = ERR_CAST(inode);
1225 if (IS_ERR(res)) 1226 if (IS_ERR(res))
1226 goto out_unblock_sillyrename; 1227 goto out_unblock_sillyrename;
1227 1228
@@ -1355,8 +1356,7 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry
1355 if (nd->flags & LOOKUP_CREATE) { 1356 if (nd->flags & LOOKUP_CREATE) {
1356 attr.ia_mode = nd->intent.open.create_mode; 1357 attr.ia_mode = nd->intent.open.create_mode;
1357 attr.ia_valid = ATTR_MODE; 1358 attr.ia_valid = ATTR_MODE;
1358 if (!IS_POSIXACL(dir)) 1359 attr.ia_mode &= ~current_umask();
1359 attr.ia_mode &= ~current_umask();
1360 } else { 1360 } else {
1361 open_flags &= ~(O_EXCL | O_CREAT); 1361 open_flags &= ~(O_EXCL | O_CREAT);
1362 attr.ia_valid = 0; 1362 attr.ia_valid = 0;
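
One small cleanup in nfs_lookup() deserves a note: ERR_CAST() replaces the open-coded (struct dentry *) cast. The two are equivalent, but ERR_CAST() states the intent, converting only the error encoding between pointer types. A sketch of the idiom (d_obtain_alias() is used here purely as a hypothetical consumer):

	static struct dentry *example_lookup(struct super_block *sb,
					     struct nfs_fh *fh,
					     struct nfs_fattr *fattr)
	{
		struct inode *inode = nfs_fhget(sb, fh, fattr);

		if (IS_ERR(inode))
			return ERR_CAST(inode);	/* carry the errno across types */
		return d_obtain_alias(inode);
	}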
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
index 4e2d9b6b1380..18696882f1c6 100644
--- a/fs/nfs/idmap.c
+++ b/fs/nfs/idmap.c
@@ -238,7 +238,7 @@ int nfs_map_gid_to_group(struct nfs_client *clp, __u32 gid, char *buf, size_t bu
238 return nfs_idmap_lookup_name(gid, "group", buf, buflen); 238 return nfs_idmap_lookup_name(gid, "group", buf, buflen);
239} 239}
240 240
241#else /* CONFIG_NFS_USE_IDMAPPER not defined */ 241#else /* CONFIG_NFS_USE_NEW_IDMAPPER not defined */
242 242
243#include <linux/module.h> 243#include <linux/module.h>
244#include <linux/mutex.h> 244#include <linux/mutex.h>
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 017daa3bed38..ce00b704452c 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1410,9 +1410,9 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
1410 */ 1410 */
1411void nfs4_evict_inode(struct inode *inode) 1411void nfs4_evict_inode(struct inode *inode)
1412{ 1412{
1413 pnfs_destroy_layout(NFS_I(inode));
1413 truncate_inode_pages(&inode->i_data, 0); 1414 truncate_inode_pages(&inode->i_data, 0);
1414 end_writeback(inode); 1415 end_writeback(inode);
1415 pnfs_destroy_layout(NFS_I(inode));
1416 /* If we are holding a delegation, return it! */ 1416 /* If we are holding a delegation, return it! */
1417 nfs_inode_return_delegation_noreclaim(inode); 1417 nfs_inode_return_delegation_noreclaim(inode);
1418 /* First call standard NFS clear_inode() code */ 1418 /* First call standard NFS clear_inode() code */
@@ -1619,6 +1619,7 @@ static void __exit exit_nfs_fs(void)
1619#ifdef CONFIG_PROC_FS 1619#ifdef CONFIG_PROC_FS
1620 rpc_proc_unregister("nfs"); 1620 rpc_proc_unregister("nfs");
1621#endif 1621#endif
1622 nfs_cleanup_cb_ident_idr();
1622 unregister_nfs_fs(); 1623 unregister_nfs_fs();
1623 nfs_fs_proc_exit(); 1624 nfs_fs_proc_exit();
1624 nfsiod_stop(); 1625 nfsiod_stop();
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index e6356b750b77..bfa3a34af801 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -128,9 +128,13 @@ extern void nfs_umount(const struct nfs_mount_request *info);
128/* client.c */ 128/* client.c */
129extern struct rpc_program nfs_program; 129extern struct rpc_program nfs_program;
130 130
131extern void nfs_cleanup_cb_ident_idr(void);
131extern void nfs_put_client(struct nfs_client *); 132extern void nfs_put_client(struct nfs_client *);
132extern struct nfs_client *nfs_find_client(const struct sockaddr *, u32); 133extern struct nfs_client *nfs4_find_client_no_ident(const struct sockaddr *);
133extern struct nfs_client *nfs_find_client_next(struct nfs_client *); 134extern struct nfs_client *nfs4_find_client_ident(int);
135extern struct nfs_client *
136nfs4_find_client_sessionid(const struct sockaddr *, struct nfs4_sessionid *,
137 int);
134extern struct nfs_server *nfs_create_server( 138extern struct nfs_server *nfs_create_server(
135 const struct nfs_parsed_mount_data *, 139 const struct nfs_parsed_mount_data *,
136 struct nfs_fh *); 140 struct nfs_fh *);
@@ -185,17 +189,20 @@ extern int __init nfs_init_directcache(void);
185extern void nfs_destroy_directcache(void); 189extern void nfs_destroy_directcache(void);
186 190
187/* nfs2xdr.c */ 191/* nfs2xdr.c */
188extern int nfs_stat_to_errno(int); 192extern int nfs_stat_to_errno(enum nfs_stat);
189extern struct rpc_procinfo nfs_procedures[]; 193extern struct rpc_procinfo nfs_procedures[];
190extern __be32 *nfs_decode_dirent(struct xdr_stream *, struct nfs_entry *, struct nfs_server *, int); 194extern int nfs2_decode_dirent(struct xdr_stream *,
195 struct nfs_entry *, int);
191 196
192/* nfs3xdr.c */ 197/* nfs3xdr.c */
193extern struct rpc_procinfo nfs3_procedures[]; 198extern struct rpc_procinfo nfs3_procedures[];
194extern __be32 *nfs3_decode_dirent(struct xdr_stream *, struct nfs_entry *, struct nfs_server *, int); 199extern int nfs3_decode_dirent(struct xdr_stream *,
200 struct nfs_entry *, int);
195 201
196/* nfs4xdr.c */ 202/* nfs4xdr.c */
197#ifdef CONFIG_NFS_V4 203#ifdef CONFIG_NFS_V4
198extern __be32 *nfs4_decode_dirent(struct xdr_stream *, struct nfs_entry *, struct nfs_server *, int); 204extern int nfs4_decode_dirent(struct xdr_stream *,
205 struct nfs_entry *, int);
199#endif 206#endif
200#ifdef CONFIG_NFS_V4_1 207#ifdef CONFIG_NFS_V4_1
201extern const u32 nfs41_maxread_overhead; 208extern const u32 nfs41_maxread_overhead;
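
The prototype changes above define the new decode_dirent_t contract: the per-version decoders take just the xdr_stream, the entry to fill, and the plus flag, and return 0 on success, -EAGAIN when the stream is exhausted, -EBADCOOKIE at end of directory, or -EIO on a malformed entry. A conforming skeleton, assuming that contract (see nfs2_decode_dirent() below for the real thing):

	static int example_decode_dirent(struct xdr_stream *xdr,
					 struct nfs_entry *entry, int plus)
	{
		__be32 *p = xdr_inline_decode(xdr, 4);

		if (p == NULL)
			return -EAGAIN;		/* out of buffered data */
		if (*p == xdr_zero) {		/* value-follows == FALSE */
			entry->eof = 1;
			return -EBADCOOKIE;
		}
		/* ... decode fileid, filename and cookie into *entry ... */
		return 0;
	}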
diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c
index 4f981f1f6689..d4c2d6b7507e 100644
--- a/fs/nfs/mount_clnt.c
+++ b/fs/nfs/mount_clnt.c
@@ -236,10 +236,8 @@ void nfs_umount(const struct nfs_mount_request *info)
236 .authflavor = RPC_AUTH_UNIX, 236 .authflavor = RPC_AUTH_UNIX,
237 .flags = RPC_CLNT_CREATE_NOPING, 237 .flags = RPC_CLNT_CREATE_NOPING,
238 }; 238 };
239 struct mountres result;
240 struct rpc_message msg = { 239 struct rpc_message msg = {
241 .rpc_argp = info->dirpath, 240 .rpc_argp = info->dirpath,
242 .rpc_resp = &result,
243 }; 241 };
244 struct rpc_clnt *clnt; 242 struct rpc_clnt *clnt;
245 int status; 243 int status;
@@ -248,7 +246,7 @@ void nfs_umount(const struct nfs_mount_request *info)
248 args.flags |= RPC_CLNT_CREATE_NONPRIVPORT; 246 args.flags |= RPC_CLNT_CREATE_NONPRIVPORT;
249 247
250 clnt = rpc_create(&args); 248 clnt = rpc_create(&args);
251 if (unlikely(IS_ERR(clnt))) 249 if (IS_ERR(clnt))
252 goto out_clnt_err; 250 goto out_clnt_err;
253 251
254 dprintk("NFS: sending UMNT request for %s:%s\n", 252 dprintk("NFS: sending UMNT request for %s:%s\n",
@@ -280,29 +278,20 @@ out_call_err:
280 * XDR encode/decode functions for MOUNT 278 * XDR encode/decode functions for MOUNT
281 */ 279 */
282 280
283static int encode_mntdirpath(struct xdr_stream *xdr, const char *pathname) 281static void encode_mntdirpath(struct xdr_stream *xdr, const char *pathname)
284{ 282{
285 const u32 pathname_len = strlen(pathname); 283 const u32 pathname_len = strlen(pathname);
286 __be32 *p; 284 __be32 *p;
287 285
288 if (unlikely(pathname_len > MNTPATHLEN)) 286 BUG_ON(pathname_len > MNTPATHLEN);
289 return -EIO; 287 p = xdr_reserve_space(xdr, 4 + pathname_len);
290
291 p = xdr_reserve_space(xdr, sizeof(u32) + pathname_len);
292 if (unlikely(p == NULL))
293 return -EIO;
294 xdr_encode_opaque(p, pathname, pathname_len); 288 xdr_encode_opaque(p, pathname, pathname_len);
295
296 return 0;
297} 289}
298 290
299static int mnt_enc_dirpath(struct rpc_rqst *req, __be32 *p, 291static void mnt_xdr_enc_dirpath(struct rpc_rqst *req, struct xdr_stream *xdr,
300 const char *dirpath) 292 const char *dirpath)
301{ 293{
302 struct xdr_stream xdr; 294 encode_mntdirpath(xdr, dirpath);
303
304 xdr_init_encode(&xdr, &req->rq_snd_buf, p);
305 return encode_mntdirpath(&xdr, dirpath);
306} 295}
307 296
308/* 297/*
@@ -320,10 +309,10 @@ static int decode_status(struct xdr_stream *xdr, struct mountres *res)
320 u32 status; 309 u32 status;
321 __be32 *p; 310 __be32 *p;
322 311
323 p = xdr_inline_decode(xdr, sizeof(status)); 312 p = xdr_inline_decode(xdr, 4);
324 if (unlikely(p == NULL)) 313 if (unlikely(p == NULL))
325 return -EIO; 314 return -EIO;
326 status = ntohl(*p); 315 status = be32_to_cpup(p);
327 316
328 for (i = 0; i < ARRAY_SIZE(mnt_errtbl); i++) { 317 for (i = 0; i < ARRAY_SIZE(mnt_errtbl); i++) {
329 if (mnt_errtbl[i].status == status) { 318 if (mnt_errtbl[i].status == status) {
@@ -351,18 +340,16 @@ static int decode_fhandle(struct xdr_stream *xdr, struct mountres *res)
351 return 0; 340 return 0;
352} 341}
353 342
354static int mnt_dec_mountres(struct rpc_rqst *req, __be32 *p, 343static int mnt_xdr_dec_mountres(struct rpc_rqst *req,
355 struct mountres *res) 344 struct xdr_stream *xdr,
345 struct mountres *res)
356{ 346{
357 struct xdr_stream xdr;
358 int status; 347 int status;
359 348
360 xdr_init_decode(&xdr, &req->rq_rcv_buf, p); 349 status = decode_status(xdr, res);
361
362 status = decode_status(&xdr, res);
363 if (unlikely(status != 0 || res->errno != 0)) 350 if (unlikely(status != 0 || res->errno != 0))
364 return status; 351 return status;
365 return decode_fhandle(&xdr, res); 352 return decode_fhandle(xdr, res);
366} 353}
367 354
368static int decode_fhs_status(struct xdr_stream *xdr, struct mountres *res) 355static int decode_fhs_status(struct xdr_stream *xdr, struct mountres *res)
@@ -371,10 +358,10 @@ static int decode_fhs_status(struct xdr_stream *xdr, struct mountres *res)
371 u32 status; 358 u32 status;
372 __be32 *p; 359 __be32 *p;
373 360
374 p = xdr_inline_decode(xdr, sizeof(status)); 361 p = xdr_inline_decode(xdr, 4);
375 if (unlikely(p == NULL)) 362 if (unlikely(p == NULL))
376 return -EIO; 363 return -EIO;
377 status = ntohl(*p); 364 status = be32_to_cpup(p);
378 365
379 for (i = 0; i < ARRAY_SIZE(mnt3_errtbl); i++) { 366 for (i = 0; i < ARRAY_SIZE(mnt3_errtbl); i++) {
380 if (mnt3_errtbl[i].status == status) { 367 if (mnt3_errtbl[i].status == status) {
@@ -394,11 +381,11 @@ static int decode_fhandle3(struct xdr_stream *xdr, struct mountres *res)
394 u32 size; 381 u32 size;
395 __be32 *p; 382 __be32 *p;
396 383
397 p = xdr_inline_decode(xdr, sizeof(size)); 384 p = xdr_inline_decode(xdr, 4);
398 if (unlikely(p == NULL)) 385 if (unlikely(p == NULL))
399 return -EIO; 386 return -EIO;
400 387
401 size = ntohl(*p++); 388 size = be32_to_cpup(p);
402 if (size > NFS3_FHSIZE || size == 0) 389 if (size > NFS3_FHSIZE || size == 0)
403 return -EIO; 390 return -EIO;
404 391
@@ -421,15 +408,15 @@ static int decode_auth_flavors(struct xdr_stream *xdr, struct mountres *res)
421 if (*count == 0) 408 if (*count == 0)
422 return 0; 409 return 0;
423 410
424 p = xdr_inline_decode(xdr, sizeof(entries)); 411 p = xdr_inline_decode(xdr, 4);
425 if (unlikely(p == NULL)) 412 if (unlikely(p == NULL))
426 return -EIO; 413 return -EIO;
427 entries = ntohl(*p); 414 entries = be32_to_cpup(p);
428 dprintk("NFS: received %u auth flavors\n", entries); 415 dprintk("NFS: received %u auth flavors\n", entries);
429 if (entries > NFS_MAX_SECFLAVORS) 416 if (entries > NFS_MAX_SECFLAVORS)
430 entries = NFS_MAX_SECFLAVORS; 417 entries = NFS_MAX_SECFLAVORS;
431 418
432 p = xdr_inline_decode(xdr, sizeof(u32) * entries); 419 p = xdr_inline_decode(xdr, 4 * entries);
433 if (unlikely(p == NULL)) 420 if (unlikely(p == NULL))
434 return -EIO; 421 return -EIO;
435 422
@@ -437,7 +424,7 @@ static int decode_auth_flavors(struct xdr_stream *xdr, struct mountres *res)
437 entries = *count; 424 entries = *count;
438 425
439 for (i = 0; i < entries; i++) { 426 for (i = 0; i < entries; i++) {
440 flavors[i] = ntohl(*p++); 427 flavors[i] = be32_to_cpup(p++);
441 dprintk("NFS: auth flavor[%u]: %d\n", i, flavors[i]); 428 dprintk("NFS: auth flavor[%u]: %d\n", i, flavors[i]);
442 } 429 }
443 *count = i; 430 *count = i;
@@ -445,30 +432,28 @@ static int decode_auth_flavors(struct xdr_stream *xdr, struct mountres *res)
445 return 0; 432 return 0;
446} 433}
447 434
448static int mnt_dec_mountres3(struct rpc_rqst *req, __be32 *p, 435static int mnt_xdr_dec_mountres3(struct rpc_rqst *req,
449 struct mountres *res) 436 struct xdr_stream *xdr,
437 struct mountres *res)
450{ 438{
451 struct xdr_stream xdr;
452 int status; 439 int status;
453 440
454 xdr_init_decode(&xdr, &req->rq_rcv_buf, p); 441 status = decode_fhs_status(xdr, res);
455
456 status = decode_fhs_status(&xdr, res);
457 if (unlikely(status != 0 || res->errno != 0)) 442 if (unlikely(status != 0 || res->errno != 0))
458 return status; 443 return status;
459 status = decode_fhandle3(&xdr, res); 444 status = decode_fhandle3(xdr, res);
460 if (unlikely(status != 0)) { 445 if (unlikely(status != 0)) {
461 res->errno = -EBADHANDLE; 446 res->errno = -EBADHANDLE;
462 return 0; 447 return 0;
463 } 448 }
464 return decode_auth_flavors(&xdr, res); 449 return decode_auth_flavors(xdr, res);
465} 450}
466 451
467static struct rpc_procinfo mnt_procedures[] = { 452static struct rpc_procinfo mnt_procedures[] = {
468 [MOUNTPROC_MNT] = { 453 [MOUNTPROC_MNT] = {
469 .p_proc = MOUNTPROC_MNT, 454 .p_proc = MOUNTPROC_MNT,
470 .p_encode = (kxdrproc_t)mnt_enc_dirpath, 455 .p_encode = (kxdreproc_t)mnt_xdr_enc_dirpath,
471 .p_decode = (kxdrproc_t)mnt_dec_mountres, 456 .p_decode = (kxdrdproc_t)mnt_xdr_dec_mountres,
472 .p_arglen = MNT_enc_dirpath_sz, 457 .p_arglen = MNT_enc_dirpath_sz,
473 .p_replen = MNT_dec_mountres_sz, 458 .p_replen = MNT_dec_mountres_sz,
474 .p_statidx = MOUNTPROC_MNT, 459 .p_statidx = MOUNTPROC_MNT,
@@ -476,7 +461,7 @@ static struct rpc_procinfo mnt_procedures[] = {
476 }, 461 },
477 [MOUNTPROC_UMNT] = { 462 [MOUNTPROC_UMNT] = {
478 .p_proc = MOUNTPROC_UMNT, 463 .p_proc = MOUNTPROC_UMNT,
479 .p_encode = (kxdrproc_t)mnt_enc_dirpath, 464 .p_encode = (kxdreproc_t)mnt_xdr_enc_dirpath,
480 .p_arglen = MNT_enc_dirpath_sz, 465 .p_arglen = MNT_enc_dirpath_sz,
481 .p_statidx = MOUNTPROC_UMNT, 466 .p_statidx = MOUNTPROC_UMNT,
482 .p_name = "UMOUNT", 467 .p_name = "UMOUNT",
@@ -486,8 +471,8 @@ static struct rpc_procinfo mnt_procedures[] = {
486static struct rpc_procinfo mnt3_procedures[] = { 471static struct rpc_procinfo mnt3_procedures[] = {
487 [MOUNTPROC3_MNT] = { 472 [MOUNTPROC3_MNT] = {
488 .p_proc = MOUNTPROC3_MNT, 473 .p_proc = MOUNTPROC3_MNT,
489 .p_encode = (kxdrproc_t)mnt_enc_dirpath, 474 .p_encode = (kxdreproc_t)mnt_xdr_enc_dirpath,
490 .p_decode = (kxdrproc_t)mnt_dec_mountres3, 475 .p_decode = (kxdrdproc_t)mnt_xdr_dec_mountres3,
491 .p_arglen = MNT_enc_dirpath_sz, 476 .p_arglen = MNT_enc_dirpath_sz,
492 .p_replen = MNT_dec_mountres3_sz, 477 .p_replen = MNT_dec_mountres3_sz,
493 .p_statidx = MOUNTPROC3_MNT, 478 .p_statidx = MOUNTPROC3_MNT,
@@ -495,7 +480,7 @@ static struct rpc_procinfo mnt3_procedures[] = {
495 }, 480 },
496 [MOUNTPROC3_UMNT] = { 481 [MOUNTPROC3_UMNT] = {
497 .p_proc = MOUNTPROC3_UMNT, 482 .p_proc = MOUNTPROC3_UMNT,
498 .p_encode = (kxdrproc_t)mnt_enc_dirpath, 483 .p_encode = (kxdreproc_t)mnt_xdr_enc_dirpath,
499 .p_arglen = MNT_enc_dirpath_sz, 484 .p_arglen = MNT_enc_dirpath_sz,
500 .p_statidx = MOUNTPROC3_UMNT, 485 .p_statidx = MOUNTPROC3_UMNT,
501 .p_name = "UMOUNT", 486 .p_name = "UMOUNT",
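
The mount_clnt.c conversion shows the new rpc_procinfo calling convention in miniature: with kxdreproc_t/kxdrdproc_t the sunrpc layer initializes the xdr_stream before dispatching, so the xdr_init_encode()/xdr_init_decode() boilerplate disappears from every procedure. A sketch of the resulting shape of an encode/decode pair, reusing the helpers from this file (function names here are illustrative):

	/* The RPC client now owns stream setup; procedures only
	 * encode or decode. */
	static void example_enc_dirpath(struct rpc_rqst *req,
					struct xdr_stream *xdr,
					const char *dirpath)
	{
		encode_mntdirpath(xdr, dirpath);	/* no xdr_init_encode() */
	}

	static int example_dec_mountres(struct rpc_rqst *req,
					struct xdr_stream *xdr,
					struct mountres *res)
	{
		int status = decode_status(xdr, res);	/* no xdr_init_decode() */

		if (status != 0 || res->errno != 0)
			return status;
		return decode_fhandle(xdr, res);
	}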
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
index 5914a1911c95..792cb13a4304 100644
--- a/fs/nfs/nfs2xdr.c
+++ b/fs/nfs/nfs2xdr.c
@@ -61,584 +61,1008 @@
61#define NFS_readdirres_sz (1) 61#define NFS_readdirres_sz (1)
62#define NFS_statfsres_sz (1+NFS_info_sz) 62#define NFS_statfsres_sz (1+NFS_info_sz)
63 63
64
64/* 65/*
65 * Common NFS XDR functions as inlines 66 * While encoding arguments, set up the reply buffer in advance to
67 * receive reply data directly into the page cache.
66 */ 68 */
67static inline __be32 * 69static void prepare_reply_buffer(struct rpc_rqst *req, struct page **pages,
68xdr_encode_fhandle(__be32 *p, const struct nfs_fh *fhandle) 70 unsigned int base, unsigned int len,
71 unsigned int bufsize)
69{ 72{
70 memcpy(p, fhandle->data, NFS2_FHSIZE); 73 struct rpc_auth *auth = req->rq_cred->cr_auth;
71 return p + XDR_QUADLEN(NFS2_FHSIZE); 74 unsigned int replen;
75
76 replen = RPC_REPHDRSIZE + auth->au_rslack + bufsize;
77 xdr_inline_pages(&req->rq_rcv_buf, replen << 2, pages, base, len);
72} 78}
73 79
74static inline __be32 * 80/*
75xdr_decode_fhandle(__be32 *p, struct nfs_fh *fhandle) 81 * Handle decode buffer overflows out-of-line.
82 */
83static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
76{ 84{
77 /* NFSv2 handles have a fixed length */ 85 dprintk("NFS: %s prematurely hit the end of our receive buffer. "
78 fhandle->size = NFS2_FHSIZE; 86 "Remaining buffer length is %tu words.\n",
79 memcpy(fhandle->data, p, NFS2_FHSIZE); 87 func, xdr->end - xdr->p);
80 return p + XDR_QUADLEN(NFS2_FHSIZE); 88}
89
90
91/*
92 * Encode/decode NFSv2 basic data types
93 *
94 * Basic NFSv2 data types are defined in section 2.3 of RFC 1094:
95 * "NFS: Network File System Protocol Specification".
96 *
97 * Not all basic data types have their own encoding and decoding
98 * functions. For run-time efficiency, some data types are encoded
99 * or decoded inline.
100 */
101
102/*
103 * typedef opaque nfsdata<>;
104 */
105static int decode_nfsdata(struct xdr_stream *xdr, struct nfs_readres *result)
106{
107 u32 recvd, count;
108 size_t hdrlen;
109 __be32 *p;
110
111 p = xdr_inline_decode(xdr, 4);
112 if (unlikely(p == NULL))
113 goto out_overflow;
114 count = be32_to_cpup(p);
115 hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
116 recvd = xdr->buf->len - hdrlen;
117 if (unlikely(count > recvd))
118 goto out_cheating;
119out:
120 xdr_read_pages(xdr, count);
121 result->eof = 0; /* NFSv2 does not pass EOF flag on the wire. */
122 result->count = count;
123 return count;
124out_cheating:
125 dprintk("NFS: server cheating in read result: "
126 "count %u > recvd %u\n", count, recvd);
127 count = recvd;
128 goto out;
129out_overflow:
130 print_overflow_msg(__func__, xdr);
131 return -EIO;
132}
133
134/*
135 * enum stat {
136 * NFS_OK = 0,
137 * NFSERR_PERM = 1,
138 * NFSERR_NOENT = 2,
139 * NFSERR_IO = 5,
140 * NFSERR_NXIO = 6,
141 * NFSERR_ACCES = 13,
142 * NFSERR_EXIST = 17,
143 * NFSERR_NODEV = 19,
144 * NFSERR_NOTDIR = 20,
145 * NFSERR_ISDIR = 21,
146 * NFSERR_FBIG = 27,
147 * NFSERR_NOSPC = 28,
148 * NFSERR_ROFS = 30,
149 * NFSERR_NAMETOOLONG = 63,
150 * NFSERR_NOTEMPTY = 66,
151 * NFSERR_DQUOT = 69,
152 * NFSERR_STALE = 70,
153 * NFSERR_WFLUSH = 99
154 * };
155 */
156static int decode_stat(struct xdr_stream *xdr, enum nfs_stat *status)
157{
158 __be32 *p;
159
160 p = xdr_inline_decode(xdr, 4);
161 if (unlikely(p == NULL))
162 goto out_overflow;
163 *status = be32_to_cpup(p);
164 return 0;
165out_overflow:
166 print_overflow_msg(__func__, xdr);
167 return -EIO;
81} 168}
82 169
83static inline __be32* 170/*
84xdr_encode_time(__be32 *p, struct timespec *timep) 171 * 2.3.2. ftype
172 *
173 * enum ftype {
174 * NFNON = 0,
175 * NFREG = 1,
176 * NFDIR = 2,
177 * NFBLK = 3,
178 * NFCHR = 4,
179 * NFLNK = 5
180 * };
181 *
182 */
183static __be32 *xdr_decode_ftype(__be32 *p, u32 *type)
85{ 184{
86 *p++ = htonl(timep->tv_sec); 185 *type = be32_to_cpup(p++);
87 /* Convert nanoseconds into microseconds */ 186 if (unlikely(*type > NF2FIFO))
88 *p++ = htonl(timep->tv_nsec ? timep->tv_nsec / 1000 : 0); 187 *type = NFBAD;
89 return p; 188 return p;
90} 189}
91 190
92static inline __be32* 191/*
93xdr_encode_current_server_time(__be32 *p, struct timespec *timep) 192 * 2.3.3. fhandle
193 *
194 * typedef opaque fhandle[FHSIZE];
195 */
196static void encode_fhandle(struct xdr_stream *xdr, const struct nfs_fh *fh)
94{ 197{
95 /* 198 __be32 *p;
96 * Passing the invalid value useconds=1000000 is a 199
97 * Sun convention for "set to current server time". 200 BUG_ON(fh->size != NFS2_FHSIZE);
98 * It's needed to make permissions checks for the 201 p = xdr_reserve_space(xdr, NFS2_FHSIZE);
99 * "touch" program across v2 mounts to Solaris and 202 memcpy(p, fh->data, NFS2_FHSIZE);
100 * Irix boxes work correctly. See description of 203}
101 * sattr in section 6.1 of "NFS Illustrated" by 204
102 * Brent Callaghan, Addison-Wesley, ISBN 0-201-32750-5 205static int decode_fhandle(struct xdr_stream *xdr, struct nfs_fh *fh)
103 */ 206{
104 *p++ = htonl(timep->tv_sec); 207 __be32 *p;
105 *p++ = htonl(1000000); 208
209 p = xdr_inline_decode(xdr, NFS2_FHSIZE);
210 if (unlikely(p == NULL))
211 goto out_overflow;
212 fh->size = NFS2_FHSIZE;
213 memcpy(fh->data, p, NFS2_FHSIZE);
214 return 0;
215out_overflow:
216 print_overflow_msg(__func__, xdr);
217 return -EIO;
218}
219
220/*
221 * 2.3.4. timeval
222 *
223 * struct timeval {
224 * unsigned int seconds;
225 * unsigned int useconds;
226 * };
227 */
228static __be32 *xdr_encode_time(__be32 *p, const struct timespec *timep)
229{
230 *p++ = cpu_to_be32(timep->tv_sec);
231 if (timep->tv_nsec != 0)
232 *p++ = cpu_to_be32(timep->tv_nsec / NSEC_PER_USEC);
233 else
234 *p++ = cpu_to_be32(0);
106 return p; 235 return p;
107} 236}
108 237
109static inline __be32* 238/*
110xdr_decode_time(__be32 *p, struct timespec *timep) 239 * Passing the invalid value useconds=1000000 is a Sun convention for
240 * "set to current server time". It's needed to make permissions checks
241 * for the "touch" program across v2 mounts to Solaris and Irix servers
242 * work correctly. See description of sattr in section 6.1 of "NFS
243 * Illustrated" by Brent Callaghan, Addison-Wesley, ISBN 0-201-32750-5.
244 */
245static __be32 *xdr_encode_current_server_time(__be32 *p,
246 const struct timespec *timep)
111{ 247{
112 timep->tv_sec = ntohl(*p++); 248 *p++ = cpu_to_be32(timep->tv_sec);
113 /* Convert microseconds into nanoseconds */ 249 *p++ = cpu_to_be32(1000000);
114 timep->tv_nsec = ntohl(*p++) * 1000;
115 return p; 250 return p;
116} 251}
117 252
118static __be32 * 253static __be32 *xdr_decode_time(__be32 *p, struct timespec *timep)
119xdr_decode_fattr(__be32 *p, struct nfs_fattr *fattr) 254{
255 timep->tv_sec = be32_to_cpup(p++);
256 timep->tv_nsec = be32_to_cpup(p++) * NSEC_PER_USEC;
257 return p;
258}
259
260/*
261 * 2.3.5. fattr
262 *
263 * struct fattr {
264 * ftype type;
265 * unsigned int mode;
266 * unsigned int nlink;
267 * unsigned int uid;
268 * unsigned int gid;
269 * unsigned int size;
270 * unsigned int blocksize;
271 * unsigned int rdev;
272 * unsigned int blocks;
273 * unsigned int fsid;
274 * unsigned int fileid;
275 * timeval atime;
276 * timeval mtime;
277 * timeval ctime;
278 * };
279 *
280 */
281static int decode_fattr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
120{ 282{
121 u32 rdev, type; 283 u32 rdev, type;
122 type = ntohl(*p++); 284 __be32 *p;
123 fattr->mode = ntohl(*p++); 285
124 fattr->nlink = ntohl(*p++); 286 p = xdr_inline_decode(xdr, NFS_fattr_sz << 2);
125 fattr->uid = ntohl(*p++); 287 if (unlikely(p == NULL))
126 fattr->gid = ntohl(*p++); 288 goto out_overflow;
127 fattr->size = ntohl(*p++); 289
128 fattr->du.nfs2.blocksize = ntohl(*p++);
129 rdev = ntohl(*p++);
130 fattr->du.nfs2.blocks = ntohl(*p++);
131 fattr->fsid.major = ntohl(*p++);
132 fattr->fsid.minor = 0;
133 fattr->fileid = ntohl(*p++);
134 p = xdr_decode_time(p, &fattr->atime);
135 p = xdr_decode_time(p, &fattr->mtime);
136 p = xdr_decode_time(p, &fattr->ctime);
137 fattr->valid |= NFS_ATTR_FATTR_V2; 290 fattr->valid |= NFS_ATTR_FATTR_V2;
291
292 p = xdr_decode_ftype(p, &type);
293
294 fattr->mode = be32_to_cpup(p++);
295 fattr->nlink = be32_to_cpup(p++);
296 fattr->uid = be32_to_cpup(p++);
297 fattr->gid = be32_to_cpup(p++);
298 fattr->size = be32_to_cpup(p++);
299 fattr->du.nfs2.blocksize = be32_to_cpup(p++);
300
301 rdev = be32_to_cpup(p++);
138 fattr->rdev = new_decode_dev(rdev); 302 fattr->rdev = new_decode_dev(rdev);
139 if (type == NFCHR && rdev == NFS2_FIFO_DEV) { 303 if (type == (u32)NFCHR && rdev == (u32)NFS2_FIFO_DEV) {
140 fattr->mode = (fattr->mode & ~S_IFMT) | S_IFIFO; 304 fattr->mode = (fattr->mode & ~S_IFMT) | S_IFIFO;
141 fattr->rdev = 0; 305 fattr->rdev = 0;
142 } 306 }
307
308 fattr->du.nfs2.blocks = be32_to_cpup(p++);
309 fattr->fsid.major = be32_to_cpup(p++);
310 fattr->fsid.minor = 0;
311 fattr->fileid = be32_to_cpup(p++);
312
313 p = xdr_decode_time(p, &fattr->atime);
314 p = xdr_decode_time(p, &fattr->mtime);
315 xdr_decode_time(p, &fattr->ctime);
316 return 0;
317out_overflow:
318 print_overflow_msg(__func__, xdr);
319 return -EIO;
320}
321
322/*
323 * 2.3.6. sattr
324 *
325 * struct sattr {
326 * unsigned int mode;
327 * unsigned int uid;
328 * unsigned int gid;
329 * unsigned int size;
330 * timeval atime;
331 * timeval mtime;
332 * };
333 */
334
335#define NFS2_SATTR_NOT_SET (0xffffffff)
336
337static __be32 *xdr_time_not_set(__be32 *p)
338{
339 *p++ = cpu_to_be32(NFS2_SATTR_NOT_SET);
340 *p++ = cpu_to_be32(NFS2_SATTR_NOT_SET);
143 return p; 341 return p;
144} 342}
145 343
146static inline __be32 * 344static void encode_sattr(struct xdr_stream *xdr, const struct iattr *attr)
147xdr_encode_sattr(__be32 *p, struct iattr *attr)
148{ 345{
149 const __be32 not_set = __constant_htonl(0xFFFFFFFF); 346 __be32 *p;
150 347
151 *p++ = (attr->ia_valid & ATTR_MODE) ? htonl(attr->ia_mode) : not_set; 348 p = xdr_reserve_space(xdr, NFS_sattr_sz << 2);
152 *p++ = (attr->ia_valid & ATTR_UID) ? htonl(attr->ia_uid) : not_set;
153 *p++ = (attr->ia_valid & ATTR_GID) ? htonl(attr->ia_gid) : not_set;
154 *p++ = (attr->ia_valid & ATTR_SIZE) ? htonl(attr->ia_size) : not_set;
155 349
156 if (attr->ia_valid & ATTR_ATIME_SET) { 350 if (attr->ia_valid & ATTR_MODE)
351 *p++ = cpu_to_be32(attr->ia_mode);
352 else
353 *p++ = cpu_to_be32(NFS2_SATTR_NOT_SET);
354 if (attr->ia_valid & ATTR_UID)
355 *p++ = cpu_to_be32(attr->ia_uid);
356 else
357 *p++ = cpu_to_be32(NFS2_SATTR_NOT_SET);
358 if (attr->ia_valid & ATTR_GID)
359 *p++ = cpu_to_be32(attr->ia_gid);
360 else
361 *p++ = cpu_to_be32(NFS2_SATTR_NOT_SET);
362 if (attr->ia_valid & ATTR_SIZE)
363 *p++ = cpu_to_be32((u32)attr->ia_size);
364 else
365 *p++ = cpu_to_be32(NFS2_SATTR_NOT_SET);
366
367 if (attr->ia_valid & ATTR_ATIME_SET)
157 p = xdr_encode_time(p, &attr->ia_atime); 368 p = xdr_encode_time(p, &attr->ia_atime);
158 } else if (attr->ia_valid & ATTR_ATIME) { 369 else if (attr->ia_valid & ATTR_ATIME)
159 p = xdr_encode_current_server_time(p, &attr->ia_atime); 370 p = xdr_encode_current_server_time(p, &attr->ia_atime);
160 } else { 371 else
161 *p++ = not_set; 372 p = xdr_time_not_set(p);
162 *p++ = not_set; 373 if (attr->ia_valid & ATTR_MTIME_SET)
163 } 374 xdr_encode_time(p, &attr->ia_mtime);
164 375 else if (attr->ia_valid & ATTR_MTIME)
165 if (attr->ia_valid & ATTR_MTIME_SET) { 376 xdr_encode_current_server_time(p, &attr->ia_mtime);
166 p = xdr_encode_time(p, &attr->ia_mtime); 377 else
167 } else if (attr->ia_valid & ATTR_MTIME) { 378 xdr_time_not_set(p);
168 p = xdr_encode_current_server_time(p, &attr->ia_mtime);
169 } else {
170 *p++ = not_set;
171 *p++ = not_set;
172 }
173 return p;
174} 379}
175 380
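
Because NFSv2's sattr carries no bitmap of valid fields, the all-ones sentinel above (NFS2_SATTR_NOT_SET) is the wire-level way to say "leave this attribute alone". For example, a bare chmod ends up encoding every other field as 0xffffffff; a sketch of the caller-side iattr (values illustrative):

	/* A chmod-only setattr: encode_sattr() emits mode = 0644 and
	 * uid, gid, size, atime and mtime all as NFS2_SATTR_NOT_SET. */
	struct iattr attr = {
		.ia_valid = ATTR_MODE,
		.ia_mode  = 0644,
	};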
176/* 381/*
177 * NFS encode functions 382 * 2.3.7. filename
383 *
384 * typedef string filename<MAXNAMLEN>;
178 */ 385 */
386static void encode_filename(struct xdr_stream *xdr,
387 const char *name, u32 length)
388{
389 __be32 *p;
390
391 BUG_ON(length > NFS2_MAXNAMLEN);
392 p = xdr_reserve_space(xdr, 4 + length);
393 xdr_encode_opaque(p, name, length);
394}
395
396static int decode_filename_inline(struct xdr_stream *xdr,
397 const char **name, u32 *length)
398{
399 __be32 *p;
400 u32 count;
401
402 p = xdr_inline_decode(xdr, 4);
403 if (unlikely(p == NULL))
404 goto out_overflow;
405 count = be32_to_cpup(p);
406 if (count > NFS3_MAXNAMLEN)
407 goto out_nametoolong;
408 p = xdr_inline_decode(xdr, count);
409 if (unlikely(p == NULL))
410 goto out_overflow;
411 *name = (const char *)p;
412 *length = count;
413 return 0;
414out_nametoolong:
415 dprintk("NFS: returned filename too long: %u\n", count);
416 return -ENAMETOOLONG;
417out_overflow:
418 print_overflow_msg(__func__, xdr);
419 return -EIO;
420}
421
179/* 422/*
180 * Encode file handle argument 423 * 2.3.8. path
181 * GETATTR, READLINK, STATFS 424 *
425 * typedef string path<MAXPATHLEN>;
182 */ 426 */
183static int 427static void encode_path(struct xdr_stream *xdr, struct page **pages, u32 length)
184nfs_xdr_fhandle(struct rpc_rqst *req, __be32 *p, struct nfs_fh *fh)
185{ 428{
186 p = xdr_encode_fhandle(p, fh); 429 __be32 *p;
187 req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); 430
431 BUG_ON(length > NFS2_MAXPATHLEN);
432 p = xdr_reserve_space(xdr, 4);
433 *p = cpu_to_be32(length);
434 xdr_write_pages(xdr, pages, 0, length);
435}
436
437static int decode_path(struct xdr_stream *xdr)
438{
439 u32 length, recvd;
440 size_t hdrlen;
441 __be32 *p;
442
443 p = xdr_inline_decode(xdr, 4);
444 if (unlikely(p == NULL))
445 goto out_overflow;
446 length = be32_to_cpup(p);
447 if (unlikely(length >= xdr->buf->page_len || length > NFS_MAXPATHLEN))
448 goto out_size;
449 hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
450 recvd = xdr->buf->len - hdrlen;
451 if (unlikely(length > recvd))
452 goto out_cheating;
453
454 xdr_read_pages(xdr, length);
455 xdr_terminate_string(xdr->buf, length);
188 return 0; 456 return 0;
457out_size:
458 dprintk("NFS: returned pathname too long: %u\n", length);
459 return -ENAMETOOLONG;
460out_cheating:
461 dprintk("NFS: server cheating in pathname result: "
462 "length %u > received %u\n", length, recvd);
463 return -EIO;
464out_overflow:
465 print_overflow_msg(__func__, xdr);
466 return -EIO;
189} 467}
190 468
191/* 469/*
192 * Encode SETATTR arguments 470 * 2.3.9. attrstat
471 *
472 * union attrstat switch (stat status) {
473 * case NFS_OK:
474 * fattr attributes;
475 * default:
476 * void;
477 * };
193 */ 478 */
194static int 479static int decode_attrstat(struct xdr_stream *xdr, struct nfs_fattr *result)
195nfs_xdr_sattrargs(struct rpc_rqst *req, __be32 *p, struct nfs_sattrargs *args)
196{ 480{
197 p = xdr_encode_fhandle(p, args->fh); 481 enum nfs_stat status;
198 p = xdr_encode_sattr(p, args->sattr); 482 int error;
199 req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); 483
200 return 0; 484 error = decode_stat(xdr, &status);
485 if (unlikely(error))
486 goto out;
487 if (status != NFS_OK)
488 goto out_default;
489 error = decode_fattr(xdr, result);
490out:
491 return error;
492out_default:
493 return nfs_stat_to_errno(status);
201} 494}
202 495
203/* 496/*
204 * Encode directory ops argument 497 * 2.3.10. diropargs
205 * LOOKUP, RMDIR 498 *
499 * struct diropargs {
500 * fhandle dir;
501 * filename name;
502 * };
206 */ 503 */
207static int 504static void encode_diropargs(struct xdr_stream *xdr, const struct nfs_fh *fh,
208nfs_xdr_diropargs(struct rpc_rqst *req, __be32 *p, struct nfs_diropargs *args) 505 const char *name, u32 length)
209{ 506{
210 p = xdr_encode_fhandle(p, args->fh); 507 encode_fhandle(xdr, fh);
211 p = xdr_encode_array(p, args->name, args->len); 508 encode_filename(xdr, name, length);
212 req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
213 return 0;
214} 509}
215 510
216/* 511/*
217 * Encode REMOVE argument 512 * 2.3.11. diropres
513 *
514 * union diropres switch (stat status) {
515 * case NFS_OK:
516 * struct {
517 * fhandle file;
518 * fattr attributes;
519 * } diropok;
520 * default:
521 * void;
522 * };
218 */ 523 */
219static int 524static int decode_diropok(struct xdr_stream *xdr, struct nfs_diropok *result)
220nfs_xdr_removeargs(struct rpc_rqst *req, __be32 *p, const struct nfs_removeargs *args)
221{ 525{
222 p = xdr_encode_fhandle(p, args->fh); 526 int error;
223 p = xdr_encode_array(p, args->name.name, args->name.len); 527
224 req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); 528 error = decode_fhandle(xdr, result->fh);
225 return 0; 529 if (unlikely(error))
530 goto out;
531 error = decode_fattr(xdr, result->fattr);
532out:
533 return error;
534}
535
536static int decode_diropres(struct xdr_stream *xdr, struct nfs_diropok *result)
537{
538 enum nfs_stat status;
539 int error;
540
541 error = decode_stat(xdr, &status);
542 if (unlikely(error))
543 goto out;
544 if (status != NFS_OK)
545 goto out_default;
546 error = decode_diropok(xdr, result);
547out:
548 return error;
549out_default:
550 return nfs_stat_to_errno(status);
226} 551}
227 552
553
228/* 554/*
229 * Arguments to a READ call. Since we read data directly into the page 555 * NFSv2 XDR encode functions
230 * cache, we also set up the reply iovec here so that iov[1] points 556 *
231 * exactly to the page we want to fetch. 557 * NFSv2 argument types are defined in section 2.2 of RFC 1094:
558 * "NFS: Network File System Protocol Specification".
232 */ 559 */
233static int 560
234nfs_xdr_readargs(struct rpc_rqst *req, __be32 *p, struct nfs_readargs *args) 561static void nfs2_xdr_enc_fhandle(struct rpc_rqst *req,
562 struct xdr_stream *xdr,
563 const struct nfs_fh *fh)
235{ 564{
236 struct rpc_auth *auth = req->rq_cred->cr_auth; 565 encode_fhandle(xdr, fh);
237 unsigned int replen; 566}
238 u32 offset = (u32)args->offset; 567
568/*
569 * 2.2.3. sattrargs
570 *
571 * struct sattrargs {
572 * fhandle file;
573 * sattr attributes;
574 * };
575 */
576static void nfs2_xdr_enc_sattrargs(struct rpc_rqst *req,
577 struct xdr_stream *xdr,
578 const struct nfs_sattrargs *args)
579{
580 encode_fhandle(xdr, args->fh);
581 encode_sattr(xdr, args->sattr);
582}
583
584static void nfs2_xdr_enc_diropargs(struct rpc_rqst *req,
585 struct xdr_stream *xdr,
586 const struct nfs_diropargs *args)
587{
588 encode_diropargs(xdr, args->fh, args->name, args->len);
589}
590
591static void nfs2_xdr_enc_readlinkargs(struct rpc_rqst *req,
592 struct xdr_stream *xdr,
593 const struct nfs_readlinkargs *args)
594{
595 encode_fhandle(xdr, args->fh);
596 prepare_reply_buffer(req, args->pages, args->pgbase,
597 args->pglen, NFS_readlinkres_sz);
598}
599
600/*
601 * 2.2.7. readargs
602 *
603 * struct readargs {
604 * fhandle file;
605 * unsigned offset;
606 * unsigned count;
607 * unsigned totalcount;
608 * };
609 */
610static void encode_readargs(struct xdr_stream *xdr,
611 const struct nfs_readargs *args)
612{
613 u32 offset = args->offset;
239 u32 count = args->count; 614 u32 count = args->count;
615 __be32 *p;
240 616
241 p = xdr_encode_fhandle(p, args->fh); 617 encode_fhandle(xdr, args->fh);
242 *p++ = htonl(offset);
243 *p++ = htonl(count);
244 *p++ = htonl(count);
245 req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
246 618
247 /* Inline the page array */ 619 p = xdr_reserve_space(xdr, 4 + 4 + 4);
248 replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS_readres_sz) << 2; 620 *p++ = cpu_to_be32(offset);
249 xdr_inline_pages(&req->rq_rcv_buf, replen, 621 *p++ = cpu_to_be32(count);
250 args->pages, args->pgbase, count); 622 *p = cpu_to_be32(count);
623}
624
625static void nfs2_xdr_enc_readargs(struct rpc_rqst *req,
626 struct xdr_stream *xdr,
627 const struct nfs_readargs *args)
628{
629 encode_readargs(xdr, args);
630 prepare_reply_buffer(req, args->pages, args->pgbase,
631 args->count, NFS_readres_sz);
251 req->rq_rcv_buf.flags |= XDRBUF_READ; 632 req->rq_rcv_buf.flags |= XDRBUF_READ;
252 return 0;
253} 633}
254 634
255/* 635/*
256 * Decode READ reply 636 * 2.2.9. writeargs
637 *
638 * struct writeargs {
639 * fhandle file;
640 * unsigned beginoffset;
641 * unsigned offset;
642 * unsigned totalcount;
643 * nfsdata data;
644 * };
257 */ 645 */
258static int 646static void encode_writeargs(struct xdr_stream *xdr,
259nfs_xdr_readres(struct rpc_rqst *req, __be32 *p, struct nfs_readres *res) 647 const struct nfs_writeargs *args)
260{ 648{
261 struct kvec *iov = req->rq_rcv_buf.head; 649 u32 offset = args->offset;
262 size_t hdrlen; 650 u32 count = args->count;
263 u32 count, recvd; 651 __be32 *p;
264 int status;
265
266 if ((status = ntohl(*p++)))
267 return nfs_stat_to_errno(status);
268 p = xdr_decode_fattr(p, res->fattr);
269
270 count = ntohl(*p++);
271 res->eof = 0;
272 hdrlen = (u8 *) p - (u8 *) iov->iov_base;
273 if (iov->iov_len < hdrlen) {
274 dprintk("NFS: READ reply header overflowed:"
275 "length %Zu > %Zu\n", hdrlen, iov->iov_len);
276 return -errno_NFSERR_IO;
277 } else if (iov->iov_len != hdrlen) {
278 dprintk("NFS: READ header is short. iovec will be shifted.\n");
279 xdr_shift_buf(&req->rq_rcv_buf, iov->iov_len - hdrlen);
280 }
281 652
282 recvd = req->rq_rcv_buf.len - hdrlen; 653 encode_fhandle(xdr, args->fh);
283 if (count > recvd) {
284 dprintk("NFS: server cheating in read reply: "
285 "count %u > recvd %u\n", count, recvd);
286 count = recvd;
287 }
288 654
289 dprintk("RPC: readres OK count %u\n", count); 655 p = xdr_reserve_space(xdr, 4 + 4 + 4 + 4);
290 if (count < res->count) 656 *p++ = cpu_to_be32(offset);
291 res->count = count; 657 *p++ = cpu_to_be32(offset);
658 *p++ = cpu_to_be32(count);
292 659
293 return count; 660 /* nfsdata */
661 *p = cpu_to_be32(count);
662 xdr_write_pages(xdr, args->pages, args->pgbase, count);
294} 663}
295 664
665static void nfs2_xdr_enc_writeargs(struct rpc_rqst *req,
666 struct xdr_stream *xdr,
667 const struct nfs_writeargs *args)
668{
669 encode_writeargs(xdr, args);
670 xdr->buf->flags |= XDRBUF_WRITE;
671}
296 672
297/* 673/*
298 * Write arguments. Splice the buffer to be written into the iovec. 674 * 2.2.10. createargs
675 *
676 * struct createargs {
677 * diropargs where;
678 * sattr attributes;
679 * };
299 */ 680 */
300static int 681static void nfs2_xdr_enc_createargs(struct rpc_rqst *req,
301nfs_xdr_writeargs(struct rpc_rqst *req, __be32 *p, struct nfs_writeargs *args) 682 struct xdr_stream *xdr,
683 const struct nfs_createargs *args)
302{ 684{
303 struct xdr_buf *sndbuf = &req->rq_snd_buf; 685 encode_diropargs(xdr, args->fh, args->name, args->len);
304 u32 offset = (u32)args->offset; 686 encode_sattr(xdr, args->sattr);
305 u32 count = args->count; 687}
306
307 p = xdr_encode_fhandle(p, args->fh);
308 *p++ = htonl(offset);
309 *p++ = htonl(offset);
310 *p++ = htonl(count);
311 *p++ = htonl(count);
312 sndbuf->len = xdr_adjust_iovec(sndbuf->head, p);
313 688
314 /* Copy the page array */ 689static void nfs2_xdr_enc_removeargs(struct rpc_rqst *req,
315 xdr_encode_pages(sndbuf, args->pages, args->pgbase, count); 690 struct xdr_stream *xdr,
316 sndbuf->flags |= XDRBUF_WRITE; 691 const struct nfs_removeargs *args)
317 return 0; 692{
693 encode_diropargs(xdr, args->fh, args->name.name, args->name.len);
318} 694}
319 695
320/* 696/*
321 * Encode create arguments 697 * 2.2.12. renameargs
322 * CREATE, MKDIR 698 *
699 * struct renameargs {
700 * diropargs from;
701 * diropargs to;
702 * };
323 */ 703 */
324static int 704static void nfs2_xdr_enc_renameargs(struct rpc_rqst *req,
325nfs_xdr_createargs(struct rpc_rqst *req, __be32 *p, struct nfs_createargs *args) 705 struct xdr_stream *xdr,
706 const struct nfs_renameargs *args)
326{ 707{
327 p = xdr_encode_fhandle(p, args->fh); 708 const struct qstr *old = args->old_name;
328 p = xdr_encode_array(p, args->name, args->len); 709 const struct qstr *new = args->new_name;
329 p = xdr_encode_sattr(p, args->sattr); 710
330 req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); 711 encode_diropargs(xdr, args->old_dir, old->name, old->len);
331 return 0; 712 encode_diropargs(xdr, args->new_dir, new->name, new->len);
332} 713}
333 714
334/* 715/*
335 * Encode RENAME arguments 716 * 2.2.13. linkargs
717 *
718 * struct linkargs {
719 * fhandle from;
720 * diropargs to;
721 * };
336 */ 722 */
337static int 723static void nfs2_xdr_enc_linkargs(struct rpc_rqst *req,
338nfs_xdr_renameargs(struct rpc_rqst *req, __be32 *p, struct nfs_renameargs *args) 724 struct xdr_stream *xdr,
725 const struct nfs_linkargs *args)
339{ 726{
340 p = xdr_encode_fhandle(p, args->old_dir); 727 encode_fhandle(xdr, args->fromfh);
341 p = xdr_encode_array(p, args->old_name->name, args->old_name->len); 728 encode_diropargs(xdr, args->tofh, args->toname, args->tolen);
342 p = xdr_encode_fhandle(p, args->new_dir);
343 p = xdr_encode_array(p, args->new_name->name, args->new_name->len);
344 req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
345 return 0;
346} 729}
347 730
348/* 731/*
349 * Encode LINK arguments 732 * 2.2.14. symlinkargs
733 *
734 * struct symlinkargs {
735 * diropargs from;
736 * path to;
737 * sattr attributes;
738 * };
350 */ 739 */
351static int 740static void nfs2_xdr_enc_symlinkargs(struct rpc_rqst *req,
352nfs_xdr_linkargs(struct rpc_rqst *req, __be32 *p, struct nfs_linkargs *args) 741 struct xdr_stream *xdr,
742 const struct nfs_symlinkargs *args)
353{ 743{
354 p = xdr_encode_fhandle(p, args->fromfh); 744 encode_diropargs(xdr, args->fromfh, args->fromname, args->fromlen);
355 p = xdr_encode_fhandle(p, args->tofh); 745 encode_path(xdr, args->pages, args->pathlen);
356 p = xdr_encode_array(p, args->toname, args->tolen); 746 encode_sattr(xdr, args->sattr);
357 req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
358 return 0;
359} 747}
360 748
361/* 749/*
362 * Encode SYMLINK arguments 750 * 2.2.17. readdirargs
751 *
752 * struct readdirargs {
753 * fhandle dir;
754 * nfscookie cookie;
755 * unsigned count;
756 * };
363 */ 757 */
364static int 758static void encode_readdirargs(struct xdr_stream *xdr,
365nfs_xdr_symlinkargs(struct rpc_rqst *req, __be32 *p, struct nfs_symlinkargs *args) 759 const struct nfs_readdirargs *args)
366{ 760{
367 struct xdr_buf *sndbuf = &req->rq_snd_buf; 761 __be32 *p;
368 size_t pad;
369 762
370 p = xdr_encode_fhandle(p, args->fromfh); 763 encode_fhandle(xdr, args->fh);
371 p = xdr_encode_array(p, args->fromname, args->fromlen);
372 *p++ = htonl(args->pathlen);
373 sndbuf->len = xdr_adjust_iovec(sndbuf->head, p);
374 764
375 xdr_encode_pages(sndbuf, args->pages, 0, args->pathlen); 765 p = xdr_reserve_space(xdr, 4 + 4);
766 *p++ = cpu_to_be32(args->cookie);
767 *p = cpu_to_be32(args->count);
768}
376 769
377 /* 770static void nfs2_xdr_enc_readdirargs(struct rpc_rqst *req,
378 * xdr_encode_pages may have added a few bytes to ensure the 771 struct xdr_stream *xdr,
379 * pathname ends on a 4-byte boundary. Start encoding the 772 const struct nfs_readdirargs *args)
380 * attributes after the pad bytes. 773{
381 */ 774 encode_readdirargs(xdr, args);
382 pad = sndbuf->tail->iov_len; 775 prepare_reply_buffer(req, args->pages, 0,
383 if (pad > 0) 776 args->count, NFS_readdirres_sz);
384 p++;
385 p = xdr_encode_sattr(p, args->sattr);
386 sndbuf->len += xdr_adjust_iovec(sndbuf->tail, p) - pad;
387 return 0;
388} 777}
389 778
390/* 779/*
391 * Encode arguments to readdir call 780 * NFSv2 XDR decode functions
781 *
782 * NFSv2 result types are defined in section 2.2 of RFC 1094:
783 * "NFS: Network File System Protocol Specification".
392 */ 784 */
393static int 785
394nfs_xdr_readdirargs(struct rpc_rqst *req, __be32 *p, struct nfs_readdirargs *args) 786static int nfs2_xdr_dec_stat(struct rpc_rqst *req, struct xdr_stream *xdr,
787 void *__unused)
395{ 788{
396 struct rpc_auth *auth = req->rq_cred->cr_auth; 789 enum nfs_stat status;
397 unsigned int replen; 790 int error;
398 u32 count = args->count; 791
792 error = decode_stat(xdr, &status);
793 if (unlikely(error))
794 goto out;
795 if (status != NFS_OK)
796 goto out_default;
797out:
798 return error;
799out_default:
800 return nfs_stat_to_errno(status);
801}
399 802
400 p = xdr_encode_fhandle(p, args->fh); 803static int nfs2_xdr_dec_attrstat(struct rpc_rqst *req, struct xdr_stream *xdr,
401 *p++ = htonl(args->cookie); 804 struct nfs_fattr *result)
402 *p++ = htonl(count); /* see above */ 805{
403 req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); 806 return decode_attrstat(xdr, result);
807}
404 808
405 /* Inline the page array */ 809static int nfs2_xdr_dec_diropres(struct rpc_rqst *req, struct xdr_stream *xdr,
406 replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS_readdirres_sz) << 2; 810 struct nfs_diropok *result)
407 xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages, 0, count); 811{
408 return 0; 812 return decode_diropres(xdr, result);
409} 813}
410 814
411/* 815/*
412 * Decode the result of a readdir call. 816 * 2.2.6. readlinkres
413 * We're not really decoding anymore, we just leave the buffer untouched 817 *
414 * and only check that it is syntactically correct. 818 * union readlinkres switch (stat status) {
415 * The real decoding happens in nfs_decode_entry below, called directly 819 * case NFS_OK:
416 * from nfs_readdir for each entry. 820 * path data;
821 * default:
822 * void;
823 * };
417 */ 824 */
418static int 825static int nfs2_xdr_dec_readlinkres(struct rpc_rqst *req,
419nfs_xdr_readdirres(struct rpc_rqst *req, __be32 *p, void *dummy) 826 struct xdr_stream *xdr, void *__unused)
420{ 827{
421 struct xdr_buf *rcvbuf = &req->rq_rcv_buf; 828 enum nfs_stat status;
422 struct kvec *iov = rcvbuf->head; 829 int error;
423 struct page **page; 830
424 size_t hdrlen; 831 error = decode_stat(xdr, &status);
425 unsigned int pglen, recvd; 832 if (unlikely(error))
426 int status; 833 goto out;
427 834 if (status != NFS_OK)
428 if ((status = ntohl(*p++))) 835 goto out_default;
429 return nfs_stat_to_errno(status); 836 error = decode_path(xdr);
430 837out:
431 hdrlen = (u8 *) p - (u8 *) iov->iov_base; 838 return error;
432 if (iov->iov_len < hdrlen) { 839out_default:
433 dprintk("NFS: READDIR reply header overflowed:" 840 return nfs_stat_to_errno(status);
434 "length %Zu > %Zu\n", hdrlen, iov->iov_len); 841}
435 return -errno_NFSERR_IO;
436 } else if (iov->iov_len != hdrlen) {
437 dprintk("NFS: READDIR header is short. iovec will be shifted.\n");
438 xdr_shift_buf(rcvbuf, iov->iov_len - hdrlen);
439 }
440 842
441 pglen = rcvbuf->page_len; 843/*
442 recvd = rcvbuf->len - hdrlen; 844 * 2.2.7. readres
443 if (pglen > recvd) 845 *
444 pglen = recvd; 846 * union readres switch (stat status) {
445 page = rcvbuf->pages; 847 * case NFS_OK:
446 return pglen; 848 * fattr attributes;
849 * nfsdata data;
850 * default:
851 * void;
852 * };
853 */
854static int nfs2_xdr_dec_readres(struct rpc_rqst *req, struct xdr_stream *xdr,
855 struct nfs_readres *result)
856{
857 enum nfs_stat status;
858 int error;
859
860 error = decode_stat(xdr, &status);
861 if (unlikely(error))
862 goto out;
863 if (status != NFS_OK)
864 goto out_default;
865 error = decode_fattr(xdr, result->fattr);
866 if (unlikely(error))
867 goto out;
868 error = decode_nfsdata(xdr, result);
869out:
870 return error;
871out_default:
872 return nfs_stat_to_errno(status);
447} 873}
448 874
449static void print_overflow_msg(const char *func, const struct xdr_stream *xdr) 875static int nfs2_xdr_dec_writeres(struct rpc_rqst *req, struct xdr_stream *xdr,
876 struct nfs_writeres *result)
450{ 877{
451 dprintk("nfs: %s: prematurely hit end of receive buffer. " 878 /* All NFSv2 writes are "file sync" writes */
452 "Remaining buffer length is %tu words.\n", 879 result->verf->committed = NFS_FILE_SYNC;
453 func, xdr->end - xdr->p); 880 return decode_attrstat(xdr, result->fattr);
454} 881}
455 882
456__be32 * 883/**
457nfs_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, struct nfs_server *server, int plus) 884 * nfs2_decode_dirent - Decode a single NFSv2 directory entry stored in
885 * the local page cache.
886 * @xdr: XDR stream where entry resides
887 * @entry: buffer to fill in with entry data
888 * @plus: boolean indicating whether this should be a readdirplus entry
889 *
890 * Returns zero if successful, otherwise a negative errno value is
891 * returned.
892 *
893 * This function is not invoked during READDIR reply decoding, but
894 * rather whenever an application invokes the getdents(2) system call
895 * on a directory already in our cache.
896 *
897 * 2.2.17. entry
898 *
899 * struct entry {
900 * unsigned fileid;
901 * filename name;
902 * nfscookie cookie;
903 * entry *nextentry;
904 * };
905 */
906int nfs2_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
907 int plus)
458{ 908{
459 __be32 *p; 909 __be32 *p;
910 int error;
911
460 p = xdr_inline_decode(xdr, 4); 912 p = xdr_inline_decode(xdr, 4);
461 if (unlikely(!p)) 913 if (unlikely(p == NULL))
462 goto out_overflow; 914 goto out_overflow;
463 if (!ntohl(*p++)) { 915 if (*p++ == xdr_zero) {
464 p = xdr_inline_decode(xdr, 4); 916 p = xdr_inline_decode(xdr, 4);
465 if (unlikely(!p)) 917 if (unlikely(p == NULL))
466 goto out_overflow; 918 goto out_overflow;
467 if (!ntohl(*p++)) 919 if (*p++ == xdr_zero)
468 return ERR_PTR(-EAGAIN); 920 return -EAGAIN;
469 entry->eof = 1; 921 entry->eof = 1;
470 return ERR_PTR(-EBADCOOKIE); 922 return -EBADCOOKIE;
471 } 923 }
472 924
473 p = xdr_inline_decode(xdr, 8); 925 p = xdr_inline_decode(xdr, 4);
474 if (unlikely(!p)) 926 if (unlikely(p == NULL))
475 goto out_overflow; 927 goto out_overflow;
928 entry->ino = be32_to_cpup(p);
476 929
477 entry->ino = ntohl(*p++); 930 error = decode_filename_inline(xdr, &entry->name, &entry->len);
478 entry->len = ntohl(*p++); 931 if (unlikely(error))
932 return error;
479 933
480 p = xdr_inline_decode(xdr, entry->len + 4); 934 /*
481 if (unlikely(!p)) 935 * The type (size and byte order) of nfscookie isn't defined in
936 * RFC 1094. This implementation assumes that it's an XDR uint32.
937 */
938 entry->prev_cookie = entry->cookie;
939 p = xdr_inline_decode(xdr, 4);
940 if (unlikely(p == NULL))
482 goto out_overflow; 941 goto out_overflow;
483 entry->name = (const char *) p; 942 entry->cookie = be32_to_cpup(p);
484 p += XDR_QUADLEN(entry->len);
485 entry->prev_cookie = entry->cookie;
486 entry->cookie = ntohl(*p++);
487 943
488 entry->d_type = DT_UNKNOWN; 944 entry->d_type = DT_UNKNOWN;
489 945
490 p = xdr_inline_peek(xdr, 8); 946 return 0;
491 if (p != NULL)
492 entry->eof = !p[0] && p[1];
493 else
494 entry->eof = 0;
495
496 return p;
497 947
498out_overflow: 948out_overflow:
499 print_overflow_msg(__func__, xdr); 949 print_overflow_msg(__func__, xdr);
500 return ERR_PTR(-EAGAIN); 950 return -EAGAIN;
501}
502
503/*
504 * NFS XDR decode functions
505 */
506/*
507 * Decode simple status reply
508 */
509static int
510nfs_xdr_stat(struct rpc_rqst *req, __be32 *p, void *dummy)
511{
512 int status;
513
514 if ((status = ntohl(*p++)) != 0)
515 status = nfs_stat_to_errno(status);
516 return status;
517} 951}
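Each NFSv2 entry record opens with a "value follows" boolean; when that word is zero, the next word is the eof boolean, which decides between -EAGAIN (batch exhausted, refill the page cache) and -EBADCOOKIE (true end of directory). A sketch of just that discrimination, assuming the two words have already been fetched and byte-swapped:

#include <stdint.h>

/* Returns 1 if an entry follows; otherwise *eof tells end-of-batch (0)
 * apart from end-of-directory (1), mirroring the -EAGAIN/-EBADCOOKIE
 * split in nfs2_decode_dirent(). */
static int entry_follows(uint32_t follows_word, uint32_t eof_word, int *eof)
{
	if (follows_word != 0)
		return 1;
	*eof = (eof_word != 0);
	return 0;
}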
518 952
519/* 953/*
520 * Decode attrstat reply 954 * 2.2.17. readdirres
521 * GETATTR, SETATTR, WRITE 955 *
522 */ 956 * union readdirres switch (stat status) {
523static int 957 * case NFS_OK:
524nfs_xdr_attrstat(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr) 958 * struct {
525{ 959 * entry *entries;
526 int status; 960 * bool eof;
527 961 * } readdirok;
528 if ((status = ntohl(*p++))) 962 * default:
529 return nfs_stat_to_errno(status); 963 * void;
530 xdr_decode_fattr(p, fattr); 964 * };
531 return 0; 965 *
532} 966 * Read the directory contents into the page cache, but don't
533 967 * touch them. The actual decoding is done by nfs2_decode_dirent()
534/* 968 * during subsequent nfs_readdir() calls.
535 * Decode diropres reply
536 * LOOKUP, CREATE, MKDIR
537 */ 969 */
538static int 970static int decode_readdirok(struct xdr_stream *xdr)
539nfs_xdr_diropres(struct rpc_rqst *req, __be32 *p, struct nfs_diropok *res)
540{ 971{
541 int status; 972 u32 recvd, pglen;
973 size_t hdrlen;
542 974
543 if ((status = ntohl(*p++))) 975 pglen = xdr->buf->page_len;
544 return nfs_stat_to_errno(status); 976 hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
545 p = xdr_decode_fhandle(p, res->fh); 977 recvd = xdr->buf->len - hdrlen;
546 xdr_decode_fattr(p, res->fattr); 978 if (unlikely(pglen > recvd))
547 return 0; 979 goto out_cheating;
980out:
981 xdr_read_pages(xdr, pglen);
982 return pglen;
983out_cheating:
984 dprintk("NFS: server cheating in readdir result: "
985 "pglen %u > recvd %u\n", pglen, recvd);
986 pglen = recvd;
987 goto out;
548} 988}
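The key defensive step in decode_readdirok() is to never trust the server's claimed page length: anything beyond what was actually received is clipped. A sketch of the clamp with the buffer-length fields reduced to plain parameters (the names here are simplifications, not the kernel structure members):

#include <stddef.h>

/* recvd is what actually arrived after the reply header; the server's
 * pglen claim is clipped to it, as in the out_cheating path above. */
static unsigned int clamp_pglen(unsigned int pglen, unsigned int buf_len,
				size_t hdrlen)
{
	unsigned int recvd = buf_len - hdrlen;

	return pglen > recvd ? recvd : pglen;
}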
549 989
550/* 990static int nfs2_xdr_dec_readdirres(struct rpc_rqst *req,
551 * Encode READLINK args 991 struct xdr_stream *xdr, void *__unused)
552 */
553static int
554nfs_xdr_readlinkargs(struct rpc_rqst *req, __be32 *p, struct nfs_readlinkargs *args)
555{ 992{
556 struct rpc_auth *auth = req->rq_cred->cr_auth; 993 enum nfs_stat status;
557 unsigned int replen; 994 int error;
558 995
559 p = xdr_encode_fhandle(p, args->fh); 996 error = decode_stat(xdr, &status);
560 req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); 997 if (unlikely(error))
561 998 goto out;
562 /* Inline the page array */ 999 if (status != NFS_OK)
563 replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS_readlinkres_sz) << 2; 1000 goto out_default;
564 xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages, args->pgbase, args->pglen); 1001 error = decode_readdirok(xdr);
565 return 0; 1002out:
1003 return error;
1004out_default:
1005 return nfs_stat_to_errno(status);
566} 1006}
567 1007
568/* 1008/*
569 * Decode READLINK reply 1009 * 2.2.18. statfsres
1010 *
1011 * union statfsres switch (stat status) {
1012 * case NFS_OK:
1013 * struct {
1014 * unsigned tsize;
1015 * unsigned bsize;
1016 * unsigned blocks;
1017 * unsigned bfree;
1018 * unsigned bavail;
1019 * } info;
1020 * default:
1021 * void;
1022 * };
570 */ 1023 */
571static int 1024static int decode_info(struct xdr_stream *xdr, struct nfs2_fsstat *result)
572nfs_xdr_readlinkres(struct rpc_rqst *req, __be32 *p, void *dummy)
573{ 1025{
574 struct xdr_buf *rcvbuf = &req->rq_rcv_buf; 1026 __be32 *p;
575 struct kvec *iov = rcvbuf->head;
576 size_t hdrlen;
577 u32 len, recvd;
578 int status;
579
580 if ((status = ntohl(*p++)))
581 return nfs_stat_to_errno(status);
582 /* Convert length of symlink */
583 len = ntohl(*p++);
584 if (len >= rcvbuf->page_len) {
585 dprintk("nfs: server returned giant symlink!\n");
586 return -ENAMETOOLONG;
587 }
588 hdrlen = (u8 *) p - (u8 *) iov->iov_base;
589 if (iov->iov_len < hdrlen) {
590 dprintk("NFS: READLINK reply header overflowed:"
591 "length %Zu > %Zu\n", hdrlen, iov->iov_len);
592 return -errno_NFSERR_IO;
593 } else if (iov->iov_len != hdrlen) {
594 dprintk("NFS: READLINK header is short. iovec will be shifted.\n");
595 xdr_shift_buf(rcvbuf, iov->iov_len - hdrlen);
596 }
597 recvd = req->rq_rcv_buf.len - hdrlen;
598 if (recvd < len) {
599 dprintk("NFS: server cheating in readlink reply: "
600 "count %u > recvd %u\n", len, recvd);
601 return -EIO;
602 }
603 1027
604 xdr_terminate_string(rcvbuf, len); 1028 p = xdr_inline_decode(xdr, NFS_info_sz << 2);
1029 if (unlikely(p == NULL))
1030 goto out_overflow;
1031 result->tsize = be32_to_cpup(p++);
1032 result->bsize = be32_to_cpup(p++);
1033 result->blocks = be32_to_cpup(p++);
1034 result->bfree = be32_to_cpup(p++);
1035 result->bavail = be32_to_cpup(p);
605 return 0; 1036 return 0;
1037out_overflow:
1038 print_overflow_msg(__func__, xdr);
1039 return -EIO;
606} 1040}
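decode_info() performs a single bounds check covering all five counters (NFS_info_sz words) and then walks the pointer. In portable C the same idea looks like the sketch below; be32_dec() stands in for be32_to_cpup():

#include <stdint.h>
#include <stddef.h>

static uint32_t be32_dec(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
}

struct fsstat { uint32_t tsize, bsize, blocks, bfree, bavail; };

/* one length check up front, then five fixed-offset reads */
static int dec_info(const uint8_t *p, size_t len, struct fsstat *st)
{
	if (len < 5 * 4)
		return -5;		/* -EIO: short reply */
	st->tsize  = be32_dec(p);
	st->bsize  = be32_dec(p + 4);
	st->blocks = be32_dec(p + 8);
	st->bfree  = be32_dec(p + 12);
	st->bavail = be32_dec(p + 16);
	return 0;
}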
607 1041
608/* 1042static int nfs2_xdr_dec_statfsres(struct rpc_rqst *req, struct xdr_stream *xdr,
609 * Decode WRITE reply 1043 struct nfs2_fsstat *result)
610 */
611static int
612nfs_xdr_writeres(struct rpc_rqst *req, __be32 *p, struct nfs_writeres *res)
613{ 1044{
614 res->verf->committed = NFS_FILE_SYNC; 1045 enum nfs_stat status;
615 return nfs_xdr_attrstat(req, p, res->fattr); 1046 int error;
1047
1048 error = decode_stat(xdr, &status);
1049 if (unlikely(error))
1050 goto out;
1051 if (status != NFS_OK)
1052 goto out_default;
1053 error = decode_info(xdr, result);
1054out:
1055 return error;
1056out_default:
1057 return nfs_stat_to_errno(status);
616} 1058}
617 1059
618/*
619 * Decode STATFS reply
620 */
621static int
622nfs_xdr_statfsres(struct rpc_rqst *req, __be32 *p, struct nfs2_fsstat *res)
623{
624 int status;
625
626 if ((status = ntohl(*p++)))
627 return nfs_stat_to_errno(status);
628
629 res->tsize = ntohl(*p++);
630 res->bsize = ntohl(*p++);
631 res->blocks = ntohl(*p++);
632 res->bfree = ntohl(*p++);
633 res->bavail = ntohl(*p++);
634 return 0;
635}
636 1060
637/* 1061/*
638 * We need to translate between nfs status return values and 1062 * We need to translate between nfs status return values and
639 * the local errno values which may not be the same. 1063 * the local errno values which may not be the same.
640 */ 1064 */
641static struct { 1065static const struct {
642 int stat; 1066 int stat;
643 int errno; 1067 int errno;
644} nfs_errtbl[] = { 1068} nfs_errtbl[] = {
@@ -678,28 +1102,30 @@ static struct {
678 { -1, -EIO } 1102 { -1, -EIO }
679}; 1103};
680 1104
681/* 1105/**
682 * Convert an NFS error code to a local one. 1106 * nfs_stat_to_errno - convert an NFS status code to a local errno
683 * This one is used jointly by NFSv2 and NFSv3. 1107 * @status: NFS status code to convert
1108 *
1109 * Returns a local errno value, or -EIO if the NFS status code is
1110 * not recognized. This function is used jointly by NFSv2 and NFSv3.
684 */ 1111 */
685int 1112int nfs_stat_to_errno(enum nfs_stat status)
686nfs_stat_to_errno(int stat)
687{ 1113{
688 int i; 1114 int i;
689 1115
690 for (i = 0; nfs_errtbl[i].stat != -1; i++) { 1116 for (i = 0; nfs_errtbl[i].stat != -1; i++) {
691 if (nfs_errtbl[i].stat == stat) 1117 if (nfs_errtbl[i].stat == (int)status)
692 return nfs_errtbl[i].errno; 1118 return nfs_errtbl[i].errno;
693 } 1119 }
694 dprintk("nfs_stat_to_errno: bad nfs status return value: %d\n", stat); 1120 dprintk("NFS: Unrecognized nfs status value: %u\n", status);
695 return nfs_errtbl[i].errno; 1121 return nfs_errtbl[i].errno;
696} 1122}
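The table walk relies on the { -1, -EIO } sentinel row: when the loop falls off the end, i still indexes the sentinel, so the final return yields -EIO without an extra bounds check. A self-contained sketch with made-up table contents (errno_val avoids colliding with the errno macro in user space):

static const struct { int stat; int errno_val; } errtbl[] = {
	{  0,  0 },	/* NFS_OK */
	{  1, -1 },	/* NFSERR_PERM -> -EPERM */
	{ -1, -5 },	/* sentinel row: anything unknown -> -EIO */
};

static int stat_to_errno(int stat)
{
	int i;

	for (i = 0; errtbl[i].stat != -1; i++)
		if (errtbl[i].stat == stat)
			return errtbl[i].errno_val;
	return errtbl[i].errno_val;	/* i now points at the sentinel */
}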
697 1123
698#define PROC(proc, argtype, restype, timer) \ 1124#define PROC(proc, argtype, restype, timer) \
699[NFSPROC_##proc] = { \ 1125[NFSPROC_##proc] = { \
700 .p_proc = NFSPROC_##proc, \ 1126 .p_proc = NFSPROC_##proc, \
701 .p_encode = (kxdrproc_t) nfs_xdr_##argtype, \ 1127 .p_encode = (kxdreproc_t)nfs2_xdr_enc_##argtype, \
702 .p_decode = (kxdrproc_t) nfs_xdr_##restype, \ 1128 .p_decode = (kxdrdproc_t)nfs2_xdr_dec_##restype, \
703 .p_arglen = NFS_##argtype##_sz, \ 1129 .p_arglen = NFS_##argtype##_sz, \
704 .p_replen = NFS_##restype##_sz, \ 1130 .p_replen = NFS_##restype##_sz, \
705 .p_timer = timer, \ 1131 .p_timer = timer, \
@@ -707,21 +1133,21 @@ nfs_stat_to_errno(int stat)
707 .p_name = #proc, \ 1133 .p_name = #proc, \
708 } 1134 }
709struct rpc_procinfo nfs_procedures[] = { 1135struct rpc_procinfo nfs_procedures[] = {
710 PROC(GETATTR, fhandle, attrstat, 1), 1136 PROC(GETATTR, fhandle, attrstat, 1),
711 PROC(SETATTR, sattrargs, attrstat, 0), 1137 PROC(SETATTR, sattrargs, attrstat, 0),
712 PROC(LOOKUP, diropargs, diropres, 2), 1138 PROC(LOOKUP, diropargs, diropres, 2),
713 PROC(READLINK, readlinkargs, readlinkres, 3), 1139 PROC(READLINK, readlinkargs, readlinkres, 3),
714 PROC(READ, readargs, readres, 3), 1140 PROC(READ, readargs, readres, 3),
715 PROC(WRITE, writeargs, writeres, 4), 1141 PROC(WRITE, writeargs, writeres, 4),
716 PROC(CREATE, createargs, diropres, 0), 1142 PROC(CREATE, createargs, diropres, 0),
717 PROC(REMOVE, removeargs, stat, 0), 1143 PROC(REMOVE, removeargs, stat, 0),
718 PROC(RENAME, renameargs, stat, 0), 1144 PROC(RENAME, renameargs, stat, 0),
719 PROC(LINK, linkargs, stat, 0), 1145 PROC(LINK, linkargs, stat, 0),
720 PROC(SYMLINK, symlinkargs, stat, 0), 1146 PROC(SYMLINK, symlinkargs, stat, 0),
721 PROC(MKDIR, createargs, diropres, 0), 1147 PROC(MKDIR, createargs, diropres, 0),
722 PROC(RMDIR, diropargs, stat, 0), 1148 PROC(RMDIR, diropargs, stat, 0),
723 PROC(READDIR, readdirargs, readdirres, 3), 1149 PROC(READDIR, readdirargs, readdirres, 3),
724 PROC(STATFS, fhandle, statfsres, 0), 1150 PROC(STATFS, fhandle, statfsres, 0),
725}; 1151};
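For reference, expanding one table entry by hand shows what the PROC() macro generates; PROC(GETATTR, fhandle, attrstat, 1) becomes an initializer equivalent to the sketch below (fields hidden by the hunk break in the macro are omitted):

[NFSPROC_GETATTR] = {
	.p_proc   = NFSPROC_GETATTR,
	.p_encode = (kxdreproc_t)nfs2_xdr_enc_fhandle,
	.p_decode = (kxdrdproc_t)nfs2_xdr_dec_attrstat,
	.p_arglen = NFS_fhandle_sz,
	.p_replen = NFS_attrstat_sz,
	.p_timer  = 1,
	/* ... fields elided by the hunk break above ... */
	.p_name   = "GETATTR",
},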
726 1152
727struct rpc_version nfs_version2 = { 1153struct rpc_version nfs_version2 = {
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
index f6cc60f06dac..01c5e8b1941d 100644
--- a/fs/nfs/nfs3xdr.c
+++ b/fs/nfs/nfs3xdr.c
@@ -37,18 +37,16 @@
37#define NFS3_filename_sz (1+(NFS3_MAXNAMLEN>>2)) 37#define NFS3_filename_sz (1+(NFS3_MAXNAMLEN>>2))
38#define NFS3_path_sz (1+(NFS3_MAXPATHLEN>>2)) 38#define NFS3_path_sz (1+(NFS3_MAXPATHLEN>>2))
39#define NFS3_fattr_sz (21) 39#define NFS3_fattr_sz (21)
40#define NFS3_wcc_attr_sz (6) 40#define NFS3_cookieverf_sz (NFS3_COOKIEVERFSIZE>>2)
41#define NFS3_wcc_attr_sz (6)
41#define NFS3_pre_op_attr_sz (1+NFS3_wcc_attr_sz) 42#define NFS3_pre_op_attr_sz (1+NFS3_wcc_attr_sz)
42#define NFS3_post_op_attr_sz (1+NFS3_fattr_sz) 43#define NFS3_post_op_attr_sz (1+NFS3_fattr_sz)
43#define NFS3_wcc_data_sz (NFS3_pre_op_attr_sz+NFS3_post_op_attr_sz) 44#define NFS3_wcc_data_sz (NFS3_pre_op_attr_sz+NFS3_post_op_attr_sz)
44#define NFS3_fsstat_sz (1+NFS3_post_op_attr_sz+13)
45#define NFS3_fsinfo_sz (1+NFS3_post_op_attr_sz+12)
46#define NFS3_pathconf_sz (1+NFS3_post_op_attr_sz+6)
47#define NFS3_entry_sz (NFS3_filename_sz+3)
48
49#define NFS3_sattrargs_sz (NFS3_fh_sz+NFS3_sattr_sz+3)
50#define NFS3_diropargs_sz (NFS3_fh_sz+NFS3_filename_sz) 45#define NFS3_diropargs_sz (NFS3_fh_sz+NFS3_filename_sz)
51#define NFS3_removeargs_sz (NFS3_fh_sz+NFS3_filename_sz) 46
47#define NFS3_getattrargs_sz (NFS3_fh_sz)
48#define NFS3_setattrargs_sz (NFS3_fh_sz+NFS3_sattr_sz+3)
49#define NFS3_lookupargs_sz (NFS3_fh_sz+NFS3_filename_sz)
52#define NFS3_accessargs_sz (NFS3_fh_sz+1) 50#define NFS3_accessargs_sz (NFS3_fh_sz+1)
53#define NFS3_readlinkargs_sz (NFS3_fh_sz) 51#define NFS3_readlinkargs_sz (NFS3_fh_sz)
54#define NFS3_readargs_sz (NFS3_fh_sz+3) 52#define NFS3_readargs_sz (NFS3_fh_sz+3)
@@ -57,14 +55,16 @@
57#define NFS3_mkdirargs_sz (NFS3_diropargs_sz+NFS3_sattr_sz) 55#define NFS3_mkdirargs_sz (NFS3_diropargs_sz+NFS3_sattr_sz)
58#define NFS3_symlinkargs_sz (NFS3_diropargs_sz+1+NFS3_sattr_sz) 56#define NFS3_symlinkargs_sz (NFS3_diropargs_sz+1+NFS3_sattr_sz)
59#define NFS3_mknodargs_sz (NFS3_diropargs_sz+2+NFS3_sattr_sz) 57#define NFS3_mknodargs_sz (NFS3_diropargs_sz+2+NFS3_sattr_sz)
58#define NFS3_removeargs_sz (NFS3_fh_sz+NFS3_filename_sz)
60#define NFS3_renameargs_sz (NFS3_diropargs_sz+NFS3_diropargs_sz) 59#define NFS3_renameargs_sz (NFS3_diropargs_sz+NFS3_diropargs_sz)
61#define NFS3_linkargs_sz (NFS3_fh_sz+NFS3_diropargs_sz) 60#define NFS3_linkargs_sz (NFS3_fh_sz+NFS3_diropargs_sz)
62#define NFS3_readdirargs_sz (NFS3_fh_sz+2) 61#define NFS3_readdirargs_sz (NFS3_fh_sz+NFS3_cookieverf_sz+3)
62#define NFS3_readdirplusargs_sz (NFS3_fh_sz+NFS3_cookieverf_sz+4)
63#define NFS3_commitargs_sz (NFS3_fh_sz+3) 63#define NFS3_commitargs_sz (NFS3_fh_sz+3)
64 64
65#define NFS3_attrstat_sz (1+NFS3_fattr_sz) 65#define NFS3_getattrres_sz (1+NFS3_fattr_sz)
66#define NFS3_wccstat_sz (1+NFS3_wcc_data_sz) 66#define NFS3_setattrres_sz (1+NFS3_wcc_data_sz)
67#define NFS3_removeres_sz (NFS3_wccstat_sz) 67#define NFS3_removeres_sz (NFS3_setattrres_sz)
68#define NFS3_lookupres_sz (1+NFS3_fh_sz+(2 * NFS3_post_op_attr_sz)) 68#define NFS3_lookupres_sz (1+NFS3_fh_sz+(2 * NFS3_post_op_attr_sz))
69#define NFS3_accessres_sz (1+NFS3_post_op_attr_sz+1) 69#define NFS3_accessres_sz (1+NFS3_post_op_attr_sz+1)
70#define NFS3_readlinkres_sz (1+NFS3_post_op_attr_sz+1) 70#define NFS3_readlinkres_sz (1+NFS3_post_op_attr_sz+1)
@@ -100,1079 +100,2362 @@ static const umode_t nfs_type2fmt[] = {
100 [NF3FIFO] = S_IFIFO, 100 [NF3FIFO] = S_IFIFO,
101}; 101};
102 102
103/*
104 * While encoding arguments, set up the reply buffer in advance to
105 * receive reply data directly into the page cache.
106 */
107static void prepare_reply_buffer(struct rpc_rqst *req, struct page **pages,
108 unsigned int base, unsigned int len,
109 unsigned int bufsize)
110{
111 struct rpc_auth *auth = req->rq_cred->cr_auth;
112 unsigned int replen;
113
114 replen = RPC_REPHDRSIZE + auth->au_rslack + bufsize;
115 xdr_inline_pages(&req->rq_rcv_buf, replen << 2, pages, base, len);
116}
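The *_sz macros count 32-bit XDR words, which is why prepare_reply_buffer() shifts the total left by two before handing a byte count to xdr_inline_pages(). A worked example for READLINK with assumed header and slack sizes (the real values depend on the transport and auth flavor):

/* Illustrative only: rephdr and rslack are assumed word counts */
static unsigned int readlink_replen_bytes(void)
{
	unsigned int rephdr = 3, rslack = 2;
	unsigned int res = 1 + (1 + 21) + 1;		/* NFS3_readlinkres_sz */
	unsigned int replen = rephdr + rslack + res;	/* 29 XDR words */

	return replen << 2;				/* 116 bytes */
}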
117
118/*
119 * Handle decode buffer overflows out-of-line.
120 */
103static void print_overflow_msg(const char *func, const struct xdr_stream *xdr) 121static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
104{ 122{
105 dprintk("nfs: %s: prematurely hit end of receive buffer. " 123 dprintk("NFS: %s prematurely hit the end of our receive buffer. "
106 "Remaining buffer length is %tu words.\n", 124 "Remaining buffer length is %tu words.\n",
107 func, xdr->end - xdr->p); 125 func, xdr->end - xdr->p);
108} 126}
109 127
128
110/* 129/*
111 * Common NFS XDR functions as inlines 130 * Encode/decode NFSv3 basic data types
131 *
132 * Basic NFSv3 data types are defined in section 2.5 of RFC 1813:
133 * "NFS Version 3 Protocol Specification".
134 *
135 * Not all basic data types have their own encoding and decoding
136 * functions. For run-time efficiency, some data types are encoded
137 * or decoded inline.
112 */ 138 */
113static inline __be32 * 139
114xdr_encode_fhandle(__be32 *p, const struct nfs_fh *fh) 140static void encode_uint32(struct xdr_stream *xdr, u32 value)
115{ 141{
116 return xdr_encode_array(p, fh->data, fh->size); 142 __be32 *p = xdr_reserve_space(xdr, 4);
143 *p = cpu_to_be32(value);
117} 144}
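In user space, cpu_to_be32() plus the four-byte reservation reduces to storing the value most-significant byte first; a sketch:

#include <stdint.h>

/* equivalent of: p = xdr_reserve_space(xdr, 4); *p = cpu_to_be32(value); */
static void enc_uint32(uint8_t *p, uint32_t value)
{
	p[0] = value >> 24;
	p[1] = value >> 16;
	p[2] = value >> 8;
	p[3] = value;
}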
118 145
119static inline __be32 * 146static int decode_uint32(struct xdr_stream *xdr, u32 *value)
120xdr_decode_fhandle(__be32 *p, struct nfs_fh *fh)
121{ 147{
122 if ((fh->size = ntohl(*p++)) <= NFS3_FHSIZE) { 148 __be32 *p;
123 memcpy(fh->data, p, fh->size); 149
124 return p + XDR_QUADLEN(fh->size); 150 p = xdr_inline_decode(xdr, 4);
125 } 151 if (unlikely(p == NULL))
126 return NULL; 152 goto out_overflow;
153 *value = be32_to_cpup(p);
154 return 0;
155out_overflow:
156 print_overflow_msg(__func__, xdr);
157 return -EIO;
158}
159
160static int decode_uint64(struct xdr_stream *xdr, u64 *value)
161{
162 __be32 *p;
163
164 p = xdr_inline_decode(xdr, 8);
165 if (unlikely(p == NULL))
166 goto out_overflow;
167 xdr_decode_hyper(p, value);
168 return 0;
169out_overflow:
170 print_overflow_msg(__func__, xdr);
171 return -EIO;
172}
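An XDR hyper is two big-endian 32-bit words, most significant word first, so the 8-byte inline decode above reduces to the following in portable C:

#include <stdint.h>

/* equivalent of xdr_decode_hyper() on an 8-byte XDR hyper */
static uint64_t dec_hyper(const uint8_t *p)
{
	uint64_t hi = ((uint32_t)p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
	uint64_t lo = ((uint32_t)p[4] << 24) | (p[5] << 16) | (p[6] << 8) | p[7];

	return (hi << 32) | lo;
}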
173
174/*
175 * fileid3
176 *
177 * typedef uint64 fileid3;
178 */
179static __be32 *xdr_decode_fileid3(__be32 *p, u64 *fileid)
180{
181 return xdr_decode_hyper(p, fileid);
182}
183
184static int decode_fileid3(struct xdr_stream *xdr, u64 *fileid)
185{
186 return decode_uint64(xdr, fileid);
187}
188
189/*
190 * filename3
191 *
192 * typedef string filename3<>;
193 */
194static void encode_filename3(struct xdr_stream *xdr,
195 const char *name, u32 length)
196{
197 __be32 *p;
198
199 BUG_ON(length > NFS3_MAXNAMLEN);
200 p = xdr_reserve_space(xdr, 4 + length);
201 xdr_encode_opaque(p, name, length);
127} 202}
128 203
129static inline __be32 * 204static int decode_inline_filename3(struct xdr_stream *xdr,
130xdr_decode_fhandle_stream(struct xdr_stream *xdr, struct nfs_fh *fh) 205 const char **name, u32 *length)
131{ 206{
132 __be32 *p; 207 __be32 *p;
208 u32 count;
209
133 p = xdr_inline_decode(xdr, 4); 210 p = xdr_inline_decode(xdr, 4);
134 if (unlikely(!p)) 211 if (unlikely(p == NULL))
212 goto out_overflow;
213 count = be32_to_cpup(p);
214 if (count > NFS3_MAXNAMLEN)
215 goto out_nametoolong;
216 p = xdr_inline_decode(xdr, count);
217 if (unlikely(p == NULL))
135 goto out_overflow; 218 goto out_overflow;
136 fh->size = ntohl(*p++); 219 *name = (const char *)p;
220 *length = count;
221 return 0;
137 222
138 if (fh->size <= NFS3_FHSIZE) { 223out_nametoolong:
139 p = xdr_inline_decode(xdr, fh->size); 224 dprintk("NFS: returned filename too long: %u\n", count);
140 if (unlikely(!p)) 225 return -ENAMETOOLONG;
141 goto out_overflow; 226out_overflow:
142 memcpy(fh->data, p, fh->size); 227 print_overflow_msg(__func__, xdr);
143 return p + XDR_QUADLEN(fh->size); 228 return -EIO;
144 } 229}
145 return NULL; 230
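The inline filename decode above is a bounded XDR opaque: read the length word, reject anything over the cap before touching the payload, then point at the bytes in place rather than copying them. A user-space sketch with explicit buffer accounting (the parameter names are illustrative):

#include <stdint.h>
#include <stddef.h>

/* decode an XDR opaque<> capped at 'cap' bytes, pointing *out at the
 * payload in place */
static int dec_bounded_opaque(const uint8_t *p, size_t avail, uint32_t cap,
			      const uint8_t **out, uint32_t *len)
{
	uint32_t count;

	if (avail < 4)
		return -5;		/* -EIO: truncated reply */
	count = ((uint32_t)p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
	if (count > cap)
		return -36;		/* -ENAMETOOLONG */
	if (avail - 4 < count)
		return -5;		/* payload shorter than claimed */
	*out = p + 4;
	*len = count;
	return 0;
}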
231/*
232 * nfspath3
233 *
234 * typedef string nfspath3<>;
235 */
236static void encode_nfspath3(struct xdr_stream *xdr, struct page **pages,
237 const u32 length)
238{
239 BUG_ON(length > NFS3_MAXPATHLEN);
240 encode_uint32(xdr, length);
241 xdr_write_pages(xdr, pages, 0, length);
242}
146 243
244static int decode_nfspath3(struct xdr_stream *xdr)
245{
246 u32 recvd, count;
247 size_t hdrlen;
248 __be32 *p;
249
250 p = xdr_inline_decode(xdr, 4);
251 if (unlikely(p == NULL))
252 goto out_overflow;
253 count = be32_to_cpup(p);
254 if (unlikely(count >= xdr->buf->page_len || count > NFS3_MAXPATHLEN))
255 goto out_nametoolong;
256 hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
257 recvd = xdr->buf->len - hdrlen;
258 if (unlikely(count > recvd))
259 goto out_cheating;
260
261 xdr_read_pages(xdr, count);
262 xdr_terminate_string(xdr->buf, count);
263 return 0;
264
265out_nametoolong:
266 dprintk("NFS: returned pathname too long: %u\n", count);
267 return -ENAMETOOLONG;
268out_cheating:
269 dprintk("NFS: server cheating in pathname result: "
270 "count %u > recvd %u\n", count, recvd);
271 return -EIO;
147out_overflow: 272out_overflow:
148 print_overflow_msg(__func__, xdr); 273 print_overflow_msg(__func__, xdr);
149 return ERR_PTR(-EIO); 274 return -EIO;
150} 275}
151 276
152/* 277/*
153 * Encode/decode time. 278 * cookie3
279 *
280 * typedef uint64 cookie3;
154 */ 281 */
155static inline __be32 * 282static __be32 *xdr_encode_cookie3(__be32 *p, u64 cookie)
156xdr_encode_time3(__be32 *p, struct timespec *timep)
157{ 283{
158 *p++ = htonl(timep->tv_sec); 284 return xdr_encode_hyper(p, cookie);
159 *p++ = htonl(timep->tv_nsec);
160 return p;
161} 285}
162 286
163static inline __be32 * 287static int decode_cookie3(struct xdr_stream *xdr, u64 *cookie)
164xdr_decode_time3(__be32 *p, struct timespec *timep)
165{ 288{
166 timep->tv_sec = ntohl(*p++); 289 return decode_uint64(xdr, cookie);
167 timep->tv_nsec = ntohl(*p++); 290}
168 return p; 291
292/*
293 * cookieverf3
294 *
295 * typedef opaque cookieverf3[NFS3_COOKIEVERFSIZE];
296 */
297static __be32 *xdr_encode_cookieverf3(__be32 *p, const __be32 *verifier)
298{
299 memcpy(p, verifier, NFS3_COOKIEVERFSIZE);
300 return p + XDR_QUADLEN(NFS3_COOKIEVERFSIZE);
301}
302
303static int decode_cookieverf3(struct xdr_stream *xdr, __be32 *verifier)
304{
305 __be32 *p;
306
307 p = xdr_inline_decode(xdr, NFS3_COOKIEVERFSIZE);
308 if (unlikely(p == NULL))
309 goto out_overflow;
310 memcpy(verifier, p, NFS3_COOKIEVERFSIZE);
311 return 0;
312out_overflow:
313 print_overflow_msg(__func__, xdr);
314 return -EIO;
315}
316
317/*
318 * createverf3
319 *
320 * typedef opaque createverf3[NFS3_CREATEVERFSIZE];
321 */
322static void encode_createverf3(struct xdr_stream *xdr, const __be32 *verifier)
323{
324 __be32 *p;
325
326 p = xdr_reserve_space(xdr, NFS3_CREATEVERFSIZE);
327 memcpy(p, verifier, NFS3_CREATEVERFSIZE);
328}
329
330static int decode_writeverf3(struct xdr_stream *xdr, __be32 *verifier)
331{
332 __be32 *p;
333
334 p = xdr_inline_decode(xdr, NFS3_WRITEVERFSIZE);
335 if (unlikely(p == NULL))
336 goto out_overflow;
337 memcpy(verifier, p, NFS3_WRITEVERFSIZE);
338 return 0;
339out_overflow:
340 print_overflow_msg(__func__, xdr);
341 return -EIO;
342}
343
344/*
345 * size3
346 *
347 * typedef uint64 size3;
348 */
349static __be32 *xdr_decode_size3(__be32 *p, u64 *size)
350{
351 return xdr_decode_hyper(p, size);
352}
353
354/*
355 * nfsstat3
356 *
357 * enum nfsstat3 {
358 * NFS3_OK = 0,
359 * ...
360 * }
361 */
362#define NFS3_OK NFS_OK
363
364static int decode_nfsstat3(struct xdr_stream *xdr, enum nfs_stat *status)
365{
366 __be32 *p;
367
368 p = xdr_inline_decode(xdr, 4);
369 if (unlikely(p == NULL))
370 goto out_overflow;
371 *status = be32_to_cpup(p);
372 return 0;
373out_overflow:
374 print_overflow_msg(__func__, xdr);
375 return -EIO;
376}
377
378/*
379 * ftype3
380 *
381 * enum ftype3 {
382 * NF3REG = 1,
383 * NF3DIR = 2,
384 * NF3BLK = 3,
385 * NF3CHR = 4,
386 * NF3LNK = 5,
387 * NF3SOCK = 6,
388 * NF3FIFO = 7
389 * };
390 */
391static void encode_ftype3(struct xdr_stream *xdr, const u32 type)
392{
393 BUG_ON(type > NF3FIFO);
394 encode_uint32(xdr, type);
169} 395}
170 396
171static __be32 * 397static __be32 *xdr_decode_ftype3(__be32 *p, umode_t *mode)
172xdr_decode_fattr(__be32 *p, struct nfs_fattr *fattr)
173{ 398{
174 unsigned int type, major, minor; 399 u32 type;
175 umode_t fmode;
176 400
177 type = ntohl(*p++); 401 type = be32_to_cpup(p++);
178 if (type > NF3FIFO) 402 if (type > NF3FIFO)
179 type = NF3NON; 403 type = NF3NON;
180 fmode = nfs_type2fmt[type]; 404 *mode = nfs_type2fmt[type];
181 fattr->mode = (ntohl(*p++) & ~S_IFMT) | fmode; 405 return p;
182 fattr->nlink = ntohl(*p++); 406}
183 fattr->uid = ntohl(*p++);
184 fattr->gid = ntohl(*p++);
185 p = xdr_decode_hyper(p, &fattr->size);
186 p = xdr_decode_hyper(p, &fattr->du.nfs3.used);
187
188 /* Turn remote device info into Linux-specific dev_t */
189 major = ntohl(*p++);
190 minor = ntohl(*p++);
191 fattr->rdev = MKDEV(major, minor);
192 if (MAJOR(fattr->rdev) != major || MINOR(fattr->rdev) != minor)
193 fattr->rdev = 0;
194 407
195 p = xdr_decode_hyper(p, &fattr->fsid.major); 408/*
196 fattr->fsid.minor = 0; 409 * specdata3
197 p = xdr_decode_hyper(p, &fattr->fileid); 410 *
198 p = xdr_decode_time3(p, &fattr->atime); 411 * struct specdata3 {
199 p = xdr_decode_time3(p, &fattr->mtime); 412 * uint32 specdata1;
200 p = xdr_decode_time3(p, &fattr->ctime); 413 * uint32 specdata2;
414 * };
415 */
416static void encode_specdata3(struct xdr_stream *xdr, const dev_t rdev)
417{
418 __be32 *p;
201 419
202 /* Update the mode bits */ 420 p = xdr_reserve_space(xdr, 8);
203 fattr->valid |= NFS_ATTR_FATTR_V3; 421 *p++ = cpu_to_be32(MAJOR(rdev));
422 *p = cpu_to_be32(MINOR(rdev));
423}
424
425static __be32 *xdr_decode_specdata3(__be32 *p, dev_t *rdev)
426{
427 unsigned int major, minor;
428
429 major = be32_to_cpup(p++);
430 minor = be32_to_cpup(p++);
431 *rdev = MKDEV(major, minor);
432 if (MAJOR(*rdev) != major || MINOR(*rdev) != minor)
433 *rdev = 0;
434 return p;
435}
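The round-trip check in xdr_decode_specdata3() guards against server device numbers that cannot be represented in a local dev_t: if packing and unpacking does not reproduce the pair, the rdev is zeroed. The same logic in user-space C, using the glibc makedev()/major()/minor() macros:

#include <sys/types.h>
#include <sys/sysmacros.h>	/* makedev(), major(), minor() on glibc */

static dev_t dec_specdata(unsigned int maj, unsigned int min)
{
	dev_t rdev = makedev(maj, min);

	/* unrepresentable here: zero it, as the kernel code does */
	if (major(rdev) != maj || minor(rdev) != min)
		rdev = 0;
	return rdev;
}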
436
437/*
438 * nfs_fh3
439 *
440 * struct nfs_fh3 {
441 * opaque data<NFS3_FHSIZE>;
442 * };
443 */
444static void encode_nfs_fh3(struct xdr_stream *xdr, const struct nfs_fh *fh)
445{
446 __be32 *p;
447
448 BUG_ON(fh->size > NFS3_FHSIZE);
449 p = xdr_reserve_space(xdr, 4 + fh->size);
450 xdr_encode_opaque(p, fh->data, fh->size);
451}
452
453static int decode_nfs_fh3(struct xdr_stream *xdr, struct nfs_fh *fh)
454{
455 u32 length;
456 __be32 *p;
457
458 p = xdr_inline_decode(xdr, 4);
459 if (unlikely(p == NULL))
460 goto out_overflow;
461 length = be32_to_cpup(p++);
462 if (unlikely(length > NFS3_FHSIZE))
463 goto out_toobig;
464 p = xdr_inline_decode(xdr, length);
465 if (unlikely(p == NULL))
466 goto out_overflow;
467 fh->size = length;
468 memcpy(fh->data, p, length);
469 return 0;
470out_toobig:
471 dprintk("NFS: file handle size (%u) too big\n", length);
472 return -E2BIG;
473out_overflow:
474 print_overflow_msg(__func__, xdr);
475 return -EIO;
476}
477
478static void zero_nfs_fh3(struct nfs_fh *fh)
479{
480 memset(fh, 0, sizeof(*fh));
481}
482
483/*
484 * nfstime3
485 *
486 * struct nfstime3 {
487 * uint32 seconds;
488 * uint32 nseconds;
489 * };
490 */
491static __be32 *xdr_encode_nfstime3(__be32 *p, const struct timespec *timep)
492{
493 *p++ = cpu_to_be32(timep->tv_sec);
494 *p++ = cpu_to_be32(timep->tv_nsec);
204 return p; 495 return p;
205} 496}
206 497
207static inline __be32 * 498static __be32 *xdr_decode_nfstime3(__be32 *p, struct timespec *timep)
208xdr_encode_sattr(__be32 *p, struct iattr *attr)
209{ 499{
500 timep->tv_sec = be32_to_cpup(p++);
501 timep->tv_nsec = be32_to_cpup(p++);
502 return p;
503}
504
505/*
506 * sattr3
507 *
508 * enum time_how {
509 * DONT_CHANGE = 0,
510 * SET_TO_SERVER_TIME = 1,
511 * SET_TO_CLIENT_TIME = 2
512 * };
513 *
514 * union set_mode3 switch (bool set_it) {
515 * case TRUE:
516 * mode3 mode;
517 * default:
518 * void;
519 * };
520 *
521 * union set_uid3 switch (bool set_it) {
522 * case TRUE:
523 * uid3 uid;
524 * default:
525 * void;
526 * };
527 *
528 * union set_gid3 switch (bool set_it) {
529 * case TRUE:
530 * gid3 gid;
531 * default:
532 * void;
533 * };
534 *
535 * union set_size3 switch (bool set_it) {
536 * case TRUE:
537 * size3 size;
538 * default:
539 * void;
540 * };
541 *
542 * union set_atime switch (time_how set_it) {
543 * case SET_TO_CLIENT_TIME:
544 * nfstime3 atime;
545 * default:
546 * void;
547 * };
548 *
549 * union set_mtime switch (time_how set_it) {
550 * case SET_TO_CLIENT_TIME:
551 * nfstime3 mtime;
552 * default:
553 * void;
554 * };
555 *
556 * struct sattr3 {
557 * set_mode3 mode;
558 * set_uid3 uid;
559 * set_gid3 gid;
560 * set_size3 size;
561 * set_atime atime;
562 * set_mtime mtime;
563 * };
564 */
565static void encode_sattr3(struct xdr_stream *xdr, const struct iattr *attr)
566{
567 u32 nbytes;
568 __be32 *p;
569
570 /*
571 * In order to make only a single xdr_reserve_space() call,
572 * pre-compute the total number of bytes to be reserved.
573 * Six boolean values, one for each set_foo field, are always
574 * present in the encoded result, so start there.
575 */
576 nbytes = 6 * 4;
577 if (attr->ia_valid & ATTR_MODE)
578 nbytes += 4;
579 if (attr->ia_valid & ATTR_UID)
580 nbytes += 4;
581 if (attr->ia_valid & ATTR_GID)
582 nbytes += 4;
583 if (attr->ia_valid & ATTR_SIZE)
584 nbytes += 8;
585 if (attr->ia_valid & ATTR_ATIME_SET)
586 nbytes += 8;
587 if (attr->ia_valid & ATTR_MTIME_SET)
588 nbytes += 8;
589 p = xdr_reserve_space(xdr, nbytes);
590
210 if (attr->ia_valid & ATTR_MODE) { 591 if (attr->ia_valid & ATTR_MODE) {
211 *p++ = xdr_one; 592 *p++ = xdr_one;
212 *p++ = htonl(attr->ia_mode & S_IALLUGO); 593 *p++ = cpu_to_be32(attr->ia_mode & S_IALLUGO);
213 } else { 594 } else
214 *p++ = xdr_zero; 595 *p++ = xdr_zero;
215 } 596
216 if (attr->ia_valid & ATTR_UID) { 597 if (attr->ia_valid & ATTR_UID) {
217 *p++ = xdr_one; 598 *p++ = xdr_one;
218 *p++ = htonl(attr->ia_uid); 599 *p++ = cpu_to_be32(attr->ia_uid);
219 } else { 600 } else
220 *p++ = xdr_zero; 601 *p++ = xdr_zero;
221 } 602
222 if (attr->ia_valid & ATTR_GID) { 603 if (attr->ia_valid & ATTR_GID) {
223 *p++ = xdr_one; 604 *p++ = xdr_one;
224 *p++ = htonl(attr->ia_gid); 605 *p++ = cpu_to_be32(attr->ia_gid);
225 } else { 606 } else
226 *p++ = xdr_zero; 607 *p++ = xdr_zero;
227 } 608
228 if (attr->ia_valid & ATTR_SIZE) { 609 if (attr->ia_valid & ATTR_SIZE) {
229 *p++ = xdr_one; 610 *p++ = xdr_one;
230 p = xdr_encode_hyper(p, (__u64) attr->ia_size); 611 p = xdr_encode_hyper(p, (u64)attr->ia_size);
231 } else { 612 } else
232 *p++ = xdr_zero; 613 *p++ = xdr_zero;
233 } 614
234 if (attr->ia_valid & ATTR_ATIME_SET) { 615 if (attr->ia_valid & ATTR_ATIME_SET) {
235 *p++ = xdr_two; 616 *p++ = xdr_two;
236 p = xdr_encode_time3(p, &attr->ia_atime); 617 p = xdr_encode_nfstime3(p, &attr->ia_atime);
237 } else if (attr->ia_valid & ATTR_ATIME) { 618 } else if (attr->ia_valid & ATTR_ATIME) {
238 *p++ = xdr_one; 619 *p++ = xdr_one;
239 } else { 620 } else
240 *p++ = xdr_zero; 621 *p++ = xdr_zero;
241 } 622
242 if (attr->ia_valid & ATTR_MTIME_SET) { 623 if (attr->ia_valid & ATTR_MTIME_SET) {
243 *p++ = xdr_two; 624 *p++ = xdr_two;
244 p = xdr_encode_time3(p, &attr->ia_mtime); 625 xdr_encode_nfstime3(p, &attr->ia_mtime);
245 } else if (attr->ia_valid & ATTR_MTIME) { 626 } else if (attr->ia_valid & ATTR_MTIME) {
246 *p++ = xdr_one; 627 *p = xdr_one;
247 } else { 628 } else
248 *p++ = xdr_zero; 629 *p = xdr_zero;
249 } 630}
250 return p; 631
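To make the single-reservation arithmetic concrete: a sattr3 that sets only mode and size always carries the six discriminant booleans, plus one word for the mode3 and a hyper for the size3. A worked example of the nbytes pre-computation:

/* nbytes for ia_valid == ATTR_MODE | ATTR_SIZE */
static unsigned int sattr3_nbytes_mode_and_size(void)
{
	unsigned int nbytes = 6 * 4;	/* six set_* discriminants = 24 */

	nbytes += 4;	/* set_mode3 payload (mode3)  -> 28 */
	nbytes += 8;	/* set_size3 payload (size3)  -> 36 */
	return nbytes;	/* one xdr_reserve_space(xdr, 36) covers it all */
}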
632/*
633 * fattr3
634 *
635 * struct fattr3 {
636 * ftype3 type;
637 * mode3 mode;
638 * uint32 nlink;
639 * uid3 uid;
640 * gid3 gid;
641 * size3 size;
642 * size3 used;
643 * specdata3 rdev;
644 * uint64 fsid;
645 * fileid3 fileid;
646 * nfstime3 atime;
647 * nfstime3 mtime;
648 * nfstime3 ctime;
649 * };
650 */
651static int decode_fattr3(struct xdr_stream *xdr, struct nfs_fattr *fattr)
652{
653 umode_t fmode;
654 __be32 *p;
655
656 p = xdr_inline_decode(xdr, NFS3_fattr_sz << 2);
657 if (unlikely(p == NULL))
658 goto out_overflow;
659
660 p = xdr_decode_ftype3(p, &fmode);
661
662 fattr->mode = (be32_to_cpup(p++) & ~S_IFMT) | fmode;
663 fattr->nlink = be32_to_cpup(p++);
664 fattr->uid = be32_to_cpup(p++);
665 fattr->gid = be32_to_cpup(p++);
666
667 p = xdr_decode_size3(p, &fattr->size);
668 p = xdr_decode_size3(p, &fattr->du.nfs3.used);
669 p = xdr_decode_specdata3(p, &fattr->rdev);
670
671 p = xdr_decode_hyper(p, &fattr->fsid.major);
672 fattr->fsid.minor = 0;
673
674 p = xdr_decode_fileid3(p, &fattr->fileid);
675 p = xdr_decode_nfstime3(p, &fattr->atime);
676 p = xdr_decode_nfstime3(p, &fattr->mtime);
677 xdr_decode_nfstime3(p, &fattr->ctime);
678
679 fattr->valid |= NFS_ATTR_FATTR_V3;
680 return 0;
681out_overflow:
682 print_overflow_msg(__func__, xdr);
683 return -EIO;
251} 684}
252 685
253static inline __be32 * 686/*
254xdr_decode_wcc_attr(__be32 *p, struct nfs_fattr *fattr) 687 * post_op_attr
688 *
689 * union post_op_attr switch (bool attributes_follow) {
690 * case TRUE:
691 * fattr3 attributes;
692 * case FALSE:
693 * void;
694 * };
695 */
696static int decode_post_op_attr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
255{ 697{
256 p = xdr_decode_hyper(p, &fattr->pre_size); 698 __be32 *p;
257 p = xdr_decode_time3(p, &fattr->pre_mtime); 699
258 p = xdr_decode_time3(p, &fattr->pre_ctime); 700 p = xdr_inline_decode(xdr, 4);
701 if (unlikely(p == NULL))
702 goto out_overflow;
703 if (*p != xdr_zero)
704 return decode_fattr3(xdr, fattr);
705 return 0;
706out_overflow:
707 print_overflow_msg(__func__, xdr);
708 return -EIO;
709}
710
711/*
712 * wcc_attr
713 * struct wcc_attr {
714 * size3 size;
715 * nfstime3 mtime;
716 * nfstime3 ctime;
717 * };
718 */
719static int decode_wcc_attr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
720{
721 __be32 *p;
722
723 p = xdr_inline_decode(xdr, NFS3_wcc_attr_sz << 2);
724 if (unlikely(p == NULL))
725 goto out_overflow;
726
259 fattr->valid |= NFS_ATTR_FATTR_PRESIZE 727 fattr->valid |= NFS_ATTR_FATTR_PRESIZE
260 | NFS_ATTR_FATTR_PREMTIME 728 | NFS_ATTR_FATTR_PREMTIME
261 | NFS_ATTR_FATTR_PRECTIME; 729 | NFS_ATTR_FATTR_PRECTIME;
262 return p;
263}
264 730
265static inline __be32 * 731 p = xdr_decode_size3(p, &fattr->pre_size);
266xdr_decode_post_op_attr(__be32 *p, struct nfs_fattr *fattr) 732 p = xdr_decode_nfstime3(p, &fattr->pre_mtime);
267{ 733 xdr_decode_nfstime3(p, &fattr->pre_ctime);
268 if (*p++) 734
269 p = xdr_decode_fattr(p, fattr); 735 return 0;
270 return p; 736out_overflow:
737 print_overflow_msg(__func__, xdr);
738 return -EIO;
271} 739}
272 740
273static inline __be32 * 741/*
274xdr_decode_post_op_attr_stream(struct xdr_stream *xdr, struct nfs_fattr *fattr) 742 * pre_op_attr
743 * union pre_op_attr switch (bool attributes_follow) {
744 * case TRUE:
745 * wcc_attr attributes;
746 * case FALSE:
747 * void;
748 * };
749 *
750 * wcc_data
751 *
752 * struct wcc_data {
753 * pre_op_attr before;
754 * post_op_attr after;
755 * };
756 */
757static int decode_pre_op_attr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
275{ 758{
276 __be32 *p; 759 __be32 *p;
277 760
278 p = xdr_inline_decode(xdr, 4); 761 p = xdr_inline_decode(xdr, 4);
279 if (unlikely(!p)) 762 if (unlikely(p == NULL))
280 goto out_overflow; 763 goto out_overflow;
281 if (ntohl(*p++)) { 764 if (*p != xdr_zero)
282 p = xdr_inline_decode(xdr, 84); 765 return decode_wcc_attr(xdr, fattr);
283 if (unlikely(!p)) 766 return 0;
284 goto out_overflow;
285 p = xdr_decode_fattr(p, fattr);
286 }
287 return p;
288out_overflow: 767out_overflow:
289 print_overflow_msg(__func__, xdr); 768 print_overflow_msg(__func__, xdr);
290 return ERR_PTR(-EIO); 769 return -EIO;
291} 770}
292 771
293static inline __be32 * 772static int decode_wcc_data(struct xdr_stream *xdr, struct nfs_fattr *fattr)
294xdr_decode_pre_op_attr(__be32 *p, struct nfs_fattr *fattr)
295{ 773{
296 if (*p++) 774 int error;
297 return xdr_decode_wcc_attr(p, fattr); 775
298 return p; 776 error = decode_pre_op_attr(xdr, fattr);
777 if (unlikely(error))
778 goto out;
779 error = decode_post_op_attr(xdr, fattr);
780out:
781 return error;
299} 782}
300 783
784/*
785 * post_op_fh3
786 *
787 * union post_op_fh3 switch (bool handle_follows) {
788 * case TRUE:
789 * nfs_fh3 handle;
790 * case FALSE:
791 * void;
792 * };
793 */
794static int decode_post_op_fh3(struct xdr_stream *xdr, struct nfs_fh *fh)
795{
796 __be32 *p = xdr_inline_decode(xdr, 4);
797 if (unlikely(p == NULL))
798 goto out_overflow;
799 if (*p != xdr_zero)
800 return decode_nfs_fh3(xdr, fh);
801 zero_nfs_fh3(fh);
802 return 0;
803out_overflow:
804 print_overflow_msg(__func__, xdr);
805 return -EIO;
806}
301 807
302static inline __be32 * 808/*
303xdr_decode_wcc_data(__be32 *p, struct nfs_fattr *fattr) 809 * diropargs3
810 *
811 * struct diropargs3 {
812 * nfs_fh3 dir;
813 * filename3 name;
814 * };
815 */
816static void encode_diropargs3(struct xdr_stream *xdr, const struct nfs_fh *fh,
817 const char *name, u32 length)
304{ 818{
305 p = xdr_decode_pre_op_attr(p, fattr); 819 encode_nfs_fh3(xdr, fh);
306 return xdr_decode_post_op_attr(p, fattr); 820 encode_filename3(xdr, name, length);
307} 821}
308 822
823
309/* 824/*
310 * NFS encode functions 825 * NFSv3 XDR encode functions
826 *
827 * NFSv3 argument types are defined in section 3.3 of RFC 1813:
828 * "NFS Version 3 Protocol Specification".
311 */ 829 */
312 830
313/* 831/*
314 * Encode file handle argument 832 * 3.3.1 GETATTR3args
833 *
834 * struct GETATTR3args {
835 * nfs_fh3 object;
836 * };
315 */ 837 */
316static int 838static void nfs3_xdr_enc_getattr3args(struct rpc_rqst *req,
317nfs3_xdr_fhandle(struct rpc_rqst *req, __be32 *p, struct nfs_fh *fh) 839 struct xdr_stream *xdr,
840 const struct nfs_fh *fh)
318{ 841{
319 p = xdr_encode_fhandle(p, fh); 842 encode_nfs_fh3(xdr, fh);
320 req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
321 return 0;
322} 843}
323 844
324/* 845/*
325 * Encode SETATTR arguments 846 * 3.3.2 SETATTR3args
847 *
848 * union sattrguard3 switch (bool check) {
849 * case TRUE:
850 * nfstime3 obj_ctime;
851 * case FALSE:
852 * void;
853 * };
854 *
855 * struct SETATTR3args {
856 * nfs_fh3 object;
857 * sattr3 new_attributes;
858 * sattrguard3 guard;
859 * };
326 */ 860 */
327static int 861static void encode_sattrguard3(struct xdr_stream *xdr,
328nfs3_xdr_sattrargs(struct rpc_rqst *req, __be32 *p, struct nfs3_sattrargs *args) 862 const struct nfs3_sattrargs *args)
329{ 863{
330 p = xdr_encode_fhandle(p, args->fh); 864 __be32 *p;
331 p = xdr_encode_sattr(p, args->sattr); 865
332 *p++ = htonl(args->guard); 866 if (args->guard) {
333 if (args->guard) 867 p = xdr_reserve_space(xdr, 4 + 8);
334 p = xdr_encode_time3(p, &args->guardtime); 868 *p++ = xdr_one;
335 req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); 869 xdr_encode_nfstime3(p, &args->guardtime);
336 return 0; 870 } else {
871 p = xdr_reserve_space(xdr, 4);
872 *p = xdr_zero;
873 }
874}
875
876static void nfs3_xdr_enc_setattr3args(struct rpc_rqst *req,
877 struct xdr_stream *xdr,
878 const struct nfs3_sattrargs *args)
879{
880 encode_nfs_fh3(xdr, args->fh);
881 encode_sattr3(xdr, args->sattr);
882 encode_sattrguard3(xdr, args);
337} 883}
338 884
339/* 885/*
340 * Encode directory ops argument 886 * 3.3.3 LOOKUP3args
887 *
888 * struct LOOKUP3args {
889 * diropargs3 what;
890 * };
341 */ 891 */
342static int 892static void nfs3_xdr_enc_lookup3args(struct rpc_rqst *req,
343nfs3_xdr_diropargs(struct rpc_rqst *req, __be32 *p, struct nfs3_diropargs *args) 893 struct xdr_stream *xdr,
894 const struct nfs3_diropargs *args)
344{ 895{
345 p = xdr_encode_fhandle(p, args->fh); 896 encode_diropargs3(xdr, args->fh, args->name, args->len);
346 p = xdr_encode_array(p, args->name, args->len);
347 req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
348 return 0;
349} 897}
350 898
351/* 899/*
352 * Encode REMOVE argument 900 * 3.3.4 ACCESS3args
901 *
902 * struct ACCESS3args {
903 * nfs_fh3 object;
904 * uint32 access;
905 * };
353 */ 906 */
354static int 907static void encode_access3args(struct xdr_stream *xdr,
355nfs3_xdr_removeargs(struct rpc_rqst *req, __be32 *p, const struct nfs_removeargs *args) 908 const struct nfs3_accessargs *args)
356{ 909{
357 p = xdr_encode_fhandle(p, args->fh); 910 encode_nfs_fh3(xdr, args->fh);
358 p = xdr_encode_array(p, args->name.name, args->name.len); 911 encode_uint32(xdr, args->access);
359 req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); 912}
360 return 0; 913
914static void nfs3_xdr_enc_access3args(struct rpc_rqst *req,
915 struct xdr_stream *xdr,
916 const struct nfs3_accessargs *args)
917{
918 encode_access3args(xdr, args);
361} 919}
362 920
363/* 921/*
364 * Encode access() argument 922 * 3.3.5 READLINK3args
923 *
924 * struct READLINK3args {
925 * nfs_fh3 symlink;
926 * };
365 */ 927 */
366static int 928static void nfs3_xdr_enc_readlink3args(struct rpc_rqst *req,
367nfs3_xdr_accessargs(struct rpc_rqst *req, __be32 *p, struct nfs3_accessargs *args) 929 struct xdr_stream *xdr,
930 const struct nfs3_readlinkargs *args)
368{ 931{
369 p = xdr_encode_fhandle(p, args->fh); 932 encode_nfs_fh3(xdr, args->fh);
370 *p++ = htonl(args->access); 933 prepare_reply_buffer(req, args->pages, args->pgbase,
371 req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); 934 args->pglen, NFS3_readlinkres_sz);
372 return 0;
373} 935}
374 936
375/* 937/*
376 * Arguments to a READ call. Since we read data directly into the page 938 * 3.3.6 READ3args
377 * cache, we also set up the reply iovec here so that iov[1] points 939 *
378 * exactly to the page we want to fetch. 940 * struct READ3args {
941 * nfs_fh3 file;
942 * offset3 offset;
943 * count3 count;
944 * };
379 */ 945 */
380static int 946static void encode_read3args(struct xdr_stream *xdr,
381nfs3_xdr_readargs(struct rpc_rqst *req, __be32 *p, struct nfs_readargs *args) 947 const struct nfs_readargs *args)
382{ 948{
383 struct rpc_auth *auth = req->rq_cred->cr_auth; 949 __be32 *p;
384 unsigned int replen; 950
385 u32 count = args->count; 951 encode_nfs_fh3(xdr, args->fh);
386 952
387 p = xdr_encode_fhandle(p, args->fh); 953 p = xdr_reserve_space(xdr, 8 + 4);
388 p = xdr_encode_hyper(p, args->offset); 954 p = xdr_encode_hyper(p, args->offset);
389 *p++ = htonl(count); 955 *p = cpu_to_be32(args->count);
390 req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); 956}
391 957
392 /* Inline the page array */ 958static void nfs3_xdr_enc_read3args(struct rpc_rqst *req,
393 replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS3_readres_sz) << 2; 959 struct xdr_stream *xdr,
394 xdr_inline_pages(&req->rq_rcv_buf, replen, 960 const struct nfs_readargs *args)
395 args->pages, args->pgbase, count); 961{
962 encode_read3args(xdr, args);
963 prepare_reply_buffer(req, args->pages, args->pgbase,
964 args->count, NFS3_readres_sz);
396 req->rq_rcv_buf.flags |= XDRBUF_READ; 965 req->rq_rcv_buf.flags |= XDRBUF_READ;
397 return 0;
398} 966}
399 967
400/* 968/*
401 * Write arguments. Splice the buffer to be written into the iovec. 969 * 3.3.7 WRITE3args
970 *
971 * enum stable_how {
972 * UNSTABLE = 0,
973 * DATA_SYNC = 1,
974 * FILE_SYNC = 2
975 * };
976 *
977 * struct WRITE3args {
978 * nfs_fh3 file;
979 * offset3 offset;
980 * count3 count;
981 * stable_how stable;
982 * opaque data<>;
983 * };
402 */ 984 */
403static int 985static void encode_write3args(struct xdr_stream *xdr,
404nfs3_xdr_writeargs(struct rpc_rqst *req, __be32 *p, struct nfs_writeargs *args) 986 const struct nfs_writeargs *args)
405{ 987{
406 struct xdr_buf *sndbuf = &req->rq_snd_buf; 988 __be32 *p;
407 u32 count = args->count; 989
990 encode_nfs_fh3(xdr, args->fh);
408 991
409 p = xdr_encode_fhandle(p, args->fh); 992 p = xdr_reserve_space(xdr, 8 + 4 + 4 + 4);
410 p = xdr_encode_hyper(p, args->offset); 993 p = xdr_encode_hyper(p, args->offset);
411 *p++ = htonl(count); 994 *p++ = cpu_to_be32(args->count);
412 *p++ = htonl(args->stable); 995 *p++ = cpu_to_be32(args->stable);
413 *p++ = htonl(count); 996 *p = cpu_to_be32(args->count);
414 sndbuf->len = xdr_adjust_iovec(sndbuf->head, p); 997 xdr_write_pages(xdr, args->pages, args->pgbase, args->count);
415 998}
416 /* Copy the page array */ 999
417 xdr_encode_pages(sndbuf, args->pages, args->pgbase, count); 1000static void nfs3_xdr_enc_write3args(struct rpc_rqst *req,
418 sndbuf->flags |= XDRBUF_WRITE; 1001 struct xdr_stream *xdr,
419 return 0; 1002 const struct nfs_writeargs *args)
1003{
1004 encode_write3args(xdr, args);
1005 xdr->buf->flags |= XDRBUF_WRITE;
420} 1006}
421 1007
422/* 1008/*
423 * Encode CREATE arguments 1009 * 3.3.8 CREATE3args
1010 *
1011 * enum createmode3 {
1012 * UNCHECKED = 0,
1013 * GUARDED = 1,
1014 * EXCLUSIVE = 2
1015 * };
1016 *
1017 * union createhow3 switch (createmode3 mode) {
1018 * case UNCHECKED:
1019 * case GUARDED:
1020 * sattr3 obj_attributes;
1021 * case EXCLUSIVE:
1022 * createverf3 verf;
1023 * };
1024 *
1025 * struct CREATE3args {
1026 * diropargs3 where;
1027 * createhow3 how;
1028 * };
424 */ 1029 */
425static int 1030static void encode_createhow3(struct xdr_stream *xdr,
426nfs3_xdr_createargs(struct rpc_rqst *req, __be32 *p, struct nfs3_createargs *args) 1031 const struct nfs3_createargs *args)
427{ 1032{
428 p = xdr_encode_fhandle(p, args->fh); 1033 encode_uint32(xdr, args->createmode);
429 p = xdr_encode_array(p, args->name, args->len); 1034 switch (args->createmode) {
430 1035 case NFS3_CREATE_UNCHECKED:
431 *p++ = htonl(args->createmode); 1036 case NFS3_CREATE_GUARDED:
432 if (args->createmode == NFS3_CREATE_EXCLUSIVE) { 1037 encode_sattr3(xdr, args->sattr);
433 *p++ = args->verifier[0]; 1038 break;
434 *p++ = args->verifier[1]; 1039 case NFS3_CREATE_EXCLUSIVE:
435 } else 1040 encode_createverf3(xdr, args->verifier);
436 p = xdr_encode_sattr(p, args->sattr); 1041 break;
1042 default:
1043 BUG();
1044 }
1045}
437 1046
438 req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); 1047static void nfs3_xdr_enc_create3args(struct rpc_rqst *req,
439 return 0; 1048 struct xdr_stream *xdr,
1049 const struct nfs3_createargs *args)
1050{
1051 encode_diropargs3(xdr, args->fh, args->name, args->len);
1052 encode_createhow3(xdr, args);
440} 1053}
441 1054
442/* 1055/*
443 * Encode MKDIR arguments 1056 * 3.3.9 MKDIR3args
1057 *
1058 * struct MKDIR3args {
1059 * diropargs3 where;
1060 * sattr3 attributes;
1061 * };
444 */ 1062 */
445static int 1063static void nfs3_xdr_enc_mkdir3args(struct rpc_rqst *req,
446nfs3_xdr_mkdirargs(struct rpc_rqst *req, __be32 *p, struct nfs3_mkdirargs *args) 1064 struct xdr_stream *xdr,
1065 const struct nfs3_mkdirargs *args)
447{ 1066{
448 p = xdr_encode_fhandle(p, args->fh); 1067 encode_diropargs3(xdr, args->fh, args->name, args->len);
449 p = xdr_encode_array(p, args->name, args->len); 1068 encode_sattr3(xdr, args->sattr);
450 p = xdr_encode_sattr(p, args->sattr);
451 req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
452 return 0;
453} 1069}
454 1070
455/* 1071/*
456 * Encode SYMLINK arguments 1072 * 3.3.10 SYMLINK3args
1073 *
1074 * struct symlinkdata3 {
1075 * sattr3 symlink_attributes;
1076 * nfspath3 symlink_data;
1077 * };
1078 *
1079 * struct SYMLINK3args {
1080 * diropargs3 where;
1081 * symlinkdata3 symlink;
1082 * };
457 */ 1083 */
458static int 1084static void encode_symlinkdata3(struct xdr_stream *xdr,
459nfs3_xdr_symlinkargs(struct rpc_rqst *req, __be32 *p, struct nfs3_symlinkargs *args) 1085 const struct nfs3_symlinkargs *args)
460{ 1086{
461 p = xdr_encode_fhandle(p, args->fromfh); 1087 encode_sattr3(xdr, args->sattr);
462 p = xdr_encode_array(p, args->fromname, args->fromlen); 1088 encode_nfspath3(xdr, args->pages, args->pathlen);
463 p = xdr_encode_sattr(p, args->sattr); 1089}
464 *p++ = htonl(args->pathlen);
465 req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
466 1090
467 /* Copy the page */ 1091static void nfs3_xdr_enc_symlink3args(struct rpc_rqst *req,
468 xdr_encode_pages(&req->rq_snd_buf, args->pages, 0, args->pathlen); 1092 struct xdr_stream *xdr,
469 return 0; 1093 const struct nfs3_symlinkargs *args)
1094{
1095 encode_diropargs3(xdr, args->fromfh, args->fromname, args->fromlen);
1096 encode_symlinkdata3(xdr, args);
470} 1097}
471 1098
472/* 1099/*
473 * Encode MKNOD arguments 1100 * 3.3.11 MKNOD3args
1101 *
1102 * struct devicedata3 {
1103 * sattr3 dev_attributes;
1104 * specdata3 spec;
1105 * };
1106 *
1107 * union mknoddata3 switch (ftype3 type) {
1108 * case NF3CHR:
1109 * case NF3BLK:
1110 * devicedata3 device;
1111 * case NF3SOCK:
1112 * case NF3FIFO:
1113 * sattr3 pipe_attributes;
1114 * default:
1115 * void;
1116 * };
1117 *
1118 * struct MKNOD3args {
1119 * diropargs3 where;
1120 * mknoddata3 what;
1121 * };
474 */ 1122 */
475static int 1123static void encode_devicedata3(struct xdr_stream *xdr,
476nfs3_xdr_mknodargs(struct rpc_rqst *req, __be32 *p, struct nfs3_mknodargs *args) 1124 const struct nfs3_mknodargs *args)
477{ 1125{
478 p = xdr_encode_fhandle(p, args->fh); 1126 encode_sattr3(xdr, args->sattr);
479 p = xdr_encode_array(p, args->name, args->len); 1127 encode_specdata3(xdr, args->rdev);
480 *p++ = htonl(args->type); 1128}
481 p = xdr_encode_sattr(p, args->sattr); 1129
482 if (args->type == NF3CHR || args->type == NF3BLK) { 1130static void encode_mknoddata3(struct xdr_stream *xdr,
483 *p++ = htonl(MAJOR(args->rdev)); 1131 const struct nfs3_mknodargs *args)
484 *p++ = htonl(MINOR(args->rdev)); 1132{
1133 encode_ftype3(xdr, args->type);
1134 switch (args->type) {
1135 case NF3CHR:
1136 case NF3BLK:
1137 encode_devicedata3(xdr, args);
1138 break;
1139 case NF3SOCK:
1140 case NF3FIFO:
1141 encode_sattr3(xdr, args->sattr);
1142 break;
1143 case NF3REG:
1144 case NF3DIR:
1145 break;
1146 default:
1147 BUG();
485 } 1148 }
1149}
486 1150
487 req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); 1151static void nfs3_xdr_enc_mknod3args(struct rpc_rqst *req,
488 return 0; 1152 struct xdr_stream *xdr,
1153 const struct nfs3_mknodargs *args)
1154{
1155 encode_diropargs3(xdr, args->fh, args->name, args->len);
1156 encode_mknoddata3(xdr, args);
489} 1157}
490 1158
491/* 1159/*
492 * Encode RENAME arguments 1160 * 3.3.12 REMOVE3args
1161 *
1162 * struct REMOVE3args {
1163 * diropargs3 object;
1164 * };
493 */ 1165 */
494static int 1166static void nfs3_xdr_enc_remove3args(struct rpc_rqst *req,
495nfs3_xdr_renameargs(struct rpc_rqst *req, __be32 *p, struct nfs_renameargs *args) 1167 struct xdr_stream *xdr,
496{ 1168 const struct nfs_removeargs *args)
497 p = xdr_encode_fhandle(p, args->old_dir); 1169{
498 p = xdr_encode_array(p, args->old_name->name, args->old_name->len); 1170 encode_diropargs3(xdr, args->fh, args->name.name, args->name.len);
499 p = xdr_encode_fhandle(p, args->new_dir);
500 p = xdr_encode_array(p, args->new_name->name, args->new_name->len);
501 req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
502 return 0;
503} 1171}
504 1172
505/* 1173/*
506 * Encode LINK arguments 1174 * 3.3.14 RENAME3args
1175 *
1176 * struct RENAME3args {
1177 * diropargs3 from;
1178 * diropargs3 to;
1179 * };
507 */ 1180 */
508static int 1181static void nfs3_xdr_enc_rename3args(struct rpc_rqst *req,
509nfs3_xdr_linkargs(struct rpc_rqst *req, __be32 *p, struct nfs3_linkargs *args) 1182 struct xdr_stream *xdr,
1183 const struct nfs_renameargs *args)
510{ 1184{
511 p = xdr_encode_fhandle(p, args->fromfh); 1185 const struct qstr *old = args->old_name;
512 p = xdr_encode_fhandle(p, args->tofh); 1186 const struct qstr *new = args->new_name;
513 p = xdr_encode_array(p, args->toname, args->tolen); 1187
514 req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); 1188 encode_diropargs3(xdr, args->old_dir, old->name, old->len);
515 return 0; 1189 encode_diropargs3(xdr, args->new_dir, new->name, new->len);
516} 1190}
517 1191
518/* 1192/*
519 * Encode arguments to readdir call 1193 * 3.3.15 LINK3args
1194 *
1195 * struct LINK3args {
1196 * nfs_fh3 file;
1197 * diropargs3 link;
1198 * };
520 */ 1199 */
521static int 1200static void nfs3_xdr_enc_link3args(struct rpc_rqst *req,
522nfs3_xdr_readdirargs(struct rpc_rqst *req, __be32 *p, struct nfs3_readdirargs *args) 1201 struct xdr_stream *xdr,
1202 const struct nfs3_linkargs *args)
523{ 1203{
524 struct rpc_auth *auth = req->rq_cred->cr_auth; 1204 encode_nfs_fh3(xdr, args->fromfh);
525 unsigned int replen; 1205 encode_diropargs3(xdr, args->tofh, args->toname, args->tolen);
526 u32 count = args->count;
527
528 p = xdr_encode_fhandle(p, args->fh);
529 p = xdr_encode_hyper(p, args->cookie);
530 *p++ = args->verf[0];
531 *p++ = args->verf[1];
532 if (args->plus) {
533 /* readdirplus: need dircount + buffer size.
534 * We just make sure we make dircount big enough */
535 *p++ = htonl(count >> 3);
536 }
537 *p++ = htonl(count);
538 req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
539
540 /* Inline the page array */
541 replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS3_readdirres_sz) << 2;
542 xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages, 0, count);
543 return 0;
544} 1206}
545 1207
546/* 1208/*
547 * Decode the result of a readdir call. 1209 * 3.3.16 READDIR3args
548 * We just check for syntactical correctness. 1210 *
1211 * struct READDIR3args {
1212 * nfs_fh3 dir;
1213 * cookie3 cookie;
1214 * cookieverf3 cookieverf;
1215 * count3 count;
1216 * };
549 */ 1217 */
550static int 1218static void encode_readdir3args(struct xdr_stream *xdr,
551nfs3_xdr_readdirres(struct rpc_rqst *req, __be32 *p, struct nfs3_readdirres *res) 1219 const struct nfs3_readdirargs *args)
552{ 1220{
553 struct xdr_buf *rcvbuf = &req->rq_rcv_buf; 1221 __be32 *p;
554 struct kvec *iov = rcvbuf->head;
555 struct page **page;
556 size_t hdrlen;
557 u32 recvd, pglen;
558 int status;
559
560 status = ntohl(*p++);
561 /* Decode post_op_attrs */
562 p = xdr_decode_post_op_attr(p, res->dir_attr);
563 if (status)
564 return nfs_stat_to_errno(status);
565 /* Decode verifier cookie */
566 if (res->verf) {
567 res->verf[0] = *p++;
568 res->verf[1] = *p++;
569 } else {
570 p += 2;
571 }
572 1222
573 hdrlen = (u8 *) p - (u8 *) iov->iov_base; 1223 encode_nfs_fh3(xdr, args->fh);
574 if (iov->iov_len < hdrlen) {
575 dprintk("NFS: READDIR reply header overflowed:"
576 "length %Zu > %Zu\n", hdrlen, iov->iov_len);
577 return -errno_NFSERR_IO;
578 } else if (iov->iov_len != hdrlen) {
579 dprintk("NFS: READDIR header is short. iovec will be shifted.\n");
580 xdr_shift_buf(rcvbuf, iov->iov_len - hdrlen);
581 }
582 1224
583 pglen = rcvbuf->page_len; 1225 p = xdr_reserve_space(xdr, 8 + NFS3_COOKIEVERFSIZE + 4);
584 recvd = rcvbuf->len - hdrlen; 1226 p = xdr_encode_cookie3(p, args->cookie);
585 if (pglen > recvd) 1227 p = xdr_encode_cookieverf3(p, args->verf);
586 pglen = recvd; 1228 *p = cpu_to_be32(args->count);
587 page = rcvbuf->pages; 1229}
588 1230
589 return pglen; 1231static void nfs3_xdr_enc_readdir3args(struct rpc_rqst *req,
1232 struct xdr_stream *xdr,
1233 const struct nfs3_readdirargs *args)
1234{
1235 encode_readdir3args(xdr, args);
1236 prepare_reply_buffer(req, args->pages, 0,
1237 args->count, NFS3_readdirres_sz);
590} 1238}
591 1239
592__be32 * 1240/*
593nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, struct nfs_server *server, int plus) 1241 * 3.3.17 READDIRPLUS3args
1242 *
1243 * struct READDIRPLUS3args {
1244 * nfs_fh3 dir;
1245 * cookie3 cookie;
1246 * cookieverf3 cookieverf;
1247 * count3 dircount;
1248 * count3 maxcount;
1249 * };
1250 */
1251static void encode_readdirplus3args(struct xdr_stream *xdr,
1252 const struct nfs3_readdirargs *args)
594{ 1253{
595 __be32 *p; 1254 __be32 *p;
596 struct nfs_entry old = *entry;
597
598 p = xdr_inline_decode(xdr, 4);
599 if (unlikely(!p))
600 goto out_overflow;
601 if (!ntohl(*p++)) {
602 p = xdr_inline_decode(xdr, 4);
603 if (unlikely(!p))
604 goto out_overflow;
605 if (!ntohl(*p++))
606 return ERR_PTR(-EAGAIN);
607 entry->eof = 1;
608 return ERR_PTR(-EBADCOOKIE);
609 }
610 1255
611 p = xdr_inline_decode(xdr, 12); 1256 encode_nfs_fh3(xdr, args->fh);
612 if (unlikely(!p))
613 goto out_overflow;
614 p = xdr_decode_hyper(p, &entry->ino);
615 entry->len = ntohl(*p++);
616 1257
617 p = xdr_inline_decode(xdr, entry->len + 8); 1258 p = xdr_reserve_space(xdr, 8 + NFS3_COOKIEVERFSIZE + 4 + 4);
618 if (unlikely(!p)) 1259 p = xdr_encode_cookie3(p, args->cookie);
619 goto out_overflow; 1260 p = xdr_encode_cookieverf3(p, args->verf);
620 entry->name = (const char *) p;
621 p += XDR_QUADLEN(entry->len);
622 entry->prev_cookie = entry->cookie;
623 p = xdr_decode_hyper(p, &entry->cookie);
624
625 entry->d_type = DT_UNKNOWN;
626 if (plus) {
627 entry->fattr->valid = 0;
628 p = xdr_decode_post_op_attr_stream(xdr, entry->fattr);
629 if (IS_ERR(p))
630 goto out_overflow_exit;
631 entry->d_type = nfs_umode_to_dtype(entry->fattr->mode);
632 /* In fact, a post_op_fh3: */
633 p = xdr_inline_decode(xdr, 4);
634 if (unlikely(!p))
635 goto out_overflow;
636 if (*p++) {
637 p = xdr_decode_fhandle_stream(xdr, entry->fh);
638 if (IS_ERR(p))
639 goto out_overflow_exit;
640 /* Ugh -- server reply was truncated */
641 if (p == NULL) {
642 dprintk("NFS: FH truncated\n");
643 *entry = old;
644 return ERR_PTR(-EAGAIN);
645 }
646 } else
647 memset((u8*)(entry->fh), 0, sizeof(*entry->fh));
648 }
649 1261
650 p = xdr_inline_peek(xdr, 8); 1262 /*
651 if (p != NULL) 1263 * readdirplus: need dircount + buffer size.
652 entry->eof = !p[0] && p[1]; 1264 * We just make sure dircount is big enough
653 else 1265 */
654 entry->eof = 0; 1266 *p++ = cpu_to_be32(args->count >> 3);
655 1267
656 return p; 1268 *p = cpu_to_be32(args->count);
1269}
657 1270
658out_overflow: 1271static void nfs3_xdr_enc_readdirplus3args(struct rpc_rqst *req,
659 print_overflow_msg(__func__, xdr); 1272 struct xdr_stream *xdr,
660out_overflow_exit: 1273 const struct nfs3_readdirargs *args)
661 return ERR_PTR(-EAGAIN); 1274{
1275 encode_readdirplus3args(xdr, args);
1276 prepare_reply_buffer(req, args->pages, 0,
1277 args->count, NFS3_readdirres_sz);
662} 1278}
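The dircount field only bounds the names-and-cookies portion of a READDIRPLUS reply, while maxcount bounds the whole reply, so the encoder simply derives the former from the latter. A sketch of the heuristic with an assumed reply buffer size:

#include <stdint.h>

/* e.g. maxcount == 32768 gives dircount == 4096, "big enough" per
 * the comment in encode_readdirplus3args() */
static uint32_t readdirplus_dircount(uint32_t maxcount)
{
	return maxcount >> 3;
}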
663 1279
664/* 1280/*
665 * Encode COMMIT arguments 1281 * 3.3.21 COMMIT3args
1282 *
1283 * struct COMMIT3args {
1284 * nfs_fh3 file;
1285 * offset3 offset;
1286 * count3 count;
1287 * };
666 */ 1288 */
667static int 1289static void encode_commit3args(struct xdr_stream *xdr,
668nfs3_xdr_commitargs(struct rpc_rqst *req, __be32 *p, struct nfs_writeargs *args) 1290 const struct nfs_writeargs *args)
669{ 1291{
670 p = xdr_encode_fhandle(p, args->fh); 1292 __be32 *p;
1293
1294 encode_nfs_fh3(xdr, args->fh);
1295
1296 p = xdr_reserve_space(xdr, 8 + 4);
671 p = xdr_encode_hyper(p, args->offset); 1297 p = xdr_encode_hyper(p, args->offset);
672 *p++ = htonl(args->count); 1298 *p = cpu_to_be32(args->count);
673 req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
674 return 0;
675} 1299}
676 1300
677#ifdef CONFIG_NFS_V3_ACL 1301static void nfs3_xdr_enc_commit3args(struct rpc_rqst *req,
678/* 1302 struct xdr_stream *xdr,
679 * Encode GETACL arguments 1303 const struct nfs_writeargs *args)
680 */
681static int
682nfs3_xdr_getaclargs(struct rpc_rqst *req, __be32 *p,
683 struct nfs3_getaclargs *args)
684{ 1304{
685 struct rpc_auth *auth = req->rq_cred->cr_auth; 1305 encode_commit3args(xdr, args);
686 unsigned int replen; 1306}
687 1307
688 p = xdr_encode_fhandle(p, args->fh); 1308#ifdef CONFIG_NFS_V3_ACL
689 *p++ = htonl(args->mask);
690 req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
691 1309
692 if (args->mask & (NFS_ACL | NFS_DFACL)) { 1310static void nfs3_xdr_enc_getacl3args(struct rpc_rqst *req,
693 /* Inline the page array */ 1311 struct xdr_stream *xdr,
694 replen = (RPC_REPHDRSIZE + auth->au_rslack + 1312 const struct nfs3_getaclargs *args)
695 ACL3_getaclres_sz) << 2; 1313{
696 xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages, 0, 1314 encode_nfs_fh3(xdr, args->fh);
697 NFSACL_MAXPAGES << PAGE_SHIFT); 1315 encode_uint32(xdr, args->mask);
698 } 1316 if (args->mask & (NFS_ACL | NFS_DFACL))
699 return 0; 1317 prepare_reply_buffer(req, args->pages, 0,
1318 NFSACL_MAXPAGES << PAGE_SHIFT,
1319 ACL3_getaclres_sz);
700} 1320}
701 1321
702/* 1322static void nfs3_xdr_enc_setacl3args(struct rpc_rqst *req,
703 * Encode SETACL arguments 1323 struct xdr_stream *xdr,
704 */ 1324 const struct nfs3_setaclargs *args)
705static int
706nfs3_xdr_setaclargs(struct rpc_rqst *req, __be32 *p,
707 struct nfs3_setaclargs *args)
708{ 1325{
709 struct xdr_buf *buf = &req->rq_snd_buf;
710 unsigned int base; 1326 unsigned int base;
711 int err; 1327 int error;
712
713 p = xdr_encode_fhandle(p, NFS_FH(args->inode));
714 *p++ = htonl(args->mask);
715 req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
716 base = req->rq_slen;
717 1328
1329 encode_nfs_fh3(xdr, NFS_FH(args->inode));
1330 encode_uint32(xdr, args->mask);
718 if (args->npages != 0) 1331 if (args->npages != 0)
719 xdr_encode_pages(buf, args->pages, 0, args->len); 1332 xdr_write_pages(xdr, args->pages, 0, args->len);
720 else
721 req->rq_slen = xdr_adjust_iovec(req->rq_svec,
722 p + XDR_QUADLEN(args->len));
723 1333
724 err = nfsacl_encode(buf, base, args->inode, 1334 base = req->rq_slen;
1335 error = nfsacl_encode(xdr->buf, base, args->inode,
725 (args->mask & NFS_ACL) ? 1336 (args->mask & NFS_ACL) ?
726 args->acl_access : NULL, 1, 0); 1337 args->acl_access : NULL, 1, 0);
727 if (err > 0) 1338 BUG_ON(error < 0);
728 err = nfsacl_encode(buf, base + err, args->inode, 1339 error = nfsacl_encode(xdr->buf, base + error, args->inode,
729 (args->mask & NFS_DFACL) ? 1340 (args->mask & NFS_DFACL) ?
730 args->acl_default : NULL, 1, 1341 args->acl_default : NULL, 1,
731 NFS_ACL_DEFAULT); 1342 NFS_ACL_DEFAULT);
732 return (err > 0) ? 0 : err; 1343 BUG_ON(error < 0);
733} 1344}
1345
734#endif /* CONFIG_NFS_V3_ACL */ 1346#endif /* CONFIG_NFS_V3_ACL */
735 1347
736/* 1348/*
737 * NFS XDR decode functions 1349 * NFSv3 XDR decode functions
1350 *
1351 * NFSv3 result types are defined in section 3.3 of RFC 1813:
1352 * "NFS Version 3 Protocol Specification".
738 */ 1353 */
739 1354
740/* 1355/*
741 * Decode attrstat reply. 1356 * 3.3.1 GETATTR3res
1357 *
1358 * struct GETATTR3resok {
1359 * fattr3 obj_attributes;
1360 * };
1361 *
1362 * union GETATTR3res switch (nfsstat3 status) {
1363 * case NFS3_OK:
1364 * GETATTR3resok resok;
1365 * default:
1366 * void;
1367 * };
742 */ 1368 */
743static int 1369static int nfs3_xdr_dec_getattr3res(struct rpc_rqst *req,
744nfs3_xdr_attrstat(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr) 1370 struct xdr_stream *xdr,
1371 struct nfs_fattr *result)
745{ 1372{
746 int status; 1373 enum nfs_stat status;
747 1374 int error;
748 if ((status = ntohl(*p++))) 1375
749 return nfs_stat_to_errno(status); 1376 error = decode_nfsstat3(xdr, &status);
750 xdr_decode_fattr(p, fattr); 1377 if (unlikely(error))
751 return 0; 1378 goto out;
1379 if (status != NFS3_OK)
1380 goto out_default;
1381 error = decode_fattr3(xdr, result);
1382out:
1383 return error;
1384out_default:
1385 return nfs_stat_to_errno(status);
752} 1386}
753 1387
754/* 1388/*
755 * Decode status+wcc_data reply 1389 * 3.3.2 SETATTR3res
756 * SATTR, REMOVE, RMDIR 1390 *
1391 * struct SETATTR3resok {
1392 * wcc_data obj_wcc;
1393 * };
1394 *
1395 * struct SETATTR3resfail {
1396 * wcc_data obj_wcc;
1397 * };
1398 *
1399 * union SETATTR3res switch (nfsstat3 status) {
1400 * case NFS3_OK:
1401 * SETATTR3resok resok;
1402 * default:
1403 * SETATTR3resfail resfail;
1404 * };
757 */ 1405 */
758static int 1406static int nfs3_xdr_dec_setattr3res(struct rpc_rqst *req,
759nfs3_xdr_wccstat(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr) 1407 struct xdr_stream *xdr,
1408 struct nfs_fattr *result)
760{ 1409{
761 int status; 1410 enum nfs_stat status;
762 1411 int error;
763 if ((status = ntohl(*p++))) 1412
764 status = nfs_stat_to_errno(status); 1413 error = decode_nfsstat3(xdr, &status);
765 xdr_decode_wcc_data(p, fattr); 1414 if (unlikely(error))
766 return status; 1415 goto out;
1416 error = decode_wcc_data(xdr, result);
1417 if (unlikely(error))
1418 goto out;
1419 if (status != NFS3_OK)
1420 goto out_status;
1421out:
1422 return error;
1423out_status:
1424 return nfs_stat_to_errno(status);
767} 1425}
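Note the decode order in nfs3_xdr_dec_setattr3res() above: SETATTR3resfail carries the same wcc_data as the success arm, so decode_wcc_data() runs before the status check and both arms of the union are consumed identically.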
768 1426
769static int 1427/*
770nfs3_xdr_removeres(struct rpc_rqst *req, __be32 *p, struct nfs_removeres *res) 1428 * 3.3.3 LOOKUP3res
1429 *
1430 * struct LOOKUP3resok {
1431 * nfs_fh3 object;
1432 * post_op_attr obj_attributes;
1433 * post_op_attr dir_attributes;
1434 * };
1435 *
1436 * struct LOOKUP3resfail {
1437 * post_op_attr dir_attributes;
1438 * };
1439 *
1440 * union LOOKUP3res switch (nfsstat3 status) {
1441 * case NFS3_OK:
1442 * LOOKUP3resok resok;
1443 * default:
1444 * LOOKUP3resfail resfail;
1445 * };
1446 */
1447static int nfs3_xdr_dec_lookup3res(struct rpc_rqst *req,
1448 struct xdr_stream *xdr,
1449 struct nfs3_diropres *result)
771{ 1450{
772 return nfs3_xdr_wccstat(req, p, res->dir_attr); 1451 enum nfs_stat status;
1452 int error;
1453
1454 error = decode_nfsstat3(xdr, &status);
1455 if (unlikely(error))
1456 goto out;
1457 if (status != NFS3_OK)
1458 goto out_default;
1459 error = decode_nfs_fh3(xdr, result->fh);
1460 if (unlikely(error))
1461 goto out;
1462 error = decode_post_op_attr(xdr, result->fattr);
1463 if (unlikely(error))
1464 goto out;
1465 error = decode_post_op_attr(xdr, result->dir_attr);
1466out:
1467 return error;
1468out_default:
1469 error = decode_post_op_attr(xdr, result->dir_attr);
1470 if (unlikely(error))
1471 goto out;
1472 return nfs_stat_to_errno(status);
773} 1473}
774 1474
775/* 1475/*
776 * Decode LOOKUP reply 1476 * 3.3.4 ACCESS3res
1477 *
1478 * struct ACCESS3resok {
1479 * post_op_attr obj_attributes;
1480 * uint32 access;
1481 * };
1482 *
1483 * struct ACCESS3resfail {
1484 * post_op_attr obj_attributes;
1485 * };
1486 *
1487 * union ACCESS3res switch (nfsstat3 status) {
1488 * case NFS3_OK:
1489 * ACCESS3resok resok;
1490 * default:
1491 * ACCESS3resfail resfail;
1492 * };
777 */ 1493 */
778static int 1494static int nfs3_xdr_dec_access3res(struct rpc_rqst *req,
779nfs3_xdr_lookupres(struct rpc_rqst *req, __be32 *p, struct nfs3_diropres *res) 1495 struct xdr_stream *xdr,
1496 struct nfs3_accessres *result)
780{ 1497{
781 int status; 1498 enum nfs_stat status;
782 1499 int error;
783 if ((status = ntohl(*p++))) { 1500
784 status = nfs_stat_to_errno(status); 1501 error = decode_nfsstat3(xdr, &status);
785 } else { 1502 if (unlikely(error))
786 if (!(p = xdr_decode_fhandle(p, res->fh))) 1503 goto out;
787 return -errno_NFSERR_IO; 1504 error = decode_post_op_attr(xdr, result->fattr);
788 p = xdr_decode_post_op_attr(p, res->fattr); 1505 if (unlikely(error))
789 } 1506 goto out;
790 xdr_decode_post_op_attr(p, res->dir_attr); 1507 if (status != NFS3_OK)
791 return status; 1508 goto out_default;
1509 error = decode_uint32(xdr, &result->access);
1510out:
1511 return error;
1512out_default:
1513 return nfs_stat_to_errno(status);
792} 1514}
793 1515
794/* 1516/*
795 * Decode ACCESS reply 1517 * 3.3.5 READLINK3res
1518 *
1519 * struct READLINK3resok {
1520 * post_op_attr symlink_attributes;
1521 * nfspath3 data;
1522 * };
1523 *
1524 * struct READLINK3resfail {
1525 * post_op_attr symlink_attributes;
1526 * };
1527 *
1528 * union READLINK3res switch (nfsstat3 status) {
1529 * case NFS3_OK:
1530 * READLINK3resok resok;
1531 * default:
1532 * READLINK3resfail resfail;
1533 * };
796 */ 1534 */
797static int 1535static int nfs3_xdr_dec_readlink3res(struct rpc_rqst *req,
798nfs3_xdr_accessres(struct rpc_rqst *req, __be32 *p, struct nfs3_accessres *res) 1536 struct xdr_stream *xdr,
1537 struct nfs_fattr *result)
799{ 1538{
800 int status = ntohl(*p++); 1539 enum nfs_stat status;
801 1540 int error;
802 p = xdr_decode_post_op_attr(p, res->fattr); 1541
803 if (status) 1542 error = decode_nfsstat3(xdr, &status);
804 return nfs_stat_to_errno(status); 1543 if (unlikely(error))
805 res->access = ntohl(*p++); 1544 goto out;
806 return 0; 1545 error = decode_post_op_attr(xdr, result);
1546 if (unlikely(error))
1547 goto out;
1548 if (status != NFS3_OK)
1549 goto out_default;
1550 error = decode_nfspath3(xdr);
1551out:
1552 return error;
1553out_default:
1554 return nfs_stat_to_errno(status);
807} 1555}
808 1556
809static int 1557/*
810nfs3_xdr_readlinkargs(struct rpc_rqst *req, __be32 *p, struct nfs3_readlinkargs *args) 1558 * 3.3.6 READ3res
1559 *
1560 * struct READ3resok {
1561 * post_op_attr file_attributes;
1562 * count3 count;
1563 * bool eof;
1564 * opaque data<>;
1565 * };
1566 *
1567 * struct READ3resfail {
1568 * post_op_attr file_attributes;
1569 * };
1570 *
1571 * union READ3res switch (nfsstat3 status) {
1572 * case NFS3_OK:
1573 * READ3resok resok;
1574 * default:
1575 * READ3resfail resfail;
1576 * };
1577 */
1578static int decode_read3resok(struct xdr_stream *xdr,
1579 struct nfs_readres *result)
811{ 1580{
812 struct rpc_auth *auth = req->rq_cred->cr_auth; 1581 u32 eof, count, ocount, recvd;
813 unsigned int replen; 1582 size_t hdrlen;
1583 __be32 *p;
814 1584
815 p = xdr_encode_fhandle(p, args->fh); 1585 p = xdr_inline_decode(xdr, 4 + 4 + 4);
816 req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); 1586 if (unlikely(p == NULL))
1587 goto out_overflow;
1588 count = be32_to_cpup(p++);
1589 eof = be32_to_cpup(p++);
1590 ocount = be32_to_cpup(p++);
1591 if (unlikely(ocount != count))
1592 goto out_mismatch;
1593 hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
1594 recvd = xdr->buf->len - hdrlen;
1595 if (unlikely(count > recvd))
1596 goto out_cheating;
1597
1598out:
1599 xdr_read_pages(xdr, count);
1600 result->eof = eof;
1601 result->count = count;
1602 return count;
1603out_mismatch:
1604 dprintk("NFS: READ count doesn't match length of opaque: "
1605 "count %u != ocount %u\n", count, ocount);
1606 return -EIO;
1607out_cheating:
1608 dprintk("NFS: server cheating in read result: "
1609 "count %u > recvd %u\n", count, recvd);
1610 count = recvd;
1611 eof = 0;
1612 goto out;
1613out_overflow:
1614 print_overflow_msg(__func__, xdr);
1615 return -EIO;
1616}
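The out_cheating path above is just a clamp on the server-supplied count; a hypothetical standalone version for clarity (name illustrative):

	/* Illustrative only: mirrors the clamp in decode_read3resok(). */
	static inline u32 clamp_read_count(u32 count, u32 recvd, u32 *eof)
	{
		if (count > recvd) {	/* server claimed more than it sent */
			count = recvd;	/* trust only what actually arrived */
			*eof = 0;	/* and force the client to READ again */
		}
		return count;
	}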
817 1617
818 /* Inline the page array */ 1618static int nfs3_xdr_dec_read3res(struct rpc_rqst *req, struct xdr_stream *xdr,
819 replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS3_readlinkres_sz) << 2; 1619 struct nfs_readres *result)
820 xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages, args->pgbase, args->pglen); 1620{
821 return 0; 1621 enum nfs_stat status;
1622 int error;
1623
1624 error = decode_nfsstat3(xdr, &status);
1625 if (unlikely(error))
1626 goto out;
1627 error = decode_post_op_attr(xdr, result->fattr);
1628 if (unlikely(error))
1629 goto out;
1630 if (status != NFS3_OK)
1631 goto out_status;
1632 error = decode_read3resok(xdr, result);
1633out:
1634 return error;
1635out_status:
1636 return nfs_stat_to_errno(status);
822} 1637}
823 1638
824/* 1639/*
825 * Decode READLINK reply 1640 * 3.3.7 WRITE3res
1641 *
1642 * enum stable_how {
1643 * UNSTABLE = 0,
1644 * DATA_SYNC = 1,
1645 * FILE_SYNC = 2
1646 * };
1647 *
1648 * struct WRITE3resok {
1649 * wcc_data file_wcc;
1650 * count3 count;
1651 * stable_how committed;
1652 * writeverf3 verf;
1653 * };
1654 *
1655 * struct WRITE3resfail {
1656 * wcc_data file_wcc;
1657 * };
1658 *
1659 * union WRITE3res switch (nfsstat3 status) {
1660 * case NFS3_OK:
1661 * WRITE3resok resok;
1662 * default:
1663 * WRITE3resfail resfail;
1664 * };
826 */ 1665 */
827static int 1666static int decode_write3resok(struct xdr_stream *xdr,
828nfs3_xdr_readlinkres(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr) 1667 struct nfs_writeres *result)
829{ 1668{
830 struct xdr_buf *rcvbuf = &req->rq_rcv_buf; 1669 __be32 *p;
831 struct kvec *iov = rcvbuf->head;
832 size_t hdrlen;
833 u32 len, recvd;
834 int status;
835
836 status = ntohl(*p++);
837 p = xdr_decode_post_op_attr(p, fattr);
838
839 if (status != 0)
840 return nfs_stat_to_errno(status);
841
842 /* Convert length of symlink */
843 len = ntohl(*p++);
844 if (len >= rcvbuf->page_len) {
845 dprintk("nfs: server returned giant symlink!\n");
846 return -ENAMETOOLONG;
847 }
848 1670
849 hdrlen = (u8 *) p - (u8 *) iov->iov_base; 1671 p = xdr_inline_decode(xdr, 4 + 4 + NFS3_WRITEVERFSIZE);
850 if (iov->iov_len < hdrlen) { 1672 if (unlikely(p == NULL))
851 dprintk("NFS: READLINK reply header overflowed:" 1673 goto out_overflow;
852 "length %Zu > %Zu\n", hdrlen, iov->iov_len); 1674 result->count = be32_to_cpup(p++);
853 return -errno_NFSERR_IO; 1675 result->verf->committed = be32_to_cpup(p++);
854 } else if (iov->iov_len != hdrlen) { 1676 if (unlikely(result->verf->committed > NFS_FILE_SYNC))
855 dprintk("NFS: READLINK header is short. " 1677 goto out_badvalue;
856 "iovec will be shifted.\n"); 1678 memcpy(result->verf->verifier, p, NFS3_WRITEVERFSIZE);
857 xdr_shift_buf(rcvbuf, iov->iov_len - hdrlen); 1679 return result->count;
858 } 1680out_badvalue:
859 recvd = req->rq_rcv_buf.len - hdrlen; 1681 dprintk("NFS: bad stable_how value: %u\n", result->verf->committed);
860 if (recvd < len) { 1682 return -EIO;
861 dprintk("NFS: server cheating in readlink reply: " 1683out_overflow:
862 "count %u > recvd %u\n", len, recvd); 1684 print_overflow_msg(__func__, xdr);
863 return -EIO; 1685 return -EIO;
864 } 1686}
865 1687
866 xdr_terminate_string(rcvbuf, len); 1688static int nfs3_xdr_dec_write3res(struct rpc_rqst *req, struct xdr_stream *xdr,
867 return 0; 1689 struct nfs_writeres *result)
1690{
1691 enum nfs_stat status;
1692 int error;
1693
1694 error = decode_nfsstat3(xdr, &status);
1695 if (unlikely(error))
1696 goto out;
1697 error = decode_wcc_data(xdr, result->fattr);
1698 if (unlikely(error))
1699 goto out;
1700 if (status != NFS3_OK)
1701 goto out_status;
1702 error = decode_write3resok(xdr, result);
1703out:
1704 return error;
1705out_status:
1706 return nfs_stat_to_errno(status);
868} 1707}
869 1708
870/* 1709/*
871 * Decode READ reply 1710 * 3.3.8 CREATE3res
1711 *
1712 * struct CREATE3resok {
1713 * post_op_fh3 obj;
1714 * post_op_attr obj_attributes;
1715 * wcc_data dir_wcc;
1716 * };
1717 *
1718 * struct CREATE3resfail {
1719 * wcc_data dir_wcc;
1720 * };
1721 *
1722 * union CREATE3res switch (nfsstat3 status) {
1723 * case NFS3_OK:
1724 * CREATE3resok resok;
1725 * default:
1726 * CREATE3resfail resfail;
1727 * };
872 */ 1728 */
873static int 1729static int decode_create3resok(struct xdr_stream *xdr,
874nfs3_xdr_readres(struct rpc_rqst *req, __be32 *p, struct nfs_readres *res) 1730 struct nfs3_diropres *result)
875{ 1731{
876 struct kvec *iov = req->rq_rcv_buf.head; 1732 int error;
877 size_t hdrlen; 1733
878 u32 count, ocount, recvd; 1734 error = decode_post_op_fh3(xdr, result->fh);
879 int status; 1735 if (unlikely(error))
1736 goto out;
1737 error = decode_post_op_attr(xdr, result->fattr);
1738 if (unlikely(error))
1739 goto out;
1740 /* The server isn't required to return a file handle.
1741 * If it didn't, force the client to perform a LOOKUP
1742 * to determine the correct file handle and attribute
1743 * values for the new object. */
1744 if (result->fh->size == 0)
1745 result->fattr->valid = 0;
1746 error = decode_wcc_data(xdr, result->dir_attr);
1747out:
1748 return error;
1749}
880 1750
881 status = ntohl(*p++); 1751static int nfs3_xdr_dec_create3res(struct rpc_rqst *req,
882 p = xdr_decode_post_op_attr(p, res->fattr); 1752 struct xdr_stream *xdr,
1753 struct nfs3_diropres *result)
1754{
1755 enum nfs_stat status;
1756 int error;
1757
1758 error = decode_nfsstat3(xdr, &status);
1759 if (unlikely(error))
1760 goto out;
1761 if (status != NFS3_OK)
1762 goto out_default;
1763 error = decode_create3resok(xdr, result);
1764out:
1765 return error;
1766out_default:
1767 error = decode_wcc_data(xdr, result->dir_attr);
1768 if (unlikely(error))
1769 goto out;
1770 return nfs_stat_to_errno(status);
1771}
883 1772
884 if (status != 0) 1773/*
885 return nfs_stat_to_errno(status); 1774 * 3.3.12 REMOVE3res
1775 *
1776 * struct REMOVE3resok {
1777 * wcc_data dir_wcc;
1778 * };
1779 *
1780 * struct REMOVE3resfail {
1781 * wcc_data dir_wcc;
1782 * };
1783 *
1784 * union REMOVE3res switch (nfsstat3 status) {
1785 * case NFS3_OK:
1786 * REMOVE3resok resok;
1787 * default:
1788 * REMOVE3resfail resfail;
1789 * };
1790 */
1791static int nfs3_xdr_dec_remove3res(struct rpc_rqst *req,
1792 struct xdr_stream *xdr,
1793 struct nfs_removeres *result)
1794{
1795 enum nfs_stat status;
1796 int error;
1797
1798 error = decode_nfsstat3(xdr, &status);
1799 if (unlikely(error))
1800 goto out;
1801 error = decode_wcc_data(xdr, result->dir_attr);
1802 if (unlikely(error))
1803 goto out;
1804 if (status != NFS3_OK)
1805 goto out_status;
1806out:
1807 return error;
1808out_status:
1809 return nfs_stat_to_errno(status);
1810}
886 1811
887 /* Decode reply count and EOF flag. NFSv3 is somewhat redundant 1812/*
888 * in that it puts the count both in the res struct and in the 1813 * 3.3.14 RENAME3res
889 * opaque data count. */ 1814 *
890 count = ntohl(*p++); 1815 * struct RENAME3resok {
891 res->eof = ntohl(*p++); 1816 * wcc_data fromdir_wcc;
892 ocount = ntohl(*p++); 1817 * wcc_data todir_wcc;
1818 * };
1819 *
1820 * struct RENAME3resfail {
1821 * wcc_data fromdir_wcc;
1822 * wcc_data todir_wcc;
1823 * };
1824 *
1825 * union RENAME3res switch (nfsstat3 status) {
1826 * case NFS3_OK:
1827 * RENAME3resok resok;
1828 * default:
1829 * RENAME3resfail resfail;
1830 * };
1831 */
1832static int nfs3_xdr_dec_rename3res(struct rpc_rqst *req,
1833 struct xdr_stream *xdr,
1834 struct nfs_renameres *result)
1835{
1836 enum nfs_stat status;
1837 int error;
1838
1839 error = decode_nfsstat3(xdr, &status);
1840 if (unlikely(error))
1841 goto out;
1842 error = decode_wcc_data(xdr, result->old_fattr);
1843 if (unlikely(error))
1844 goto out;
1845 error = decode_wcc_data(xdr, result->new_fattr);
1846 if (unlikely(error))
1847 goto out;
1848 if (status != NFS3_OK)
1849 goto out_status;
1850out:
1851 return error;
1852out_status:
1853 return nfs_stat_to_errno(status);
1854}
893 1855
894 if (ocount != count) { 1856/*
895 dprintk("NFS: READ count doesn't match RPC opaque count.\n"); 1857 * 3.3.15 LINK3res
896 return -errno_NFSERR_IO; 1858 *
897 } 1859 * struct LINK3resok {
1860 * post_op_attr file_attributes;
1861 * wcc_data linkdir_wcc;
1862 * };
1863 *
1864 * struct LINK3resfail {
1865 * post_op_attr file_attributes;
1866 * wcc_data linkdir_wcc;
1867 * };
1868 *
1869 * union LINK3res switch (nfsstat3 status) {
1870 * case NFS3_OK:
1871 * LINK3resok resok;
1872 * default:
1873 * LINK3resfail resfail;
1874 * };
1875 */
1876static int nfs3_xdr_dec_link3res(struct rpc_rqst *req, struct xdr_stream *xdr,
1877 struct nfs3_linkres *result)
1878{
1879 enum nfs_stat status;
1880 int error;
1881
1882 error = decode_nfsstat3(xdr, &status);
1883 if (unlikely(error))
1884 goto out;
1885 error = decode_post_op_attr(xdr, result->fattr);
1886 if (unlikely(error))
1887 goto out;
1888 error = decode_wcc_data(xdr, result->dir_attr);
1889 if (unlikely(error))
1890 goto out;
1891 if (status != NFS3_OK)
1892 goto out_status;
1893out:
1894 return error;
1895out_status:
1896 return nfs_stat_to_errno(status);
1897}
898 1898
899 hdrlen = (u8 *) p - (u8 *) iov->iov_base; 1899/**
900 if (iov->iov_len < hdrlen) { 1900 * nfs3_decode_dirent - Decode a single NFSv3 directory entry stored in
901 dprintk("NFS: READ reply header overflowed:" 1901 * the local page cache
902 "length %Zu > %Zu\n", hdrlen, iov->iov_len); 1902 * @xdr: XDR stream where entry resides
903 return -errno_NFSERR_IO; 1906 * Returns zero on success, or a negative errno value on
904 } else if (iov->iov_len != hdrlen) { 1907 * failure.
905 dprintk("NFS: READ header is short. iovec will be shifted.\n"); 1905 *
906 xdr_shift_buf(&req->rq_rcv_buf, iov->iov_len - hdrlen); 1906 * Returns zero if successful, otherwise a negative errno value is
907 } 1907 * returned.
1908 *
1909 * This function is not invoked during READDIR reply decoding, but
1910 * rather whenever an application invokes the getdents(2) system call
1911 * on a directory already in our cache.
1912 *
1913 * 3.3.16 entry3
1914 *
1915 * struct entry3 {
1916 * fileid3 fileid;
1917 * filename3 name;
1918 * cookie3 cookie;
1919 * fhandle3 filehandle;
1920 * post_op_attr3 attributes;
1921 * entry3 *nextentry;
1922 * };
1923 *
1924 * 3.3.17 entryplus3
1925 * struct entryplus3 {
1926 * fileid3 fileid;
1927 * filename3 name;
1928 * cookie3 cookie;
1929 * post_op_attr name_attributes;
1930 * post_op_fh3 name_handle;
1931 * entryplus3 *nextentry;
1932 * };
1933 */
1934int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
1935 int plus)
1936{
1937 struct nfs_entry old = *entry;
1938 __be32 *p;
1939 int error;
908 1940
909 recvd = req->rq_rcv_buf.len - hdrlen; 1941 p = xdr_inline_decode(xdr, 4);
910 if (count > recvd) { 1942 if (unlikely(p == NULL))
911 dprintk("NFS: server cheating in read reply: " 1943 goto out_overflow;
912 "count %u > recvd %u\n", count, recvd); 1944 if (*p == xdr_zero) {
913 count = recvd; 1945 p = xdr_inline_decode(xdr, 4);
914 res->eof = 0; 1946 if (unlikely(p == NULL))
1947 goto out_overflow;
1948 if (*p == xdr_zero)
1949 return -EAGAIN;
1950 entry->eof = 1;
1951 return -EBADCOOKIE;
915 } 1952 }
916 1953
917 if (count < res->count) 1954 error = decode_fileid3(xdr, &entry->ino);
918 res->count = count; 1955 if (unlikely(error))
1956 return error;
919 1957
920 return count; 1958 error = decode_inline_filename3(xdr, &entry->name, &entry->len);
921} 1959 if (unlikely(error))
1960 return error;
922 1961
923/* 1962 entry->prev_cookie = entry->cookie;
924 * Decode WRITE response 1963 error = decode_cookie3(xdr, &entry->cookie);
925 */ 1964 if (unlikely(error))
926static int 1965 return error;
927nfs3_xdr_writeres(struct rpc_rqst *req, __be32 *p, struct nfs_writeres *res)
928{
929 int status;
930 1966
931 status = ntohl(*p++); 1967 entry->d_type = DT_UNKNOWN;
932 p = xdr_decode_wcc_data(p, res->fattr);
933 1968
934 if (status != 0) 1969 if (plus) {
935 return nfs_stat_to_errno(status); 1970 entry->fattr->valid = 0;
1971 error = decode_post_op_attr(xdr, entry->fattr);
1972 if (unlikely(error))
1973 return error;
1974 if (entry->fattr->valid & NFS_ATTR_FATTR_V3)
1975 entry->d_type = nfs_umode_to_dtype(entry->fattr->mode);
936 1976
937 res->count = ntohl(*p++); 1977 /* In fact, a post_op_fh3: */
938 res->verf->committed = (enum nfs3_stable_how)ntohl(*p++); 1978 p = xdr_inline_decode(xdr, 4);
939 res->verf->verifier[0] = *p++; 1979 if (unlikely(p == NULL))
940 res->verf->verifier[1] = *p++; 1980 goto out_overflow;
1981 if (*p != xdr_zero) {
1982 error = decode_nfs_fh3(xdr, entry->fh);
1983 if (unlikely(error)) {
1984 if (error == -E2BIG)
1985 goto out_truncated;
1986 return error;
1987 }
1988 } else
1989 zero_nfs_fh3(entry->fh);
1990 }
941 1991
942 return res->count; 1992 return 0;
943}
944 1993
945/* 1994out_overflow:
946 * Decode a CREATE response 1995 print_overflow_msg(__func__, xdr);
947 */ 1996 return -EAGAIN;
948static int 1997out_truncated:
949nfs3_xdr_createres(struct rpc_rqst *req, __be32 *p, struct nfs3_diropres *res) 1998 dprintk("NFS: directory entry contains invalid file handle\n");
950{ 1999 *entry = old;
951 int status; 2000 return -EAGAIN;
952
953 status = ntohl(*p++);
954 if (status == 0) {
955 if (*p++) {
956 if (!(p = xdr_decode_fhandle(p, res->fh)))
957 return -errno_NFSERR_IO;
958 p = xdr_decode_post_op_attr(p, res->fattr);
959 } else {
960 memset(res->fh, 0, sizeof(*res->fh));
961 /* Do decode post_op_attr but set it to NULL */
962 p = xdr_decode_post_op_attr(p, res->fattr);
963 res->fattr->valid = 0;
964 }
965 } else {
966 status = nfs_stat_to_errno(status);
967 }
968 p = xdr_decode_wcc_data(p, res->dir_attr);
969 return status;
970} 2001}
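As a reading aid, the wire shape nfs3_decode_dirent() walks, sketched from the code above (all fields 4-byte-aligned XDR; bracketed fields are readdirplus-only):

	/*
	 * bool(1) | fileid | name | cookie | [attrs] | [post_op_fh3]  <- entry
	 * bool(1) | ...                                               <- next entry
	 * bool(0) | eof-bool                                          <- list end
	 */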
971 2002
972/* 2003/*
973 * Decode RENAME reply 2004 * 3.3.16 READDIR3res
2005 *
2006 * struct dirlist3 {
2007 * entry3 *entries;
2008 * bool eof;
2009 * };
2010 *
2011 * struct READDIR3resok {
2012 * post_op_attr dir_attributes;
2013 * cookieverf3 cookieverf;
2014 * dirlist3 reply;
2015 * };
2016 *
2017 * struct READDIR3resfail {
2018 * post_op_attr dir_attributes;
2019 * };
2020 *
2021 * union READDIR3res switch (nfsstat3 status) {
2022 * case NFS3_OK:
2023 * READDIR3resok resok;
2024 * default:
2025 * READDIR3resfail resfail;
2026 * };
2027 *
2028 * Read the directory contents into the page cache, but otherwise
 2029 * don't touch them. The actual decoding is done by nfs3_decode_dirent()
2030 * during subsequent nfs_readdir() calls.
974 */ 2031 */
975static int 2032static int decode_dirlist3(struct xdr_stream *xdr)
976nfs3_xdr_renameres(struct rpc_rqst *req, __be32 *p, struct nfs_renameres *res)
977{ 2033{
978 int status; 2034 u32 recvd, pglen;
2035 size_t hdrlen;
979 2036
980 if ((status = ntohl(*p++)) != 0) 2037 pglen = xdr->buf->page_len;
981 status = nfs_stat_to_errno(status); 2038 hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
982 p = xdr_decode_wcc_data(p, res->old_fattr); 2039 recvd = xdr->buf->len - hdrlen;
983 p = xdr_decode_wcc_data(p, res->new_fattr); 2040 if (unlikely(pglen > recvd))
984 return status; 2041 goto out_cheating;
2042out:
2043 xdr_read_pages(xdr, pglen);
2044 return pglen;
2045out_cheating:
2046 dprintk("NFS: server cheating in readdir result: "
2047 "pglen %u > recvd %u\n", pglen, recvd);
2048 pglen = recvd;
2049 goto out;
985} 2050}
986 2051
987/* 2052static int decode_readdir3resok(struct xdr_stream *xdr,
988 * Decode LINK reply 2053 struct nfs3_readdirres *result)
989 */
990static int
991nfs3_xdr_linkres(struct rpc_rqst *req, __be32 *p, struct nfs3_linkres *res)
992{ 2054{
993 int status; 2055 int error;
2056
2057 error = decode_post_op_attr(xdr, result->dir_attr);
2058 if (unlikely(error))
2059 goto out;
2060 /* XXX: do we need to check if result->verf != NULL ? */
2061 error = decode_cookieverf3(xdr, result->verf);
2062 if (unlikely(error))
2063 goto out;
2064 error = decode_dirlist3(xdr);
2065out:
2066 return error;
2067}
994 2068
995 if ((status = ntohl(*p++)) != 0) 2069static int nfs3_xdr_dec_readdir3res(struct rpc_rqst *req,
996 status = nfs_stat_to_errno(status); 2070 struct xdr_stream *xdr,
997 p = xdr_decode_post_op_attr(p, res->fattr); 2071 struct nfs3_readdirres *result)
998 p = xdr_decode_wcc_data(p, res->dir_attr); 2072{
999 return status; 2073 enum nfs_stat status;
2074 int error;
2075
2076 error = decode_nfsstat3(xdr, &status);
2077 if (unlikely(error))
2078 goto out;
2079 if (status != NFS3_OK)
2080 goto out_default;
2081 error = decode_readdir3resok(xdr, result);
2082out:
2083 return error;
2084out_default:
2085 error = decode_post_op_attr(xdr, result->dir_attr);
2086 if (unlikely(error))
2087 goto out;
2088 return nfs_stat_to_errno(status);
1000} 2089}
1001 2090
1002/* 2091/*
1003 * Decode FSSTAT reply 2092 * 3.3.18 FSSTAT3res
2093 *
2094 * struct FSSTAT3resok {
2095 * post_op_attr obj_attributes;
2096 * size3 tbytes;
2097 * size3 fbytes;
2098 * size3 abytes;
2099 * size3 tfiles;
2100 * size3 ffiles;
2101 * size3 afiles;
2102 * uint32 invarsec;
2103 * };
2104 *
2105 * struct FSSTAT3resfail {
2106 * post_op_attr obj_attributes;
2107 * };
2108 *
2109 * union FSSTAT3res switch (nfsstat3 status) {
2110 * case NFS3_OK:
2111 * FSSTAT3resok resok;
2112 * default:
2113 * FSSTAT3resfail resfail;
2114 * };
1004 */ 2115 */
1005static int 2116static int decode_fsstat3resok(struct xdr_stream *xdr,
1006nfs3_xdr_fsstatres(struct rpc_rqst *req, __be32 *p, struct nfs_fsstat *res) 2117 struct nfs_fsstat *result)
1007{ 2118{
1008 int status; 2119 __be32 *p;
1009
1010 status = ntohl(*p++);
1011
1012 p = xdr_decode_post_op_attr(p, res->fattr);
1013 if (status != 0)
1014 return nfs_stat_to_errno(status);
1015
1016 p = xdr_decode_hyper(p, &res->tbytes);
1017 p = xdr_decode_hyper(p, &res->fbytes);
1018 p = xdr_decode_hyper(p, &res->abytes);
1019 p = xdr_decode_hyper(p, &res->tfiles);
1020 p = xdr_decode_hyper(p, &res->ffiles);
1021 p = xdr_decode_hyper(p, &res->afiles);
1022 2120
2121 p = xdr_inline_decode(xdr, 8 * 6 + 4);
2122 if (unlikely(p == NULL))
2123 goto out_overflow;
2124 p = xdr_decode_size3(p, &result->tbytes);
2125 p = xdr_decode_size3(p, &result->fbytes);
2126 p = xdr_decode_size3(p, &result->abytes);
2127 p = xdr_decode_size3(p, &result->tfiles);
2128 p = xdr_decode_size3(p, &result->ffiles);
2129 xdr_decode_size3(p, &result->afiles);
1023 /* ignore invarsec */ 2130 /* ignore invarsec */
1024 return 0; 2131 return 0;
2132out_overflow:
2133 print_overflow_msg(__func__, xdr);
2134 return -EIO;
2135}
2136
2137static int nfs3_xdr_dec_fsstat3res(struct rpc_rqst *req,
2138 struct xdr_stream *xdr,
2139 struct nfs_fsstat *result)
2140{
2141 enum nfs_stat status;
2142 int error;
2143
2144 error = decode_nfsstat3(xdr, &status);
2145 if (unlikely(error))
2146 goto out;
2147 error = decode_post_op_attr(xdr, result->fattr);
2148 if (unlikely(error))
2149 goto out;
2150 if (status != NFS3_OK)
2151 goto out_status;
2152 error = decode_fsstat3resok(xdr, result);
2153out:
2154 return error;
2155out_status:
2156 return nfs_stat_to_errno(status);
1025} 2157}
1026 2158
1027/* 2159/*
1028 * Decode FSINFO reply 2160 * 3.3.19 FSINFO3res
2161 *
2162 * struct FSINFO3resok {
2163 * post_op_attr obj_attributes;
2164 * uint32 rtmax;
2165 * uint32 rtpref;
2166 * uint32 rtmult;
2167 * uint32 wtmax;
2168 * uint32 wtpref;
2169 * uint32 wtmult;
2170 * uint32 dtpref;
2171 * size3 maxfilesize;
2172 * nfstime3 time_delta;
2173 * uint32 properties;
2174 * };
2175 *
2176 * struct FSINFO3resfail {
2177 * post_op_attr obj_attributes;
2178 * };
2179 *
2180 * union FSINFO3res switch (nfsstat3 status) {
2181 * case NFS3_OK:
2182 * FSINFO3resok resok;
2183 * default:
2184 * FSINFO3resfail resfail;
2185 * };
1029 */ 2186 */
1030static int 2187static int decode_fsinfo3resok(struct xdr_stream *xdr,
1031nfs3_xdr_fsinfores(struct rpc_rqst *req, __be32 *p, struct nfs_fsinfo *res) 2188 struct nfs_fsinfo *result)
1032{ 2189{
1033 int status; 2190 __be32 *p;
1034
1035 status = ntohl(*p++);
1036
1037 p = xdr_decode_post_op_attr(p, res->fattr);
1038 if (status != 0)
1039 return nfs_stat_to_errno(status);
1040 2191
1041 res->rtmax = ntohl(*p++); 2192 p = xdr_inline_decode(xdr, 4 * 7 + 8 + 8 + 4);
1042 res->rtpref = ntohl(*p++); 2193 if (unlikely(p == NULL))
1043 res->rtmult = ntohl(*p++); 2194 goto out_overflow;
1044 res->wtmax = ntohl(*p++); 2195 result->rtmax = be32_to_cpup(p++);
1045 res->wtpref = ntohl(*p++); 2196 result->rtpref = be32_to_cpup(p++);
1046 res->wtmult = ntohl(*p++); 2197 result->rtmult = be32_to_cpup(p++);
1047 res->dtpref = ntohl(*p++); 2198 result->wtmax = be32_to_cpup(p++);
1048 p = xdr_decode_hyper(p, &res->maxfilesize); 2199 result->wtpref = be32_to_cpup(p++);
1049 p = xdr_decode_time3(p, &res->time_delta); 2200 result->wtmult = be32_to_cpup(p++);
2201 result->dtpref = be32_to_cpup(p++);
2202 p = xdr_decode_size3(p, &result->maxfilesize);
2203 xdr_decode_nfstime3(p, &result->time_delta);
1050 2204
1051 /* ignore properties */ 2205 /* ignore properties */
1052 res->lease_time = 0; 2206 result->lease_time = 0;
1053 return 0; 2207 return 0;
2208out_overflow:
2209 print_overflow_msg(__func__, xdr);
2210 return -EIO;
2211}
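The inline-decode length in decode_fsinfo3resok() above breaks down as:

	/*
	 * 4 * 7 - rtmax, rtpref, rtmult, wtmax, wtpref, wtmult, dtpref
	 * 8     - maxfilesize (size3)
	 * 8     - time_delta (nfstime3: seconds + nseconds)
	 * 4     - properties (reserved in the decode but ignored)
	 */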
2212
2213static int nfs3_xdr_dec_fsinfo3res(struct rpc_rqst *req,
2214 struct xdr_stream *xdr,
2215 struct nfs_fsinfo *result)
2216{
2217 enum nfs_stat status;
2218 int error;
2219
2220 error = decode_nfsstat3(xdr, &status);
2221 if (unlikely(error))
2222 goto out;
2223 error = decode_post_op_attr(xdr, result->fattr);
2224 if (unlikely(error))
2225 goto out;
2226 if (status != NFS3_OK)
2227 goto out_status;
2228 error = decode_fsinfo3resok(xdr, result);
2229out:
2230 return error;
2231out_status:
2232 return nfs_stat_to_errno(status);
1054} 2233}
1055 2234
1056/* 2235/*
1057 * Decode PATHCONF reply 2236 * 3.3.20 PATHCONF3res
2237 *
2238 * struct PATHCONF3resok {
2239 * post_op_attr obj_attributes;
2240 * uint32 linkmax;
2241 * uint32 name_max;
2242 * bool no_trunc;
2243 * bool chown_restricted;
2244 * bool case_insensitive;
2245 * bool case_preserving;
2246 * };
2247 *
2248 * struct PATHCONF3resfail {
2249 * post_op_attr obj_attributes;
2250 * };
2251 *
2252 * union PATHCONF3res switch (nfsstat3 status) {
2253 * case NFS3_OK:
2254 * PATHCONF3resok resok;
2255 * default:
2256 * PATHCONF3resfail resfail;
2257 * };
1058 */ 2258 */
1059static int 2259static int decode_pathconf3resok(struct xdr_stream *xdr,
1060nfs3_xdr_pathconfres(struct rpc_rqst *req, __be32 *p, struct nfs_pathconf *res) 2260 struct nfs_pathconf *result)
1061{ 2261{
1062 int status; 2262 __be32 *p;
1063
1064 status = ntohl(*p++);
1065
1066 p = xdr_decode_post_op_attr(p, res->fattr);
1067 if (status != 0)
1068 return nfs_stat_to_errno(status);
1069 res->max_link = ntohl(*p++);
1070 res->max_namelen = ntohl(*p++);
1071 2263
2264 p = xdr_inline_decode(xdr, 4 * 6);
2265 if (unlikely(p == NULL))
2266 goto out_overflow;
2267 result->max_link = be32_to_cpup(p++);
2268 result->max_namelen = be32_to_cpup(p);
1072 /* ignore remaining fields */ 2269 /* ignore remaining fields */
1073 return 0; 2270 return 0;
2271out_overflow:
2272 print_overflow_msg(__func__, xdr);
2273 return -EIO;
2274}
2275
2276static int nfs3_xdr_dec_pathconf3res(struct rpc_rqst *req,
2277 struct xdr_stream *xdr,
2278 struct nfs_pathconf *result)
2279{
2280 enum nfs_stat status;
2281 int error;
2282
2283 error = decode_nfsstat3(xdr, &status);
2284 if (unlikely(error))
2285 goto out;
2286 error = decode_post_op_attr(xdr, result->fattr);
2287 if (unlikely(error))
2288 goto out;
2289 if (status != NFS3_OK)
2290 goto out_status;
2291 error = decode_pathconf3resok(xdr, result);
2292out:
2293 return error;
2294out_status:
2295 return nfs_stat_to_errno(status);
1074} 2296}
1075 2297
1076/* 2298/*
1077 * Decode COMMIT reply 2299 * 3.3.21 COMMIT3res
2300 *
2301 * struct COMMIT3resok {
2302 * wcc_data file_wcc;
2303 * writeverf3 verf;
2304 * };
2305 *
2306 * struct COMMIT3resfail {
2307 * wcc_data file_wcc;
2308 * };
2309 *
2310 * union COMMIT3res switch (nfsstat3 status) {
2311 * case NFS3_OK:
2312 * COMMIT3resok resok;
2313 * default:
2314 * COMMIT3resfail resfail;
2315 * };
1078 */ 2316 */
1079static int 2317static int nfs3_xdr_dec_commit3res(struct rpc_rqst *req,
1080nfs3_xdr_commitres(struct rpc_rqst *req, __be32 *p, struct nfs_writeres *res) 2318 struct xdr_stream *xdr,
2319 struct nfs_writeres *result)
1081{ 2320{
1082 int status; 2321 enum nfs_stat status;
1083 2322 int error;
1084 status = ntohl(*p++); 2323
1085 p = xdr_decode_wcc_data(p, res->fattr); 2324 error = decode_nfsstat3(xdr, &status);
1086 if (status != 0) 2325 if (unlikely(error))
1087 return nfs_stat_to_errno(status); 2326 goto out;
1088 2327 error = decode_wcc_data(xdr, result->fattr);
1089 res->verf->verifier[0] = *p++; 2328 if (unlikely(error))
1090 res->verf->verifier[1] = *p++; 2329 goto out;
1091 return 0; 2330 if (status != NFS3_OK)
2331 goto out_status;
2332 error = decode_writeverf3(xdr, result->verf->verifier);
2333out:
2334 return error;
2335out_status:
2336 return nfs_stat_to_errno(status);
1092} 2337}
1093 2338
1094#ifdef CONFIG_NFS_V3_ACL 2339#ifdef CONFIG_NFS_V3_ACL
1095/* 2340
1096 * Decode GETACL reply 2341static inline int decode_getacl3resok(struct xdr_stream *xdr,
1097 */ 2342 struct nfs3_getaclres *result)
1098static int
1099nfs3_xdr_getaclres(struct rpc_rqst *req, __be32 *p,
1100 struct nfs3_getaclres *res)
1101{ 2343{
1102 struct xdr_buf *buf = &req->rq_rcv_buf;
1103 int status = ntohl(*p++);
1104 struct posix_acl **acl; 2344 struct posix_acl **acl;
1105 unsigned int *aclcnt; 2345 unsigned int *aclcnt;
1106 int err, base; 2346 size_t hdrlen;
1107 2347 int error;
1108 if (status != 0) 2348
1109 return nfs_stat_to_errno(status); 2349 error = decode_post_op_attr(xdr, result->fattr);
1110 p = xdr_decode_post_op_attr(p, res->fattr); 2350 if (unlikely(error))
1111 res->mask = ntohl(*p++); 2351 goto out;
1112 if (res->mask & ~(NFS_ACL|NFS_ACLCNT|NFS_DFACL|NFS_DFACLCNT)) 2352 error = decode_uint32(xdr, &result->mask);
1113 return -EINVAL; 2353 if (unlikely(error))
1114 base = (char *)p - (char *)req->rq_rcv_buf.head->iov_base; 2354 goto out;
1115 2355 error = -EINVAL;
1116 acl = (res->mask & NFS_ACL) ? &res->acl_access : NULL; 2356 if (result->mask & ~(NFS_ACL|NFS_ACLCNT|NFS_DFACL|NFS_DFACLCNT))
1117 aclcnt = (res->mask & NFS_ACLCNT) ? &res->acl_access_count : NULL; 2357 goto out;
1118 err = nfsacl_decode(buf, base, aclcnt, acl); 2358
1119 2359 hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
1120 acl = (res->mask & NFS_DFACL) ? &res->acl_default : NULL; 2360
1121 aclcnt = (res->mask & NFS_DFACLCNT) ? &res->acl_default_count : NULL; 2361 acl = NULL;
1122 if (err > 0) 2362 if (result->mask & NFS_ACL)
1123 err = nfsacl_decode(buf, base + err, aclcnt, acl); 2363 acl = &result->acl_access;
1124 return (err > 0) ? 0 : err; 2364 aclcnt = NULL;
2365 if (result->mask & NFS_ACLCNT)
2366 aclcnt = &result->acl_access_count;
2367 error = nfsacl_decode(xdr->buf, hdrlen, aclcnt, acl);
2368 if (unlikely(error <= 0))
2369 goto out;
2370
2371 acl = NULL;
2372 if (result->mask & NFS_DFACL)
2373 acl = &result->acl_default;
2374 aclcnt = NULL;
2375 if (result->mask & NFS_DFACLCNT)
2376 aclcnt = &result->acl_default_count;
2377 error = nfsacl_decode(xdr->buf, hdrlen + error, aclcnt, acl);
2378 if (unlikely(error <= 0))
2379 return error;
2380 error = 0;
2381out:
2382 return error;
1125} 2383}
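Both nfsacl_decode() calls in decode_getacl3resok() above depend on its positive return value being the number of XDR bytes consumed: the default ACL is decoded at hdrlen plus the access ACL's length, mirroring how nfs3_xdr_enc_setacl3args() encodes the second ACL at base + error.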
1126 2384
1127/* 2385static int nfs3_xdr_dec_getacl3res(struct rpc_rqst *req,
1128 * Decode setacl reply. 2386 struct xdr_stream *xdr,
1129 */ 2387 struct nfs3_getaclres *result)
1130static int
1131nfs3_xdr_setaclres(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr)
1132{ 2388{
1133 int status = ntohl(*p++); 2389 enum nfs_stat status;
2390 int error;
2391
2392 error = decode_nfsstat3(xdr, &status);
2393 if (unlikely(error))
2394 goto out;
2395 if (status != NFS3_OK)
2396 goto out_default;
2397 error = decode_getacl3resok(xdr, result);
2398out:
2399 return error;
2400out_default:
2401 return nfs_stat_to_errno(status);
2402}
1134 2403
1135 if (status) 2404static int nfs3_xdr_dec_setacl3res(struct rpc_rqst *req,
1136 return nfs_stat_to_errno(status); 2405 struct xdr_stream *xdr,
1137 xdr_decode_post_op_attr(p, fattr); 2406 struct nfs_fattr *result)
1138 return 0; 2407{
2408 enum nfs_stat status;
2409 int error;
2410
2411 error = decode_nfsstat3(xdr, &status);
2412 if (unlikely(error))
2413 goto out;
2414 if (status != NFS3_OK)
2415 goto out_default;
2416 error = decode_post_op_attr(xdr, result);
2417out:
2418 return error;
2419out_default:
2420 return nfs_stat_to_errno(status);
1139} 2421}
2422
1140#endif /* CONFIG_NFS_V3_ACL */ 2423#endif /* CONFIG_NFS_V3_ACL */
1141 2424
1142#define PROC(proc, argtype, restype, timer) \ 2425#define PROC(proc, argtype, restype, timer) \
1143[NFS3PROC_##proc] = { \ 2426[NFS3PROC_##proc] = { \
1144 .p_proc = NFS3PROC_##proc, \ 2427 .p_proc = NFS3PROC_##proc, \
1145 .p_encode = (kxdrproc_t) nfs3_xdr_##argtype, \ 2428 .p_encode = (kxdreproc_t)nfs3_xdr_enc_##argtype##3args, \
1146 .p_decode = (kxdrproc_t) nfs3_xdr_##restype, \ 2429 .p_decode = (kxdrdproc_t)nfs3_xdr_dec_##restype##3res, \
1147 .p_arglen = NFS3_##argtype##_sz, \ 2430 .p_arglen = NFS3_##argtype##args_sz, \
1148 .p_replen = NFS3_##restype##_sz, \ 2431 .p_replen = NFS3_##restype##res_sz, \
1149 .p_timer = timer, \ 2432 .p_timer = timer, \
1150 .p_statidx = NFS3PROC_##proc, \ 2433 .p_statidx = NFS3PROC_##proc, \
1151 .p_name = #proc, \ 2434 .p_name = #proc, \
1152 } 2435 }
1153 2436
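To make the token pasting concrete, PROC(GETATTR, getattr, getattr, 1) expands to:

	[NFS3PROC_GETATTR] = {
		.p_proc    = NFS3PROC_GETATTR,
		.p_encode  = (kxdreproc_t)nfs3_xdr_enc_getattr3args,
		.p_decode  = (kxdrdproc_t)nfs3_xdr_dec_getattr3res,
		.p_arglen  = NFS3_getattrargs_sz,
		.p_replen  = NFS3_getattrres_sz,
		.p_timer   = 1,
		.p_statidx = NFS3PROC_GETATTR,
		.p_name    = "GETATTR",
	},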
1154struct rpc_procinfo nfs3_procedures[] = { 2437struct rpc_procinfo nfs3_procedures[] = {
1155 PROC(GETATTR, fhandle, attrstat, 1), 2438 PROC(GETATTR, getattr, getattr, 1),
1156 PROC(SETATTR, sattrargs, wccstat, 0), 2439 PROC(SETATTR, setattr, setattr, 0),
1157 PROC(LOOKUP, diropargs, lookupres, 2), 2440 PROC(LOOKUP, lookup, lookup, 2),
1158 PROC(ACCESS, accessargs, accessres, 1), 2441 PROC(ACCESS, access, access, 1),
1159 PROC(READLINK, readlinkargs, readlinkres, 3), 2442 PROC(READLINK, readlink, readlink, 3),
1160 PROC(READ, readargs, readres, 3), 2443 PROC(READ, read, read, 3),
1161 PROC(WRITE, writeargs, writeres, 4), 2444 PROC(WRITE, write, write, 4),
1162 PROC(CREATE, createargs, createres, 0), 2445 PROC(CREATE, create, create, 0),
1163 PROC(MKDIR, mkdirargs, createres, 0), 2446 PROC(MKDIR, mkdir, create, 0),
1164 PROC(SYMLINK, symlinkargs, createres, 0), 2447 PROC(SYMLINK, symlink, create, 0),
1165 PROC(MKNOD, mknodargs, createres, 0), 2448 PROC(MKNOD, mknod, create, 0),
1166 PROC(REMOVE, removeargs, removeres, 0), 2449 PROC(REMOVE, remove, remove, 0),
1167 PROC(RMDIR, diropargs, wccstat, 0), 2450 PROC(RMDIR, lookup, setattr, 0),
1168 PROC(RENAME, renameargs, renameres, 0), 2451 PROC(RENAME, rename, rename, 0),
1169 PROC(LINK, linkargs, linkres, 0), 2452 PROC(LINK, link, link, 0),
1170 PROC(READDIR, readdirargs, readdirres, 3), 2453 PROC(READDIR, readdir, readdir, 3),
1171 PROC(READDIRPLUS, readdirargs, readdirres, 3), 2454 PROC(READDIRPLUS, readdirplus, readdir, 3),
1172 PROC(FSSTAT, fhandle, fsstatres, 0), 2455 PROC(FSSTAT, getattr, fsstat, 0),
1173 PROC(FSINFO, fhandle, fsinfores, 0), 2456 PROC(FSINFO, getattr, fsinfo, 0),
1174 PROC(PATHCONF, fhandle, pathconfres, 0), 2457 PROC(PATHCONF, getattr, pathconf, 0),
1175 PROC(COMMIT, commitargs, commitres, 5), 2458 PROC(COMMIT, commit, commit, 5),
1176}; 2459};
1177 2460
1178struct rpc_version nfs_version3 = { 2461struct rpc_version nfs_version3 = {
@@ -1185,8 +2468,8 @@ struct rpc_version nfs_version3 = {
1185static struct rpc_procinfo nfs3_acl_procedures[] = { 2468static struct rpc_procinfo nfs3_acl_procedures[] = {
1186 [ACLPROC3_GETACL] = { 2469 [ACLPROC3_GETACL] = {
1187 .p_proc = ACLPROC3_GETACL, 2470 .p_proc = ACLPROC3_GETACL,
1188 .p_encode = (kxdrproc_t) nfs3_xdr_getaclargs, 2471 .p_encode = (kxdreproc_t)nfs3_xdr_enc_getacl3args,
1189 .p_decode = (kxdrproc_t) nfs3_xdr_getaclres, 2472 .p_decode = (kxdrdproc_t)nfs3_xdr_dec_getacl3res,
1190 .p_arglen = ACL3_getaclargs_sz, 2473 .p_arglen = ACL3_getaclargs_sz,
1191 .p_replen = ACL3_getaclres_sz, 2474 .p_replen = ACL3_getaclres_sz,
1192 .p_timer = 1, 2475 .p_timer = 1,
@@ -1194,8 +2477,8 @@ static struct rpc_procinfo nfs3_acl_procedures[] = {
1194 }, 2477 },
1195 [ACLPROC3_SETACL] = { 2478 [ACLPROC3_SETACL] = {
1196 .p_proc = ACLPROC3_SETACL, 2479 .p_proc = ACLPROC3_SETACL,
1197 .p_encode = (kxdrproc_t) nfs3_xdr_setaclargs, 2480 .p_encode = (kxdreproc_t)nfs3_xdr_enc_setacl3args,
1198 .p_decode = (kxdrproc_t) nfs3_xdr_setaclres, 2481 .p_decode = (kxdrdproc_t)nfs3_xdr_dec_setacl3res,
1199 .p_arglen = ACL3_setaclargs_sz, 2482 .p_arglen = ACL3_setaclargs_sz,
1200 .p_replen = ACL3_setaclres_sz, 2483 .p_replen = ACL3_setaclres_sz,
1201 .p_timer = 0, 2484 .p_timer = 0,
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 9fa496387fdf..7a7474073148 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -44,6 +44,7 @@ enum nfs4_client_state {
44 NFS4CLNT_RECLAIM_REBOOT, 44 NFS4CLNT_RECLAIM_REBOOT,
45 NFS4CLNT_RECLAIM_NOGRACE, 45 NFS4CLNT_RECLAIM_NOGRACE,
46 NFS4CLNT_DELEGRETURN, 46 NFS4CLNT_DELEGRETURN,
47 NFS4CLNT_LAYOUTRECALL,
47 NFS4CLNT_SESSION_RESET, 48 NFS4CLNT_SESSION_RESET,
48 NFS4CLNT_RECALL_SLOT, 49 NFS4CLNT_RECALL_SLOT,
49}; 50};
@@ -109,7 +110,7 @@ struct nfs_unique_id {
109struct nfs4_state_owner { 110struct nfs4_state_owner {
110 struct nfs_unique_id so_owner_id; 111 struct nfs_unique_id so_owner_id;
111 struct nfs_server *so_server; 112 struct nfs_server *so_server;
112 struct rb_node so_client_node; 113 struct rb_node so_server_node;
113 114
114 struct rpc_cred *so_cred; /* Associated cred */ 115 struct rpc_cred *so_cred; /* Associated cred */
115 116
@@ -227,12 +228,6 @@ struct nfs4_state_maintenance_ops {
227extern const struct dentry_operations nfs4_dentry_operations; 228extern const struct dentry_operations nfs4_dentry_operations;
228extern const struct inode_operations nfs4_dir_inode_operations; 229extern const struct inode_operations nfs4_dir_inode_operations;
229 230
230/* inode.c */
231extern ssize_t nfs4_getxattr(struct dentry *, const char *, void *, size_t);
232extern int nfs4_setxattr(struct dentry *, const char *, const void *, size_t, int);
233extern ssize_t nfs4_listxattr(struct dentry *, char *, size_t);
234
235
236/* nfs4proc.c */ 231/* nfs4proc.c */
237extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, struct rpc_cred *, struct nfs4_setclientid_res *); 232extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, struct rpc_cred *, struct nfs4_setclientid_res *);
238extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct nfs4_setclientid_res *arg, struct rpc_cred *); 233extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct nfs4_setclientid_res *arg, struct rpc_cred *);
@@ -241,11 +236,12 @@ extern int nfs4_proc_async_renew(struct nfs_client *, struct rpc_cred *);
241extern int nfs4_proc_renew(struct nfs_client *, struct rpc_cred *); 236extern int nfs4_proc_renew(struct nfs_client *, struct rpc_cred *);
242extern int nfs4_init_clientid(struct nfs_client *, struct rpc_cred *); 237extern int nfs4_init_clientid(struct nfs_client *, struct rpc_cred *);
243extern int nfs41_init_clientid(struct nfs_client *, struct rpc_cred *); 238extern int nfs41_init_clientid(struct nfs_client *, struct rpc_cred *);
244extern int nfs4_do_close(struct path *path, struct nfs4_state *state, gfp_t gfp_mask, int wait); 239extern int nfs4_do_close(struct path *path, struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc);
245extern int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle); 240extern int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle);
246extern int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name, 241extern int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
247 struct nfs4_fs_locations *fs_locations, struct page *page); 242 struct nfs4_fs_locations *fs_locations, struct page *page);
248extern void nfs4_release_lockowner(const struct nfs4_lock_state *); 243extern void nfs4_release_lockowner(const struct nfs4_lock_state *);
244extern const struct xattr_handler *nfs4_xattr_handlers[];
249 245
250#if defined(CONFIG_NFS_V4_1) 246#if defined(CONFIG_NFS_V4_1)
251static inline struct nfs4_session *nfs4_get_session(const struct nfs_server *server) 247static inline struct nfs4_session *nfs4_get_session(const struct nfs_server *server)
@@ -331,7 +327,6 @@ extern void nfs_free_seqid(struct nfs_seqid *seqid);
331extern const nfs4_stateid zero_stateid; 327extern const nfs4_stateid zero_stateid;
332 328
333/* nfs4xdr.c */ 329/* nfs4xdr.c */
334extern __be32 *nfs4_decode_dirent(struct xdr_stream *, struct nfs_entry *, struct nfs_server *, int);
335extern struct rpc_procinfo nfs4_procedures[]; 330extern struct rpc_procinfo nfs4_procedures[];
336 331
337struct nfs4_mount_data; 332struct nfs4_mount_data;
diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c
index 2e92f0d8d654..23f930caf1e2 100644
--- a/fs/nfs/nfs4filelayout.c
+++ b/fs/nfs/nfs4filelayout.c
@@ -82,7 +82,7 @@ filelayout_check_layout(struct pnfs_layout_hdr *lo,
82{ 82{
83 struct nfs4_file_layout_dsaddr *dsaddr; 83 struct nfs4_file_layout_dsaddr *dsaddr;
84 int status = -EINVAL; 84 int status = -EINVAL;
85 struct nfs_server *nfss = NFS_SERVER(lo->inode); 85 struct nfs_server *nfss = NFS_SERVER(lo->plh_inode);
86 86
87 dprintk("--> %s\n", __func__); 87 dprintk("--> %s\n", __func__);
88 88
@@ -101,7 +101,7 @@ filelayout_check_layout(struct pnfs_layout_hdr *lo,
101 /* find and reference the deviceid */ 101 /* find and reference the deviceid */
102 dsaddr = nfs4_fl_find_get_deviceid(nfss->nfs_client, id); 102 dsaddr = nfs4_fl_find_get_deviceid(nfss->nfs_client, id);
103 if (dsaddr == NULL) { 103 if (dsaddr == NULL) {
104 dsaddr = get_device_info(lo->inode, id); 104 dsaddr = get_device_info(lo->plh_inode, id);
105 if (dsaddr == NULL) 105 if (dsaddr == NULL)
106 goto out; 106 goto out;
107 } 107 }
@@ -243,7 +243,7 @@ filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid,
243static void 243static void
244filelayout_free_lseg(struct pnfs_layout_segment *lseg) 244filelayout_free_lseg(struct pnfs_layout_segment *lseg)
245{ 245{
246 struct nfs_server *nfss = NFS_SERVER(lseg->layout->inode); 246 struct nfs_server *nfss = NFS_SERVER(lseg->pls_layout->plh_inode);
247 struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg); 247 struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);
248 248
249 dprintk("--> %s\n", __func__); 249 dprintk("--> %s\n", __func__);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 4435e5e1f904..9d992b0346e3 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -49,6 +49,7 @@
49#include <linux/mount.h> 49#include <linux/mount.h>
50#include <linux/module.h> 50#include <linux/module.h>
51#include <linux/sunrpc/bc_xprt.h> 51#include <linux/sunrpc/bc_xprt.h>
52#include <linux/xattr.h>
52 53
53#include "nfs4_fs.h" 54#include "nfs4_fs.h"
54#include "delegation.h" 55#include "delegation.h"
@@ -355,9 +356,9 @@ nfs4_free_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *free_slot)
355} 356}
356 357
357/* 358/*
358 * Signal state manager thread if session is drained 359 * Signal state manager thread if session fore channel is drained
359 */ 360 */
360static void nfs41_check_drain_session_complete(struct nfs4_session *ses) 361static void nfs4_check_drain_fc_complete(struct nfs4_session *ses)
361{ 362{
362 struct rpc_task *task; 363 struct rpc_task *task;
363 364
@@ -371,8 +372,20 @@ static void nfs41_check_drain_session_complete(struct nfs4_session *ses)
371 if (ses->fc_slot_table.highest_used_slotid != -1) 372 if (ses->fc_slot_table.highest_used_slotid != -1)
372 return; 373 return;
373 374
374 dprintk("%s COMPLETE: Session Drained\n", __func__); 375 dprintk("%s COMPLETE: Session Fore Channel Drained\n", __func__);
375 complete(&ses->complete); 376 complete(&ses->fc_slot_table.complete);
377}
378
379/*
380 * Signal state manager thread if session back channel is drained
381 */
382void nfs4_check_drain_bc_complete(struct nfs4_session *ses)
383{
384 if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state) ||
385 ses->bc_slot_table.highest_used_slotid != -1)
386 return;
387 dprintk("%s COMPLETE: Session Back Channel Drained\n", __func__);
388 complete(&ses->bc_slot_table.complete);
376} 389}
377 390
378static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res) 391static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
@@ -389,7 +402,7 @@ static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
389 402
390 spin_lock(&tbl->slot_tbl_lock); 403 spin_lock(&tbl->slot_tbl_lock);
391 nfs4_free_slot(tbl, res->sr_slot); 404 nfs4_free_slot(tbl, res->sr_slot);
392 nfs41_check_drain_session_complete(res->sr_session); 405 nfs4_check_drain_fc_complete(res->sr_session);
393 spin_unlock(&tbl->slot_tbl_lock); 406 spin_unlock(&tbl->slot_tbl_lock);
394 res->sr_slot = NULL; 407 res->sr_slot = NULL;
395} 408}
@@ -1826,6 +1839,8 @@ struct nfs4_closedata {
1826 struct nfs_closeres res; 1839 struct nfs_closeres res;
1827 struct nfs_fattr fattr; 1840 struct nfs_fattr fattr;
1828 unsigned long timestamp; 1841 unsigned long timestamp;
1842 bool roc;
1843 u32 roc_barrier;
1829}; 1844};
1830 1845
1831static void nfs4_free_closedata(void *data) 1846static void nfs4_free_closedata(void *data)
@@ -1833,6 +1848,8 @@ static void nfs4_free_closedata(void *data)
1833 struct nfs4_closedata *calldata = data; 1848 struct nfs4_closedata *calldata = data;
1834 struct nfs4_state_owner *sp = calldata->state->owner; 1849 struct nfs4_state_owner *sp = calldata->state->owner;
1835 1850
1851 if (calldata->roc)
1852 pnfs_roc_release(calldata->state->inode);
1836 nfs4_put_open_state(calldata->state); 1853 nfs4_put_open_state(calldata->state);
1837 nfs_free_seqid(calldata->arg.seqid); 1854 nfs_free_seqid(calldata->arg.seqid);
1838 nfs4_put_state_owner(sp); 1855 nfs4_put_state_owner(sp);
@@ -1865,6 +1882,9 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
1865 */ 1882 */
1866 switch (task->tk_status) { 1883 switch (task->tk_status) {
1867 case 0: 1884 case 0:
1885 if (calldata->roc)
1886 pnfs_roc_set_barrier(state->inode,
1887 calldata->roc_barrier);
1868 nfs_set_open_stateid(state, &calldata->res.stateid, 0); 1888 nfs_set_open_stateid(state, &calldata->res.stateid, 0);
1869 renew_lease(server, calldata->timestamp); 1889 renew_lease(server, calldata->timestamp);
1870 nfs4_close_clear_stateid_flags(state, 1890 nfs4_close_clear_stateid_flags(state,
@@ -1917,8 +1937,15 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
1917 return; 1937 return;
1918 } 1938 }
1919 1939
1920 if (calldata->arg.fmode == 0) 1940 if (calldata->arg.fmode == 0) {
1921 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE]; 1941 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
1942 if (calldata->roc &&
1943 pnfs_roc_drain(calldata->inode, &calldata->roc_barrier)) {
1944 rpc_sleep_on(&NFS_SERVER(calldata->inode)->roc_rpcwaitq,
1945 task, NULL);
1946 return;
1947 }
1948 }
1922 1949
1923 nfs_fattr_init(calldata->res.fattr); 1950 nfs_fattr_init(calldata->res.fattr);
1924 calldata->timestamp = jiffies; 1951 calldata->timestamp = jiffies;
@@ -1946,7 +1973,7 @@ static const struct rpc_call_ops nfs4_close_ops = {
1946 * 1973 *
1947 * NOTE: Caller must be holding the sp->so_owner semaphore! 1974 * NOTE: Caller must be holding the sp->so_owner semaphore!
1948 */ 1975 */
1949int nfs4_do_close(struct path *path, struct nfs4_state *state, gfp_t gfp_mask, int wait) 1976int nfs4_do_close(struct path *path, struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc)
1950{ 1977{
1951 struct nfs_server *server = NFS_SERVER(state->inode); 1978 struct nfs_server *server = NFS_SERVER(state->inode);
1952 struct nfs4_closedata *calldata; 1979 struct nfs4_closedata *calldata;
@@ -1981,11 +2008,12 @@ int nfs4_do_close(struct path *path, struct nfs4_state *state, gfp_t gfp_mask, i
1981 calldata->res.fattr = &calldata->fattr; 2008 calldata->res.fattr = &calldata->fattr;
1982 calldata->res.seqid = calldata->arg.seqid; 2009 calldata->res.seqid = calldata->arg.seqid;
1983 calldata->res.server = server; 2010 calldata->res.server = server;
2011 calldata->roc = roc;
1984 path_get(path); 2012 path_get(path);
1985 calldata->path = *path; 2013 calldata->path = *path;
1986 2014
1987 msg.rpc_argp = &calldata->arg, 2015 msg.rpc_argp = &calldata->arg;
1988 msg.rpc_resp = &calldata->res, 2016 msg.rpc_resp = &calldata->res;
1989 task_setup_data.callback_data = calldata; 2017 task_setup_data.callback_data = calldata;
1990 task = rpc_run_task(&task_setup_data); 2018 task = rpc_run_task(&task_setup_data);
1991 if (IS_ERR(task)) 2019 if (IS_ERR(task))
@@ -1998,6 +2026,8 @@ int nfs4_do_close(struct path *path, struct nfs4_state *state, gfp_t gfp_mask, i
1998out_free_calldata: 2026out_free_calldata:
1999 kfree(calldata); 2027 kfree(calldata);
2000out: 2028out:
2029 if (roc)
2030 pnfs_roc_release(state->inode);
2001 nfs4_put_open_state(state); 2031 nfs4_put_open_state(state);
2002 nfs4_put_state_owner(sp); 2032 nfs4_put_state_owner(sp);
2003 return status; 2033 return status;
@@ -2486,6 +2516,7 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
2486 path = &ctx->path; 2516 path = &ctx->path;
2487 fmode = ctx->mode; 2517 fmode = ctx->mode;
2488 } 2518 }
2519 sattr->ia_mode &= ~current_umask();
2489 state = nfs4_do_open(dir, path, fmode, flags, sattr, cred); 2520 state = nfs4_do_open(dir, path, fmode, flags, sattr, cred);
2490 d_drop(dentry); 2521 d_drop(dentry);
2491 if (IS_ERR(state)) { 2522 if (IS_ERR(state)) {
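
The added `sattr->ia_mode &= ~current_umask();` (repeated below for mkdir and mknod) applies the process umask on the client before the attributes go out on the wire, since an NFSv4 server has no way to see the caller's umask. The masking is plain bit clearing: a requested mode of 0666 under umask 022 becomes 0644, as the following check confirms:

    #include <stdio.h>

    int main(void)
    {
            unsigned mode = 0666, umask_bits = 022;

            mode &= ~umask_bits;    /* same masking as ia_mode &= ~current_umask() */
            printf("%04o\n", mode); /* prints 0644 */
            return 0;
    }
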
@@ -2816,6 +2847,8 @@ static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
2816{ 2847{
2817 struct nfs4_exception exception = { }; 2848 struct nfs4_exception exception = { };
2818 int err; 2849 int err;
2850
2851 sattr->ia_mode &= ~current_umask();
2819 do { 2852 do {
2820 err = nfs4_handle_exception(NFS_SERVER(dir), 2853 err = nfs4_handle_exception(NFS_SERVER(dir),
2821 _nfs4_proc_mkdir(dir, dentry, sattr), 2854 _nfs4_proc_mkdir(dir, dentry, sattr),
@@ -2916,6 +2949,8 @@ static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
2916{ 2949{
2917 struct nfs4_exception exception = { }; 2950 struct nfs4_exception exception = { };
2918 int err; 2951 int err;
2952
2953 sattr->ia_mode &= ~current_umask();
2919 do { 2954 do {
2920 err = nfs4_handle_exception(NFS_SERVER(dir), 2955 err = nfs4_handle_exception(NFS_SERVER(dir),
2921 _nfs4_proc_mknod(dir, dentry, sattr, rdev), 2956 _nfs4_proc_mknod(dir, dentry, sattr, rdev),
@@ -3478,6 +3513,7 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
3478 struct nfs4_setclientid setclientid = { 3513 struct nfs4_setclientid setclientid = {
3479 .sc_verifier = &sc_verifier, 3514 .sc_verifier = &sc_verifier,
3480 .sc_prog = program, 3515 .sc_prog = program,
3516 .sc_cb_ident = clp->cl_cb_ident,
3481 }; 3517 };
3482 struct rpc_message msg = { 3518 struct rpc_message msg = {
3483 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID], 3519 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
@@ -3517,7 +3553,7 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
3517 if (signalled()) 3553 if (signalled())
3518 break; 3554 break;
3519 if (loop++ & 1) 3555 if (loop++ & 1)
3520 ssleep(clp->cl_lease_time + 1); 3556 ssleep(clp->cl_lease_time / HZ + 1);
3521 else 3557 else
3522 if (++clp->cl_id_uniquifier == 0) 3558 if (++clp->cl_id_uniquifier == 0)
3523 break; 3559 break;
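
The ssleep() fix corrects a unit mismatch: clp->cl_lease_time is kept in jiffies, while ssleep() takes seconds, so the old code overslept by roughly a factor of HZ. With HZ at an assumed 250 and a 90-second lease, the difference is dramatic:

    #include <stdio.h>

    #define HZ 250                          /* assumed config value */

    int main(void)
    {
            unsigned long cl_lease_time = 90 * HZ;  /* lease, in jiffies */

            printf("buggy: %lu s\n", cl_lease_time + 1);       /* 22501 */
            printf("fixed: %lu s\n", cl_lease_time / HZ + 1);  /* 91    */
            return 0;
    }
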
@@ -3663,8 +3699,8 @@ static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, co
3663 data->rpc_status = 0; 3699 data->rpc_status = 0;
3664 3700
3665 task_setup_data.callback_data = data; 3701 task_setup_data.callback_data = data;
3666 msg.rpc_argp = &data->args, 3702 msg.rpc_argp = &data->args;
3667 msg.rpc_resp = &data->res, 3703 msg.rpc_resp = &data->res;
3668 task = rpc_run_task(&task_setup_data); 3704 task = rpc_run_task(&task_setup_data);
3669 if (IS_ERR(task)) 3705 if (IS_ERR(task))
3670 return PTR_ERR(task); 3706 return PTR_ERR(task);
@@ -3743,6 +3779,7 @@ static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock
3743 goto out; 3779 goto out;
3744 lsp = request->fl_u.nfs4_fl.owner; 3780 lsp = request->fl_u.nfs4_fl.owner;
3745 arg.lock_owner.id = lsp->ls_id.id; 3781 arg.lock_owner.id = lsp->ls_id.id;
3782 arg.lock_owner.s_dev = server->s_dev;
3746 status = nfs4_call_sync(server, &msg, &arg, &res, 1); 3783 status = nfs4_call_sync(server, &msg, &arg, &res, 1);
3747 switch (status) { 3784 switch (status) {
3748 case 0: 3785 case 0:
@@ -3908,8 +3945,8 @@ static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
3908 return ERR_PTR(-ENOMEM); 3945 return ERR_PTR(-ENOMEM);
3909 } 3946 }
3910 3947
3911 msg.rpc_argp = &data->arg, 3948 msg.rpc_argp = &data->arg;
3912 msg.rpc_resp = &data->res, 3949 msg.rpc_resp = &data->res;
3913 task_setup_data.callback_data = data; 3950 task_setup_data.callback_data = data;
3914 return rpc_run_task(&task_setup_data); 3951 return rpc_run_task(&task_setup_data);
3915} 3952}
@@ -3988,6 +4025,7 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
3988 p->arg.lock_stateid = &lsp->ls_stateid; 4025 p->arg.lock_stateid = &lsp->ls_stateid;
3989 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid; 4026 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
3990 p->arg.lock_owner.id = lsp->ls_id.id; 4027 p->arg.lock_owner.id = lsp->ls_id.id;
4028 p->arg.lock_owner.s_dev = server->s_dev;
3991 p->res.lock_seqid = p->arg.lock_seqid; 4029 p->res.lock_seqid = p->arg.lock_seqid;
3992 p->lsp = lsp; 4030 p->lsp = lsp;
3993 p->server = server; 4031 p->server = server;
@@ -4145,8 +4183,8 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f
4145 data->arg.reclaim = NFS_LOCK_RECLAIM; 4183 data->arg.reclaim = NFS_LOCK_RECLAIM;
4146 task_setup_data.callback_ops = &nfs4_recover_lock_ops; 4184 task_setup_data.callback_ops = &nfs4_recover_lock_ops;
4147 } 4185 }
4148 msg.rpc_argp = &data->arg, 4186 msg.rpc_argp = &data->arg;
4149 msg.rpc_resp = &data->res, 4187 msg.rpc_resp = &data->res;
4150 task_setup_data.callback_data = data; 4188 task_setup_data.callback_data = data;
4151 task = rpc_run_task(&task_setup_data); 4189 task = rpc_run_task(&task_setup_data);
4152 if (IS_ERR(task)) 4190 if (IS_ERR(task))
@@ -4392,48 +4430,43 @@ void nfs4_release_lockowner(const struct nfs4_lock_state *lsp)
4392 return; 4430 return;
4393 args->lock_owner.clientid = server->nfs_client->cl_clientid; 4431 args->lock_owner.clientid = server->nfs_client->cl_clientid;
4394 args->lock_owner.id = lsp->ls_id.id; 4432 args->lock_owner.id = lsp->ls_id.id;
4433 args->lock_owner.s_dev = server->s_dev;
4395 msg.rpc_argp = args; 4434 msg.rpc_argp = args;
4396 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, args); 4435 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, args);
4397} 4436}
4398 4437
4399#define XATTR_NAME_NFSV4_ACL "system.nfs4_acl" 4438#define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
4400 4439
4401int nfs4_setxattr(struct dentry *dentry, const char *key, const void *buf, 4440static int nfs4_xattr_set_nfs4_acl(struct dentry *dentry, const char *key,
4402 size_t buflen, int flags) 4441 const void *buf, size_t buflen,
4442 int flags, int type)
4403{ 4443{
4404 struct inode *inode = dentry->d_inode; 4444 if (strcmp(key, "") != 0)
4405 4445 return -EINVAL;
4406 if (strcmp(key, XATTR_NAME_NFSV4_ACL) != 0)
4407 return -EOPNOTSUPP;
4408 4446
4409 return nfs4_proc_set_acl(inode, buf, buflen); 4447 return nfs4_proc_set_acl(dentry->d_inode, buf, buflen);
4410} 4448}
4411 4449
4412/* The getxattr man page suggests returning -ENODATA for unknown attributes, 4450static int nfs4_xattr_get_nfs4_acl(struct dentry *dentry, const char *key,
4413 * and that's what we'll do for e.g. user attributes that haven't been set. 4451 void *buf, size_t buflen, int type)
4414 * But we'll follow ext2/ext3's lead by returning -EOPNOTSUPP for unsupported
4415 * attributes in kernel-managed attribute namespaces. */
4416ssize_t nfs4_getxattr(struct dentry *dentry, const char *key, void *buf,
4417 size_t buflen)
4418{ 4452{
4419 struct inode *inode = dentry->d_inode; 4453 if (strcmp(key, "") != 0)
4420 4454 return -EINVAL;
4421 if (strcmp(key, XATTR_NAME_NFSV4_ACL) != 0)
4422 return -EOPNOTSUPP;
4423 4455
4424 return nfs4_proc_get_acl(inode, buf, buflen); 4456 return nfs4_proc_get_acl(dentry->d_inode, buf, buflen);
4425} 4457}
4426 4458
4427ssize_t nfs4_listxattr(struct dentry *dentry, char *buf, size_t buflen) 4459static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list,
4460 size_t list_len, const char *name,
4461 size_t name_len, int type)
4428{ 4462{
4429 size_t len = strlen(XATTR_NAME_NFSV4_ACL) + 1; 4463 size_t len = sizeof(XATTR_NAME_NFSV4_ACL);
4430 4464
4431 if (!nfs4_server_supports_acls(NFS_SERVER(dentry->d_inode))) 4465 if (!nfs4_server_supports_acls(NFS_SERVER(dentry->d_inode)))
4432 return 0; 4466 return 0;
4433 if (buf && buflen < len) 4467
4434 return -ERANGE; 4468 if (list && len <= list_len)
4435 if (buf) 4469 memcpy(list, XATTR_NAME_NFSV4_ACL, len);
4436 memcpy(buf, XATTR_NAME_NFSV4_ACL, len);
4437 return len; 4470 return len;
4438} 4471}
4439 4472
@@ -4486,6 +4519,25 @@ int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
4486 4519
4487#ifdef CONFIG_NFS_V4_1 4520#ifdef CONFIG_NFS_V4_1
4488/* 4521/*
 4522 * Check the exchange flags returned by the server: reject replies that
 4523 * set flags outside the valid mask, that set both the PNFS and NON_PNFS
 4524 * flags, or that set none of the NON_PNFS, PNFS, or DS flags.
4525 */
4526static int nfs4_check_cl_exchange_flags(u32 flags)
4527{
4528 if (flags & ~EXCHGID4_FLAG_MASK_R)
4529 goto out_inval;
4530 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
4531 (flags & EXCHGID4_FLAG_USE_NON_PNFS))
4532 goto out_inval;
4533 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS)))
4534 goto out_inval;
4535 return NFS_OK;
4536out_inval:
4537 return -NFS4ERR_INVAL;
4538}
4539
4540/*
4489 * nfs4_proc_exchange_id() 4541 * nfs4_proc_exchange_id()
4490 * 4542 *
4491 * Since the clientid has expired, all compounds using sessions 4543 * Since the clientid has expired, all compounds using sessions
@@ -4498,7 +4550,7 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
4498 nfs4_verifier verifier; 4550 nfs4_verifier verifier;
4499 struct nfs41_exchange_id_args args = { 4551 struct nfs41_exchange_id_args args = {
4500 .client = clp, 4552 .client = clp,
4501 .flags = clp->cl_exchange_flags, 4553 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER,
4502 }; 4554 };
4503 struct nfs41_exchange_id_res res = { 4555 struct nfs41_exchange_id_res res = {
4504 .client = clp, 4556 .client = clp,
@@ -4515,9 +4567,6 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
4515 dprintk("--> %s\n", __func__); 4567 dprintk("--> %s\n", __func__);
4516 BUG_ON(clp == NULL); 4568 BUG_ON(clp == NULL);
4517 4569
4518 /* Remove server-only flags */
4519 args.flags &= ~EXCHGID4_FLAG_CONFIRMED_R;
4520
4521 p = (u32 *)verifier.data; 4570 p = (u32 *)verifier.data;
4522 *p++ = htonl((u32)clp->cl_boot_time.tv_sec); 4571 *p++ = htonl((u32)clp->cl_boot_time.tv_sec);
4523 *p = htonl((u32)clp->cl_boot_time.tv_nsec); 4572 *p = htonl((u32)clp->cl_boot_time.tv_nsec);
@@ -4543,6 +4592,7 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
4543 break; 4592 break;
4544 } 4593 }
4545 4594
4595 status = nfs4_check_cl_exchange_flags(clp->cl_exchange_flags);
4546 dprintk("<-- %s status= %d\n", __func__, status); 4596 dprintk("<-- %s status= %d\n", __func__, status);
4547 return status; 4597 return status;
4548} 4598}
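
nfs4_check_cl_exchange_flags() validates the EXCHANGE_ID reply after the fact: any flag outside the reply mask, the contradictory combination of the pNFS-MDS and non-pNFS roles, or the absence of every role bit now yields -NFS4ERR_INVAL. A self-contained rendition of the same predicate, using the role flag values from RFC 5661 and a reply mask deliberately narrowed to just those bits for this sketch (the real EXCHGID4_FLAG_MASK_R admits more):

    #include <stdio.h>

    #define USE_NON_PNFS 0x00010000u        /* flag values from RFC 5661 */
    #define USE_PNFS_MDS 0x00020000u
    #define USE_PNFS_DS  0x00040000u
    #define MASK_PNFS    (USE_NON_PNFS | USE_PNFS_MDS | USE_PNFS_DS)
    #define MASK_REPLY   MASK_PNFS          /* narrowed for the sketch */

    static int check_exchange_flags(unsigned flags)
    {
            if (flags & ~MASK_REPLY)
                    return -1;              /* bits a server may never set */
            if ((flags & USE_PNFS_MDS) && (flags & USE_NON_PNFS))
                    return -1;              /* contradictory roles         */
            if (!(flags & MASK_PNFS))
                    return -1;              /* no role claimed at all      */
            return 0;
    }

    int main(void)
    {
            printf("%d\n", check_exchange_flags(USE_PNFS_MDS));                /* 0  */
            printf("%d\n", check_exchange_flags(USE_PNFS_MDS | USE_NON_PNFS)); /* -1 */
            printf("%d\n", check_exchange_flags(0));                           /* -1 */
            return 0;
    }
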
@@ -4776,17 +4826,17 @@ struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
4776 if (!session) 4826 if (!session)
4777 return NULL; 4827 return NULL;
4778 4828
4779 init_completion(&session->complete);
4780
4781 tbl = &session->fc_slot_table; 4829 tbl = &session->fc_slot_table;
4782 tbl->highest_used_slotid = -1; 4830 tbl->highest_used_slotid = -1;
4783 spin_lock_init(&tbl->slot_tbl_lock); 4831 spin_lock_init(&tbl->slot_tbl_lock);
4784 rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, "ForeChannel Slot table"); 4832 rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, "ForeChannel Slot table");
4833 init_completion(&tbl->complete);
4785 4834
4786 tbl = &session->bc_slot_table; 4835 tbl = &session->bc_slot_table;
4787 tbl->highest_used_slotid = -1; 4836 tbl->highest_used_slotid = -1;
4788 spin_lock_init(&tbl->slot_tbl_lock); 4837 spin_lock_init(&tbl->slot_tbl_lock);
4789 rpc_init_wait_queue(&tbl->slot_tbl_waitq, "BackChannel Slot table"); 4838 rpc_init_wait_queue(&tbl->slot_tbl_waitq, "BackChannel Slot table");
4839 init_completion(&tbl->complete);
4790 4840
4791 session->session_state = 1<<NFS4_SESSION_INITING; 4841 session->session_state = 1<<NFS4_SESSION_INITING;
4792 4842
@@ -5280,13 +5330,23 @@ static void
5280nfs4_layoutget_prepare(struct rpc_task *task, void *calldata) 5330nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
5281{ 5331{
5282 struct nfs4_layoutget *lgp = calldata; 5332 struct nfs4_layoutget *lgp = calldata;
5283 struct inode *ino = lgp->args.inode; 5333 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
5284 struct nfs_server *server = NFS_SERVER(ino);
5285 5334
5286 dprintk("--> %s\n", __func__); 5335 dprintk("--> %s\n", __func__);
 5336 /* Note there is a race here, where a CB_LAYOUTRECALL can come in
5337 * right now covering the LAYOUTGET we are about to send.
5338 * However, that is not so catastrophic, and there seems
5339 * to be no way to prevent it completely.
5340 */
5287 if (nfs4_setup_sequence(server, &lgp->args.seq_args, 5341 if (nfs4_setup_sequence(server, &lgp->args.seq_args,
5288 &lgp->res.seq_res, 0, task)) 5342 &lgp->res.seq_res, 0, task))
5289 return; 5343 return;
5344 if (pnfs_choose_layoutget_stateid(&lgp->args.stateid,
5345 NFS_I(lgp->args.inode)->layout,
5346 lgp->args.ctx->state)) {
5347 rpc_exit(task, NFS4_OK);
5348 return;
5349 }
5290 rpc_call_start(task); 5350 rpc_call_start(task);
5291} 5351}
5292 5352
@@ -5313,7 +5373,6 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
5313 return; 5373 return;
5314 } 5374 }
5315 } 5375 }
5316 lgp->status = task->tk_status;
5317 dprintk("<-- %s\n", __func__); 5376 dprintk("<-- %s\n", __func__);
5318} 5377}
5319 5378
@@ -5322,7 +5381,6 @@ static void nfs4_layoutget_release(void *calldata)
5322 struct nfs4_layoutget *lgp = calldata; 5381 struct nfs4_layoutget *lgp = calldata;
5323 5382
5324 dprintk("--> %s\n", __func__); 5383 dprintk("--> %s\n", __func__);
5325 put_layout_hdr(lgp->args.inode);
5326 if (lgp->res.layout.buf != NULL) 5384 if (lgp->res.layout.buf != NULL)
5327 free_page((unsigned long) lgp->res.layout.buf); 5385 free_page((unsigned long) lgp->res.layout.buf);
5328 put_nfs_open_context(lgp->args.ctx); 5386 put_nfs_open_context(lgp->args.ctx);
@@ -5367,13 +5425,10 @@ int nfs4_proc_layoutget(struct nfs4_layoutget *lgp)
5367 if (IS_ERR(task)) 5425 if (IS_ERR(task))
5368 return PTR_ERR(task); 5426 return PTR_ERR(task);
5369 status = nfs4_wait_for_completion_rpc_task(task); 5427 status = nfs4_wait_for_completion_rpc_task(task);
5370 if (status != 0) 5428 if (status == 0)
5371 goto out; 5429 status = task->tk_status;
5372 status = lgp->status; 5430 if (status == 0)
5373 if (status != 0) 5431 status = pnfs_layout_process(lgp);
5374 goto out;
5375 status = pnfs_layout_process(lgp);
5376out:
5377 rpc_put_task(task); 5432 rpc_put_task(task);
5378 dprintk("<-- %s status=%d\n", __func__, status); 5433 dprintk("<-- %s status=%d\n", __func__, status);
5379 return status; 5434 return status;
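
The rewritten tail of nfs4_proc_layoutget() drops the lgp->status field and the goto ladder in favour of a fall-through chain: each stage runs only while the running status is still zero, so the first failure, whether from the wait, the RPC result, or layout processing, is the one reported. The shape in isolation, with stand-in stage functions:

    #include <stdio.h>

    static int wait_for_completion(void) { return 0; }  /* stand-ins for the   */
    static int task_status(void)         { return 0; }  /* three stages of     */
    static int layout_process(void)      { return 0; }  /* nfs4_proc_layoutget */

    int main(void)
    {
            int status;

            status = wait_for_completion();
            if (status == 0)
                    status = task_status();       /* RPC-level result  */
            if (status == 0)
                    status = layout_process();    /* decode the layout */
            printf("status=%d\n", status);
            return 0;
    }
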
@@ -5504,9 +5559,10 @@ static const struct inode_operations nfs4_file_inode_operations = {
5504 .permission = nfs_permission, 5559 .permission = nfs_permission,
5505 .getattr = nfs_getattr, 5560 .getattr = nfs_getattr,
5506 .setattr = nfs_setattr, 5561 .setattr = nfs_setattr,
5507 .getxattr = nfs4_getxattr, 5562 .getxattr = generic_getxattr,
5508 .setxattr = nfs4_setxattr, 5563 .setxattr = generic_setxattr,
5509 .listxattr = nfs4_listxattr, 5564 .listxattr = generic_listxattr,
5565 .removexattr = generic_removexattr,
5510}; 5566};
5511 5567
5512const struct nfs_rpc_ops nfs_v4_clientops = { 5568const struct nfs_rpc_ops nfs_v4_clientops = {
@@ -5551,6 +5607,18 @@ const struct nfs_rpc_ops nfs_v4_clientops = {
5551 .open_context = nfs4_atomic_open, 5607 .open_context = nfs4_atomic_open,
5552}; 5608};
5553 5609
5610static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
5611 .prefix = XATTR_NAME_NFSV4_ACL,
5612 .list = nfs4_xattr_list_nfs4_acl,
5613 .get = nfs4_xattr_get_nfs4_acl,
5614 .set = nfs4_xattr_set_nfs4_acl,
5615};
5616
5617const struct xattr_handler *nfs4_xattr_handlers[] = {
5618 &nfs4_xattr_nfs4_acl_handler,
5619 NULL
5620};
5621
5554/* 5622/*
5555 * Local variables: 5623 * Local variables:
5556 * c-basic-offset: 8 5624 * c-basic-offset: 8
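
The xattr rework above swaps the hand-rolled nfs4_getxattr/nfs4_setxattr/nfs4_listxattr entry points for the VFS generic_*xattr operations plus a handler table: the VFS matches the "system.nfs4_acl" prefix against nfs4_xattr_handlers and calls the handler with only the suffix, which is why the new callbacks reject any non-empty remainder with -EINVAL (and why strlen()+1 could become sizeof(), which already counts the NUL). A miniature model of that dispatch, with -22 and -95 standing in for -EINVAL and -EOPNOTSUPP:

    #include <stdio.h>
    #include <string.h>

    struct xattr_handler {
            const char *prefix;
            int (*get)(const char *suffix);
    };

    static int nfs4_acl_get(const char *suffix)
    {
            if (strcmp(suffix, "") != 0)    /* only the bare prefix is valid */
                    return -22;             /* -EINVAL */
            return 0;
    }

    static const struct xattr_handler acl_handler = {
            .prefix = "system.nfs4_acl",
            .get    = nfs4_acl_get,
    };

    /* What generic_getxattr does, in miniature: strip the matching
     * prefix and hand the remainder to the handler. */
    static int generic_get(const char *name)
    {
            size_t n = strlen(acl_handler.prefix);

            if (strncmp(name, acl_handler.prefix, n) != 0)
                    return -95;             /* -EOPNOTSUPP: no handler */
            return acl_handler.get(name + n);
    }

    int main(void)
    {
            printf("%d\n", generic_get("system.nfs4_acl"));   /* 0   */
            printf("%d\n", generic_get("system.nfs4_aclX"));  /* -22 */
            printf("%d\n", generic_get("user.foo"));          /* -95 */
            return 0;
    }
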
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
index 72b6c580af13..402143d75fc5 100644
--- a/fs/nfs/nfs4renewd.c
+++ b/fs/nfs/nfs4renewd.c
@@ -63,9 +63,14 @@ nfs4_renew_state(struct work_struct *work)
63 63
64 ops = clp->cl_mvops->state_renewal_ops; 64 ops = clp->cl_mvops->state_renewal_ops;
65 dprintk("%s: start\n", __func__); 65 dprintk("%s: start\n", __func__);
66 /* Are there any active superblocks? */ 66
67 if (list_empty(&clp->cl_superblocks)) 67 rcu_read_lock();
68 if (list_empty(&clp->cl_superblocks)) {
69 rcu_read_unlock();
68 goto out; 70 goto out;
71 }
72 rcu_read_unlock();
73
69 spin_lock(&clp->cl_lock); 74 spin_lock(&clp->cl_lock);
70 lease = clp->cl_lease_time; 75 lease = clp->cl_lease_time;
71 last = clp->cl_last_renewal; 76 last = clp->cl_last_renewal;
@@ -75,7 +80,7 @@ nfs4_renew_state(struct work_struct *work)
75 cred = ops->get_state_renewal_cred_locked(clp); 80 cred = ops->get_state_renewal_cred_locked(clp);
76 spin_unlock(&clp->cl_lock); 81 spin_unlock(&clp->cl_lock);
77 if (cred == NULL) { 82 if (cred == NULL) {
78 if (list_empty(&clp->cl_delegations)) { 83 if (!nfs_delegations_present(clp)) {
79 set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); 84 set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
80 goto out; 85 goto out;
81 } 86 }
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index f575a3126737..2336d532cf66 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -105,14 +105,17 @@ static void nfs4_clear_machine_cred(struct nfs_client *clp)
105 put_rpccred(cred); 105 put_rpccred(cred);
106} 106}
107 107
108struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp) 108static struct rpc_cred *
109nfs4_get_renew_cred_server_locked(struct nfs_server *server)
109{ 110{
111 struct rpc_cred *cred = NULL;
110 struct nfs4_state_owner *sp; 112 struct nfs4_state_owner *sp;
111 struct rb_node *pos; 113 struct rb_node *pos;
112 struct rpc_cred *cred = NULL;
113 114
114 for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) { 115 for (pos = rb_first(&server->state_owners);
115 sp = rb_entry(pos, struct nfs4_state_owner, so_client_node); 116 pos != NULL;
117 pos = rb_next(pos)) {
118 sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
116 if (list_empty(&sp->so_states)) 119 if (list_empty(&sp->so_states))
117 continue; 120 continue;
118 cred = get_rpccred(sp->so_cred); 121 cred = get_rpccred(sp->so_cred);
@@ -121,6 +124,28 @@ struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp)
121 return cred; 124 return cred;
122} 125}
123 126
127/**
128 * nfs4_get_renew_cred_locked - Acquire credential for a renew operation
129 * @clp: client state handle
130 *
131 * Returns an rpc_cred with reference count bumped, or NULL.
132 * Caller must hold clp->cl_lock.
133 */
134struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp)
135{
136 struct rpc_cred *cred = NULL;
137 struct nfs_server *server;
138
139 rcu_read_lock();
140 list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
141 cred = nfs4_get_renew_cred_server_locked(server);
142 if (cred != NULL)
143 break;
144 }
145 rcu_read_unlock();
146 return cred;
147}
148
124#if defined(CONFIG_NFS_V4_1) 149#if defined(CONFIG_NFS_V4_1)
125 150
126static int nfs41_setup_state_renewal(struct nfs_client *clp) 151static int nfs41_setup_state_renewal(struct nfs_client *clp)
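
With state owners moved into per-server rb-trees, the renew-credential lookup walks clp->cl_superblocks under rcu_read_lock() and descends into each server only inside the read-side critical section; the real code bumps the cred's reference count before breaking out, which is what makes the early exit safe. The traversal pattern, as an in-kernel-style sketch with stand-in types (this builds only inside a kernel tree):

    #include <linux/list.h>
    #include <linux/rculist.h>
    #include <linux/rcupdate.h>

    /* Stand-in types; the real ones live in fs/nfs/. */
    struct server { struct list_head client_link; int has_cred; };
    struct client { struct list_head cl_superblocks; };

    static struct server *find_server_with_cred(struct client *clp)
    {
            struct server *server, *found = NULL;

            rcu_read_lock();                /* readers never block updaters */
            list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
                    if (server->has_cred) {
                            found = server; /* the kernel code takes a
                                             * reference here, before leaving
                                             * the read-side critical section */
                            break;
                    }
            }
            rcu_read_unlock();
            return found;
    }
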
@@ -142,6 +167,11 @@ static int nfs41_setup_state_renewal(struct nfs_client *clp)
142 return status; 167 return status;
143} 168}
144 169
170/*
171 * Back channel returns NFS4ERR_DELAY for new requests when
172 * NFS4_SESSION_DRAINING is set so there is no work to be done when draining
173 * is ended.
174 */
145static void nfs4_end_drain_session(struct nfs_client *clp) 175static void nfs4_end_drain_session(struct nfs_client *clp)
146{ 176{
147 struct nfs4_session *ses = clp->cl_session; 177 struct nfs4_session *ses = clp->cl_session;
@@ -165,22 +195,32 @@ static void nfs4_end_drain_session(struct nfs_client *clp)
165 } 195 }
166} 196}
167 197
168static int nfs4_begin_drain_session(struct nfs_client *clp) 198static int nfs4_wait_on_slot_tbl(struct nfs4_slot_table *tbl)
169{ 199{
170 struct nfs4_session *ses = clp->cl_session;
171 struct nfs4_slot_table *tbl = &ses->fc_slot_table;
172
173 spin_lock(&tbl->slot_tbl_lock); 200 spin_lock(&tbl->slot_tbl_lock);
174 set_bit(NFS4_SESSION_DRAINING, &ses->session_state);
175 if (tbl->highest_used_slotid != -1) { 201 if (tbl->highest_used_slotid != -1) {
176 INIT_COMPLETION(ses->complete); 202 INIT_COMPLETION(tbl->complete);
177 spin_unlock(&tbl->slot_tbl_lock); 203 spin_unlock(&tbl->slot_tbl_lock);
178 return wait_for_completion_interruptible(&ses->complete); 204 return wait_for_completion_interruptible(&tbl->complete);
179 } 205 }
180 spin_unlock(&tbl->slot_tbl_lock); 206 spin_unlock(&tbl->slot_tbl_lock);
181 return 0; 207 return 0;
182} 208}
183 209
210static int nfs4_begin_drain_session(struct nfs_client *clp)
211{
212 struct nfs4_session *ses = clp->cl_session;
213 int ret = 0;
214
215 set_bit(NFS4_SESSION_DRAINING, &ses->session_state);
216 /* back channel */
217 ret = nfs4_wait_on_slot_tbl(&ses->bc_slot_table);
218 if (ret)
219 return ret;
220 /* fore channel */
221 return nfs4_wait_on_slot_tbl(&ses->fc_slot_table);
222}
223
184int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred) 224int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
185{ 225{
186 int status; 226 int status;
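
Moving the completion from the session into each nfs4_slot_table is what lets nfs4_begin_drain_session() reuse one helper for both directions: under slot_tbl_lock, a non-empty table re-arms its completion with INIT_COMPLETION() and the caller sleeps until the last slot holder signals it; the back channel is drained before the fore channel. A condensed restatement of the helper with a stand-in table type (kernel context assumed):

    #include <linux/completion.h>
    #include <linux/spinlock.h>

    struct slot_table {                     /* stand-in for nfs4_slot_table */
            spinlock_t lock;
            int highest_used_slotid;        /* -1 == table is empty */
            struct completion complete;
    };

    static int wait_on_slot_tbl(struct slot_table *tbl)
    {
            spin_lock(&tbl->lock);
            if (tbl->highest_used_slotid != -1) {
                    INIT_COMPLETION(tbl->complete);   /* re-arm before sleeping */
                    spin_unlock(&tbl->lock);
                    return wait_for_completion_interruptible(&tbl->complete);
            }
            spin_unlock(&tbl->lock);
            return 0;
    }

    /* Draining then touches both channels in turn, back channel first. */
    static int begin_drain(struct slot_table *bc, struct slot_table *fc)
    {
            int ret = wait_on_slot_tbl(bc);

            return ret ? ret : wait_on_slot_tbl(fc);
    }
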
@@ -192,6 +232,12 @@ int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
192 status = nfs4_proc_create_session(clp); 232 status = nfs4_proc_create_session(clp);
193 if (status != 0) 233 if (status != 0)
194 goto out; 234 goto out;
235 status = nfs4_set_callback_sessionid(clp);
236 if (status != 0) {
237 printk(KERN_WARNING "Sessionid not set. No callback service\n");
238 nfs_callback_down(1);
239 status = 0;
240 }
195 nfs41_setup_state_renewal(clp); 241 nfs41_setup_state_renewal(clp);
196 nfs_mark_client_ready(clp, NFS_CS_READY); 242 nfs_mark_client_ready(clp, NFS_CS_READY);
197out: 243out:
@@ -210,28 +256,56 @@ struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp)
210 256
211#endif /* CONFIG_NFS_V4_1 */ 257#endif /* CONFIG_NFS_V4_1 */
212 258
213struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp) 259static struct rpc_cred *
260nfs4_get_setclientid_cred_server(struct nfs_server *server)
214{ 261{
262 struct nfs_client *clp = server->nfs_client;
263 struct rpc_cred *cred = NULL;
215 struct nfs4_state_owner *sp; 264 struct nfs4_state_owner *sp;
216 struct rb_node *pos; 265 struct rb_node *pos;
266
267 spin_lock(&clp->cl_lock);
268 pos = rb_first(&server->state_owners);
269 if (pos != NULL) {
270 sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
271 cred = get_rpccred(sp->so_cred);
272 }
273 spin_unlock(&clp->cl_lock);
274 return cred;
275}
276
277/**
278 * nfs4_get_setclientid_cred - Acquire credential for a setclientid operation
279 * @clp: client state handle
280 *
281 * Returns an rpc_cred with reference count bumped, or NULL.
282 */
283struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
284{
285 struct nfs_server *server;
217 struct rpc_cred *cred; 286 struct rpc_cred *cred;
218 287
219 spin_lock(&clp->cl_lock); 288 spin_lock(&clp->cl_lock);
220 cred = nfs4_get_machine_cred_locked(clp); 289 cred = nfs4_get_machine_cred_locked(clp);
290 spin_unlock(&clp->cl_lock);
221 if (cred != NULL) 291 if (cred != NULL)
222 goto out; 292 goto out;
223 pos = rb_first(&clp->cl_state_owners); 293
224 if (pos != NULL) { 294 rcu_read_lock();
225 sp = rb_entry(pos, struct nfs4_state_owner, so_client_node); 295 list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
226 cred = get_rpccred(sp->so_cred); 296 cred = nfs4_get_setclientid_cred_server(server);
297 if (cred != NULL)
298 break;
227 } 299 }
300 rcu_read_unlock();
301
228out: 302out:
229 spin_unlock(&clp->cl_lock);
230 return cred; 303 return cred;
231} 304}
232 305
233static void nfs_alloc_unique_id(struct rb_root *root, struct nfs_unique_id *new, 306static void nfs_alloc_unique_id_locked(struct rb_root *root,
234 __u64 minval, int maxbits) 307 struct nfs_unique_id *new,
308 __u64 minval, int maxbits)
235{ 309{
236 struct rb_node **p, *parent; 310 struct rb_node **p, *parent;
237 struct nfs_unique_id *pos; 311 struct nfs_unique_id *pos;
@@ -286,16 +360,15 @@ static void nfs_free_unique_id(struct rb_root *root, struct nfs_unique_id *id)
286} 360}
287 361
288static struct nfs4_state_owner * 362static struct nfs4_state_owner *
289nfs4_find_state_owner(struct nfs_server *server, struct rpc_cred *cred) 363nfs4_find_state_owner_locked(struct nfs_server *server, struct rpc_cred *cred)
290{ 364{
291 struct nfs_client *clp = server->nfs_client; 365 struct rb_node **p = &server->state_owners.rb_node,
292 struct rb_node **p = &clp->cl_state_owners.rb_node,
293 *parent = NULL; 366 *parent = NULL;
294 struct nfs4_state_owner *sp, *res = NULL; 367 struct nfs4_state_owner *sp, *res = NULL;
295 368
296 while (*p != NULL) { 369 while (*p != NULL) {
297 parent = *p; 370 parent = *p;
298 sp = rb_entry(parent, struct nfs4_state_owner, so_client_node); 371 sp = rb_entry(parent, struct nfs4_state_owner, so_server_node);
299 372
300 if (server < sp->so_server) { 373 if (server < sp->so_server) {
301 p = &parent->rb_left; 374 p = &parent->rb_left;
@@ -319,24 +392,17 @@ nfs4_find_state_owner(struct nfs_server *server, struct rpc_cred *cred)
319} 392}
320 393
321static struct nfs4_state_owner * 394static struct nfs4_state_owner *
322nfs4_insert_state_owner(struct nfs_client *clp, struct nfs4_state_owner *new) 395nfs4_insert_state_owner_locked(struct nfs4_state_owner *new)
323{ 396{
324 struct rb_node **p = &clp->cl_state_owners.rb_node, 397 struct nfs_server *server = new->so_server;
398 struct rb_node **p = &server->state_owners.rb_node,
325 *parent = NULL; 399 *parent = NULL;
326 struct nfs4_state_owner *sp; 400 struct nfs4_state_owner *sp;
327 401
328 while (*p != NULL) { 402 while (*p != NULL) {
329 parent = *p; 403 parent = *p;
330 sp = rb_entry(parent, struct nfs4_state_owner, so_client_node); 404 sp = rb_entry(parent, struct nfs4_state_owner, so_server_node);
331 405
332 if (new->so_server < sp->so_server) {
333 p = &parent->rb_left;
334 continue;
335 }
336 if (new->so_server > sp->so_server) {
337 p = &parent->rb_right;
338 continue;
339 }
340 if (new->so_cred < sp->so_cred) 406 if (new->so_cred < sp->so_cred)
341 p = &parent->rb_left; 407 p = &parent->rb_left;
342 else if (new->so_cred > sp->so_cred) 408 else if (new->so_cred > sp->so_cred)
@@ -346,18 +412,21 @@ nfs4_insert_state_owner(struct nfs_client *clp, struct nfs4_state_owner *new)
346 return sp; 412 return sp;
347 } 413 }
348 } 414 }
349 nfs_alloc_unique_id(&clp->cl_openowner_id, &new->so_owner_id, 1, 64); 415 nfs_alloc_unique_id_locked(&server->openowner_id,
350 rb_link_node(&new->so_client_node, parent, p); 416 &new->so_owner_id, 1, 64);
351 rb_insert_color(&new->so_client_node, &clp->cl_state_owners); 417 rb_link_node(&new->so_server_node, parent, p);
418 rb_insert_color(&new->so_server_node, &server->state_owners);
352 return new; 419 return new;
353} 420}
354 421
355static void 422static void
356nfs4_remove_state_owner(struct nfs_client *clp, struct nfs4_state_owner *sp) 423nfs4_remove_state_owner_locked(struct nfs4_state_owner *sp)
357{ 424{
358 if (!RB_EMPTY_NODE(&sp->so_client_node)) 425 struct nfs_server *server = sp->so_server;
359 rb_erase(&sp->so_client_node, &clp->cl_state_owners); 426
360 nfs_free_unique_id(&clp->cl_openowner_id, &sp->so_owner_id); 427 if (!RB_EMPTY_NODE(&sp->so_server_node))
428 rb_erase(&sp->so_server_node, &server->state_owners);
429 nfs_free_unique_id(&server->openowner_id, &sp->so_owner_id);
361} 430}
362 431
363/* 432/*
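
Because each nfs_server now owns its state-owner tree, the insert walk above compares only the credential pointers; the old so_server comparisons became dead weight and were dropped. The surrounding machinery is the stock kernel rb-tree idiom, walk to a leaf, rb_link_node(), rb_insert_color(), shown here keyed on a plain integer with a stand-in owner type:

    #include <linux/rbtree.h>

    struct owner {                          /* stand-in for nfs4_state_owner */
            struct rb_node node;
            unsigned long cred;             /* key: credential pointer value */
    };

    /* Classic kernel rb-tree insert: walk to the leaf, link, rebalance.
     * Returns the existing node on a duplicate key, or the new one. */
    static struct owner *owner_insert(struct rb_root *root, struct owner *new)
    {
            struct rb_node **p = &root->rb_node, *parent = NULL;
            struct owner *o;

            while (*p != NULL) {
                    parent = *p;
                    o = rb_entry(parent, struct owner, node);
                    if (new->cred < o->cred)
                            p = &parent->rb_left;
                    else if (new->cred > o->cred)
                            p = &parent->rb_right;
                    else
                            return o;       /* already present */
            }
            rb_link_node(&new->node, parent, p);
            rb_insert_color(&new->node, root);
            return new;
    }
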
@@ -386,23 +455,32 @@ nfs4_alloc_state_owner(void)
386static void 455static void
387nfs4_drop_state_owner(struct nfs4_state_owner *sp) 456nfs4_drop_state_owner(struct nfs4_state_owner *sp)
388{ 457{
389 if (!RB_EMPTY_NODE(&sp->so_client_node)) { 458 if (!RB_EMPTY_NODE(&sp->so_server_node)) {
390 struct nfs_client *clp = sp->so_server->nfs_client; 459 struct nfs_server *server = sp->so_server;
460 struct nfs_client *clp = server->nfs_client;
391 461
392 spin_lock(&clp->cl_lock); 462 spin_lock(&clp->cl_lock);
393 rb_erase(&sp->so_client_node, &clp->cl_state_owners); 463 rb_erase(&sp->so_server_node, &server->state_owners);
394 RB_CLEAR_NODE(&sp->so_client_node); 464 RB_CLEAR_NODE(&sp->so_server_node);
395 spin_unlock(&clp->cl_lock); 465 spin_unlock(&clp->cl_lock);
396 } 466 }
397} 467}
398 468
399struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred) 469/**
470 * nfs4_get_state_owner - Look up a state owner given a credential
471 * @server: nfs_server to search
472 * @cred: RPC credential to match
473 *
474 * Returns a pointer to an instantiated nfs4_state_owner struct, or NULL.
475 */
476struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server,
477 struct rpc_cred *cred)
400{ 478{
401 struct nfs_client *clp = server->nfs_client; 479 struct nfs_client *clp = server->nfs_client;
402 struct nfs4_state_owner *sp, *new; 480 struct nfs4_state_owner *sp, *new;
403 481
404 spin_lock(&clp->cl_lock); 482 spin_lock(&clp->cl_lock);
405 sp = nfs4_find_state_owner(server, cred); 483 sp = nfs4_find_state_owner_locked(server, cred);
406 spin_unlock(&clp->cl_lock); 484 spin_unlock(&clp->cl_lock);
407 if (sp != NULL) 485 if (sp != NULL)
408 return sp; 486 return sp;
@@ -412,7 +490,7 @@ struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct
412 new->so_server = server; 490 new->so_server = server;
413 new->so_cred = cred; 491 new->so_cred = cred;
414 spin_lock(&clp->cl_lock); 492 spin_lock(&clp->cl_lock);
415 sp = nfs4_insert_state_owner(clp, new); 493 sp = nfs4_insert_state_owner_locked(new);
416 spin_unlock(&clp->cl_lock); 494 spin_unlock(&clp->cl_lock);
417 if (sp == new) 495 if (sp == new)
418 get_rpccred(cred); 496 get_rpccred(cred);
@@ -423,6 +501,11 @@ struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct
423 return sp; 501 return sp;
424} 502}
425 503
504/**
505 * nfs4_put_state_owner - Release a nfs4_state_owner
506 * @sp: state owner data to release
507 *
508 */
426void nfs4_put_state_owner(struct nfs4_state_owner *sp) 509void nfs4_put_state_owner(struct nfs4_state_owner *sp)
427{ 510{
428 struct nfs_client *clp = sp->so_server->nfs_client; 511 struct nfs_client *clp = sp->so_server->nfs_client;
@@ -430,7 +513,7 @@ void nfs4_put_state_owner(struct nfs4_state_owner *sp)
430 513
431 if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock)) 514 if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
432 return; 515 return;
433 nfs4_remove_state_owner(clp, sp); 516 nfs4_remove_state_owner_locked(sp);
434 spin_unlock(&clp->cl_lock); 517 spin_unlock(&clp->cl_lock);
435 rpc_destroy_wait_queue(&sp->so_sequence.wait); 518 rpc_destroy_wait_queue(&sp->so_sequence.wait);
436 put_rpccred(cred); 519 put_rpccred(cred);
@@ -585,8 +668,11 @@ static void __nfs4_close(struct path *path, struct nfs4_state *state,
585 if (!call_close) { 668 if (!call_close) {
586 nfs4_put_open_state(state); 669 nfs4_put_open_state(state);
587 nfs4_put_state_owner(owner); 670 nfs4_put_state_owner(owner);
588 } else 671 } else {
589 nfs4_do_close(path, state, gfp_mask, wait); 672 bool roc = pnfs_roc(state->inode);
673
674 nfs4_do_close(path, state, gfp_mask, wait, roc);
675 }
590} 676}
591 677
592void nfs4_close_state(struct path *path, struct nfs4_state *state, fmode_t fmode) 678void nfs4_close_state(struct path *path, struct nfs4_state *state, fmode_t fmode)
@@ -633,7 +719,8 @@ __nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_p
633static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_pid, unsigned int type) 719static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_pid, unsigned int type)
634{ 720{
635 struct nfs4_lock_state *lsp; 721 struct nfs4_lock_state *lsp;
636 struct nfs_client *clp = state->owner->so_server->nfs_client; 722 struct nfs_server *server = state->owner->so_server;
723 struct nfs_client *clp = server->nfs_client;
637 724
638 lsp = kzalloc(sizeof(*lsp), GFP_NOFS); 725 lsp = kzalloc(sizeof(*lsp), GFP_NOFS);
639 if (lsp == NULL) 726 if (lsp == NULL)
@@ -657,7 +744,7 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
657 return NULL; 744 return NULL;
658 } 745 }
659 spin_lock(&clp->cl_lock); 746 spin_lock(&clp->cl_lock);
660 nfs_alloc_unique_id(&clp->cl_lockowner_id, &lsp->ls_id, 1, 64); 747 nfs_alloc_unique_id_locked(&server->lockowner_id, &lsp->ls_id, 1, 64);
661 spin_unlock(&clp->cl_lock); 748 spin_unlock(&clp->cl_lock);
662 INIT_LIST_HEAD(&lsp->ls_locks); 749 INIT_LIST_HEAD(&lsp->ls_locks);
663 return lsp; 750 return lsp;
@@ -665,10 +752,11 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
665 752
666static void nfs4_free_lock_state(struct nfs4_lock_state *lsp) 753static void nfs4_free_lock_state(struct nfs4_lock_state *lsp)
667{ 754{
668 struct nfs_client *clp = lsp->ls_state->owner->so_server->nfs_client; 755 struct nfs_server *server = lsp->ls_state->owner->so_server;
756 struct nfs_client *clp = server->nfs_client;
669 757
670 spin_lock(&clp->cl_lock); 758 spin_lock(&clp->cl_lock);
671 nfs_free_unique_id(&clp->cl_lockowner_id, &lsp->ls_id); 759 nfs_free_unique_id(&server->lockowner_id, &lsp->ls_id);
672 spin_unlock(&clp->cl_lock); 760 spin_unlock(&clp->cl_lock);
673 rpc_destroy_wait_queue(&lsp->ls_sequence.wait); 761 rpc_destroy_wait_queue(&lsp->ls_sequence.wait);
674 kfree(lsp); 762 kfree(lsp);
@@ -1114,15 +1202,19 @@ static void nfs4_clear_open_state(struct nfs4_state *state)
1114 } 1202 }
1115} 1203}
1116 1204
1117static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp, int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state)) 1205static void nfs4_reset_seqids(struct nfs_server *server,
1206 int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state))
1118{ 1207{
1208 struct nfs_client *clp = server->nfs_client;
1119 struct nfs4_state_owner *sp; 1209 struct nfs4_state_owner *sp;
1120 struct rb_node *pos; 1210 struct rb_node *pos;
1121 struct nfs4_state *state; 1211 struct nfs4_state *state;
1122 1212
1123 /* Reset all sequence ids to zero */ 1213 spin_lock(&clp->cl_lock);
1124 for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) { 1214 for (pos = rb_first(&server->state_owners);
1125 sp = rb_entry(pos, struct nfs4_state_owner, so_client_node); 1215 pos != NULL;
1216 pos = rb_next(pos)) {
1217 sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
1126 sp->so_seqid.flags = 0; 1218 sp->so_seqid.flags = 0;
1127 spin_lock(&sp->so_lock); 1219 spin_lock(&sp->so_lock);
1128 list_for_each_entry(state, &sp->so_states, open_states) { 1220 list_for_each_entry(state, &sp->so_states, open_states) {
@@ -1131,6 +1223,18 @@ static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp, int (*mark_re
1131 } 1223 }
1132 spin_unlock(&sp->so_lock); 1224 spin_unlock(&sp->so_lock);
1133 } 1225 }
1226 spin_unlock(&clp->cl_lock);
1227}
1228
1229static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp,
1230 int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state))
1231{
1232 struct nfs_server *server;
1233
1234 rcu_read_lock();
1235 list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
1236 nfs4_reset_seqids(server, mark_reclaim);
1237 rcu_read_unlock();
1134} 1238}
1135 1239
1136static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp) 1240static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp)
@@ -1148,25 +1252,41 @@ static void nfs4_reclaim_complete(struct nfs_client *clp,
1148 (void)ops->reclaim_complete(clp); 1252 (void)ops->reclaim_complete(clp);
1149} 1253}
1150 1254
1151static int nfs4_state_clear_reclaim_reboot(struct nfs_client *clp) 1255static void nfs4_clear_reclaim_server(struct nfs_server *server)
1152{ 1256{
1257 struct nfs_client *clp = server->nfs_client;
1153 struct nfs4_state_owner *sp; 1258 struct nfs4_state_owner *sp;
1154 struct rb_node *pos; 1259 struct rb_node *pos;
1155 struct nfs4_state *state; 1260 struct nfs4_state *state;
1156 1261
1157 if (!test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) 1262 spin_lock(&clp->cl_lock);
1158 return 0; 1263 for (pos = rb_first(&server->state_owners);
1159 1264 pos != NULL;
1160 for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) { 1265 pos = rb_next(pos)) {
1161 sp = rb_entry(pos, struct nfs4_state_owner, so_client_node); 1266 sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
1162 spin_lock(&sp->so_lock); 1267 spin_lock(&sp->so_lock);
1163 list_for_each_entry(state, &sp->so_states, open_states) { 1268 list_for_each_entry(state, &sp->so_states, open_states) {
1164 if (!test_and_clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags)) 1269 if (!test_and_clear_bit(NFS_STATE_RECLAIM_REBOOT,
1270 &state->flags))
1165 continue; 1271 continue;
1166 nfs4_state_mark_reclaim_nograce(clp, state); 1272 nfs4_state_mark_reclaim_nograce(clp, state);
1167 } 1273 }
1168 spin_unlock(&sp->so_lock); 1274 spin_unlock(&sp->so_lock);
1169 } 1275 }
1276 spin_unlock(&clp->cl_lock);
1277}
1278
1279static int nfs4_state_clear_reclaim_reboot(struct nfs_client *clp)
1280{
1281 struct nfs_server *server;
1282
1283 if (!test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
1284 return 0;
1285
1286 rcu_read_lock();
1287 list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
1288 nfs4_clear_reclaim_server(server);
1289 rcu_read_unlock();
1170 1290
1171 nfs_delegation_reap_unclaimed(clp); 1291 nfs_delegation_reap_unclaimed(clp);
1172 return 1; 1292 return 1;
@@ -1238,27 +1358,40 @@ static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
1238 1358
1239static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recovery_ops *ops) 1359static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recovery_ops *ops)
1240{ 1360{
1361 struct nfs4_state_owner *sp;
1362 struct nfs_server *server;
1241 struct rb_node *pos; 1363 struct rb_node *pos;
1242 int status = 0; 1364 int status = 0;
1243 1365
1244restart: 1366restart:
1245 spin_lock(&clp->cl_lock); 1367 rcu_read_lock();
1246 for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) { 1368 list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
1247 struct nfs4_state_owner *sp = rb_entry(pos, struct nfs4_state_owner, so_client_node); 1369 spin_lock(&clp->cl_lock);
1248 if (!test_and_clear_bit(ops->owner_flag_bit, &sp->so_flags)) 1370 for (pos = rb_first(&server->state_owners);
1249 continue; 1371 pos != NULL;
1250 atomic_inc(&sp->so_count); 1372 pos = rb_next(pos)) {
1251 spin_unlock(&clp->cl_lock); 1373 sp = rb_entry(pos,
1252 status = nfs4_reclaim_open_state(sp, ops); 1374 struct nfs4_state_owner, so_server_node);
1253 if (status < 0) { 1375 if (!test_and_clear_bit(ops->owner_flag_bit,
1254 set_bit(ops->owner_flag_bit, &sp->so_flags); 1376 &sp->so_flags))
1377 continue;
1378 atomic_inc(&sp->so_count);
1379 spin_unlock(&clp->cl_lock);
1380 rcu_read_unlock();
1381
1382 status = nfs4_reclaim_open_state(sp, ops);
1383 if (status < 0) {
1384 set_bit(ops->owner_flag_bit, &sp->so_flags);
1385 nfs4_put_state_owner(sp);
1386 return nfs4_recovery_handle_error(clp, status);
1387 }
1388
1255 nfs4_put_state_owner(sp); 1389 nfs4_put_state_owner(sp);
1256 return nfs4_recovery_handle_error(clp, status); 1390 goto restart;
1257 } 1391 }
1258 nfs4_put_state_owner(sp); 1392 spin_unlock(&clp->cl_lock);
1259 goto restart;
1260 } 1393 }
1261 spin_unlock(&clp->cl_lock); 1394 rcu_read_unlock();
1262 return status; 1395 return status;
1263} 1396}
1264 1397
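
The restructured nfs4_do_reclaim() follows the standard pattern for doing blocking work on elements of a lock-protected structure: pin the element (atomic_inc of so_count), drop both the spinlock and the RCU read lock, perform the slow recovery call, then restart the whole walk because the trees may have changed in the meantime. A runnable userspace analogue with a mutex-protected list and a hypothetical slow_work() step:

    #include <pthread.h>
    #include <stdio.h>

    struct item { struct item *next; int pinned, needs_work; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct item items[3] = {
            { &items[1], 0, 1 }, { &items[2], 0, 0 }, { NULL, 0, 1 },
    };
    static struct item *head = &items[0];

    static void slow_work(struct item *it) { it->needs_work = 0; }

    static void reclaim_all(void)
    {
    restart:
            pthread_mutex_lock(&lock);
            for (struct item *it = head; it; it = it->next) {
                    if (!it->needs_work)
                            continue;
                    it->pinned = 1;                 /* analogue of so_count++ */
                    pthread_mutex_unlock(&lock);    /* can't block under lock */
                    slow_work(it);
                    it->pinned = 0;
                    goto restart;                   /* list may have changed  */
            }
            pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
            reclaim_all();
            puts("reclaim complete");
            return 0;
    }
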
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 9f1826b012e6..2ab8e5cb8f59 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -71,8 +71,8 @@ static int nfs4_stat_to_errno(int);
71/* lock,open owner id: 71/* lock,open owner id:
72 * we currently use size 2 (u64) out of (NFS4_OPAQUE_LIMIT >> 2) 72 * we currently use size 2 (u64) out of (NFS4_OPAQUE_LIMIT >> 2)
73 */ 73 */
74#define open_owner_id_maxsz (1 + 4) 74#define open_owner_id_maxsz (1 + 1 + 4)
75#define lock_owner_id_maxsz (1 + 4) 75#define lock_owner_id_maxsz (1 + 1 + 4)
76#define decode_lockowner_maxsz (1 + XDR_QUADLEN(IDMAP_NAMESZ)) 76#define decode_lockowner_maxsz (1 + XDR_QUADLEN(IDMAP_NAMESZ))
77#define compound_encode_hdr_maxsz (3 + (NFS4_MAXTAGLEN >> 2)) 77#define compound_encode_hdr_maxsz (3 + (NFS4_MAXTAGLEN >> 2))
78#define compound_decode_hdr_maxsz (3 + (NFS4_MAXTAGLEN >> 2)) 78#define compound_decode_hdr_maxsz (3 + (NFS4_MAXTAGLEN >> 2))
@@ -1088,10 +1088,11 @@ static void encode_lockowner(struct xdr_stream *xdr, const struct nfs_lowner *lo
1088{ 1088{
1089 __be32 *p; 1089 __be32 *p;
1090 1090
1091 p = reserve_space(xdr, 28); 1091 p = reserve_space(xdr, 32);
1092 p = xdr_encode_hyper(p, lowner->clientid); 1092 p = xdr_encode_hyper(p, lowner->clientid);
1093 *p++ = cpu_to_be32(16); 1093 *p++ = cpu_to_be32(20);
1094 p = xdr_encode_opaque_fixed(p, "lock id:", 8); 1094 p = xdr_encode_opaque_fixed(p, "lock id:", 8);
1095 *p++ = cpu_to_be32(lowner->s_dev);
1095 xdr_encode_hyper(p, lowner->id); 1096 xdr_encode_hyper(p, lowner->id);
1096} 1097}
1097 1098
@@ -1210,10 +1211,11 @@ static inline void encode_openhdr(struct xdr_stream *xdr, const struct nfs_opena
1210 *p++ = cpu_to_be32(OP_OPEN); 1211 *p++ = cpu_to_be32(OP_OPEN);
1211 *p = cpu_to_be32(arg->seqid->sequence->counter); 1212 *p = cpu_to_be32(arg->seqid->sequence->counter);
1212 encode_share_access(xdr, arg->fmode); 1213 encode_share_access(xdr, arg->fmode);
1213 p = reserve_space(xdr, 28); 1214 p = reserve_space(xdr, 32);
1214 p = xdr_encode_hyper(p, arg->clientid); 1215 p = xdr_encode_hyper(p, arg->clientid);
1215 *p++ = cpu_to_be32(16); 1216 *p++ = cpu_to_be32(20);
1216 p = xdr_encode_opaque_fixed(p, "open id:", 8); 1217 p = xdr_encode_opaque_fixed(p, "open id:", 8);
1218 *p++ = cpu_to_be32(arg->server->s_dev);
1217 xdr_encode_hyper(p, arg->id); 1219 xdr_encode_hyper(p, arg->id);
1218} 1220}
1219 1221
@@ -1510,7 +1512,7 @@ encode_restorefh(struct xdr_stream *xdr, struct compound_hdr *hdr)
1510 hdr->replen += decode_restorefh_maxsz; 1512 hdr->replen += decode_restorefh_maxsz;
1511} 1513}
1512 1514
1513static int 1515static void
1514encode_setacl(struct xdr_stream *xdr, struct nfs_setaclargs *arg, struct compound_hdr *hdr) 1516encode_setacl(struct xdr_stream *xdr, struct nfs_setaclargs *arg, struct compound_hdr *hdr)
1515{ 1517{
1516 __be32 *p; 1518 __be32 *p;
@@ -1521,14 +1523,12 @@ encode_setacl(struct xdr_stream *xdr, struct nfs_setaclargs *arg, struct compoun
1521 p = reserve_space(xdr, 2*4); 1523 p = reserve_space(xdr, 2*4);
1522 *p++ = cpu_to_be32(1); 1524 *p++ = cpu_to_be32(1);
1523 *p = cpu_to_be32(FATTR4_WORD0_ACL); 1525 *p = cpu_to_be32(FATTR4_WORD0_ACL);
1524 if (arg->acl_len % 4) 1526 BUG_ON(arg->acl_len % 4);
1525 return -EINVAL;
1526 p = reserve_space(xdr, 4); 1527 p = reserve_space(xdr, 4);
1527 *p = cpu_to_be32(arg->acl_len); 1528 *p = cpu_to_be32(arg->acl_len);
1528 xdr_write_pages(xdr, arg->acl_pages, arg->acl_pgbase, arg->acl_len); 1529 xdr_write_pages(xdr, arg->acl_pages, arg->acl_pgbase, arg->acl_len);
1529 hdr->nops++; 1530 hdr->nops++;
1530 hdr->replen += decode_setacl_maxsz; 1531 hdr->replen += decode_setacl_maxsz;
1531 return 0;
1532} 1532}
1533 1533
1534static void 1534static void
@@ -1789,7 +1789,6 @@ encode_layoutget(struct xdr_stream *xdr,
1789 const struct nfs4_layoutget_args *args, 1789 const struct nfs4_layoutget_args *args,
1790 struct compound_hdr *hdr) 1790 struct compound_hdr *hdr)
1791{ 1791{
1792 nfs4_stateid stateid;
1793 __be32 *p; 1792 __be32 *p;
1794 1793
1795 p = reserve_space(xdr, 44 + NFS4_STATEID_SIZE); 1794 p = reserve_space(xdr, 44 + NFS4_STATEID_SIZE);
@@ -1800,9 +1799,7 @@ encode_layoutget(struct xdr_stream *xdr,
1800 p = xdr_encode_hyper(p, args->range.offset); 1799 p = xdr_encode_hyper(p, args->range.offset);
1801 p = xdr_encode_hyper(p, args->range.length); 1800 p = xdr_encode_hyper(p, args->range.length);
1802 p = xdr_encode_hyper(p, args->minlength); 1801 p = xdr_encode_hyper(p, args->minlength);
1803 pnfs_get_layout_stateid(&stateid, NFS_I(args->inode)->layout, 1802 p = xdr_encode_opaque_fixed(p, &args->stateid.data, NFS4_STATEID_SIZE);
1804 args->ctx->state);
1805 p = xdr_encode_opaque_fixed(p, &stateid.data, NFS4_STATEID_SIZE);
1806 *p = cpu_to_be32(args->maxcount); 1803 *p = cpu_to_be32(args->maxcount);
1807 1804
1808 dprintk("%s: 1st type:0x%x iomode:%d off:%lu len:%lu mc:%d\n", 1805 dprintk("%s: 1st type:0x%x iomode:%d off:%lu len:%lu mc:%d\n",
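
Everything from here to the end of the file is one mechanical conversion: the RPC layer now hands every encoder a pre-initialized xdr_stream, so each nfs4_xdr_enc_* function loses its local struct xdr_stream, its xdr_init_encode() call, and its always-zero int return, and simply threads the caller's stream pointer through the encode_* helpers. Schematically (foo is a hypothetical operation; this compiles only against kernel headers):

    /* Before: each encoder owned its stream and returned a meaningless 0. */
    static int enc_foo_old(struct rpc_rqst *req, __be32 *p,
                           const struct foo_args *args)
    {
            struct xdr_stream xdr;

            xdr_init_encode(&xdr, &req->rq_snd_buf, p);
            /* ... encode_*(&xdr, ...) calls ... */
            return 0;
    }

    /* After: the stream arrives pre-initialized from the RPC layer. */
    static void enc_foo_new(struct rpc_rqst *req, struct xdr_stream *xdr,
                            const struct foo_args *args)
    {
            /* ... encode_*(xdr, ...) calls ... */
    }
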
@@ -1833,393 +1830,362 @@ static u32 nfs4_xdr_minorversion(const struct nfs4_sequence_args *args)
1833/* 1830/*
1834 * Encode an ACCESS request 1831 * Encode an ACCESS request
1835 */ 1832 */
1836static int nfs4_xdr_enc_access(struct rpc_rqst *req, __be32 *p, const struct nfs4_accessargs *args) 1833static void nfs4_xdr_enc_access(struct rpc_rqst *req, struct xdr_stream *xdr,
1834 const struct nfs4_accessargs *args)
1837{ 1835{
1838 struct xdr_stream xdr;
1839 struct compound_hdr hdr = { 1836 struct compound_hdr hdr = {
1840 .minorversion = nfs4_xdr_minorversion(&args->seq_args), 1837 .minorversion = nfs4_xdr_minorversion(&args->seq_args),
1841 }; 1838 };
1842 1839
1843 xdr_init_encode(&xdr, &req->rq_snd_buf, p); 1840 encode_compound_hdr(xdr, req, &hdr);
1844 encode_compound_hdr(&xdr, req, &hdr); 1841 encode_sequence(xdr, &args->seq_args, &hdr);
1845 encode_sequence(&xdr, &args->seq_args, &hdr); 1842 encode_putfh(xdr, args->fh, &hdr);
1846 encode_putfh(&xdr, args->fh, &hdr); 1843 encode_access(xdr, args->access, &hdr);
1847 encode_access(&xdr, args->access, &hdr); 1844 encode_getfattr(xdr, args->bitmask, &hdr);
1848 encode_getfattr(&xdr, args->bitmask, &hdr);
1849 encode_nops(&hdr); 1845 encode_nops(&hdr);
1850 return 0;
1851} 1846}
1852 1847
1853/* 1848/*
1854 * Encode LOOKUP request 1849 * Encode LOOKUP request
1855 */ 1850 */
1856static int nfs4_xdr_enc_lookup(struct rpc_rqst *req, __be32 *p, const struct nfs4_lookup_arg *args) 1851static void nfs4_xdr_enc_lookup(struct rpc_rqst *req, struct xdr_stream *xdr,
1852 const struct nfs4_lookup_arg *args)
1857{ 1853{
1858 struct xdr_stream xdr;
1859 struct compound_hdr hdr = { 1854 struct compound_hdr hdr = {
1860 .minorversion = nfs4_xdr_minorversion(&args->seq_args), 1855 .minorversion = nfs4_xdr_minorversion(&args->seq_args),
1861 }; 1856 };
1862 1857
1863 xdr_init_encode(&xdr, &req->rq_snd_buf, p); 1858 encode_compound_hdr(xdr, req, &hdr);
1864 encode_compound_hdr(&xdr, req, &hdr); 1859 encode_sequence(xdr, &args->seq_args, &hdr);
1865 encode_sequence(&xdr, &args->seq_args, &hdr); 1860 encode_putfh(xdr, args->dir_fh, &hdr);
1866 encode_putfh(&xdr, args->dir_fh, &hdr); 1861 encode_lookup(xdr, args->name, &hdr);
1867 encode_lookup(&xdr, args->name, &hdr); 1862 encode_getfh(xdr, &hdr);
1868 encode_getfh(&xdr, &hdr); 1863 encode_getfattr(xdr, args->bitmask, &hdr);
1869 encode_getfattr(&xdr, args->bitmask, &hdr);
1870 encode_nops(&hdr); 1864 encode_nops(&hdr);
1871 return 0;
1872} 1865}
1873 1866
1874/* 1867/*
1875 * Encode LOOKUP_ROOT request 1868 * Encode LOOKUP_ROOT request
1876 */ 1869 */
1877static int nfs4_xdr_enc_lookup_root(struct rpc_rqst *req, __be32 *p, const struct nfs4_lookup_root_arg *args) 1870static void nfs4_xdr_enc_lookup_root(struct rpc_rqst *req,
1871 struct xdr_stream *xdr,
1872 const struct nfs4_lookup_root_arg *args)
1878{ 1873{
1879 struct xdr_stream xdr;
1880 struct compound_hdr hdr = { 1874 struct compound_hdr hdr = {
1881 .minorversion = nfs4_xdr_minorversion(&args->seq_args), 1875 .minorversion = nfs4_xdr_minorversion(&args->seq_args),
1882 }; 1876 };
1883 1877
1884 xdr_init_encode(&xdr, &req->rq_snd_buf, p); 1878 encode_compound_hdr(xdr, req, &hdr);
1885 encode_compound_hdr(&xdr, req, &hdr); 1879 encode_sequence(xdr, &args->seq_args, &hdr);
1886 encode_sequence(&xdr, &args->seq_args, &hdr); 1880 encode_putrootfh(xdr, &hdr);
1887 encode_putrootfh(&xdr, &hdr); 1881 encode_getfh(xdr, &hdr);
1888 encode_getfh(&xdr, &hdr); 1882 encode_getfattr(xdr, args->bitmask, &hdr);
1889 encode_getfattr(&xdr, args->bitmask, &hdr);
1890 encode_nops(&hdr); 1883 encode_nops(&hdr);
1891 return 0;
1892} 1884}
1893 1885
1894/* 1886/*
1895 * Encode REMOVE request 1887 * Encode REMOVE request
1896 */ 1888 */
1897static int nfs4_xdr_enc_remove(struct rpc_rqst *req, __be32 *p, const struct nfs_removeargs *args) 1889static void nfs4_xdr_enc_remove(struct rpc_rqst *req, struct xdr_stream *xdr,
1890 const struct nfs_removeargs *args)
1898{ 1891{
1899 struct xdr_stream xdr;
1900 struct compound_hdr hdr = { 1892 struct compound_hdr hdr = {
1901 .minorversion = nfs4_xdr_minorversion(&args->seq_args), 1893 .minorversion = nfs4_xdr_minorversion(&args->seq_args),
1902 }; 1894 };
1903 1895
1904 xdr_init_encode(&xdr, &req->rq_snd_buf, p); 1896 encode_compound_hdr(xdr, req, &hdr);
1905 encode_compound_hdr(&xdr, req, &hdr); 1897 encode_sequence(xdr, &args->seq_args, &hdr);
1906 encode_sequence(&xdr, &args->seq_args, &hdr); 1898 encode_putfh(xdr, args->fh, &hdr);
1907 encode_putfh(&xdr, args->fh, &hdr); 1899 encode_remove(xdr, &args->name, &hdr);
1908 encode_remove(&xdr, &args->name, &hdr); 1900 encode_getfattr(xdr, args->bitmask, &hdr);
1909 encode_getfattr(&xdr, args->bitmask, &hdr);
1910 encode_nops(&hdr); 1901 encode_nops(&hdr);
1911 return 0;
1912} 1902}
1913 1903
1914/* 1904/*
1915 * Encode RENAME request 1905 * Encode RENAME request
1916 */ 1906 */
1917static int nfs4_xdr_enc_rename(struct rpc_rqst *req, __be32 *p, const struct nfs_renameargs *args) 1907static void nfs4_xdr_enc_rename(struct rpc_rqst *req, struct xdr_stream *xdr,
1908 const struct nfs_renameargs *args)
1918{ 1909{
1919 struct xdr_stream xdr;
1920 struct compound_hdr hdr = { 1910 struct compound_hdr hdr = {
1921 .minorversion = nfs4_xdr_minorversion(&args->seq_args), 1911 .minorversion = nfs4_xdr_minorversion(&args->seq_args),
1922 }; 1912 };
1923 1913
1924 xdr_init_encode(&xdr, &req->rq_snd_buf, p); 1914 encode_compound_hdr(xdr, req, &hdr);
1925 encode_compound_hdr(&xdr, req, &hdr); 1915 encode_sequence(xdr, &args->seq_args, &hdr);
1926 encode_sequence(&xdr, &args->seq_args, &hdr); 1916 encode_putfh(xdr, args->old_dir, &hdr);
1927 encode_putfh(&xdr, args->old_dir, &hdr); 1917 encode_savefh(xdr, &hdr);
1928 encode_savefh(&xdr, &hdr); 1918 encode_putfh(xdr, args->new_dir, &hdr);
1929 encode_putfh(&xdr, args->new_dir, &hdr); 1919 encode_rename(xdr, args->old_name, args->new_name, &hdr);
1930 encode_rename(&xdr, args->old_name, args->new_name, &hdr); 1920 encode_getfattr(xdr, args->bitmask, &hdr);
1931 encode_getfattr(&xdr, args->bitmask, &hdr); 1921 encode_restorefh(xdr, &hdr);
1932 encode_restorefh(&xdr, &hdr); 1922 encode_getfattr(xdr, args->bitmask, &hdr);
1933 encode_getfattr(&xdr, args->bitmask, &hdr);
1934 encode_nops(&hdr); 1923 encode_nops(&hdr);
1935 return 0;
1936} 1924}
1937 1925
1938/* 1926/*
1939 * Encode LINK request 1927 * Encode LINK request
1940 */ 1928 */
1941static int nfs4_xdr_enc_link(struct rpc_rqst *req, __be32 *p, const struct nfs4_link_arg *args) 1929static void nfs4_xdr_enc_link(struct rpc_rqst *req, struct xdr_stream *xdr,
1930 const struct nfs4_link_arg *args)
1942{ 1931{
1943 struct xdr_stream xdr;
1944 struct compound_hdr hdr = { 1932 struct compound_hdr hdr = {
1945 .minorversion = nfs4_xdr_minorversion(&args->seq_args), 1933 .minorversion = nfs4_xdr_minorversion(&args->seq_args),
1946 }; 1934 };
1947 1935
1948 xdr_init_encode(&xdr, &req->rq_snd_buf, p); 1936 encode_compound_hdr(xdr, req, &hdr);
1949 encode_compound_hdr(&xdr, req, &hdr); 1937 encode_sequence(xdr, &args->seq_args, &hdr);
1950 encode_sequence(&xdr, &args->seq_args, &hdr); 1938 encode_putfh(xdr, args->fh, &hdr);
1951 encode_putfh(&xdr, args->fh, &hdr); 1939 encode_savefh(xdr, &hdr);
1952 encode_savefh(&xdr, &hdr); 1940 encode_putfh(xdr, args->dir_fh, &hdr);
1953 encode_putfh(&xdr, args->dir_fh, &hdr); 1941 encode_link(xdr, args->name, &hdr);
1954 encode_link(&xdr, args->name, &hdr); 1942 encode_getfattr(xdr, args->bitmask, &hdr);
1955 encode_getfattr(&xdr, args->bitmask, &hdr); 1943 encode_restorefh(xdr, &hdr);
1956 encode_restorefh(&xdr, &hdr); 1944 encode_getfattr(xdr, args->bitmask, &hdr);
1957 encode_getfattr(&xdr, args->bitmask, &hdr);
1958 encode_nops(&hdr); 1945 encode_nops(&hdr);
1959 return 0;
1960} 1946}
1961 1947
1962/* 1948/*
1963 * Encode CREATE request 1949 * Encode CREATE request
1964 */ 1950 */
1965static int nfs4_xdr_enc_create(struct rpc_rqst *req, __be32 *p, const struct nfs4_create_arg *args) 1951static void nfs4_xdr_enc_create(struct rpc_rqst *req, struct xdr_stream *xdr,
1952 const struct nfs4_create_arg *args)
1966{ 1953{
1967 struct xdr_stream xdr;
1968 struct compound_hdr hdr = { 1954 struct compound_hdr hdr = {
1969 .minorversion = nfs4_xdr_minorversion(&args->seq_args), 1955 .minorversion = nfs4_xdr_minorversion(&args->seq_args),
1970 }; 1956 };
1971 1957
1972 xdr_init_encode(&xdr, &req->rq_snd_buf, p); 1958 encode_compound_hdr(xdr, req, &hdr);
1973 encode_compound_hdr(&xdr, req, &hdr); 1959 encode_sequence(xdr, &args->seq_args, &hdr);
1974 encode_sequence(&xdr, &args->seq_args, &hdr); 1960 encode_putfh(xdr, args->dir_fh, &hdr);
1975 encode_putfh(&xdr, args->dir_fh, &hdr); 1961 encode_savefh(xdr, &hdr);
1976 encode_savefh(&xdr, &hdr); 1962 encode_create(xdr, args, &hdr);
1977 encode_create(&xdr, args, &hdr); 1963 encode_getfh(xdr, &hdr);
1978 encode_getfh(&xdr, &hdr); 1964 encode_getfattr(xdr, args->bitmask, &hdr);
1979 encode_getfattr(&xdr, args->bitmask, &hdr); 1965 encode_restorefh(xdr, &hdr);
1980 encode_restorefh(&xdr, &hdr); 1966 encode_getfattr(xdr, args->bitmask, &hdr);
1981 encode_getfattr(&xdr, args->bitmask, &hdr);
1982 encode_nops(&hdr); 1967 encode_nops(&hdr);
1983 return 0;
1984} 1968}
1985 1969
1986/* 1970/*
1987 * Encode SYMLINK request 1971 * Encode SYMLINK request
1988 */ 1972 */
1989static int nfs4_xdr_enc_symlink(struct rpc_rqst *req, __be32 *p, const struct nfs4_create_arg *args) 1973static void nfs4_xdr_enc_symlink(struct rpc_rqst *req, struct xdr_stream *xdr,
1974 const struct nfs4_create_arg *args)
1990{ 1975{
1991 return nfs4_xdr_enc_create(req, p, args); 1976 nfs4_xdr_enc_create(req, xdr, args);
1992} 1977}
1993 1978
1994/* 1979/*
1995 * Encode GETATTR request 1980 * Encode GETATTR request
1996 */ 1981 */
1997static int nfs4_xdr_enc_getattr(struct rpc_rqst *req, __be32 *p, const struct nfs4_getattr_arg *args) 1982static void nfs4_xdr_enc_getattr(struct rpc_rqst *req, struct xdr_stream *xdr,
1983 const struct nfs4_getattr_arg *args)
1998{ 1984{
1999 struct xdr_stream xdr;
2000 struct compound_hdr hdr = { 1985 struct compound_hdr hdr = {
2001 .minorversion = nfs4_xdr_minorversion(&args->seq_args), 1986 .minorversion = nfs4_xdr_minorversion(&args->seq_args),
2002 }; 1987 };
2003 1988
2004 xdr_init_encode(&xdr, &req->rq_snd_buf, p); 1989 encode_compound_hdr(xdr, req, &hdr);
2005 encode_compound_hdr(&xdr, req, &hdr); 1990 encode_sequence(xdr, &args->seq_args, &hdr);
2006 encode_sequence(&xdr, &args->seq_args, &hdr); 1991 encode_putfh(xdr, args->fh, &hdr);
2007 encode_putfh(&xdr, args->fh, &hdr); 1992 encode_getfattr(xdr, args->bitmask, &hdr);
2008 encode_getfattr(&xdr, args->bitmask, &hdr);
2009 encode_nops(&hdr); 1993 encode_nops(&hdr);
2010 return 0;
2011} 1994}
2012 1995
2013/* 1996/*
2014 * Encode a CLOSE request 1997 * Encode a CLOSE request
2015 */ 1998 */
2016static int nfs4_xdr_enc_close(struct rpc_rqst *req, __be32 *p, struct nfs_closeargs *args) 1999static void nfs4_xdr_enc_close(struct rpc_rqst *req, struct xdr_stream *xdr,
2000 struct nfs_closeargs *args)
2017{ 2001{
2018 struct xdr_stream xdr;
2019 struct compound_hdr hdr = { 2002 struct compound_hdr hdr = {
2020 .minorversion = nfs4_xdr_minorversion(&args->seq_args), 2003 .minorversion = nfs4_xdr_minorversion(&args->seq_args),
2021 }; 2004 };
2022 2005
2023 xdr_init_encode(&xdr, &req->rq_snd_buf, p); 2006 encode_compound_hdr(xdr, req, &hdr);
2024 encode_compound_hdr(&xdr, req, &hdr); 2007 encode_sequence(xdr, &args->seq_args, &hdr);
2025 encode_sequence(&xdr, &args->seq_args, &hdr); 2008 encode_putfh(xdr, args->fh, &hdr);
2026 encode_putfh(&xdr, args->fh, &hdr); 2009 encode_close(xdr, args, &hdr);
2027 encode_close(&xdr, args, &hdr); 2010 encode_getfattr(xdr, args->bitmask, &hdr);
2028 encode_getfattr(&xdr, args->bitmask, &hdr);
2029 encode_nops(&hdr); 2011 encode_nops(&hdr);
2030 return 0;
2031} 2012}
2032 2013
2033/* 2014/*
2034 * Encode an OPEN request 2015 * Encode an OPEN request
2035 */ 2016 */
2036static int nfs4_xdr_enc_open(struct rpc_rqst *req, __be32 *p, struct nfs_openargs *args) 2017static void nfs4_xdr_enc_open(struct rpc_rqst *req, struct xdr_stream *xdr,
2018 struct nfs_openargs *args)
2037{ 2019{
2038 struct xdr_stream xdr;
2039 struct compound_hdr hdr = { 2020 struct compound_hdr hdr = {
2040 .minorversion = nfs4_xdr_minorversion(&args->seq_args), 2021 .minorversion = nfs4_xdr_minorversion(&args->seq_args),
2041 }; 2022 };
2042 2023
2043 xdr_init_encode(&xdr, &req->rq_snd_buf, p); 2024 encode_compound_hdr(xdr, req, &hdr);
2044 encode_compound_hdr(&xdr, req, &hdr); 2025 encode_sequence(xdr, &args->seq_args, &hdr);
2045 encode_sequence(&xdr, &args->seq_args, &hdr); 2026 encode_putfh(xdr, args->fh, &hdr);
2046 encode_putfh(&xdr, args->fh, &hdr); 2027 encode_savefh(xdr, &hdr);
2047 encode_savefh(&xdr, &hdr); 2028 encode_open(xdr, args, &hdr);
2048 encode_open(&xdr, args, &hdr); 2029 encode_getfh(xdr, &hdr);
2049 encode_getfh(&xdr, &hdr); 2030 encode_getfattr(xdr, args->bitmask, &hdr);
2050 encode_getfattr(&xdr, args->bitmask, &hdr); 2031 encode_restorefh(xdr, &hdr);
2051 encode_restorefh(&xdr, &hdr); 2032 encode_getfattr(xdr, args->bitmask, &hdr);
2052 encode_getfattr(&xdr, args->bitmask, &hdr);
2053 encode_nops(&hdr); 2033 encode_nops(&hdr);
2054 return 0;
2055} 2034}
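
encode_nops() comes last in every encoder because a COMPOUND header carries its operation count up front, yet the count is only known after all the encode_* helpers have run: each helper increments hdr.nops as it emits its op, and encode_compound_hdr() reserves the count word and keeps a pointer to it. A sketch of the back-patch; the pointer field name (nops_p) is recalled from this file and is not visible in the hunk:

	static void encode_nops(struct compound_hdr *hdr)
	{
		/* write the final op count into the word reserved by
		 * encode_compound_hdr() at the start of the buffer */
		*hdr->nops_p = htonl(hdr->nops);
	}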
2056 2035
2057/* 2036/*
2058 * Encode an OPEN_CONFIRM request 2037 * Encode an OPEN_CONFIRM request
2059 */ 2038 */
2060static int nfs4_xdr_enc_open_confirm(struct rpc_rqst *req, __be32 *p, struct nfs_open_confirmargs *args) 2039static void nfs4_xdr_enc_open_confirm(struct rpc_rqst *req,
2040 struct xdr_stream *xdr,
2041 struct nfs_open_confirmargs *args)
2061{ 2042{
2062 struct xdr_stream xdr;
2063 struct compound_hdr hdr = { 2043 struct compound_hdr hdr = {
2064 .nops = 0, 2044 .nops = 0,
2065 }; 2045 };
2066 2046
2067 xdr_init_encode(&xdr, &req->rq_snd_buf, p); 2047 encode_compound_hdr(xdr, req, &hdr);
2068 encode_compound_hdr(&xdr, req, &hdr); 2048 encode_putfh(xdr, args->fh, &hdr);
2069 encode_putfh(&xdr, args->fh, &hdr); 2049 encode_open_confirm(xdr, args, &hdr);
2070 encode_open_confirm(&xdr, args, &hdr);
2071 encode_nops(&hdr); 2050 encode_nops(&hdr);
2072 return 0;
2073} 2051}
2074 2052
2075/* 2053/*
2076 * Encode an OPEN request with no attributes. 2054 * Encode an OPEN request with no attributes.
2077 */ 2055 */
2078static int nfs4_xdr_enc_open_noattr(struct rpc_rqst *req, __be32 *p, struct nfs_openargs *args) 2056static void nfs4_xdr_enc_open_noattr(struct rpc_rqst *req,
2057 struct xdr_stream *xdr,
2058 struct nfs_openargs *args)
2079{ 2059{
2080 struct xdr_stream xdr;
2081 struct compound_hdr hdr = { 2060 struct compound_hdr hdr = {
2082 .minorversion = nfs4_xdr_minorversion(&args->seq_args), 2061 .minorversion = nfs4_xdr_minorversion(&args->seq_args),
2083 }; 2062 };
2084 2063
2085 xdr_init_encode(&xdr, &req->rq_snd_buf, p); 2064 encode_compound_hdr(xdr, req, &hdr);
2086 encode_compound_hdr(&xdr, req, &hdr); 2065 encode_sequence(xdr, &args->seq_args, &hdr);
2087 encode_sequence(&xdr, &args->seq_args, &hdr); 2066 encode_putfh(xdr, args->fh, &hdr);
2088 encode_putfh(&xdr, args->fh, &hdr); 2067 encode_open(xdr, args, &hdr);
2089 encode_open(&xdr, args, &hdr); 2068 encode_getfattr(xdr, args->bitmask, &hdr);
2090 encode_getfattr(&xdr, args->bitmask, &hdr);
2091 encode_nops(&hdr); 2069 encode_nops(&hdr);
2092 return 0;
2093} 2070}
2094 2071
2095/* 2072/*
2096 * Encode an OPEN_DOWNGRADE request 2073 * Encode an OPEN_DOWNGRADE request
2097 */ 2074 */
2098static int nfs4_xdr_enc_open_downgrade(struct rpc_rqst *req, __be32 *p, struct nfs_closeargs *args) 2075static void nfs4_xdr_enc_open_downgrade(struct rpc_rqst *req,
2076 struct xdr_stream *xdr,
2077 struct nfs_closeargs *args)
2099{ 2078{
2100 struct xdr_stream xdr;
2101 struct compound_hdr hdr = { 2079 struct compound_hdr hdr = {
2102 .minorversion = nfs4_xdr_minorversion(&args->seq_args), 2080 .minorversion = nfs4_xdr_minorversion(&args->seq_args),
2103 }; 2081 };
2104 2082
2105 xdr_init_encode(&xdr, &req->rq_snd_buf, p); 2083 encode_compound_hdr(xdr, req, &hdr);
2106 encode_compound_hdr(&xdr, req, &hdr); 2084 encode_sequence(xdr, &args->seq_args, &hdr);
2107 encode_sequence(&xdr, &args->seq_args, &hdr); 2085 encode_putfh(xdr, args->fh, &hdr);
2108 encode_putfh(&xdr, args->fh, &hdr); 2086 encode_open_downgrade(xdr, args, &hdr);
2109 encode_open_downgrade(&xdr, args, &hdr); 2087 encode_getfattr(xdr, args->bitmask, &hdr);
2110 encode_getfattr(&xdr, args->bitmask, &hdr);
2111 encode_nops(&hdr); 2088 encode_nops(&hdr);
2112 return 0;
2113} 2089}
2114 2090
2115/* 2091/*
2116 * Encode a LOCK request 2092 * Encode a LOCK request
2117 */ 2093 */
2118static int nfs4_xdr_enc_lock(struct rpc_rqst *req, __be32 *p, struct nfs_lock_args *args) 2094static void nfs4_xdr_enc_lock(struct rpc_rqst *req, struct xdr_stream *xdr,
2095 struct nfs_lock_args *args)
2119{ 2096{
2120 struct xdr_stream xdr;
2121 struct compound_hdr hdr = { 2097 struct compound_hdr hdr = {
2122 .minorversion = nfs4_xdr_minorversion(&args->seq_args), 2098 .minorversion = nfs4_xdr_minorversion(&args->seq_args),
2123 }; 2099 };
2124 2100
2125 xdr_init_encode(&xdr, &req->rq_snd_buf, p); 2101 encode_compound_hdr(xdr, req, &hdr);
2126 encode_compound_hdr(&xdr, req, &hdr); 2102 encode_sequence(xdr, &args->seq_args, &hdr);
2127 encode_sequence(&xdr, &args->seq_args, &hdr); 2103 encode_putfh(xdr, args->fh, &hdr);
2128 encode_putfh(&xdr, args->fh, &hdr); 2104 encode_lock(xdr, args, &hdr);
2129 encode_lock(&xdr, args, &hdr);
2130 encode_nops(&hdr); 2105 encode_nops(&hdr);
2131 return 0;
2132} 2106}
2133 2107
2134/* 2108/*
2135 * Encode a LOCKT request 2109 * Encode a LOCKT request
2136 */ 2110 */
2137static int nfs4_xdr_enc_lockt(struct rpc_rqst *req, __be32 *p, struct nfs_lockt_args *args) 2111static void nfs4_xdr_enc_lockt(struct rpc_rqst *req, struct xdr_stream *xdr,
2112 struct nfs_lockt_args *args)
2138{ 2113{
2139 struct xdr_stream xdr;
2140 struct compound_hdr hdr = { 2114 struct compound_hdr hdr = {
2141 .minorversion = nfs4_xdr_minorversion(&args->seq_args), 2115 .minorversion = nfs4_xdr_minorversion(&args->seq_args),
2142 }; 2116 };
2143 2117
2144 xdr_init_encode(&xdr, &req->rq_snd_buf, p); 2118 encode_compound_hdr(xdr, req, &hdr);
2145 encode_compound_hdr(&xdr, req, &hdr); 2119 encode_sequence(xdr, &args->seq_args, &hdr);
2146 encode_sequence(&xdr, &args->seq_args, &hdr); 2120 encode_putfh(xdr, args->fh, &hdr);
2147 encode_putfh(&xdr, args->fh, &hdr); 2121 encode_lockt(xdr, args, &hdr);
2148 encode_lockt(&xdr, args, &hdr);
2149 encode_nops(&hdr); 2122 encode_nops(&hdr);
2150 return 0;
2151} 2123}
2152 2124
2153/* 2125/*
2154 * Encode a LOCKU request 2126 * Encode a LOCKU request
2155 */ 2127 */
2156static int nfs4_xdr_enc_locku(struct rpc_rqst *req, __be32 *p, struct nfs_locku_args *args) 2128static void nfs4_xdr_enc_locku(struct rpc_rqst *req, struct xdr_stream *xdr,
2129 struct nfs_locku_args *args)
2157{ 2130{
2158 struct xdr_stream xdr;
2159 struct compound_hdr hdr = { 2131 struct compound_hdr hdr = {
2160 .minorversion = nfs4_xdr_minorversion(&args->seq_args), 2132 .minorversion = nfs4_xdr_minorversion(&args->seq_args),
2161 }; 2133 };
2162 2134
2163 xdr_init_encode(&xdr, &req->rq_snd_buf, p); 2135 encode_compound_hdr(xdr, req, &hdr);
2164 encode_compound_hdr(&xdr, req, &hdr); 2136 encode_sequence(xdr, &args->seq_args, &hdr);
2165 encode_sequence(&xdr, &args->seq_args, &hdr); 2137 encode_putfh(xdr, args->fh, &hdr);
2166 encode_putfh(&xdr, args->fh, &hdr); 2138 encode_locku(xdr, args, &hdr);
2167 encode_locku(&xdr, args, &hdr);
2168 encode_nops(&hdr); 2139 encode_nops(&hdr);
2169 return 0;
2170} 2140}
2171 2141
2172static int nfs4_xdr_enc_release_lockowner(struct rpc_rqst *req, __be32 *p, struct nfs_release_lockowner_args *args) 2142static void nfs4_xdr_enc_release_lockowner(struct rpc_rqst *req,
2143 struct xdr_stream *xdr,
2144 struct nfs_release_lockowner_args *args)
2173{ 2145{
2174 struct xdr_stream xdr;
2175 struct compound_hdr hdr = { 2146 struct compound_hdr hdr = {
2176 .minorversion = 0, 2147 .minorversion = 0,
2177 }; 2148 };
2178 2149
2179 xdr_init_encode(&xdr, &req->rq_snd_buf, p); 2150 encode_compound_hdr(xdr, req, &hdr);
2180 encode_compound_hdr(&xdr, req, &hdr); 2151 encode_release_lockowner(xdr, &args->lock_owner, &hdr);
2181 encode_release_lockowner(&xdr, &args->lock_owner, &hdr);
2182 encode_nops(&hdr); 2152 encode_nops(&hdr);
2183 return 0;
2184} 2153}
2185 2154
2186/* 2155/*
2187 * Encode a READLINK request 2156 * Encode a READLINK request
2188 */ 2157 */
2189static int nfs4_xdr_enc_readlink(struct rpc_rqst *req, __be32 *p, const struct nfs4_readlink *args) 2158static void nfs4_xdr_enc_readlink(struct rpc_rqst *req, struct xdr_stream *xdr,
2159 const struct nfs4_readlink *args)
2190{ 2160{
2191 struct xdr_stream xdr;
2192 struct compound_hdr hdr = { 2161 struct compound_hdr hdr = {
2193 .minorversion = nfs4_xdr_minorversion(&args->seq_args), 2162 .minorversion = nfs4_xdr_minorversion(&args->seq_args),
2194 }; 2163 };
2195 2164
2196 xdr_init_encode(&xdr, &req->rq_snd_buf, p); 2165 encode_compound_hdr(xdr, req, &hdr);
2197 encode_compound_hdr(&xdr, req, &hdr); 2166 encode_sequence(xdr, &args->seq_args, &hdr);
2198 encode_sequence(&xdr, &args->seq_args, &hdr); 2167 encode_putfh(xdr, args->fh, &hdr);
2199 encode_putfh(&xdr, args->fh, &hdr); 2168 encode_readlink(xdr, args, req, &hdr);
2200 encode_readlink(&xdr, args, req, &hdr);
2201 2169
2202 xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2, args->pages, 2170 xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2, args->pages,
2203 args->pgbase, args->pglen); 2171 args->pgbase, args->pglen);
2204 encode_nops(&hdr); 2172 encode_nops(&hdr);
2205 return 0;
2206} 2173}
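
READLINK is the first encoder in the hunk that also primes the receive side: the link text is variable-length, so it is decoded straight into args->pages instead of the reply head. hdr.replen counts the fixed reply prefix in 32-bit XDR words, hence the shift by 2 to convert to a byte offset. Restating the call with the arithmetic spelled out:

	/* e.g. a 24-word reply prefix puts page data 96 bytes in */
	xdr_inline_pages(&req->rq_rcv_buf,
			 hdr.replen << 2,	/* XDR words -> bytes */
			 args->pages, args->pgbase, args->pglen);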
2207 2174
2208/* 2175/*
2209 * Encode a READDIR request 2176 * Encode a READDIR request
2210 */ 2177 */
2211static int nfs4_xdr_enc_readdir(struct rpc_rqst *req, __be32 *p, const struct nfs4_readdir_arg *args) 2178static void nfs4_xdr_enc_readdir(struct rpc_rqst *req, struct xdr_stream *xdr,
2179 const struct nfs4_readdir_arg *args)
2212{ 2180{
2213 struct xdr_stream xdr;
2214 struct compound_hdr hdr = { 2181 struct compound_hdr hdr = {
2215 .minorversion = nfs4_xdr_minorversion(&args->seq_args), 2182 .minorversion = nfs4_xdr_minorversion(&args->seq_args),
2216 }; 2183 };
2217 2184
2218 xdr_init_encode(&xdr, &req->rq_snd_buf, p); 2185 encode_compound_hdr(xdr, req, &hdr);
2219 encode_compound_hdr(&xdr, req, &hdr); 2186 encode_sequence(xdr, &args->seq_args, &hdr);
2220 encode_sequence(&xdr, &args->seq_args, &hdr); 2187 encode_putfh(xdr, args->fh, &hdr);
2221 encode_putfh(&xdr, args->fh, &hdr); 2188 encode_readdir(xdr, args, req, &hdr);
2222 encode_readdir(&xdr, args, req, &hdr);
2223 2189
2224 xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2, args->pages, 2190 xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2, args->pages,
2225 args->pgbase, args->count); 2191 args->pgbase, args->count);
@@ -2227,428 +2193,387 @@ static int nfs4_xdr_enc_readdir(struct rpc_rqst *req, __be32 *p, const struct nf
2227 __func__, hdr.replen << 2, args->pages, 2193 __func__, hdr.replen << 2, args->pages,
2228 args->pgbase, args->count); 2194 args->pgbase, args->count);
2229 encode_nops(&hdr); 2195 encode_nops(&hdr);
2230 return 0;
2231} 2196}
2232 2197
2233/* 2198/*
2234 * Encode a READ request 2199 * Encode a READ request
2235 */ 2200 */
2236static int nfs4_xdr_enc_read(struct rpc_rqst *req, __be32 *p, struct nfs_readargs *args) 2201static void nfs4_xdr_enc_read(struct rpc_rqst *req, struct xdr_stream *xdr,
2202 struct nfs_readargs *args)
2237{ 2203{
2238 struct xdr_stream xdr;
2239 struct compound_hdr hdr = { 2204 struct compound_hdr hdr = {
2240 .minorversion = nfs4_xdr_minorversion(&args->seq_args), 2205 .minorversion = nfs4_xdr_minorversion(&args->seq_args),
2241 }; 2206 };
2242 2207
2243 xdr_init_encode(&xdr, &req->rq_snd_buf, p); 2208 encode_compound_hdr(xdr, req, &hdr);
2244 encode_compound_hdr(&xdr, req, &hdr); 2209 encode_sequence(xdr, &args->seq_args, &hdr);
2245 encode_sequence(&xdr, &args->seq_args, &hdr); 2210 encode_putfh(xdr, args->fh, &hdr);
2246 encode_putfh(&xdr, args->fh, &hdr); 2211 encode_read(xdr, args, &hdr);
2247 encode_read(&xdr, args, &hdr);
2248 2212
2249 xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2, 2213 xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2,
2250 args->pages, args->pgbase, args->count); 2214 args->pages, args->pgbase, args->count);
2251 req->rq_rcv_buf.flags |= XDRBUF_READ; 2215 req->rq_rcv_buf.flags |= XDRBUF_READ;
2252 encode_nops(&hdr); 2216 encode_nops(&hdr);
2253 return 0;
2254} 2217}
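
READ uses the identical page setup for its payload and additionally flags the receive buffer, presumably so the transport can arrange direct data placement into those pages. WRITE further down is the mirror image, setting XDRBUF_WRITE on rq_snd_buf for the outgoing payload. Restated from the hunk:

	xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2,
			 args->pages, args->pgbase, args->count);
	req->rq_rcv_buf.flags |= XDRBUF_READ;	/* payload lands in pages */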
2255 2218
2256/* 2219/*
2257 * Encode a SETATTR request 2220 * Encode a SETATTR request
2258 */ 2221 */
2259static int nfs4_xdr_enc_setattr(struct rpc_rqst *req, __be32 *p, struct nfs_setattrargs *args) 2222static void nfs4_xdr_enc_setattr(struct rpc_rqst *req, struct xdr_stream *xdr,
2223 struct nfs_setattrargs *args)
2260{ 2224{
2261 struct xdr_stream xdr;
2262 struct compound_hdr hdr = { 2225 struct compound_hdr hdr = {
2263 .minorversion = nfs4_xdr_minorversion(&args->seq_args), 2226 .minorversion = nfs4_xdr_minorversion(&args->seq_args),
2264 }; 2227 };
2265 2228
2266 xdr_init_encode(&xdr, &req->rq_snd_buf, p); 2229 encode_compound_hdr(xdr, req, &hdr);
2267 encode_compound_hdr(&xdr, req, &hdr); 2230 encode_sequence(xdr, &args->seq_args, &hdr);
2268 encode_sequence(&xdr, &args->seq_args, &hdr); 2231 encode_putfh(xdr, args->fh, &hdr);
2269 encode_putfh(&xdr, args->fh, &hdr); 2232 encode_setattr(xdr, args, args->server, &hdr);
2270 encode_setattr(&xdr, args, args->server, &hdr); 2233 encode_getfattr(xdr, args->bitmask, &hdr);
2271 encode_getfattr(&xdr, args->bitmask, &hdr);
2272 encode_nops(&hdr); 2234 encode_nops(&hdr);
2273 return 0;
2274} 2235}
2275 2236
2276/* 2237/*
2277 * Encode a GETACL request 2238 * Encode a GETACL request
2278 */ 2239 */
2279static int 2240static void nfs4_xdr_enc_getacl(struct rpc_rqst *req, struct xdr_stream *xdr,
2280nfs4_xdr_enc_getacl(struct rpc_rqst *req, __be32 *p, 2241 struct nfs_getaclargs *args)
2281 struct nfs_getaclargs *args)
2282{ 2242{
2283 struct xdr_stream xdr;
2284 struct compound_hdr hdr = { 2243 struct compound_hdr hdr = {
2285 .minorversion = nfs4_xdr_minorversion(&args->seq_args), 2244 .minorversion = nfs4_xdr_minorversion(&args->seq_args),
2286 }; 2245 };
2287 uint32_t replen; 2246 uint32_t replen;
2288 2247
2289 xdr_init_encode(&xdr, &req->rq_snd_buf, p); 2248 encode_compound_hdr(xdr, req, &hdr);
2290 encode_compound_hdr(&xdr, req, &hdr); 2249 encode_sequence(xdr, &args->seq_args, &hdr);
2291 encode_sequence(&xdr, &args->seq_args, &hdr); 2250 encode_putfh(xdr, args->fh, &hdr);
2292 encode_putfh(&xdr, args->fh, &hdr);
2293 replen = hdr.replen + op_decode_hdr_maxsz + nfs4_fattr_bitmap_maxsz + 1; 2251 replen = hdr.replen + op_decode_hdr_maxsz + nfs4_fattr_bitmap_maxsz + 1;
2294 encode_getattr_two(&xdr, FATTR4_WORD0_ACL, 0, &hdr); 2252 encode_getattr_two(xdr, FATTR4_WORD0_ACL, 0, &hdr);
2295 2253
2296 xdr_inline_pages(&req->rq_rcv_buf, replen << 2, 2254 xdr_inline_pages(&req->rq_rcv_buf, replen << 2,
2297 args->acl_pages, args->acl_pgbase, args->acl_len); 2255 args->acl_pages, args->acl_pgbase, args->acl_len);
2298 encode_nops(&hdr); 2256 encode_nops(&hdr);
2299 return 0;
2300} 2257}
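
GETACL has to snapshot the reply offset before emitting the GETATTR op, because the ACL attribute itself is what gets steered into the pages; replen therefore adds the op header, the attribute bitmap, and the one-word attribute length that precede the ACL data in the reply. Isolating that computation as the hunk shows it:

	/* fixed words preceding the ACL payload in the GETATTR reply */
	replen = hdr.replen + op_decode_hdr_maxsz + nfs4_fattr_bitmap_maxsz + 1;
	encode_getattr_two(xdr, FATTR4_WORD0_ACL, 0, &hdr);
	xdr_inline_pages(&req->rq_rcv_buf, replen << 2,
			 args->acl_pages, args->acl_pgbase, args->acl_len);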
2301 2258
2302/* 2259/*
2303 * Encode a WRITE request 2260 * Encode a WRITE request
2304 */ 2261 */
2305static int nfs4_xdr_enc_write(struct rpc_rqst *req, __be32 *p, struct nfs_writeargs *args) 2262static void nfs4_xdr_enc_write(struct rpc_rqst *req, struct xdr_stream *xdr,
2263 struct nfs_writeargs *args)
2306{ 2264{
2307 struct xdr_stream xdr;
2308 struct compound_hdr hdr = { 2265 struct compound_hdr hdr = {
2309 .minorversion = nfs4_xdr_minorversion(&args->seq_args), 2266 .minorversion = nfs4_xdr_minorversion(&args->seq_args),
2310 }; 2267 };
2311 2268
2312 xdr_init_encode(&xdr, &req->rq_snd_buf, p); 2269 encode_compound_hdr(xdr, req, &hdr);
2313 encode_compound_hdr(&xdr, req, &hdr); 2270 encode_sequence(xdr, &args->seq_args, &hdr);
2314 encode_sequence(&xdr, &args->seq_args, &hdr); 2271 encode_putfh(xdr, args->fh, &hdr);
2315 encode_putfh(&xdr, args->fh, &hdr); 2272 encode_write(xdr, args, &hdr);
2316 encode_write(&xdr, args, &hdr);
2317 req->rq_snd_buf.flags |= XDRBUF_WRITE; 2273 req->rq_snd_buf.flags |= XDRBUF_WRITE;
2318 encode_getfattr(&xdr, args->bitmask, &hdr); 2274 encode_getfattr(xdr, args->bitmask, &hdr);
2319 encode_nops(&hdr); 2275 encode_nops(&hdr);
2320 return 0;
2321} 2276}
2322 2277
2323/* 2278/*
2324 * a COMMIT request 2279 * a COMMIT request
2325 */ 2280 */
2326static int nfs4_xdr_enc_commit(struct rpc_rqst *req, __be32 *p, struct nfs_writeargs *args) 2281static void nfs4_xdr_enc_commit(struct rpc_rqst *req, struct xdr_stream *xdr,
2282 struct nfs_writeargs *args)
2327{ 2283{
2328 struct xdr_stream xdr;
2329 struct compound_hdr hdr = { 2284 struct compound_hdr hdr = {
2330 .minorversion = nfs4_xdr_minorversion(&args->seq_args), 2285 .minorversion = nfs4_xdr_minorversion(&args->seq_args),
2331 }; 2286 };
2332 2287
2333 xdr_init_encode(&xdr, &req->rq_snd_buf, p); 2288 encode_compound_hdr(xdr, req, &hdr);
2334 encode_compound_hdr(&xdr, req, &hdr); 2289 encode_sequence(xdr, &args->seq_args, &hdr);
2335 encode_sequence(&xdr, &args->seq_args, &hdr); 2290 encode_putfh(xdr, args->fh, &hdr);
2336 encode_putfh(&xdr, args->fh, &hdr); 2291 encode_commit(xdr, args, &hdr);
2337 encode_commit(&xdr, args, &hdr); 2292 encode_getfattr(xdr, args->bitmask, &hdr);
2338 encode_getfattr(&xdr, args->bitmask, &hdr);
2339 encode_nops(&hdr); 2293 encode_nops(&hdr);
2340 return 0;
2341} 2294}
2342 2295
2343/* 2296/*
2344 * FSINFO request 2297 * FSINFO request
2345 */ 2298 */
2346static int nfs4_xdr_enc_fsinfo(struct rpc_rqst *req, __be32 *p, struct nfs4_fsinfo_arg *args) 2299static void nfs4_xdr_enc_fsinfo(struct rpc_rqst *req, struct xdr_stream *xdr,
2300 struct nfs4_fsinfo_arg *args)
2347{ 2301{
2348 struct xdr_stream xdr;
2349 struct compound_hdr hdr = { 2302 struct compound_hdr hdr = {
2350 .minorversion = nfs4_xdr_minorversion(&args->seq_args), 2303 .minorversion = nfs4_xdr_minorversion(&args->seq_args),
2351 }; 2304 };
2352 2305
2353 xdr_init_encode(&xdr, &req->rq_snd_buf, p); 2306 encode_compound_hdr(xdr, req, &hdr);
2354 encode_compound_hdr(&xdr, req, &hdr); 2307 encode_sequence(xdr, &args->seq_args, &hdr);
2355 encode_sequence(&xdr, &args->seq_args, &hdr); 2308 encode_putfh(xdr, args->fh, &hdr);
2356 encode_putfh(&xdr, args->fh, &hdr); 2309 encode_fsinfo(xdr, args->bitmask, &hdr);
2357 encode_fsinfo(&xdr, args->bitmask, &hdr);
2358 encode_nops(&hdr); 2310 encode_nops(&hdr);
2359 return 0;
2360} 2311}
2361 2312
2362/* 2313/*
2363 * a PATHCONF request 2314 * a PATHCONF request
2364 */ 2315 */
2365static int nfs4_xdr_enc_pathconf(struct rpc_rqst *req, __be32 *p, const struct nfs4_pathconf_arg *args) 2316static void nfs4_xdr_enc_pathconf(struct rpc_rqst *req, struct xdr_stream *xdr,
2317 const struct nfs4_pathconf_arg *args)
2366{ 2318{
2367 struct xdr_stream xdr;
2368 struct compound_hdr hdr = { 2319 struct compound_hdr hdr = {
2369 .minorversion = nfs4_xdr_minorversion(&args->seq_args), 2320 .minorversion = nfs4_xdr_minorversion(&args->seq_args),
2370 }; 2321 };
2371 2322
2372 xdr_init_encode(&xdr, &req->rq_snd_buf, p); 2323 encode_compound_hdr(xdr, req, &hdr);
2373 encode_compound_hdr(&xdr, req, &hdr); 2324 encode_sequence(xdr, &args->seq_args, &hdr);
2374 encode_sequence(&xdr, &args->seq_args, &hdr); 2325 encode_putfh(xdr, args->fh, &hdr);
2375 encode_putfh(&xdr, args->fh, &hdr); 2326 encode_getattr_one(xdr, args->bitmask[0] & nfs4_pathconf_bitmap[0],
2376 encode_getattr_one(&xdr, args->bitmask[0] & nfs4_pathconf_bitmap[0],
2377 &hdr); 2327 &hdr);
2378 encode_nops(&hdr); 2328 encode_nops(&hdr);
2379 return 0;
2380} 2329}
2381 2330
2382/* 2331/*
2383 * a STATFS request 2332 * a STATFS request
2384 */ 2333 */
2385static int nfs4_xdr_enc_statfs(struct rpc_rqst *req, __be32 *p, const struct nfs4_statfs_arg *args) 2334static void nfs4_xdr_enc_statfs(struct rpc_rqst *req, struct xdr_stream *xdr,
2335 const struct nfs4_statfs_arg *args)
2386{ 2336{
2387 struct xdr_stream xdr;
2388 struct compound_hdr hdr = { 2337 struct compound_hdr hdr = {
2389 .minorversion = nfs4_xdr_minorversion(&args->seq_args), 2338 .minorversion = nfs4_xdr_minorversion(&args->seq_args),
2390 }; 2339 };
2391 2340
2392 xdr_init_encode(&xdr, &req->rq_snd_buf, p); 2341 encode_compound_hdr(xdr, req, &hdr);
2393 encode_compound_hdr(&xdr, req, &hdr); 2342 encode_sequence(xdr, &args->seq_args, &hdr);
2394 encode_sequence(&xdr, &args->seq_args, &hdr); 2343 encode_putfh(xdr, args->fh, &hdr);
2395 encode_putfh(&xdr, args->fh, &hdr); 2344 encode_getattr_two(xdr, args->bitmask[0] & nfs4_statfs_bitmap[0],
2396 encode_getattr_two(&xdr, args->bitmask[0] & nfs4_statfs_bitmap[0],
2397 args->bitmask[1] & nfs4_statfs_bitmap[1], &hdr); 2345 args->bitmask[1] & nfs4_statfs_bitmap[1], &hdr);
2398 encode_nops(&hdr); 2346 encode_nops(&hdr);
2399 return 0;
2400} 2347}
2401 2348
2402/* 2349/*
2403 * GETATTR_BITMAP request 2350 * GETATTR_BITMAP request
2404 */ 2351 */
2405static int nfs4_xdr_enc_server_caps(struct rpc_rqst *req, __be32 *p, 2352static void nfs4_xdr_enc_server_caps(struct rpc_rqst *req,
2406 struct nfs4_server_caps_arg *args) 2353 struct xdr_stream *xdr,
2354 struct nfs4_server_caps_arg *args)
2407{ 2355{
2408 struct xdr_stream xdr;
2409 struct compound_hdr hdr = { 2356 struct compound_hdr hdr = {
2410 .minorversion = nfs4_xdr_minorversion(&args->seq_args), 2357 .minorversion = nfs4_xdr_minorversion(&args->seq_args),
2411 }; 2358 };
2412 2359
2413 xdr_init_encode(&xdr, &req->rq_snd_buf, p); 2360 encode_compound_hdr(xdr, req, &hdr);
2414 encode_compound_hdr(&xdr, req, &hdr); 2361 encode_sequence(xdr, &args->seq_args, &hdr);
2415 encode_sequence(&xdr, &args->seq_args, &hdr); 2362 encode_putfh(xdr, args->fhandle, &hdr);
2416 encode_putfh(&xdr, args->fhandle, &hdr); 2363 encode_getattr_one(xdr, FATTR4_WORD0_SUPPORTED_ATTRS|
2417 encode_getattr_one(&xdr, FATTR4_WORD0_SUPPORTED_ATTRS|
2418 FATTR4_WORD0_LINK_SUPPORT| 2364 FATTR4_WORD0_LINK_SUPPORT|
2419 FATTR4_WORD0_SYMLINK_SUPPORT| 2365 FATTR4_WORD0_SYMLINK_SUPPORT|
2420 FATTR4_WORD0_ACLSUPPORT, &hdr); 2366 FATTR4_WORD0_ACLSUPPORT, &hdr);
2421 encode_nops(&hdr); 2367 encode_nops(&hdr);
2422 return 0;
2423} 2368}
2424 2369
2425/* 2370/*
2426 * a RENEW request 2371 * a RENEW request
2427 */ 2372 */
2428static int nfs4_xdr_enc_renew(struct rpc_rqst *req, __be32 *p, struct nfs_client *clp) 2373static void nfs4_xdr_enc_renew(struct rpc_rqst *req, struct xdr_stream *xdr,
2374 struct nfs_client *clp)
2429{ 2375{
2430 struct xdr_stream xdr;
2431 struct compound_hdr hdr = { 2376 struct compound_hdr hdr = {
2432 .nops = 0, 2377 .nops = 0,
2433 }; 2378 };
2434 2379
2435 xdr_init_encode(&xdr, &req->rq_snd_buf, p); 2380 encode_compound_hdr(xdr, req, &hdr);
2436 encode_compound_hdr(&xdr, req, &hdr); 2381 encode_renew(xdr, clp, &hdr);
2437 encode_renew(&xdr, clp, &hdr);
2438 encode_nops(&hdr); 2382 encode_nops(&hdr);
2439 return 0;
2440} 2383}
2441 2384
2442/* 2385/*
2443 * a SETCLIENTID request 2386 * a SETCLIENTID request
2444 */ 2387 */
2445static int nfs4_xdr_enc_setclientid(struct rpc_rqst *req, __be32 *p, struct nfs4_setclientid *sc) 2388static void nfs4_xdr_enc_setclientid(struct rpc_rqst *req,
2389 struct xdr_stream *xdr,
2390 struct nfs4_setclientid *sc)
2446{ 2391{
2447 struct xdr_stream xdr;
2448 struct compound_hdr hdr = { 2392 struct compound_hdr hdr = {
2449 .nops = 0, 2393 .nops = 0,
2450 }; 2394 };
2451 2395
2452 xdr_init_encode(&xdr, &req->rq_snd_buf, p); 2396 encode_compound_hdr(xdr, req, &hdr);
2453 encode_compound_hdr(&xdr, req, &hdr); 2397 encode_setclientid(xdr, sc, &hdr);
2454 encode_setclientid(&xdr, sc, &hdr);
2455 encode_nops(&hdr); 2398 encode_nops(&hdr);
2456 return 0;
2457} 2399}
2458 2400
2459/* 2401/*
2460 * a SETCLIENTID_CONFIRM request 2402 * a SETCLIENTID_CONFIRM request
2461 */ 2403 */
2462static int nfs4_xdr_enc_setclientid_confirm(struct rpc_rqst *req, __be32 *p, struct nfs4_setclientid_res *arg) 2404static void nfs4_xdr_enc_setclientid_confirm(struct rpc_rqst *req,
2405 struct xdr_stream *xdr,
2406 struct nfs4_setclientid_res *arg)
2463{ 2407{
2464 struct xdr_stream xdr;
2465 struct compound_hdr hdr = { 2408 struct compound_hdr hdr = {
2466 .nops = 0, 2409 .nops = 0,
2467 }; 2410 };
2468 const u32 lease_bitmap[2] = { FATTR4_WORD0_LEASE_TIME, 0 }; 2411 const u32 lease_bitmap[2] = { FATTR4_WORD0_LEASE_TIME, 0 };
2469 2412
2470 xdr_init_encode(&xdr, &req->rq_snd_buf, p); 2413 encode_compound_hdr(xdr, req, &hdr);
2471 encode_compound_hdr(&xdr, req, &hdr); 2414 encode_setclientid_confirm(xdr, arg, &hdr);
2472 encode_setclientid_confirm(&xdr, arg, &hdr); 2415 encode_putrootfh(xdr, &hdr);
2473 encode_putrootfh(&xdr, &hdr); 2416 encode_fsinfo(xdr, lease_bitmap, &hdr);
2474 encode_fsinfo(&xdr, lease_bitmap, &hdr);
2475 encode_nops(&hdr); 2417 encode_nops(&hdr);
2476 return 0;
2477} 2418}
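
SETCLIENTID_CONFIRM piggy-backs a lease probe on the confirmation: PUTROOTFH gives FSINFO a filehandle to operate on, and the two-word bitmap requests nothing but the lease time, so the client learns the server's lease period in the same round trip. The GET_LEASE_TIME encoder in the NFSv4.1 section below reuses exactly this bitmap. In isolation:

	const u32 lease_bitmap[2] = { FATTR4_WORD0_LEASE_TIME, 0 };

	encode_setclientid_confirm(xdr, arg, &hdr);
	encode_putrootfh(xdr, &hdr);		/* any FH works for FSINFO */
	encode_fsinfo(xdr, lease_bitmap, &hdr);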
2478 2419
2479/* 2420/*
2480 * DELEGRETURN request 2421 * DELEGRETURN request
2481 */ 2422 */
2482static int nfs4_xdr_enc_delegreturn(struct rpc_rqst *req, __be32 *p, const struct nfs4_delegreturnargs *args) 2423static void nfs4_xdr_enc_delegreturn(struct rpc_rqst *req,
2424 struct xdr_stream *xdr,
2425 const struct nfs4_delegreturnargs *args)
2483{ 2426{
2484 struct xdr_stream xdr;
2485 struct compound_hdr hdr = { 2427 struct compound_hdr hdr = {
2486 .minorversion = nfs4_xdr_minorversion(&args->seq_args), 2428 .minorversion = nfs4_xdr_minorversion(&args->seq_args),
2487 }; 2429 };
2488 2430
2489 xdr_init_encode(&xdr, &req->rq_snd_buf, p); 2431 encode_compound_hdr(xdr, req, &hdr);
2490 encode_compound_hdr(&xdr, req, &hdr); 2432 encode_sequence(xdr, &args->seq_args, &hdr);
2491 encode_sequence(&xdr, &args->seq_args, &hdr); 2433 encode_putfh(xdr, args->fhandle, &hdr);
2492 encode_putfh(&xdr, args->fhandle, &hdr); 2434 encode_delegreturn(xdr, args->stateid, &hdr);
2493 encode_delegreturn(&xdr, args->stateid, &hdr); 2435 encode_getfattr(xdr, args->bitmask, &hdr);
2494 encode_getfattr(&xdr, args->bitmask, &hdr);
2495 encode_nops(&hdr); 2436 encode_nops(&hdr);
2496 return 0;
2497} 2437}
2498 2438
2499/* 2439/*
2500 * Encode FS_LOCATIONS request 2440 * Encode FS_LOCATIONS request
2501 */ 2441 */
2502static int nfs4_xdr_enc_fs_locations(struct rpc_rqst *req, __be32 *p, struct nfs4_fs_locations_arg *args) 2442static void nfs4_xdr_enc_fs_locations(struct rpc_rqst *req,
2443 struct xdr_stream *xdr,
2444 struct nfs4_fs_locations_arg *args)
2503{ 2445{
2504 struct xdr_stream xdr;
2505 struct compound_hdr hdr = { 2446 struct compound_hdr hdr = {
2506 .minorversion = nfs4_xdr_minorversion(&args->seq_args), 2447 .minorversion = nfs4_xdr_minorversion(&args->seq_args),
2507 }; 2448 };
2508 uint32_t replen; 2449 uint32_t replen;
2509 2450
2510 xdr_init_encode(&xdr, &req->rq_snd_buf, p); 2451 encode_compound_hdr(xdr, req, &hdr);
2511 encode_compound_hdr(&xdr, req, &hdr); 2452 encode_sequence(xdr, &args->seq_args, &hdr);
2512 encode_sequence(&xdr, &args->seq_args, &hdr); 2453 encode_putfh(xdr, args->dir_fh, &hdr);
2513 encode_putfh(&xdr, args->dir_fh, &hdr); 2454 encode_lookup(xdr, args->name, &hdr);
2514 encode_lookup(&xdr, args->name, &hdr);
2515 replen = hdr.replen; /* get the attribute into args->page */ 2455 replen = hdr.replen; /* get the attribute into args->page */
2516 encode_fs_locations(&xdr, args->bitmask, &hdr); 2456 encode_fs_locations(xdr, args->bitmask, &hdr);
2517 2457
2518 xdr_inline_pages(&req->rq_rcv_buf, replen << 2, &args->page, 2458 xdr_inline_pages(&req->rq_rcv_buf, replen << 2, &args->page,
2519 0, PAGE_SIZE); 2459 0, PAGE_SIZE);
2520 encode_nops(&hdr); 2460 encode_nops(&hdr);
2521 return 0;
2522} 2461}
2523 2462
2524#if defined(CONFIG_NFS_V4_1) 2463#if defined(CONFIG_NFS_V4_1)
2525/* 2464/*
2526 * EXCHANGE_ID request 2465 * EXCHANGE_ID request
2527 */ 2466 */
2528static int nfs4_xdr_enc_exchange_id(struct rpc_rqst *req, uint32_t *p, 2467static void nfs4_xdr_enc_exchange_id(struct rpc_rqst *req,
2529 struct nfs41_exchange_id_args *args) 2468 struct xdr_stream *xdr,
2469 struct nfs41_exchange_id_args *args)
2530{ 2470{
2531 struct xdr_stream xdr;
2532 struct compound_hdr hdr = { 2471 struct compound_hdr hdr = {
2533 .minorversion = args->client->cl_mvops->minor_version, 2472 .minorversion = args->client->cl_mvops->minor_version,
2534 }; 2473 };
2535 2474
2536 xdr_init_encode(&xdr, &req->rq_snd_buf, p); 2475 encode_compound_hdr(xdr, req, &hdr);
2537 encode_compound_hdr(&xdr, req, &hdr); 2476 encode_exchange_id(xdr, args, &hdr);
2538 encode_exchange_id(&xdr, args, &hdr);
2539 encode_nops(&hdr); 2477 encode_nops(&hdr);
2540 return 0;
2541} 2478}
2542 2479
2543/* 2480/*
2544 * a CREATE_SESSION request 2481 * a CREATE_SESSION request
2545 */ 2482 */
2546static int nfs4_xdr_enc_create_session(struct rpc_rqst *req, uint32_t *p, 2483static void nfs4_xdr_enc_create_session(struct rpc_rqst *req,
2547 struct nfs41_create_session_args *args) 2484 struct xdr_stream *xdr,
2485 struct nfs41_create_session_args *args)
2548{ 2486{
2549 struct xdr_stream xdr;
2550 struct compound_hdr hdr = { 2487 struct compound_hdr hdr = {
2551 .minorversion = args->client->cl_mvops->minor_version, 2488 .minorversion = args->client->cl_mvops->minor_version,
2552 }; 2489 };
2553 2490
2554 xdr_init_encode(&xdr, &req->rq_snd_buf, p); 2491 encode_compound_hdr(xdr, req, &hdr);
2555 encode_compound_hdr(&xdr, req, &hdr); 2492 encode_create_session(xdr, args, &hdr);
2556 encode_create_session(&xdr, args, &hdr);
2557 encode_nops(&hdr); 2493 encode_nops(&hdr);
2558 return 0;
2559} 2494}
2560 2495
2561/* 2496/*
2562 * a DESTROY_SESSION request 2497 * a DESTROY_SESSION request
2563 */ 2498 */
2564static int nfs4_xdr_enc_destroy_session(struct rpc_rqst *req, uint32_t *p, 2499static void nfs4_xdr_enc_destroy_session(struct rpc_rqst *req,
2565 struct nfs4_session *session) 2500 struct xdr_stream *xdr,
2501 struct nfs4_session *session)
2566{ 2502{
2567 struct xdr_stream xdr;
2568 struct compound_hdr hdr = { 2503 struct compound_hdr hdr = {
2569 .minorversion = session->clp->cl_mvops->minor_version, 2504 .minorversion = session->clp->cl_mvops->minor_version,
2570 }; 2505 };
2571 2506
2572 xdr_init_encode(&xdr, &req->rq_snd_buf, p); 2507 encode_compound_hdr(xdr, req, &hdr);
2573 encode_compound_hdr(&xdr, req, &hdr); 2508 encode_destroy_session(xdr, session, &hdr);
2574 encode_destroy_session(&xdr, session, &hdr);
2575 encode_nops(&hdr); 2509 encode_nops(&hdr);
2576 return 0;
2577} 2510}
2578 2511
2579/* 2512/*
2580 * a SEQUENCE request 2513 * a SEQUENCE request
2581 */ 2514 */
2582static int nfs4_xdr_enc_sequence(struct rpc_rqst *req, uint32_t *p, 2515static void nfs4_xdr_enc_sequence(struct rpc_rqst *req, struct xdr_stream *xdr,
2583 struct nfs4_sequence_args *args) 2516 struct nfs4_sequence_args *args)
2584{ 2517{
2585 struct xdr_stream xdr;
2586 struct compound_hdr hdr = { 2518 struct compound_hdr hdr = {
2587 .minorversion = nfs4_xdr_minorversion(args), 2519 .minorversion = nfs4_xdr_minorversion(args),
2588 }; 2520 };
2589 2521
2590 xdr_init_encode(&xdr, &req->rq_snd_buf, p); 2522 encode_compound_hdr(xdr, req, &hdr);
2591 encode_compound_hdr(&xdr, req, &hdr); 2523 encode_sequence(xdr, args, &hdr);
2592 encode_sequence(&xdr, args, &hdr);
2593 encode_nops(&hdr); 2524 encode_nops(&hdr);
2594 return 0;
2595} 2525}
2596 2526
2597/* 2527/*
2598 * a GET_LEASE_TIME request 2528 * a GET_LEASE_TIME request
2599 */ 2529 */
2600static int nfs4_xdr_enc_get_lease_time(struct rpc_rqst *req, uint32_t *p, 2530static void nfs4_xdr_enc_get_lease_time(struct rpc_rqst *req,
2601 struct nfs4_get_lease_time_args *args) 2531 struct xdr_stream *xdr,
2532 struct nfs4_get_lease_time_args *args)
2602{ 2533{
2603 struct xdr_stream xdr;
2604 struct compound_hdr hdr = { 2534 struct compound_hdr hdr = {
2605 .minorversion = nfs4_xdr_minorversion(&args->la_seq_args), 2535 .minorversion = nfs4_xdr_minorversion(&args->la_seq_args),
2606 }; 2536 };
2607 const u32 lease_bitmap[2] = { FATTR4_WORD0_LEASE_TIME, 0 }; 2537 const u32 lease_bitmap[2] = { FATTR4_WORD0_LEASE_TIME, 0 };
2608 2538
2609 xdr_init_encode(&xdr, &req->rq_snd_buf, p); 2539 encode_compound_hdr(xdr, req, &hdr);
2610 encode_compound_hdr(&xdr, req, &hdr); 2540 encode_sequence(xdr, &args->la_seq_args, &hdr);
2611 encode_sequence(&xdr, &args->la_seq_args, &hdr); 2541 encode_putrootfh(xdr, &hdr);
2612 encode_putrootfh(&xdr, &hdr); 2542 encode_fsinfo(xdr, lease_bitmap, &hdr);
2613 encode_fsinfo(&xdr, lease_bitmap, &hdr);
2614 encode_nops(&hdr); 2543 encode_nops(&hdr);
2615 return 0;
2616} 2544}
2617 2545
2618/* 2546/*
2619 * a RECLAIM_COMPLETE request 2547 * a RECLAIM_COMPLETE request
2620 */ 2548 */
2621static int nfs4_xdr_enc_reclaim_complete(struct rpc_rqst *req, uint32_t *p, 2549static void nfs4_xdr_enc_reclaim_complete(struct rpc_rqst *req,
2622 struct nfs41_reclaim_complete_args *args) 2550 struct xdr_stream *xdr,
2551 struct nfs41_reclaim_complete_args *args)
2623{ 2552{
2624 struct xdr_stream xdr;
2625 struct compound_hdr hdr = { 2553 struct compound_hdr hdr = {
2626 .minorversion = nfs4_xdr_minorversion(&args->seq_args) 2554 .minorversion = nfs4_xdr_minorversion(&args->seq_args)
2627 }; 2555 };
2628 2556
2629 xdr_init_encode(&xdr, &req->rq_snd_buf, p); 2557 encode_compound_hdr(xdr, req, &hdr);
2630 encode_compound_hdr(&xdr, req, &hdr); 2558 encode_sequence(xdr, &args->seq_args, &hdr);
2631 encode_sequence(&xdr, &args->seq_args, &hdr); 2559 encode_reclaim_complete(xdr, args, &hdr);
2632 encode_reclaim_complete(&xdr, args, &hdr);
2633 encode_nops(&hdr); 2560 encode_nops(&hdr);
2634 return 0;
2635} 2561}
2636 2562
2637/* 2563/*
2638 * Encode GETDEVICEINFO request 2564 * Encode GETDEVICEINFO request
2639 */ 2565 */
2640static int nfs4_xdr_enc_getdeviceinfo(struct rpc_rqst *req, uint32_t *p, 2566static void nfs4_xdr_enc_getdeviceinfo(struct rpc_rqst *req,
2641 struct nfs4_getdeviceinfo_args *args) 2567 struct xdr_stream *xdr,
2568 struct nfs4_getdeviceinfo_args *args)
2642{ 2569{
2643 struct xdr_stream xdr;
2644 struct compound_hdr hdr = { 2570 struct compound_hdr hdr = {
2645 .minorversion = nfs4_xdr_minorversion(&args->seq_args), 2571 .minorversion = nfs4_xdr_minorversion(&args->seq_args),
2646 }; 2572 };
2647 2573
2648 xdr_init_encode(&xdr, &req->rq_snd_buf, p); 2574 encode_compound_hdr(xdr, req, &hdr);
2649 encode_compound_hdr(&xdr, req, &hdr); 2575 encode_sequence(xdr, &args->seq_args, &hdr);
2650 encode_sequence(&xdr, &args->seq_args, &hdr); 2576 encode_getdeviceinfo(xdr, args, &hdr);
2651 encode_getdeviceinfo(&xdr, args, &hdr);
2652 2577
2653 /* set up reply kvec. Subtract notification bitmap max size (2) 2578 /* set up reply kvec. Subtract notification bitmap max size (2)
2654 * so that notification bitmap is put in xdr_buf tail */ 2579 * so that notification bitmap is put in xdr_buf tail */
@@ -2657,27 +2582,24 @@ static int nfs4_xdr_enc_getdeviceinfo(struct rpc_rqst *req, uint32_t *p,
2657 args->pdev->pglen); 2582 args->pdev->pglen);
2658 2583
2659 encode_nops(&hdr); 2584 encode_nops(&hdr);
2660 return 0;
2661} 2585}
2662 2586
2663/* 2587/*
2664 * Encode LAYOUTGET request 2588 * Encode LAYOUTGET request
2665 */ 2589 */
2666static int nfs4_xdr_enc_layoutget(struct rpc_rqst *req, uint32_t *p, 2590static void nfs4_xdr_enc_layoutget(struct rpc_rqst *req,
2667 struct nfs4_layoutget_args *args) 2591 struct xdr_stream *xdr,
2592 struct nfs4_layoutget_args *args)
2668{ 2593{
2669 struct xdr_stream xdr;
2670 struct compound_hdr hdr = { 2594 struct compound_hdr hdr = {
2671 .minorversion = nfs4_xdr_minorversion(&args->seq_args), 2595 .minorversion = nfs4_xdr_minorversion(&args->seq_args),
2672 }; 2596 };
2673 2597
2674 xdr_init_encode(&xdr, &req->rq_snd_buf, p); 2598 encode_compound_hdr(xdr, req, &hdr);
2675 encode_compound_hdr(&xdr, req, &hdr); 2599 encode_sequence(xdr, &args->seq_args, &hdr);
2676 encode_sequence(&xdr, &args->seq_args, &hdr); 2600 encode_putfh(xdr, NFS_FH(args->inode), &hdr);
2677 encode_putfh(&xdr, NFS_FH(args->inode), &hdr); 2601 encode_layoutget(xdr, args, &hdr);
2678 encode_layoutget(&xdr, args, &hdr);
2679 encode_nops(&hdr); 2602 encode_nops(&hdr);
2680 return 0;
2681} 2603}
2682#endif /* CONFIG_NFS_V4_1 */ 2604#endif /* CONFIG_NFS_V4_1 */
2683 2605
@@ -4475,7 +4397,7 @@ static int decode_read(struct xdr_stream *xdr, struct rpc_rqst *req, struct nfs_
4475 goto out_overflow; 4397 goto out_overflow;
4476 eof = be32_to_cpup(p++); 4398 eof = be32_to_cpup(p++);
4477 count = be32_to_cpup(p); 4399 count = be32_to_cpup(p);
4478 hdrlen = (u8 *) p - (u8 *) iov->iov_base; 4400 hdrlen = (u8 *) xdr->p - (u8 *) iov->iov_base;
4479 recvd = req->rq_rcv_buf.len - hdrlen; 4401 recvd = req->rq_rcv_buf.len - hdrlen;
4480 if (count > recvd) { 4402 if (count > recvd) {
4481 dprintk("NFS: server cheating in read reply: " 4403 dprintk("NFS: server cheating in read reply: "
@@ -5000,7 +4922,7 @@ static int decode_getdeviceinfo(struct xdr_stream *xdr,
5000 goto out_overflow; 4922 goto out_overflow;
5001 len = be32_to_cpup(p); 4923 len = be32_to_cpup(p);
5002 if (len) { 4924 if (len) {
5003 int i; 4925 uint32_t i;
5004 4926
5005 p = xdr_inline_decode(xdr, 4 * len); 4927 p = xdr_inline_decode(xdr, 4 * len);
5006 if (unlikely(!p)) 4928 if (unlikely(!p))
@@ -5090,26 +5012,26 @@ out_overflow:
5090/* 5012/*
5091 * Decode OPEN_DOWNGRADE response 5013 * Decode OPEN_DOWNGRADE response
5092 */ 5014 */
5093static int nfs4_xdr_dec_open_downgrade(struct rpc_rqst *rqstp, __be32 *p, struct nfs_closeres *res) 5015static int nfs4_xdr_dec_open_downgrade(struct rpc_rqst *rqstp,
5016 struct xdr_stream *xdr,
5017 struct nfs_closeres *res)
5094{ 5018{
5095 struct xdr_stream xdr;
5096 struct compound_hdr hdr; 5019 struct compound_hdr hdr;
5097 int status; 5020 int status;
5098 5021
5099 xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); 5022 status = decode_compound_hdr(xdr, &hdr);
5100 status = decode_compound_hdr(&xdr, &hdr);
5101 if (status) 5023 if (status)
5102 goto out; 5024 goto out;
5103 status = decode_sequence(&xdr, &res->seq_res, rqstp); 5025 status = decode_sequence(xdr, &res->seq_res, rqstp);
5104 if (status) 5026 if (status)
5105 goto out; 5027 goto out;
5106 status = decode_putfh(&xdr); 5028 status = decode_putfh(xdr);
5107 if (status) 5029 if (status)
5108 goto out; 5030 goto out;
5109 status = decode_open_downgrade(&xdr, res); 5031 status = decode_open_downgrade(xdr, res);
5110 if (status != 0) 5032 if (status != 0)
5111 goto out; 5033 goto out;
5112 decode_getfattr(&xdr, res->fattr, res->server, 5034 decode_getfattr(xdr, res->fattr, res->server,
5113 !RPC_IS_ASYNC(rqstp->rq_task)); 5035 !RPC_IS_ASYNC(rqstp->rq_task));
5114out: 5036out:
5115 return status; 5037 return status;
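
The decoders get the mirror-image conversion: the on-stack stream and xdr_init_decode() move out to the caller, but the int return survives because decoding a reply genuinely can fail. OPEN_DOWNGRADE above shows the resulting shape; note that the trailing decode_getfattr() is deliberately left unchecked, so a bad attribute block degrades gracefully instead of failing the whole reply. Restating the new version:

	static int nfs4_xdr_dec_open_downgrade(struct rpc_rqst *rqstp,
					       struct xdr_stream *xdr,
					       struct nfs_closeres *res)
	{
		struct compound_hdr hdr;
		int status;

		status = decode_compound_hdr(xdr, &hdr);
		if (status)
			goto out;
		status = decode_sequence(xdr, &res->seq_res, rqstp);
		if (status)
			goto out;
		status = decode_putfh(xdr);
		if (status)
			goto out;
		status = decode_open_downgrade(xdr, res);
		if (status != 0)
			goto out;
		/* attribute decode failure is deliberately not fatal */
		decode_getfattr(xdr, res->fattr, res->server,
				!RPC_IS_ASYNC(rqstp->rq_task));
	out:
		return status;
	}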
@@ -5118,26 +5040,25 @@ out:
5118/* 5040/*
5119 * Decode ACCESS response 5041 * Decode ACCESS response
5120 */ 5042 */
5121static int nfs4_xdr_dec_access(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_accessres *res) 5043static int nfs4_xdr_dec_access(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
5044 struct nfs4_accessres *res)
5122{ 5045{
5123 struct xdr_stream xdr;
5124 struct compound_hdr hdr; 5046 struct compound_hdr hdr;
5125 int status; 5047 int status;
5126 5048
5127 xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); 5049 status = decode_compound_hdr(xdr, &hdr);
5128 status = decode_compound_hdr(&xdr, &hdr);
5129 if (status) 5050 if (status)
5130 goto out; 5051 goto out;
5131 status = decode_sequence(&xdr, &res->seq_res, rqstp); 5052 status = decode_sequence(xdr, &res->seq_res, rqstp);
5132 if (status) 5053 if (status)
5133 goto out; 5054 goto out;
5134 status = decode_putfh(&xdr); 5055 status = decode_putfh(xdr);
5135 if (status != 0) 5056 if (status != 0)
5136 goto out; 5057 goto out;
5137 status = decode_access(&xdr, res); 5058 status = decode_access(xdr, res);
5138 if (status != 0) 5059 if (status != 0)
5139 goto out; 5060 goto out;
5140 decode_getfattr(&xdr, res->fattr, res->server, 5061 decode_getfattr(xdr, res->fattr, res->server,
5141 !RPC_IS_ASYNC(rqstp->rq_task)); 5062 !RPC_IS_ASYNC(rqstp->rq_task));
5142out: 5063out:
5143 return status; 5064 return status;
@@ -5146,26 +5067,28 @@ out:
5146/* 5067/*
5147 * Decode LOOKUP response 5068 * Decode LOOKUP response
5148 */ 5069 */
5149static int nfs4_xdr_dec_lookup(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_lookup_res *res) 5070static int nfs4_xdr_dec_lookup(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
5071 struct nfs4_lookup_res *res)
5150{ 5072{
5151 struct xdr_stream xdr;
5152 struct compound_hdr hdr; 5073 struct compound_hdr hdr;
5153 int status; 5074 int status;
5154 5075
5155 xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); 5076 status = decode_compound_hdr(xdr, &hdr);
5156 status = decode_compound_hdr(&xdr, &hdr);
5157 if (status) 5077 if (status)
5158 goto out; 5078 goto out;
5159 status = decode_sequence(&xdr, &res->seq_res, rqstp); 5079 status = decode_sequence(xdr, &res->seq_res, rqstp);
5160 if (status) 5080 if (status)
5161 goto out; 5081 goto out;
5162 if ((status = decode_putfh(&xdr)) != 0) 5082 status = decode_putfh(xdr);
5083 if (status)
5163 goto out; 5084 goto out;
5164 if ((status = decode_lookup(&xdr)) != 0) 5085 status = decode_lookup(xdr);
5086 if (status)
5165 goto out; 5087 goto out;
5166 if ((status = decode_getfh(&xdr, res->fh)) != 0) 5088 status = decode_getfh(xdr, res->fh);
5089 if (status)
5167 goto out; 5090 goto out;
5168 status = decode_getfattr(&xdr, res->fattr, res->server 5091 status = decode_getfattr(xdr, res->fattr, res->server
5169 ,!RPC_IS_ASYNC(rqstp->rq_task)); 5092 ,!RPC_IS_ASYNC(rqstp->rq_task));
5170out: 5093out:
5171 return status; 5094 return status;
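
Where the old decoders used assignment-inside-if, the patch also unpicks that idiom (the style checkpatch.pl warns about) into a plain assignment plus test, as LOOKUP above shows. The transformation in isolation:

	/* before */
	if ((status = decode_putfh(&xdr)) != 0)
		goto out;

	/* after */
	status = decode_putfh(xdr);
	if (status)
		goto out;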
@@ -5174,23 +5097,25 @@ out:
5174/* 5097/*
5175 * Decode LOOKUP_ROOT response 5098 * Decode LOOKUP_ROOT response
5176 */ 5099 */
5177static int nfs4_xdr_dec_lookup_root(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_lookup_res *res) 5100static int nfs4_xdr_dec_lookup_root(struct rpc_rqst *rqstp,
5101 struct xdr_stream *xdr,
5102 struct nfs4_lookup_res *res)
5178{ 5103{
5179 struct xdr_stream xdr;
5180 struct compound_hdr hdr; 5104 struct compound_hdr hdr;
5181 int status; 5105 int status;
5182 5106
5183 xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); 5107 status = decode_compound_hdr(xdr, &hdr);
5184 status = decode_compound_hdr(&xdr, &hdr);
5185 if (status) 5108 if (status)
5186 goto out; 5109 goto out;
5187 status = decode_sequence(&xdr, &res->seq_res, rqstp); 5110 status = decode_sequence(xdr, &res->seq_res, rqstp);
5188 if (status) 5111 if (status)
5189 goto out; 5112 goto out;
5190 if ((status = decode_putrootfh(&xdr)) != 0) 5113 status = decode_putrootfh(xdr);
5114 if (status)
5191 goto out; 5115 goto out;
5192 if ((status = decode_getfh(&xdr, res->fh)) == 0) 5116 status = decode_getfh(xdr, res->fh);
5193 status = decode_getfattr(&xdr, res->fattr, res->server, 5117 if (status == 0)
5118 status = decode_getfattr(xdr, res->fattr, res->server,
5194 !RPC_IS_ASYNC(rqstp->rq_task)); 5119 !RPC_IS_ASYNC(rqstp->rq_task));
5195out: 5120out:
5196 return status; 5121 return status;
@@ -5199,24 +5124,25 @@ out:
5199/* 5124/*
5200 * Decode REMOVE response 5125 * Decode REMOVE response
5201 */ 5126 */
5202static int nfs4_xdr_dec_remove(struct rpc_rqst *rqstp, __be32 *p, struct nfs_removeres *res) 5127static int nfs4_xdr_dec_remove(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
5128 struct nfs_removeres *res)
5203{ 5129{
5204 struct xdr_stream xdr;
5205 struct compound_hdr hdr; 5130 struct compound_hdr hdr;
5206 int status; 5131 int status;
5207 5132
5208 xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); 5133 status = decode_compound_hdr(xdr, &hdr);
5209 status = decode_compound_hdr(&xdr, &hdr);
5210 if (status) 5134 if (status)
5211 goto out; 5135 goto out;
5212 status = decode_sequence(&xdr, &res->seq_res, rqstp); 5136 status = decode_sequence(xdr, &res->seq_res, rqstp);
5213 if (status) 5137 if (status)
5214 goto out; 5138 goto out;
5215 if ((status = decode_putfh(&xdr)) != 0) 5139 status = decode_putfh(xdr);
5140 if (status)
5216 goto out; 5141 goto out;
5217 if ((status = decode_remove(&xdr, &res->cinfo)) != 0) 5142 status = decode_remove(xdr, &res->cinfo);
5143 if (status)
5218 goto out; 5144 goto out;
5219 decode_getfattr(&xdr, res->dir_attr, res->server, 5145 decode_getfattr(xdr, res->dir_attr, res->server,
5220 !RPC_IS_ASYNC(rqstp->rq_task)); 5146 !RPC_IS_ASYNC(rqstp->rq_task));
5221out: 5147out:
5222 return status; 5148 return status;
@@ -5225,34 +5151,38 @@ out:
5225/* 5151/*
5226 * Decode RENAME response 5152 * Decode RENAME response
5227 */ 5153 */
5228static int nfs4_xdr_dec_rename(struct rpc_rqst *rqstp, __be32 *p, struct nfs_renameres *res) 5154static int nfs4_xdr_dec_rename(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
5155 struct nfs_renameres *res)
5229{ 5156{
5230 struct xdr_stream xdr;
5231 struct compound_hdr hdr; 5157 struct compound_hdr hdr;
5232 int status; 5158 int status;
5233 5159
5234 xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); 5160 status = decode_compound_hdr(xdr, &hdr);
5235 status = decode_compound_hdr(&xdr, &hdr);
5236 if (status) 5161 if (status)
5237 goto out; 5162 goto out;
5238 status = decode_sequence(&xdr, &res->seq_res, rqstp); 5163 status = decode_sequence(xdr, &res->seq_res, rqstp);
5239 if (status) 5164 if (status)
5240 goto out; 5165 goto out;
5241 if ((status = decode_putfh(&xdr)) != 0) 5166 status = decode_putfh(xdr);
5167 if (status)
5242 goto out; 5168 goto out;
5243 if ((status = decode_savefh(&xdr)) != 0) 5169 status = decode_savefh(xdr);
5170 if (status)
5244 goto out; 5171 goto out;
5245 if ((status = decode_putfh(&xdr)) != 0) 5172 status = decode_putfh(xdr);
5173 if (status)
5246 goto out; 5174 goto out;
5247 if ((status = decode_rename(&xdr, &res->old_cinfo, &res->new_cinfo)) != 0) 5175 status = decode_rename(xdr, &res->old_cinfo, &res->new_cinfo);
5176 if (status)
5248 goto out; 5177 goto out;
5249 /* Current FH is target directory */ 5178 /* Current FH is target directory */
5250 if (decode_getfattr(&xdr, res->new_fattr, res->server, 5179 if (decode_getfattr(xdr, res->new_fattr, res->server,
5251 !RPC_IS_ASYNC(rqstp->rq_task)) != 0) 5180 !RPC_IS_ASYNC(rqstp->rq_task)) != 0)
5252 goto out; 5181 goto out;
5253 if ((status = decode_restorefh(&xdr)) != 0) 5182 status = decode_restorefh(xdr);
5183 if (status)
5254 goto out; 5184 goto out;
5255 decode_getfattr(&xdr, res->old_fattr, res->server, 5185 decode_getfattr(xdr, res->old_fattr, res->server,
5256 !RPC_IS_ASYNC(rqstp->rq_task)); 5186 !RPC_IS_ASYNC(rqstp->rq_task));
5257out: 5187out:
5258 return status; 5188 return status;
@@ -5261,37 +5191,41 @@ out:
5261/* 5191/*
5262 * Decode LINK response 5192 * Decode LINK response
5263 */ 5193 */
5264static int nfs4_xdr_dec_link(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_link_res *res) 5194static int nfs4_xdr_dec_link(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
5195 struct nfs4_link_res *res)
5265{ 5196{
5266 struct xdr_stream xdr;
5267 struct compound_hdr hdr; 5197 struct compound_hdr hdr;
5268 int status; 5198 int status;
5269 5199
5270 xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); 5200 status = decode_compound_hdr(xdr, &hdr);
5271 status = decode_compound_hdr(&xdr, &hdr);
5272 if (status) 5201 if (status)
5273 goto out; 5202 goto out;
5274 status = decode_sequence(&xdr, &res->seq_res, rqstp); 5203 status = decode_sequence(xdr, &res->seq_res, rqstp);
5275 if (status) 5204 if (status)
5276 goto out; 5205 goto out;
5277 if ((status = decode_putfh(&xdr)) != 0) 5206 status = decode_putfh(xdr);
5207 if (status)
5278 goto out; 5208 goto out;
5279 if ((status = decode_savefh(&xdr)) != 0) 5209 status = decode_savefh(xdr);
5210 if (status)
5280 goto out; 5211 goto out;
5281 if ((status = decode_putfh(&xdr)) != 0) 5212 status = decode_putfh(xdr);
5213 if (status)
5282 goto out; 5214 goto out;
5283 if ((status = decode_link(&xdr, &res->cinfo)) != 0) 5215 status = decode_link(xdr, &res->cinfo);
5216 if (status)
5284 goto out; 5217 goto out;
5285 /* 5218 /*
5286 * Note order: OP_LINK leaves the directory as the current 5219 * Note order: OP_LINK leaves the directory as the current
5287 * filehandle. 5220 * filehandle.
5288 */ 5221 */
5289 if (decode_getfattr(&xdr, res->dir_attr, res->server, 5222 if (decode_getfattr(xdr, res->dir_attr, res->server,
5290 !RPC_IS_ASYNC(rqstp->rq_task)) != 0) 5223 !RPC_IS_ASYNC(rqstp->rq_task)) != 0)
5291 goto out; 5224 goto out;
5292 if ((status = decode_restorefh(&xdr)) != 0) 5225 status = decode_restorefh(xdr);
5226 if (status)
5293 goto out; 5227 goto out;
5294 decode_getfattr(&xdr, res->fattr, res->server, 5228 decode_getfattr(xdr, res->fattr, res->server,
5295 !RPC_IS_ASYNC(rqstp->rq_task)); 5229 !RPC_IS_ASYNC(rqstp->rq_task));
5296out: 5230out:
5297 return status; 5231 return status;
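
The in-code comment above is the key to this decode order. A COMPOUND reply is strictly positional, one result per encoded op, so the LINK decoder has to walk the ops in encode order; the matching encoder is outside this hunk, but the op list can be read straight off the decode calls:

	/* reply decoding is positional: one decode_* per encoded op
	 *   PUTFH     -> decode_putfh()
	 *   SAVEFH    -> decode_savefh()
	 *   PUTFH     -> decode_putfh()
	 *   LINK      -> decode_link()
	 *   GETATTR   -> decode_getfattr()   directory attrs (current FH)
	 *   RESTOREFH -> decode_restorefh()
	 *   GETATTR   -> decode_getfattr()   attrs of the linked file
	 */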
@@ -5300,33 +5234,37 @@ out:
5300/* 5234/*
5301 * Decode CREATE response 5235 * Decode CREATE response
5302 */ 5236 */
5303static int nfs4_xdr_dec_create(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_create_res *res) 5237static int nfs4_xdr_dec_create(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
5238 struct nfs4_create_res *res)
5304{ 5239{
5305 struct xdr_stream xdr;
5306 struct compound_hdr hdr; 5240 struct compound_hdr hdr;
5307 int status; 5241 int status;
5308 5242
5309 xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); 5243 status = decode_compound_hdr(xdr, &hdr);
5310 status = decode_compound_hdr(&xdr, &hdr);
5311 if (status) 5244 if (status)
5312 goto out; 5245 goto out;
5313 status = decode_sequence(&xdr, &res->seq_res, rqstp); 5246 status = decode_sequence(xdr, &res->seq_res, rqstp);
5314 if (status) 5247 if (status)
5315 goto out; 5248 goto out;
5316 if ((status = decode_putfh(&xdr)) != 0) 5249 status = decode_putfh(xdr);
5250 if (status)
5317 goto out; 5251 goto out;
5318 if ((status = decode_savefh(&xdr)) != 0) 5252 status = decode_savefh(xdr);
5253 if (status)
5319 goto out; 5254 goto out;
5320 if ((status = decode_create(&xdr,&res->dir_cinfo)) != 0) 5255 status = decode_create(xdr, &res->dir_cinfo);
5256 if (status)
5321 goto out; 5257 goto out;
5322 if ((status = decode_getfh(&xdr, res->fh)) != 0) 5258 status = decode_getfh(xdr, res->fh);
5259 if (status)
5323 goto out; 5260 goto out;
5324 if (decode_getfattr(&xdr, res->fattr, res->server, 5261 if (decode_getfattr(xdr, res->fattr, res->server,
5325 !RPC_IS_ASYNC(rqstp->rq_task)) != 0) 5262 !RPC_IS_ASYNC(rqstp->rq_task)) != 0)
5326 goto out; 5263 goto out;
5327 if ((status = decode_restorefh(&xdr)) != 0) 5264 status = decode_restorefh(xdr);
5265 if (status)
5328 goto out; 5266 goto out;
5329 decode_getfattr(&xdr, res->dir_fattr, res->server, 5267 decode_getfattr(xdr, res->dir_fattr, res->server,
5330 !RPC_IS_ASYNC(rqstp->rq_task)); 5268 !RPC_IS_ASYNC(rqstp->rq_task));
5331out: 5269out:
5332 return status; 5270 return status;
@@ -5335,31 +5273,31 @@ out:
5335/* 5273/*
5336 * Decode SYMLINK response 5274 * Decode SYMLINK response
5337 */ 5275 */
5338static int nfs4_xdr_dec_symlink(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_create_res *res) 5276static int nfs4_xdr_dec_symlink(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
5277 struct nfs4_create_res *res)
5339{ 5278{
5340 return nfs4_xdr_dec_create(rqstp, p, res); 5279 return nfs4_xdr_dec_create(rqstp, xdr, res);
5341} 5280}
5342 5281
5343/* 5282/*
5344 * Decode GETATTR response 5283 * Decode GETATTR response
5345 */ 5284 */
5346static int nfs4_xdr_dec_getattr(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_getattr_res *res) 5285static int nfs4_xdr_dec_getattr(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
5286 struct nfs4_getattr_res *res)
5347{ 5287{
5348 struct xdr_stream xdr;
5349 struct compound_hdr hdr; 5288 struct compound_hdr hdr;
5350 int status; 5289 int status;
5351 5290
5352 xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); 5291 status = decode_compound_hdr(xdr, &hdr);
5353 status = decode_compound_hdr(&xdr, &hdr);
5354 if (status) 5292 if (status)
5355 goto out; 5293 goto out;
5356 status = decode_sequence(&xdr, &res->seq_res, rqstp); 5294 status = decode_sequence(xdr, &res->seq_res, rqstp);
5357 if (status) 5295 if (status)
5358 goto out; 5296 goto out;
5359 status = decode_putfh(&xdr); 5297 status = decode_putfh(xdr);
5360 if (status) 5298 if (status)
5361 goto out; 5299 goto out;
5362 status = decode_getfattr(&xdr, res->fattr, res->server, 5300 status = decode_getfattr(xdr, res->fattr, res->server,
5363 !RPC_IS_ASYNC(rqstp->rq_task)); 5301 !RPC_IS_ASYNC(rqstp->rq_task));
5364out: 5302out:
5365 return status; 5303 return status;
@@ -5368,46 +5306,40 @@ out:
5368/* 5306/*
5369 * Encode a SETACL request 5307 * Encode a SETACL request
5370 */ 5308 */
5371static int 5309static void nfs4_xdr_enc_setacl(struct rpc_rqst *req, struct xdr_stream *xdr,
5372nfs4_xdr_enc_setacl(struct rpc_rqst *req, __be32 *p, struct nfs_setaclargs *args) 5310 struct nfs_setaclargs *args)
5373{ 5311{
5374 struct xdr_stream xdr;
5375 struct compound_hdr hdr = { 5312 struct compound_hdr hdr = {
5376 .minorversion = nfs4_xdr_minorversion(&args->seq_args), 5313 .minorversion = nfs4_xdr_minorversion(&args->seq_args),
5377 }; 5314 };
5378 int status;
5379 5315
5380 xdr_init_encode(&xdr, &req->rq_snd_buf, p); 5316 encode_compound_hdr(xdr, req, &hdr);
5381 encode_compound_hdr(&xdr, req, &hdr); 5317 encode_sequence(xdr, &args->seq_args, &hdr);
5382 encode_sequence(&xdr, &args->seq_args, &hdr); 5318 encode_putfh(xdr, args->fh, &hdr);
5383 encode_putfh(&xdr, args->fh, &hdr); 5319 encode_setacl(xdr, args, &hdr);
5384 status = encode_setacl(&xdr, args, &hdr);
5385 encode_nops(&hdr); 5320 encode_nops(&hdr);
5386 return status;
5387} 5321}
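
SETACL is worth a second look: it was the one encoder whose return value did anything, propagating encode_setacl()'s status. The conversion discards that status along with the int local, so whatever encode_setacl() could report is no longer surfaced here; this is the only behavioral wrinkle in an otherwise mechanical hunk. Side by side:

	/* before: the lone encoder with a real return path */
	status = encode_setacl(&xdr, args, &hdr);
	encode_nops(&hdr);
	return status;

	/* after: status local and return dropped like everywhere else */
	encode_setacl(xdr, args, &hdr);
	encode_nops(&hdr);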
5388 5322
5389/* 5323/*
5390 * Decode SETACL response 5324 * Decode SETACL response
5391 */ 5325 */
5392static int 5326static int
5393nfs4_xdr_dec_setacl(struct rpc_rqst *rqstp, __be32 *p, 5327nfs4_xdr_dec_setacl(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
5394 struct nfs_setaclres *res) 5328 struct nfs_setaclres *res)
5395{ 5329{
5396 struct xdr_stream xdr;
5397 struct compound_hdr hdr; 5330 struct compound_hdr hdr;
5398 int status; 5331 int status;
5399 5332
5400 xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); 5333 status = decode_compound_hdr(xdr, &hdr);
5401 status = decode_compound_hdr(&xdr, &hdr);
5402 if (status) 5334 if (status)
5403 goto out; 5335 goto out;
5404 status = decode_sequence(&xdr, &res->seq_res, rqstp); 5336 status = decode_sequence(xdr, &res->seq_res, rqstp);
5405 if (status) 5337 if (status)
5406 goto out; 5338 goto out;
5407 status = decode_putfh(&xdr); 5339 status = decode_putfh(xdr);
5408 if (status) 5340 if (status)
5409 goto out; 5341 goto out;
5410 status = decode_setattr(&xdr); 5342 status = decode_setattr(xdr);
5411out: 5343out:
5412 return status; 5344 return status;
5413} 5345}
@@ -5416,24 +5348,22 @@ out:
5416 * Decode GETACL response 5348 * Decode GETACL response
5417 */ 5349 */
5418static int 5350static int
5419nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp, __be32 *p, 5351nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
5420 struct nfs_getaclres *res) 5352 struct nfs_getaclres *res)
5421{ 5353{
5422 struct xdr_stream xdr;
5423 struct compound_hdr hdr; 5354 struct compound_hdr hdr;
5424 int status; 5355 int status;
5425 5356
5426 xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); 5357 status = decode_compound_hdr(xdr, &hdr);
5427 status = decode_compound_hdr(&xdr, &hdr);
5428 if (status) 5358 if (status)
5429 goto out; 5359 goto out;
5430 status = decode_sequence(&xdr, &res->seq_res, rqstp); 5360 status = decode_sequence(xdr, &res->seq_res, rqstp);
5431 if (status) 5361 if (status)
5432 goto out; 5362 goto out;
5433 status = decode_putfh(&xdr); 5363 status = decode_putfh(xdr);
5434 if (status) 5364 if (status)
5435 goto out; 5365 goto out;
5436 status = decode_getacl(&xdr, rqstp, &res->acl_len); 5366 status = decode_getacl(xdr, rqstp, &res->acl_len);
5437 5367
5438out: 5368out:
5439 return status; 5369 return status;
@@ -5442,23 +5372,22 @@ out:
5442/* 5372/*
5443 * Decode CLOSE response 5373 * Decode CLOSE response
5444 */ 5374 */
5445static int nfs4_xdr_dec_close(struct rpc_rqst *rqstp, __be32 *p, struct nfs_closeres *res) 5375static int nfs4_xdr_dec_close(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
5376 struct nfs_closeres *res)
5446{ 5377{
5447 struct xdr_stream xdr;
5448 struct compound_hdr hdr; 5378 struct compound_hdr hdr;
5449 int status; 5379 int status;
5450 5380
5451 xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); 5381 status = decode_compound_hdr(xdr, &hdr);
5452 status = decode_compound_hdr(&xdr, &hdr);
5453 if (status) 5382 if (status)
5454 goto out; 5383 goto out;
5455 status = decode_sequence(&xdr, &res->seq_res, rqstp); 5384 status = decode_sequence(xdr, &res->seq_res, rqstp);
5456 if (status) 5385 if (status)
5457 goto out; 5386 goto out;
5458 status = decode_putfh(&xdr); 5387 status = decode_putfh(xdr);
5459 if (status) 5388 if (status)
5460 goto out; 5389 goto out;
5461 status = decode_close(&xdr, res); 5390 status = decode_close(xdr, res);
5462 if (status != 0) 5391 if (status != 0)
5463 goto out; 5392 goto out;
5464 /* 5393 /*
@@ -5467,7 +5396,7 @@ static int nfs4_xdr_dec_close(struct rpc_rqst *rqstp, __be32 *p, struct nfs_clos
 	 * an ESTALE error. Shouldn't be a problem,
 	 * though, since fattr->valid will remain unset.
 	 */
-	decode_getfattr(&xdr, res->fattr, res->server,
+	decode_getfattr(xdr, res->fattr, res->server,
 			!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5476,36 +5405,35 @@ out:
 /*
  * Decode OPEN response
  */
-static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp, __be32 *p, struct nfs_openres *res)
+static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			     struct nfs_openres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_savefh(&xdr);
+	status = decode_savefh(xdr);
 	if (status)
 		goto out;
-	status = decode_open(&xdr, res);
+	status = decode_open(xdr, res);
 	if (status)
 		goto out;
-	if (decode_getfh(&xdr, &res->fh) != 0)
+	if (decode_getfh(xdr, &res->fh) != 0)
 		goto out;
-	if (decode_getfattr(&xdr, res->f_attr, res->server,
+	if (decode_getfattr(xdr, res->f_attr, res->server,
 			!RPC_IS_ASYNC(rqstp->rq_task)) != 0)
 		goto out;
-	if (decode_restorefh(&xdr) != 0)
+	if (decode_restorefh(xdr) != 0)
 		goto out;
-	decode_getfattr(&xdr, res->dir_attr, res->server,
+	decode_getfattr(xdr, res->dir_attr, res->server,
 			!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5514,20 +5442,20 @@ out:
 /*
  * Decode OPEN_CONFIRM response
  */
-static int nfs4_xdr_dec_open_confirm(struct rpc_rqst *rqstp, __be32 *p, struct nfs_open_confirmres *res)
+static int nfs4_xdr_dec_open_confirm(struct rpc_rqst *rqstp,
+				     struct xdr_stream *xdr,
+				     struct nfs_open_confirmres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_open_confirm(&xdr, res);
+	status = decode_open_confirm(xdr, res);
 out:
 	return status;
 }
@@ -5535,26 +5463,26 @@ out:
 /*
  * Decode OPEN response
  */
-static int nfs4_xdr_dec_open_noattr(struct rpc_rqst *rqstp, __be32 *p, struct nfs_openres *res)
+static int nfs4_xdr_dec_open_noattr(struct rpc_rqst *rqstp,
+				    struct xdr_stream *xdr,
+				    struct nfs_openres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_open(&xdr, res);
+	status = decode_open(xdr, res);
 	if (status)
 		goto out;
-	decode_getfattr(&xdr, res->f_attr, res->server,
+	decode_getfattr(xdr, res->f_attr, res->server,
 			!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5563,26 +5491,26 @@ out:
 /*
  * Decode SETATTR response
  */
-static int nfs4_xdr_dec_setattr(struct rpc_rqst *rqstp, __be32 *p, struct nfs_setattrres *res)
+static int nfs4_xdr_dec_setattr(struct rpc_rqst *rqstp,
+				struct xdr_stream *xdr,
+				struct nfs_setattrres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_setattr(&xdr);
+	status = decode_setattr(xdr);
 	if (status)
 		goto out;
-	decode_getfattr(&xdr, res->fattr, res->server,
+	decode_getfattr(xdr, res->fattr, res->server,
 			!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5591,23 +5519,22 @@ out:
 /*
  * Decode LOCK response
  */
-static int nfs4_xdr_dec_lock(struct rpc_rqst *rqstp, __be32 *p, struct nfs_lock_res *res)
+static int nfs4_xdr_dec_lock(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			     struct nfs_lock_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_lock(&xdr, res);
+	status = decode_lock(xdr, res);
 out:
 	return status;
 }
@@ -5615,23 +5542,22 @@ out:
 /*
  * Decode LOCKT response
  */
-static int nfs4_xdr_dec_lockt(struct rpc_rqst *rqstp, __be32 *p, struct nfs_lockt_res *res)
+static int nfs4_xdr_dec_lockt(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			      struct nfs_lockt_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_lockt(&xdr, res);
+	status = decode_lockt(xdr, res);
 out:
 	return status;
 }
@@ -5639,61 +5565,58 @@ out:
 /*
  * Decode LOCKU response
  */
-static int nfs4_xdr_dec_locku(struct rpc_rqst *rqstp, __be32 *p, struct nfs_locku_res *res)
+static int nfs4_xdr_dec_locku(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			      struct nfs_locku_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_locku(&xdr, res);
+	status = decode_locku(xdr, res);
 out:
 	return status;
 }
 
-static int nfs4_xdr_dec_release_lockowner(struct rpc_rqst *rqstp, __be32 *p, void *dummy)
+static int nfs4_xdr_dec_release_lockowner(struct rpc_rqst *rqstp,
+					  struct xdr_stream *xdr, void *dummy)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_release_lockowner(&xdr);
+		status = decode_release_lockowner(xdr);
 	return status;
 }
 
 /*
  * Decode READLINK response
  */
-static int nfs4_xdr_dec_readlink(struct rpc_rqst *rqstp, __be32 *p,
+static int nfs4_xdr_dec_readlink(struct rpc_rqst *rqstp,
+				 struct xdr_stream *xdr,
 				 struct nfs4_readlink_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_readlink(&xdr, rqstp);
+	status = decode_readlink(xdr, rqstp);
 out:
 	return status;
 }
@@ -5701,23 +5624,22 @@ out:
 /*
  * Decode READDIR response
  */
-static int nfs4_xdr_dec_readdir(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_readdir_res *res)
+static int nfs4_xdr_dec_readdir(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+				struct nfs4_readdir_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_readdir(&xdr, rqstp, res);
+	status = decode_readdir(xdr, rqstp, res);
 out:
 	return status;
 }
@@ -5725,23 +5647,22 @@ out:
 /*
  * Decode Read response
  */
-static int nfs4_xdr_dec_read(struct rpc_rqst *rqstp, __be32 *p, struct nfs_readres *res)
+static int nfs4_xdr_dec_read(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			     struct nfs_readres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_read(&xdr, rqstp, res);
+	status = decode_read(xdr, rqstp, res);
 	if (!status)
 		status = res->count;
 out:
@@ -5751,26 +5672,25 @@ out:
 /*
  * Decode WRITE response
  */
-static int nfs4_xdr_dec_write(struct rpc_rqst *rqstp, __be32 *p, struct nfs_writeres *res)
+static int nfs4_xdr_dec_write(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			      struct nfs_writeres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_write(&xdr, res);
+	status = decode_write(xdr, res);
 	if (status)
 		goto out;
-	decode_getfattr(&xdr, res->fattr, res->server,
+	decode_getfattr(xdr, res->fattr, res->server,
 			!RPC_IS_ASYNC(rqstp->rq_task));
 	if (!status)
 		status = res->count;
@@ -5781,26 +5701,25 @@ out:
 /*
  * Decode COMMIT response
  */
-static int nfs4_xdr_dec_commit(struct rpc_rqst *rqstp, __be32 *p, struct nfs_writeres *res)
+static int nfs4_xdr_dec_commit(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			       struct nfs_writeres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_commit(&xdr, res);
+	status = decode_commit(xdr, res);
 	if (status)
 		goto out;
-	decode_getfattr(&xdr, res->fattr, res->server,
+	decode_getfattr(xdr, res->fattr, res->server,
 			!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5809,85 +5728,80 @@ out:
 /*
  * Decode FSINFO response
  */
-static int nfs4_xdr_dec_fsinfo(struct rpc_rqst *req, __be32 *p,
+static int nfs4_xdr_dec_fsinfo(struct rpc_rqst *req, struct xdr_stream *xdr,
 			       struct nfs4_fsinfo_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_sequence(&xdr, &res->seq_res, req);
+		status = decode_sequence(xdr, &res->seq_res, req);
 	if (!status)
-		status = decode_putfh(&xdr);
+		status = decode_putfh(xdr);
 	if (!status)
-		status = decode_fsinfo(&xdr, res->fsinfo);
+		status = decode_fsinfo(xdr, res->fsinfo);
 	return status;
 }
 
 /*
  * Decode PATHCONF response
  */
-static int nfs4_xdr_dec_pathconf(struct rpc_rqst *req, __be32 *p,
+static int nfs4_xdr_dec_pathconf(struct rpc_rqst *req, struct xdr_stream *xdr,
 				 struct nfs4_pathconf_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_sequence(&xdr, &res->seq_res, req);
+		status = decode_sequence(xdr, &res->seq_res, req);
 	if (!status)
-		status = decode_putfh(&xdr);
+		status = decode_putfh(xdr);
 	if (!status)
-		status = decode_pathconf(&xdr, res->pathconf);
+		status = decode_pathconf(xdr, res->pathconf);
 	return status;
 }
 
 /*
  * Decode STATFS response
  */
-static int nfs4_xdr_dec_statfs(struct rpc_rqst *req, __be32 *p,
+static int nfs4_xdr_dec_statfs(struct rpc_rqst *req, struct xdr_stream *xdr,
 			       struct nfs4_statfs_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_sequence(&xdr, &res->seq_res, req);
+		status = decode_sequence(xdr, &res->seq_res, req);
 	if (!status)
-		status = decode_putfh(&xdr);
+		status = decode_putfh(xdr);
 	if (!status)
-		status = decode_statfs(&xdr, res->fsstat);
+		status = decode_statfs(xdr, res->fsstat);
 	return status;
 }
 
 /*
  * Decode GETATTR_BITMAP response
  */
-static int nfs4_xdr_dec_server_caps(struct rpc_rqst *req, __be32 *p, struct nfs4_server_caps_res *res)
+static int nfs4_xdr_dec_server_caps(struct rpc_rqst *req,
+				    struct xdr_stream *xdr,
+				    struct nfs4_server_caps_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, req);
+	status = decode_sequence(xdr, &res->seq_res, req);
 	if (status)
 		goto out;
-	if ((status = decode_putfh(&xdr)) != 0)
+	status = decode_putfh(xdr);
+	if (status)
 		goto out;
-	status = decode_server_caps(&xdr, res);
+	status = decode_server_caps(xdr, res);
 out:
 	return status;
 }
@@ -5895,79 +5809,77 @@ out:
 /*
  * Decode RENEW response
  */
-static int nfs4_xdr_dec_renew(struct rpc_rqst *rqstp, __be32 *p, void *dummy)
+static int nfs4_xdr_dec_renew(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			      void *__unused)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_renew(&xdr);
+		status = decode_renew(xdr);
 	return status;
 }
 
 /*
  * Decode SETCLIENTID response
  */
-static int nfs4_xdr_dec_setclientid(struct rpc_rqst *req, __be32 *p,
-				    struct nfs4_setclientid_res *res)
+static int nfs4_xdr_dec_setclientid(struct rpc_rqst *req,
+				    struct xdr_stream *xdr,
+				    struct nfs4_setclientid_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_setclientid(&xdr, res);
+		status = decode_setclientid(xdr, res);
 	return status;
 }
 
 /*
  * Decode SETCLIENTID_CONFIRM response
  */
-static int nfs4_xdr_dec_setclientid_confirm(struct rpc_rqst *req, __be32 *p, struct nfs_fsinfo *fsinfo)
+static int nfs4_xdr_dec_setclientid_confirm(struct rpc_rqst *req,
+					    struct xdr_stream *xdr,
+					    struct nfs_fsinfo *fsinfo)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_setclientid_confirm(&xdr);
+		status = decode_setclientid_confirm(xdr);
 	if (!status)
-		status = decode_putrootfh(&xdr);
+		status = decode_putrootfh(xdr);
 	if (!status)
-		status = decode_fsinfo(&xdr, fsinfo);
+		status = decode_fsinfo(xdr, fsinfo);
 	return status;
 }
 
 /*
  * Decode DELEGRETURN response
  */
-static int nfs4_xdr_dec_delegreturn(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_delegreturnres *res)
+static int nfs4_xdr_dec_delegreturn(struct rpc_rqst *rqstp,
+				    struct xdr_stream *xdr,
+				    struct nfs4_delegreturnres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status != 0)
 		goto out;
-	status = decode_delegreturn(&xdr);
+	status = decode_delegreturn(xdr);
 	if (status != 0)
 		goto out;
-	decode_getfattr(&xdr, res->fattr, res->server,
+	decode_getfattr(xdr, res->fattr, res->server,
 			!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5976,26 +5888,27 @@ out:
 /*
  * Decode FS_LOCATIONS response
  */
-static int nfs4_xdr_dec_fs_locations(struct rpc_rqst *req, __be32 *p,
+static int nfs4_xdr_dec_fs_locations(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
 				     struct nfs4_fs_locations_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, req);
+	status = decode_sequence(xdr, &res->seq_res, req);
 	if (status)
 		goto out;
-	if ((status = decode_putfh(&xdr)) != 0)
+	status = decode_putfh(xdr);
+	if (status)
 		goto out;
-	if ((status = decode_lookup(&xdr)) != 0)
+	status = decode_lookup(xdr);
+	if (status)
 		goto out;
-	xdr_enter_page(&xdr, PAGE_SIZE);
-	status = decode_getfattr(&xdr, &res->fs_locations->fattr,
+	xdr_enter_page(xdr, PAGE_SIZE);
+	status = decode_getfattr(xdr, &res->fs_locations->fattr,
 				 res->fs_locations->server,
 				 !RPC_IS_ASYNC(req->rq_task));
 out:
@@ -6006,129 +5919,122 @@ out:
 /*
  * Decode EXCHANGE_ID response
  */
-static int nfs4_xdr_dec_exchange_id(struct rpc_rqst *rqstp, uint32_t *p,
+static int nfs4_xdr_dec_exchange_id(struct rpc_rqst *rqstp,
+				    struct xdr_stream *xdr,
 				    void *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_exchange_id(&xdr, res);
+		status = decode_exchange_id(xdr, res);
 	return status;
 }
 
 /*
  * Decode CREATE_SESSION response
  */
-static int nfs4_xdr_dec_create_session(struct rpc_rqst *rqstp, uint32_t *p,
+static int nfs4_xdr_dec_create_session(struct rpc_rqst *rqstp,
+				       struct xdr_stream *xdr,
 				       struct nfs41_create_session_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_create_session(&xdr, res);
+		status = decode_create_session(xdr, res);
 	return status;
 }
 
 /*
  * Decode DESTROY_SESSION response
  */
-static int nfs4_xdr_dec_destroy_session(struct rpc_rqst *rqstp, uint32_t *p,
-					void *dummy)
+static int nfs4_xdr_dec_destroy_session(struct rpc_rqst *rqstp,
+					struct xdr_stream *xdr,
+					void *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_destroy_session(&xdr, dummy);
+		status = decode_destroy_session(xdr, res);
 	return status;
 }
 
 /*
  * Decode SEQUENCE response
  */
-static int nfs4_xdr_dec_sequence(struct rpc_rqst *rqstp, uint32_t *p,
+static int nfs4_xdr_dec_sequence(struct rpc_rqst *rqstp,
+				 struct xdr_stream *xdr,
 				 struct nfs4_sequence_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_sequence(&xdr, res, rqstp);
+		status = decode_sequence(xdr, res, rqstp);
 	return status;
 }
 
 /*
  * Decode GET_LEASE_TIME response
  */
-static int nfs4_xdr_dec_get_lease_time(struct rpc_rqst *rqstp, uint32_t *p,
+static int nfs4_xdr_dec_get_lease_time(struct rpc_rqst *rqstp,
+				       struct xdr_stream *xdr,
 				       struct nfs4_get_lease_time_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_sequence(&xdr, &res->lr_seq_res, rqstp);
+		status = decode_sequence(xdr, &res->lr_seq_res, rqstp);
 	if (!status)
-		status = decode_putrootfh(&xdr);
+		status = decode_putrootfh(xdr);
 	if (!status)
-		status = decode_fsinfo(&xdr, res->lr_fsinfo);
+		status = decode_fsinfo(xdr, res->lr_fsinfo);
 	return status;
 }
 
 /*
  * Decode RECLAIM_COMPLETE response
  */
-static int nfs4_xdr_dec_reclaim_complete(struct rpc_rqst *rqstp, uint32_t *p,
+static int nfs4_xdr_dec_reclaim_complete(struct rpc_rqst *rqstp,
+					 struct xdr_stream *xdr,
 					 struct nfs41_reclaim_complete_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_sequence(&xdr, &res->seq_res, rqstp);
+		status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (!status)
-		status = decode_reclaim_complete(&xdr, (void *)NULL);
+		status = decode_reclaim_complete(xdr, (void *)NULL);
 	return status;
 }
 
 /*
  * Decode GETDEVINFO response
  */
-static int nfs4_xdr_dec_getdeviceinfo(struct rpc_rqst *rqstp, uint32_t *p,
+static int nfs4_xdr_dec_getdeviceinfo(struct rpc_rqst *rqstp,
+				      struct xdr_stream *xdr,
 				      struct nfs4_getdeviceinfo_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status != 0)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status != 0)
 		goto out;
-	status = decode_getdeviceinfo(&xdr, res->pdev);
+	status = decode_getdeviceinfo(xdr, res->pdev);
 out:
 	return status;
 }
@@ -6136,31 +6042,44 @@ out:
 /*
  * Decode LAYOUTGET response
  */
-static int nfs4_xdr_dec_layoutget(struct rpc_rqst *rqstp, uint32_t *p,
+static int nfs4_xdr_dec_layoutget(struct rpc_rqst *rqstp,
+				  struct xdr_stream *xdr,
 				  struct nfs4_layoutget_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_layoutget(&xdr, rqstp, res);
+	status = decode_layoutget(xdr, rqstp, res);
 out:
 	return status;
 }
 #endif /* CONFIG_NFS_V4_1 */
 
-__be32 *nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
-			   struct nfs_server *server, int plus)
+/**
+ * nfs4_decode_dirent - Decode a single NFSv4 directory entry stored in
+ *                      the local page cache.
+ * @xdr: XDR stream where entry resides
+ * @entry: buffer to fill in with entry data
+ * @plus: boolean indicating whether this should be a readdirplus entry
+ *
+ * Returns zero if successful, otherwise a negative errno value is
+ * returned.
+ *
+ * This function is not invoked during READDIR reply decoding, but
+ * rather whenever an application invokes the getdents(2) system call
+ * on a directory already in our cache.
+ */
+int nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
+		       int plus)
 {
 	uint32_t bitmap[2] = {0};
 	uint32_t len;
@@ -6172,9 +6091,9 @@ __be32 *nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
 	if (unlikely(!p))
 		goto out_overflow;
 	if (!ntohl(*p++))
-		return ERR_PTR(-EAGAIN);
+		return -EAGAIN;
 		entry->eof = 1;
-		return ERR_PTR(-EBADCOOKIE);
+		return -EBADCOOKIE;
 	}
 
 	p = xdr_inline_decode(xdr, 12);
@@ -6203,7 +6122,8 @@ __be32 *nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
 	if (decode_attr_length(xdr, &len, &p) < 0)
 		goto out_overflow;
 
-	if (decode_getfattr_attrs(xdr, bitmap, entry->fattr, entry->fh, server, 1) < 0)
+	if (decode_getfattr_attrs(xdr, bitmap, entry->fattr, entry->fh,
+				  entry->server, 1) < 0)
 		goto out_overflow;
 	if (entry->fattr->valid & NFS_ATTR_FATTR_FILEID)
 		entry->ino = entry->fattr->fileid;
@@ -6215,17 +6135,11 @@ __be32 *nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
 	if (verify_attr_len(xdr, p, len) < 0)
 		goto out_overflow;
 
-	p = xdr_inline_peek(xdr, 8);
-	if (p != NULL)
-		entry->eof = !p[0] && p[1];
-	else
-		entry->eof = 0;
-
-	return p;
+	return 0;
 
 out_overflow:
 	print_overflow_msg(__func__, xdr);
-	return ERR_PTR(-EAGAIN);
+	return -EAGAIN;
 }
 
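/* [Editor's note -- annotation, not part of the patch. The decoder's
 * return type changes here from an ERR_PTR-encoded __be32 pointer to a
 * plain int errno, and the xdr_inline_peek() look-ahead that guessed at
 * EOF is dropped. As far as the diff shows, end-of-directory is now
 * reported through entry->eof and the -EBADCOOKIE/-EAGAIN returns, with
 * the caller relying on the xdr_stream cursor rather than the returned
 * pointer to track its position.
 */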
 /*
@@ -6301,8 +6215,8 @@ nfs4_stat_to_errno(int stat)
 #define PROC(proc, argtype, restype) \
 [NFSPROC4_CLNT_##proc] = { \
 	.p_proc = NFSPROC4_COMPOUND, \
-	.p_encode = (kxdrproc_t) nfs4_xdr_##argtype, \
-	.p_decode = (kxdrproc_t) nfs4_xdr_##restype, \
+	.p_encode = (kxdreproc_t)nfs4_xdr_##argtype, \
+	.p_decode = (kxdrdproc_t)nfs4_xdr_##restype, \
 	.p_arglen = NFS4_##argtype##_sz, \
 	.p_replen = NFS4_##restype##_sz, \
 	.p_statidx = NFSPROC4_CLNT_##proc, \
@@ -6310,50 +6224,50 @@ nfs4_stat_to_errno(int stat)
 }
 
 struct rpc_procinfo nfs4_procedures[] = {
 	PROC(READ, enc_read, dec_read),
 	PROC(WRITE, enc_write, dec_write),
 	PROC(COMMIT, enc_commit, dec_commit),
 	PROC(OPEN, enc_open, dec_open),
 	PROC(OPEN_CONFIRM, enc_open_confirm, dec_open_confirm),
 	PROC(OPEN_NOATTR, enc_open_noattr, dec_open_noattr),
 	PROC(OPEN_DOWNGRADE, enc_open_downgrade, dec_open_downgrade),
 	PROC(CLOSE, enc_close, dec_close),
 	PROC(SETATTR, enc_setattr, dec_setattr),
 	PROC(FSINFO, enc_fsinfo, dec_fsinfo),
 	PROC(RENEW, enc_renew, dec_renew),
 	PROC(SETCLIENTID, enc_setclientid, dec_setclientid),
 	PROC(SETCLIENTID_CONFIRM, enc_setclientid_confirm, dec_setclientid_confirm),
 	PROC(LOCK, enc_lock, dec_lock),
 	PROC(LOCKT, enc_lockt, dec_lockt),
 	PROC(LOCKU, enc_locku, dec_locku),
 	PROC(ACCESS, enc_access, dec_access),
 	PROC(GETATTR, enc_getattr, dec_getattr),
 	PROC(LOOKUP, enc_lookup, dec_lookup),
 	PROC(LOOKUP_ROOT, enc_lookup_root, dec_lookup_root),
 	PROC(REMOVE, enc_remove, dec_remove),
 	PROC(RENAME, enc_rename, dec_rename),
 	PROC(LINK, enc_link, dec_link),
 	PROC(SYMLINK, enc_symlink, dec_symlink),
 	PROC(CREATE, enc_create, dec_create),
 	PROC(PATHCONF, enc_pathconf, dec_pathconf),
 	PROC(STATFS, enc_statfs, dec_statfs),
 	PROC(READLINK, enc_readlink, dec_readlink),
 	PROC(READDIR, enc_readdir, dec_readdir),
 	PROC(SERVER_CAPS, enc_server_caps, dec_server_caps),
 	PROC(DELEGRETURN, enc_delegreturn, dec_delegreturn),
 	PROC(GETACL, enc_getacl, dec_getacl),
 	PROC(SETACL, enc_setacl, dec_setacl),
 	PROC(FS_LOCATIONS, enc_fs_locations, dec_fs_locations),
 	PROC(RELEASE_LOCKOWNER, enc_release_lockowner, dec_release_lockowner),
 #if defined(CONFIG_NFS_V4_1)
 	PROC(EXCHANGE_ID, enc_exchange_id, dec_exchange_id),
 	PROC(CREATE_SESSION, enc_create_session, dec_create_session),
 	PROC(DESTROY_SESSION, enc_destroy_session, dec_destroy_session),
 	PROC(SEQUENCE, enc_sequence, dec_sequence),
 	PROC(GET_LEASE_TIME, enc_get_lease_time, dec_get_lease_time),
 	PROC(RECLAIM_COMPLETE, enc_reclaim_complete, dec_reclaim_complete),
 	PROC(GETDEVICEINFO, enc_getdeviceinfo, dec_getdeviceinfo),
 	PROC(LAYOUTGET, enc_layoutget, dec_layoutget),
 #endif /* CONFIG_NFS_V4_1 */
 };
 
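Editor's note on the table above: the point of this conversion shows up in PROC() -- every encoder now matches kxdreproc_t and every decoder matches kxdrdproc_t, both of which take an already-initialized struct xdr_stream instead of a bare __be32 pointer, so the per-procedure xdr_init_decode() boilerplate disappears. A minimal sketch of the shape involved; the typedef body below is paraphrased from the SUNRPC headers of this period rather than quoted, and demo_res/demo_dec are hypothetical names:

	/* Sketch only -- kxdrdproc_t shape is an assumption paraphrased
	 * from include/linux/sunrpc/xdr.h of this era.
	 */
	struct rpc_rqst;
	struct xdr_stream;

	typedef int (*kxdrdproc_t)(struct rpc_rqst *rqstp,
				   struct xdr_stream *xdr, void *obj);

	struct demo_res { int count; };

	/* Decoders no longer call xdr_init_decode() themselves; the RPC
	 * layer initializes the stream once and passes it in.
	 */
	static int demo_dec(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
			    struct demo_res *res)
	{
		return 0;	/* decode_compound_hdr(xdr, ...), etc. */
	}

	/* The cast in PROC() erases the concrete result type so one
	 * rpc_procinfo table can hold every procedure:
	 */
	static const kxdrdproc_t demo_entry = (kxdrdproc_t)demo_dec;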
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index b68536cc9046..e1164e3f9e69 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -26,12 +26,9 @@ static struct kmem_cache *nfs_page_cachep;
 static inline struct nfs_page *
 nfs_page_alloc(void)
 {
-	struct nfs_page *p;
-	p = kmem_cache_alloc(nfs_page_cachep, GFP_KERNEL);
-	if (p) {
-		memset(p, 0, sizeof(*p));
+	struct nfs_page *p = kmem_cache_zalloc(nfs_page_cachep, GFP_KERNEL);
+	if (p)
 		INIT_LIST_HEAD(&p->wb_list);
-	}
 	return p;
 }
 
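Editor's note on the pagelist.c hunk above: kmem_cache_zalloc() is kmem_cache_alloc() plus guaranteed zeroing of the returned object, so the open-coded memset() and its surrounding braces can go. A before/after sketch using the same cache and flags as the patch:

	/* before: two steps, easy to forget the memset at new call sites */
	p = kmem_cache_alloc(nfs_page_cachep, GFP_KERNEL);
	if (p)
		memset(p, 0, sizeof(*p));

	/* after: one step, zeroing guaranteed by the allocator */
	p = kmem_cache_zalloc(nfs_page_cachep, GFP_KERNEL);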
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index db773428f95f..bc4089769735 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -177,105 +177,149 @@ EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);
  * pNFS client layout cache
  */
 
+/* Need to hold i_lock if caller does not already hold reference */
+void
+get_layout_hdr(struct pnfs_layout_hdr *lo)
+{
+	atomic_inc(&lo->plh_refcount);
+}
+
 static void
-get_layout_hdr_locked(struct pnfs_layout_hdr *lo)
+destroy_layout_hdr(struct pnfs_layout_hdr *lo)
 {
-	assert_spin_locked(&lo->inode->i_lock);
-	lo->refcount++;
+	dprintk("%s: freeing layout cache %p\n", __func__, lo);
+	BUG_ON(!list_empty(&lo->plh_layouts));
+	NFS_I(lo->plh_inode)->layout = NULL;
+	kfree(lo);
 }
 
 static void
 put_layout_hdr_locked(struct pnfs_layout_hdr *lo)
 {
-	assert_spin_locked(&lo->inode->i_lock);
-	BUG_ON(lo->refcount == 0);
-
-	lo->refcount--;
-	if (!lo->refcount) {
-		dprintk("%s: freeing layout cache %p\n", __func__, lo);
-		BUG_ON(!list_empty(&lo->layouts));
-		NFS_I(lo->inode)->layout = NULL;
-		kfree(lo);
-	}
+	if (atomic_dec_and_test(&lo->plh_refcount))
+		destroy_layout_hdr(lo);
 }
 
 void
-put_layout_hdr(struct inode *inode)
+put_layout_hdr(struct pnfs_layout_hdr *lo)
 {
-	spin_lock(&inode->i_lock);
-	put_layout_hdr_locked(NFS_I(inode)->layout);
-	spin_unlock(&inode->i_lock);
+	struct inode *inode = lo->plh_inode;
+
+	if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
+		destroy_layout_hdr(lo);
+		spin_unlock(&inode->i_lock);
+	}
 }
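/* [Editor's note -- annotation, not part of the patch. A sketch of the
 * locking idiom above, following the generic kernel definition of the
 * helper (stated from memory, so treat details as an assumption):
 *
 *	int atomic_dec_and_lock(atomic_t *cnt, spinlock_t *lock)
 *	{
 *		if (atomic_add_unless(cnt, -1, 1))
 *			return 0;		// fast path: count was > 1
 *		spin_lock(lock);		// may be the last reference
 *		if (atomic_dec_and_test(cnt))
 *			return 1;		// caller frees, then unlocks
 *		spin_unlock(lock);
 *		return 0;
 *	}
 *
 * So put_layout_hdr() only pays for i_lock when it might actually
 * destroy the header; every non-final put is a lock-free decrement.
 */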
 
 static void
 init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
 {
-	INIT_LIST_HEAD(&lseg->fi_list);
-	kref_init(&lseg->kref);
-	lseg->layout = lo;
+	INIT_LIST_HEAD(&lseg->pls_list);
+	atomic_set(&lseg->pls_refcount, 1);
+	smp_mb();
+	set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
+	lseg->pls_layout = lo;
 }
 
-/* Called without i_lock held, as the free_lseg call may sleep */
-static void
-destroy_lseg(struct kref *kref)
+static void free_lseg(struct pnfs_layout_segment *lseg)
 {
-	struct pnfs_layout_segment *lseg =
-		container_of(kref, struct pnfs_layout_segment, kref);
-	struct inode *ino = lseg->layout->inode;
+	struct inode *ino = lseg->pls_layout->plh_inode;
 
-	dprintk("--> %s\n", __func__);
 	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
-	/* Matched by get_layout_hdr_locked in pnfs_insert_layout */
-	put_layout_hdr(ino);
+	/* Matched by get_layout_hdr in pnfs_insert_layout */
+	put_layout_hdr(NFS_I(ino)->layout);
 }
 
-static void
-put_lseg(struct pnfs_layout_segment *lseg)
+/* The use of tmp_list is necessary because pnfs_curr_ld->free_lseg
+ * could sleep, so must be called outside of the lock.
+ * Returns 1 if object was removed, otherwise return 0.
+ */
+static int
+put_lseg_locked(struct pnfs_layout_segment *lseg,
+		struct list_head *tmp_list)
+{
+	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
+		atomic_read(&lseg->pls_refcount),
+		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
+	if (atomic_dec_and_test(&lseg->pls_refcount)) {
+		struct inode *ino = lseg->pls_layout->plh_inode;
+
+		BUG_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
+		list_del(&lseg->pls_list);
+		if (list_empty(&lseg->pls_layout->plh_segs)) {
+			struct nfs_client *clp;
+
+			clp = NFS_SERVER(ino)->nfs_client;
+			spin_lock(&clp->cl_lock);
+			/* List does not take a reference, so no need for put here */
+			list_del_init(&lseg->pls_layout->plh_layouts);
+			spin_unlock(&clp->cl_lock);
+			clear_bit(NFS_LAYOUT_BULK_RECALL, &lseg->pls_layout->plh_flags);
+		}
+		rpc_wake_up(&NFS_SERVER(ino)->roc_rpcwaitq);
+		list_add(&lseg->pls_list, tmp_list);
+		return 1;
+	}
+	return 0;
+}
+
+static bool
+should_free_lseg(u32 lseg_iomode, u32 recall_iomode)
 {
-	if (!lseg)
-		return;
+	return (recall_iomode == IOMODE_ANY ||
+		lseg_iomode == recall_iomode);
+}
 
-	dprintk("%s: lseg %p ref %d\n", __func__, lseg,
-		atomic_read(&lseg->kref.refcount));
-	kref_put(&lseg->kref, destroy_lseg);
+/* Returns 1 if lseg is removed from list, 0 otherwise */
+static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
+			     struct list_head *tmp_list)
+{
+	int rv = 0;
+
+	if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
+		/* Remove the reference keeping the lseg in the
+		 * list. It will now be removed when all
+		 * outstanding io is finished.
+		 */
+		rv = put_lseg_locked(lseg, tmp_list);
+	}
+	return rv;
 }
 
-static void
-pnfs_clear_lseg_list(struct pnfs_layout_hdr *lo, struct list_head *tmp_list)
+/* Returns count of number of matching invalid lsegs remaining in list
+ * after call.
+ */
+int
+mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
+			    struct list_head *tmp_list,
+			    u32 iomode)
 {
 	struct pnfs_layout_segment *lseg, *next;
-	struct nfs_client *clp;
+	int invalid = 0, removed = 0;
 
 	dprintk("%s:Begin lo %p\n", __func__, lo);
 
-	assert_spin_locked(&lo->inode->i_lock);
-	list_for_each_entry_safe(lseg, next, &lo->segs, fi_list) {
-		dprintk("%s: freeing lseg %p\n", __func__, lseg);
-		list_move(&lseg->fi_list, tmp_list);
-	}
-	clp = NFS_SERVER(lo->inode)->nfs_client;
-	spin_lock(&clp->cl_lock);
-	/* List does not take a reference, so no need for put here */
-	list_del_init(&lo->layouts);
-	spin_unlock(&clp->cl_lock);
-	write_seqlock(&lo->seqlock);
-	clear_bit(NFS_LAYOUT_STATEID_SET, &lo->state);
-	write_sequnlock(&lo->seqlock);
-
-	dprintk("%s:Return\n", __func__);
+	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
+		if (should_free_lseg(lseg->pls_range.iomode, iomode)) {
+			dprintk("%s: freeing lseg %p iomode %d "
+				"offset %llu length %llu\n", __func__,
+				lseg, lseg->pls_range.iomode, lseg->pls_range.offset,
+				lseg->pls_range.length);
+			invalid++;
+			removed += mark_lseg_invalid(lseg, tmp_list);
+		}
+	dprintk("%s:Return %i\n", __func__, invalid - removed);
+	return invalid - removed;
 }
 
-static void
-pnfs_free_lseg_list(struct list_head *tmp_list)
+void
+pnfs_free_lseg_list(struct list_head *free_me)
 {
-	struct pnfs_layout_segment *lseg;
+	struct pnfs_layout_segment *lseg, *tmp;
 
-	while (!list_empty(tmp_list)) {
-		lseg = list_entry(tmp_list->next, struct pnfs_layout_segment,
-				  fi_list);
-		dprintk("%s calling put_lseg on %p\n", __func__, lseg);
-		list_del(&lseg->fi_list);
-		put_lseg(lseg);
+	list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
+		list_del(&lseg->pls_list);
+		free_lseg(lseg);
 	}
 }
 
@@ -288,7 +332,8 @@ pnfs_destroy_layout(struct nfs_inode *nfsi)
 	spin_lock(&nfsi->vfs_inode.i_lock);
 	lo = nfsi->layout;
 	if (lo) {
-		pnfs_clear_lseg_list(lo, &tmp_list);
+		set_bit(NFS_LAYOUT_DESTROYED, &nfsi->layout->plh_flags);
+		mark_matching_lsegs_invalid(lo, &tmp_list, IOMODE_ANY);
 		/* Matched by refcount set to 1 in alloc_init_layout_hdr */
 		put_layout_hdr_locked(lo);
 	}
@@ -312,76 +357,80 @@ pnfs_destroy_all_layouts(struct nfs_client *clp)
 
 	while (!list_empty(&tmp_list)) {
 		lo = list_entry(tmp_list.next, struct pnfs_layout_hdr,
-				layouts);
+				plh_layouts);
 		dprintk("%s freeing layout for inode %lu\n", __func__,
-			lo->inode->i_ino);
-		pnfs_destroy_layout(NFS_I(lo->inode));
+			lo->plh_inode->i_ino);
+		pnfs_destroy_layout(NFS_I(lo->plh_inode));
 	}
 }
 
-/* update lo->stateid with new if is more recent
- *
- * lo->stateid could be the open stateid, in which case we just use what given.
- */
-static void
-pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo,
-			const nfs4_stateid *new)
-{
-	nfs4_stateid *old = &lo->stateid;
-	bool overwrite = false;
-
-	write_seqlock(&lo->seqlock);
-	if (!test_bit(NFS_LAYOUT_STATEID_SET, &lo->state) ||
-	    memcmp(old->stateid.other, new->stateid.other, sizeof(new->stateid.other)))
-		overwrite = true;
-	else {
-		u32 oldseq, newseq;
-
-		oldseq = be32_to_cpu(old->stateid.seqid);
-		newseq = be32_to_cpu(new->stateid.seqid);
-		if ((int)(newseq - oldseq) > 0)
-			overwrite = true;
+/* update lo->plh_stateid with new if is more recent */
+void
+pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
+			bool update_barrier)
+{
+	u32 oldseq, newseq;
+
+	oldseq = be32_to_cpu(lo->plh_stateid.stateid.seqid);
+	newseq = be32_to_cpu(new->stateid.seqid);
+	if ((int)(newseq - oldseq) > 0) {
+		memcpy(&lo->plh_stateid, &new->stateid, sizeof(new->stateid));
+		if (update_barrier) {
+			u32 new_barrier = be32_to_cpu(new->stateid.seqid);
+
+			if ((int)(new_barrier - lo->plh_barrier))
+				lo->plh_barrier = new_barrier;
+		} else {
+			/* Because of wraparound, we want to keep the barrier
+			 * "close" to the current seqids. It needs to be
+			 * within 2**31 to count as "behind", so if it
+			 * gets too near that limit, give us a little leeway
+			 * and bring it to within 2**30.
+			 * NOTE - and yes, this is all unsigned arithmetic.
+			 */
+			if (unlikely((newseq - lo->plh_barrier) > (3 << 29)))
+				lo->plh_barrier = newseq - (1 << 30);
+		}
 	}
-	if (overwrite)
-		memcpy(&old->stateid, &new->stateid, sizeof(new->stateid));
-	write_sequnlock(&lo->seqlock);
 }
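/* [Editor's note -- annotation, not part of the patch. The
 * (int)(newseq - oldseq) > 0 test above is serial-number arithmetic in
 * the style of RFC 1982: subtract in unsigned 32-bit space, reinterpret
 * as signed, and "newer" survives wraparound. Worked example: with
 * oldseq = 0xfffffffe and newseq = 0x00000002, newseq - oldseq = 4 and
 * (int)4 > 0, so a seqid that wrapped past zero still counts as more
 * recent. The plh_barrier updates use the same trick, which is why the
 * comment insists the barrier stay within 2**31 of the live seqids.
 */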
 
-static void
-pnfs_layout_from_open_stateid(struct pnfs_layout_hdr *lo,
-			      struct nfs4_state *state)
+/* lget is set to 1 if called from inside send_layoutget call chain */
+static bool
+pnfs_layoutgets_blocked(struct pnfs_layout_hdr *lo, nfs4_stateid *stateid,
+			int lget)
 {
-	int seq;
-
-	dprintk("--> %s\n", __func__);
-	write_seqlock(&lo->seqlock);
-	do {
-		seq = read_seqbegin(&state->seqlock);
-		memcpy(lo->stateid.data, state->stateid.data,
-			sizeof(state->stateid.data));
-	} while (read_seqretry(&state->seqlock, seq));
-	set_bit(NFS_LAYOUT_STATEID_SET, &lo->state);
-	write_sequnlock(&lo->seqlock);
-	dprintk("<-- %s\n", __func__);
+	if ((stateid) &&
+	    (int)(lo->plh_barrier - be32_to_cpu(stateid->stateid.seqid)) >= 0)
+		return true;
+	return lo->plh_block_lgets ||
+		test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
+		(list_empty(&lo->plh_segs) &&
+		 (atomic_read(&lo->plh_outstanding) > lget));
 }
 
-void
-pnfs_get_layout_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
+int
+pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
 			      struct nfs4_state *open_state)
 {
-	int seq;
+	int status = 0;
 
 	dprintk("--> %s\n", __func__);
-	do {
-		seq = read_seqbegin(&lo->seqlock);
-		if (!test_bit(NFS_LAYOUT_STATEID_SET, &lo->state)) {
-			/* This will trigger retry of the read */
-			pnfs_layout_from_open_stateid(lo, open_state);
-		} else
-			memcpy(dst->data, lo->stateid.data,
-				sizeof(lo->stateid.data));
-	} while (read_seqretry(&lo->seqlock, seq));
+	spin_lock(&lo->plh_inode->i_lock);
+	if (pnfs_layoutgets_blocked(lo, NULL, 1)) {
+		status = -EAGAIN;
+	} else if (list_empty(&lo->plh_segs)) {
+		int seq;
+
+		do {
+			seq = read_seqbegin(&open_state->seqlock);
+			memcpy(dst->data, open_state->stateid.data,
+			       sizeof(open_state->stateid.data));
+		} while (read_seqretry(&open_state->seqlock, seq));
+	} else
+		memcpy(dst->data, lo->plh_stateid.data, sizeof(lo->plh_stateid.data));
+	spin_unlock(&lo->plh_inode->i_lock);
 	dprintk("<-- %s\n", __func__);
+	return status;
 }
 
 /*
@@ -395,7 +444,7 @@ send_layoutget(struct pnfs_layout_hdr *lo,
 	       struct nfs_open_context *ctx,
 	       u32 iomode)
 {
-	struct inode *ino = lo->inode;
+	struct inode *ino = lo->plh_inode;
 	struct nfs_server *server = NFS_SERVER(ino);
 	struct nfs4_layoutget *lgp;
 	struct pnfs_layout_segment *lseg = NULL;
@@ -404,10 +453,8 @@ send_layoutget(struct pnfs_layout_hdr *lo,
 
 	BUG_ON(ctx == NULL);
 	lgp = kzalloc(sizeof(*lgp), GFP_KERNEL);
-	if (lgp == NULL) {
-		put_layout_hdr(lo->inode);
+	if (lgp == NULL)
 		return NULL;
-	}
 	lgp->args.minlength = NFS4_MAX_UINT64;
 	lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
 	lgp->args.range.iomode = iomode;
@@ -424,11 +471,88 @@ send_layoutget(struct pnfs_layout_hdr *lo,
424 nfs4_proc_layoutget(lgp); 471 nfs4_proc_layoutget(lgp);
425 if (!lseg) { 472 if (!lseg) {
426 /* remember that LAYOUTGET failed and suspend trying */ 473 /* remember that LAYOUTGET failed and suspend trying */
427 set_bit(lo_fail_bit(iomode), &lo->state); 474 set_bit(lo_fail_bit(iomode), &lo->plh_flags);
428 } 475 }
429 return lseg; 476 return lseg;
430} 477}
431 478
479bool pnfs_roc(struct inode *ino)
480{
481 struct pnfs_layout_hdr *lo;
482 struct pnfs_layout_segment *lseg, *tmp;
483 LIST_HEAD(tmp_list);
484 bool found = false;
485
486 spin_lock(&ino->i_lock);
487 lo = NFS_I(ino)->layout;
488 if (!lo || !test_and_clear_bit(NFS_LAYOUT_ROC, &lo->plh_flags) ||
489 test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
490 goto out_nolayout;
491 list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
492 if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
493 mark_lseg_invalid(lseg, &tmp_list);
494 found = true;
495 }
496 if (!found)
497 goto out_nolayout;
498 lo->plh_block_lgets++;
499 get_layout_hdr(lo); /* matched in pnfs_roc_release */
500 spin_unlock(&ino->i_lock);
501 pnfs_free_lseg_list(&tmp_list);
502 return true;
503
504out_nolayout:
505 spin_unlock(&ino->i_lock);
506 return false;
507}
508
509void pnfs_roc_release(struct inode *ino)
510{
511 struct pnfs_layout_hdr *lo;
512
513 spin_lock(&ino->i_lock);
514 lo = NFS_I(ino)->layout;
515 lo->plh_block_lgets--;
516 put_layout_hdr_locked(lo);
517 spin_unlock(&ino->i_lock);
518}
519
520void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
521{
522 struct pnfs_layout_hdr *lo;
523
524 spin_lock(&ino->i_lock);
525 lo = NFS_I(ino)->layout;
526 if ((int)(barrier - lo->plh_barrier) > 0)
527 lo->plh_barrier = barrier;
528 spin_unlock(&ino->i_lock);
529}
530
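The "(int)(barrier - lo->plh_barrier) > 0" test in pnfs_roc_set_barrier() above is serial-number arithmetic: subtracting in unsigned math and reinterpreting the difference as signed keeps "newer than" comparisons correct across 32-bit seqid wraparound. A small self-contained sketch of the idea (names are illustrative, not from the patch):

	#include <stdbool.h>
	#include <stdint.h>

	/* True if seqid a is logically newer than b, even across wraparound. */
	static bool seqid_after(uint32_t a, uint32_t b)
	{
		return (int32_t)(a - b) > 0;
	}

For instance, seqid_after(2, 0xfffffffe) is true: 2 is four increments past 0xfffffffe, and the signed cast of the unsigned difference (4) is positive.
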
531bool pnfs_roc_drain(struct inode *ino, u32 *barrier)
532{
533 struct nfs_inode *nfsi = NFS_I(ino);
534 struct pnfs_layout_segment *lseg;
535 bool found = false;
536
537 spin_lock(&ino->i_lock);
538 list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list)
539 if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
540 found = true;
541 break;
542 }
543 if (!found) {
544 struct pnfs_layout_hdr *lo = nfsi->layout;
545 u32 current_seqid = be32_to_cpu(lo->plh_stateid.stateid.seqid);
546
547 /* Since close does not return a layout stateid for use as
548 * a barrier, we choose the worst-case barrier.
549 */
550 *barrier = current_seqid + atomic_read(&lo->plh_outstanding);
551 }
552 spin_unlock(&ino->i_lock);
553 return found;
554}
555
432/* 556/*
433 * Compare two layout segments for sorting into layout cache. 557 * Compare two layout segments for sorting into layout cache.
434 * We want to preferentially return RW over RO layouts, so ensure those 558 * We want to preferentially return RW over RO layouts, so ensure those
@@ -450,37 +574,29 @@ pnfs_insert_layout(struct pnfs_layout_hdr *lo,
450 574
451 dprintk("%s:Begin\n", __func__); 575 dprintk("%s:Begin\n", __func__);
452 576
453 assert_spin_locked(&lo->inode->i_lock); 577 assert_spin_locked(&lo->plh_inode->i_lock);
454 if (list_empty(&lo->segs)) { 578 list_for_each_entry(lp, &lo->plh_segs, pls_list) {
455 struct nfs_client *clp = NFS_SERVER(lo->inode)->nfs_client; 579 if (cmp_layout(lp->pls_range.iomode, lseg->pls_range.iomode) > 0)
456
457 spin_lock(&clp->cl_lock);
458 BUG_ON(!list_empty(&lo->layouts));
459 list_add_tail(&lo->layouts, &clp->cl_layouts);
460 spin_unlock(&clp->cl_lock);
461 }
462 list_for_each_entry(lp, &lo->segs, fi_list) {
463 if (cmp_layout(lp->range.iomode, lseg->range.iomode) > 0)
464 continue; 580 continue;
465 list_add_tail(&lseg->fi_list, &lp->fi_list); 581 list_add_tail(&lseg->pls_list, &lp->pls_list);
466 dprintk("%s: inserted lseg %p " 582 dprintk("%s: inserted lseg %p "
467 "iomode %d offset %llu length %llu before " 583 "iomode %d offset %llu length %llu before "
468 "lp %p iomode %d offset %llu length %llu\n", 584 "lp %p iomode %d offset %llu length %llu\n",
469 __func__, lseg, lseg->range.iomode, 585 __func__, lseg, lseg->pls_range.iomode,
470 lseg->range.offset, lseg->range.length, 586 lseg->pls_range.offset, lseg->pls_range.length,
471 lp, lp->range.iomode, lp->range.offset, 587 lp, lp->pls_range.iomode, lp->pls_range.offset,
472 lp->range.length); 588 lp->pls_range.length);
473 found = 1; 589 found = 1;
474 break; 590 break;
475 } 591 }
476 if (!found) { 592 if (!found) {
477 list_add_tail(&lseg->fi_list, &lo->segs); 593 list_add_tail(&lseg->pls_list, &lo->plh_segs);
478 dprintk("%s: inserted lseg %p " 594 dprintk("%s: inserted lseg %p "
479 "iomode %d offset %llu length %llu at tail\n", 595 "iomode %d offset %llu length %llu at tail\n",
480 __func__, lseg, lseg->range.iomode, 596 __func__, lseg, lseg->pls_range.iomode,
481 lseg->range.offset, lseg->range.length); 597 lseg->pls_range.offset, lseg->pls_range.length);
482 } 598 }
483 get_layout_hdr_locked(lo); 599 get_layout_hdr(lo);
484 600
485 dprintk("%s:Return\n", __func__); 601 dprintk("%s:Return\n", __func__);
486} 602}
@@ -493,11 +609,11 @@ alloc_init_layout_hdr(struct inode *ino)
493 lo = kzalloc(sizeof(struct pnfs_layout_hdr), GFP_KERNEL); 609 lo = kzalloc(sizeof(struct pnfs_layout_hdr), GFP_KERNEL);
494 if (!lo) 610 if (!lo)
495 return NULL; 611 return NULL;
496 lo->refcount = 1; 612 atomic_set(&lo->plh_refcount, 1);
497 INIT_LIST_HEAD(&lo->layouts); 613 INIT_LIST_HEAD(&lo->plh_layouts);
498 INIT_LIST_HEAD(&lo->segs); 614 INIT_LIST_HEAD(&lo->plh_segs);
499 seqlock_init(&lo->seqlock); 615 INIT_LIST_HEAD(&lo->plh_bulk_recall);
500 lo->inode = ino; 616 lo->plh_inode = ino;
501 return lo; 617 return lo;
502} 618}
503 619
@@ -510,9 +626,12 @@ pnfs_find_alloc_layout(struct inode *ino)
510 dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout); 626 dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);
511 627
512 assert_spin_locked(&ino->i_lock); 628 assert_spin_locked(&ino->i_lock);
513 if (nfsi->layout) 629 if (nfsi->layout) {
514 return nfsi->layout; 630 if (test_bit(NFS_LAYOUT_DESTROYED, &nfsi->layout->plh_flags))
515 631 return NULL;
632 else
633 return nfsi->layout;
634 }
516 spin_unlock(&ino->i_lock); 635 spin_unlock(&ino->i_lock);
517 new = alloc_init_layout_hdr(ino); 636 new = alloc_init_layout_hdr(ino);
518 spin_lock(&ino->i_lock); 637 spin_lock(&ino->i_lock);
@@ -538,31 +657,32 @@ pnfs_find_alloc_layout(struct inode *ino)
538static int 657static int
539is_matching_lseg(struct pnfs_layout_segment *lseg, u32 iomode) 658is_matching_lseg(struct pnfs_layout_segment *lseg, u32 iomode)
540{ 659{
541 return (iomode != IOMODE_RW || lseg->range.iomode == IOMODE_RW); 660 return (iomode != IOMODE_RW || lseg->pls_range.iomode == IOMODE_RW);
542} 661}
543 662
544/* 663/*
545 * lookup range in layout 664 * lookup range in layout
546 */ 665 */
547static struct pnfs_layout_segment * 666static struct pnfs_layout_segment *
548pnfs_has_layout(struct pnfs_layout_hdr *lo, u32 iomode) 667pnfs_find_lseg(struct pnfs_layout_hdr *lo, u32 iomode)
549{ 668{
550 struct pnfs_layout_segment *lseg, *ret = NULL; 669 struct pnfs_layout_segment *lseg, *ret = NULL;
551 670
552 dprintk("%s:Begin\n", __func__); 671 dprintk("%s:Begin\n", __func__);
553 672
554 assert_spin_locked(&lo->inode->i_lock); 673 assert_spin_locked(&lo->plh_inode->i_lock);
555 list_for_each_entry(lseg, &lo->segs, fi_list) { 674 list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
556 if (is_matching_lseg(lseg, iomode)) { 675 if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
676 is_matching_lseg(lseg, iomode)) {
557 ret = lseg; 677 ret = lseg;
558 break; 678 break;
559 } 679 }
560 if (cmp_layout(iomode, lseg->range.iomode) > 0) 680 if (cmp_layout(iomode, lseg->pls_range.iomode) > 0)
561 break; 681 break;
562 } 682 }
563 683
564 dprintk("%s:Return lseg %p ref %d\n", 684 dprintk("%s:Return lseg %p ref %d\n",
565 __func__, ret, ret ? atomic_read(&ret->kref.refcount) : 0); 685 __func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);
566 return ret; 686 return ret;
567} 687}
568 688
@@ -576,6 +696,7 @@ pnfs_update_layout(struct inode *ino,
576 enum pnfs_iomode iomode) 696 enum pnfs_iomode iomode)
577{ 697{
578 struct nfs_inode *nfsi = NFS_I(ino); 698 struct nfs_inode *nfsi = NFS_I(ino);
699 struct nfs_client *clp = NFS_SERVER(ino)->nfs_client;
579 struct pnfs_layout_hdr *lo; 700 struct pnfs_layout_hdr *lo;
580 struct pnfs_layout_segment *lseg = NULL; 701 struct pnfs_layout_segment *lseg = NULL;
581 702
@@ -588,25 +709,53 @@ pnfs_update_layout(struct inode *ino,
588 goto out_unlock; 709 goto out_unlock;
589 } 710 }
590 711
591 /* Check to see if the layout for the given range already exists */ 712 /* Do we even need to bother with this? */
592 lseg = pnfs_has_layout(lo, iomode); 713 if (test_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state) ||
593 if (lseg) { 714 test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
594 dprintk("%s: Using cached lseg %p for iomode %d)\n", 715 dprintk("%s matches recall, use MDS\n", __func__);
595 __func__, lseg, iomode);
596 goto out_unlock; 716 goto out_unlock;
597 } 717 }
718 /* Check to see if the layout for the given range already exists */
719 lseg = pnfs_find_lseg(lo, iomode);
720 if (lseg)
721 goto out_unlock;
598 722
599 /* if LAYOUTGET already failed once we don't try again */ 723 /* if LAYOUTGET already failed once we don't try again */
600 if (test_bit(lo_fail_bit(iomode), &nfsi->layout->state)) 724 if (test_bit(lo_fail_bit(iomode), &nfsi->layout->plh_flags))
725 goto out_unlock;
726
727 if (pnfs_layoutgets_blocked(lo, NULL, 0))
601 goto out_unlock; 728 goto out_unlock;
729 atomic_inc(&lo->plh_outstanding);
602 730
603 get_layout_hdr_locked(lo); /* Matched in nfs4_layoutget_release */ 731 get_layout_hdr(lo);
732 if (list_empty(&lo->plh_segs)) {
733 /* The lo must be on the clp list if there is any
734 * chance of a CB_LAYOUTRECALL(FILE) coming in.
735 */
736 spin_lock(&clp->cl_lock);
737 BUG_ON(!list_empty(&lo->plh_layouts));
738 list_add_tail(&lo->plh_layouts, &clp->cl_layouts);
739 spin_unlock(&clp->cl_lock);
740 }
604 spin_unlock(&ino->i_lock); 741 spin_unlock(&ino->i_lock);
605 742
606 lseg = send_layoutget(lo, ctx, iomode); 743 lseg = send_layoutget(lo, ctx, iomode);
744 if (!lseg) {
745 spin_lock(&ino->i_lock);
746 if (list_empty(&lo->plh_segs)) {
747 spin_lock(&clp->cl_lock);
748 list_del_init(&lo->plh_layouts);
749 spin_unlock(&clp->cl_lock);
750 clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
751 }
752 spin_unlock(&ino->i_lock);
753 }
754 atomic_dec(&lo->plh_outstanding);
755 put_layout_hdr(lo);
607out: 756out:
608 dprintk("%s end, state 0x%lx lseg %p\n", __func__, 757 dprintk("%s end, state 0x%lx lseg %p\n", __func__,
609 nfsi->layout->state, lseg); 758 nfsi->layout->plh_flags, lseg);
610 return lseg; 759 return lseg;
611out_unlock: 760out_unlock:
612 spin_unlock(&ino->i_lock); 761 spin_unlock(&ino->i_lock);
@@ -619,9 +768,21 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
619 struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout; 768 struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
620 struct nfs4_layoutget_res *res = &lgp->res; 769 struct nfs4_layoutget_res *res = &lgp->res;
621 struct pnfs_layout_segment *lseg; 770 struct pnfs_layout_segment *lseg;
622 struct inode *ino = lo->inode; 771 struct inode *ino = lo->plh_inode;
772 struct nfs_client *clp = NFS_SERVER(ino)->nfs_client;
623 int status = 0; 773 int status = 0;
624 774
775 /* Verify we got what we asked for.
776 * Note that because the xdr parsing only accepts a single
777 * element array, this can fail even if the server is behaving
778 * correctly.
779 */
780 if (lgp->args.range.iomode > res->range.iomode ||
781 res->range.offset != 0 ||
782 res->range.length != NFS4_MAX_UINT64) {
783 status = -EINVAL;
784 goto out;
785 }
625 /* Inject layout blob into I/O device driver */ 786 /* Inject layout blob into I/O device driver */
626 lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res); 787 lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res);
627 if (!lseg || IS_ERR(lseg)) { 788 if (!lseg || IS_ERR(lseg)) {
@@ -635,16 +796,37 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
635 } 796 }
636 797
637 spin_lock(&ino->i_lock); 798 spin_lock(&ino->i_lock);
799 if (test_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state) ||
800 test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
801 dprintk("%s forget reply due to recall\n", __func__);
802 goto out_forget_reply;
803 }
804
805 if (pnfs_layoutgets_blocked(lo, &res->stateid, 1)) {
806 dprintk("%s forget reply due to state\n", __func__);
807 goto out_forget_reply;
808 }
638 init_lseg(lo, lseg); 809 init_lseg(lo, lseg);
639 lseg->range = res->range; 810 lseg->pls_range = res->range;
640 *lgp->lsegpp = lseg; 811 *lgp->lsegpp = lseg;
641 pnfs_insert_layout(lo, lseg); 812 pnfs_insert_layout(lo, lseg);
642 813
814 if (res->return_on_close) {
815 set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
816 set_bit(NFS_LAYOUT_ROC, &lo->plh_flags);
817 }
818
643 /* Done processing layoutget. Set the layout stateid */ 819 /* Done processing layoutget. Set the layout stateid */
644 pnfs_set_layout_stateid(lo, &res->stateid); 820 pnfs_set_layout_stateid(lo, &res->stateid, false);
645 spin_unlock(&ino->i_lock); 821 spin_unlock(&ino->i_lock);
646out: 822out:
647 return status; 823 return status;
824
825out_forget_reply:
826 spin_unlock(&ino->i_lock);
827 lseg->pls_layout = lo;
828 NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
829 goto out;
648} 830}
649 831
650/* 832/*
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index e12367d50489..e2612ea0cbed 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -30,11 +30,17 @@
30#ifndef FS_NFS_PNFS_H 30#ifndef FS_NFS_PNFS_H
31#define FS_NFS_PNFS_H 31#define FS_NFS_PNFS_H
32 32
33enum {
34 NFS_LSEG_VALID = 0, /* cleared when lseg is recalled/returned */
35 NFS_LSEG_ROC, /* roc bit received from server */
36};
37
33struct pnfs_layout_segment { 38struct pnfs_layout_segment {
34 struct list_head fi_list; 39 struct list_head pls_list;
35 struct pnfs_layout_range range; 40 struct pnfs_layout_range pls_range;
36 struct kref kref; 41 atomic_t pls_refcount;
37 struct pnfs_layout_hdr *layout; 42 unsigned long pls_flags;
43 struct pnfs_layout_hdr *pls_layout;
38}; 44};
39 45
40#ifdef CONFIG_NFS_V4_1 46#ifdef CONFIG_NFS_V4_1
@@ -44,7 +50,9 @@ struct pnfs_layout_segment {
44enum { 50enum {
45 NFS_LAYOUT_RO_FAILED = 0, /* get ro layout failed stop trying */ 51 NFS_LAYOUT_RO_FAILED = 0, /* get ro layout failed stop trying */
46 NFS_LAYOUT_RW_FAILED, /* get rw layout failed stop trying */ 52 NFS_LAYOUT_RW_FAILED, /* get rw layout failed stop trying */
47 NFS_LAYOUT_STATEID_SET, /* have a valid layout stateid */ 53 NFS_LAYOUT_BULK_RECALL, /* bulk recall affecting layout */
54 NFS_LAYOUT_ROC, /* some lseg had roc bit set */
55 NFS_LAYOUT_DESTROYED, /* no new use of layout allowed */
48}; 56};
49 57
50/* Per-layout driver specific registration structure */ 58/* Per-layout driver specific registration structure */
@@ -60,13 +68,16 @@ struct pnfs_layoutdriver_type {
60}; 68};
61 69
62struct pnfs_layout_hdr { 70struct pnfs_layout_hdr {
63 unsigned long refcount; 71 atomic_t plh_refcount;
64 struct list_head layouts; /* other client layouts */ 72 struct list_head plh_layouts; /* other client layouts */
65 struct list_head segs; /* layout segments list */ 73 struct list_head plh_bulk_recall; /* clnt list of bulk recalls */
66 seqlock_t seqlock; /* Protects the stateid */ 74 struct list_head plh_segs; /* layout segments list */
67 nfs4_stateid stateid; 75 nfs4_stateid plh_stateid;
68 unsigned long state; 76 atomic_t plh_outstanding; /* number of RPCs out */
69 struct inode *inode; 77 unsigned long plh_block_lgets; /* block LAYOUTGET if >0 */
78 u32 plh_barrier; /* ignore lower seqids */
79 unsigned long plh_flags;
80 struct inode *plh_inode;
70}; 81};
71 82
72struct pnfs_device { 83struct pnfs_device {
@@ -134,17 +145,30 @@ extern int nfs4_proc_getdeviceinfo(struct nfs_server *server,
134extern int nfs4_proc_layoutget(struct nfs4_layoutget *lgp); 145extern int nfs4_proc_layoutget(struct nfs4_layoutget *lgp);
135 146
136/* pnfs.c */ 147/* pnfs.c */
148void get_layout_hdr(struct pnfs_layout_hdr *lo);
137struct pnfs_layout_segment * 149struct pnfs_layout_segment *
138pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx, 150pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx,
139 enum pnfs_iomode access_type); 151 enum pnfs_iomode access_type);
140void set_pnfs_layoutdriver(struct nfs_server *, u32 id); 152void set_pnfs_layoutdriver(struct nfs_server *, u32 id);
141void unset_pnfs_layoutdriver(struct nfs_server *); 153void unset_pnfs_layoutdriver(struct nfs_server *);
142int pnfs_layout_process(struct nfs4_layoutget *lgp); 154int pnfs_layout_process(struct nfs4_layoutget *lgp);
155void pnfs_free_lseg_list(struct list_head *tmp_list);
143void pnfs_destroy_layout(struct nfs_inode *); 156void pnfs_destroy_layout(struct nfs_inode *);
144void pnfs_destroy_all_layouts(struct nfs_client *); 157void pnfs_destroy_all_layouts(struct nfs_client *);
145void put_layout_hdr(struct inode *inode); 158void put_layout_hdr(struct pnfs_layout_hdr *lo);
146void pnfs_get_layout_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo, 159void pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo,
147 struct nfs4_state *open_state); 160 const nfs4_stateid *new,
161 bool update_barrier);
162int pnfs_choose_layoutget_stateid(nfs4_stateid *dst,
163 struct pnfs_layout_hdr *lo,
164 struct nfs4_state *open_state);
165int mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
166 struct list_head *tmp_list,
167 u32 iomode);
168bool pnfs_roc(struct inode *ino);
169void pnfs_roc_release(struct inode *ino);
170void pnfs_roc_set_barrier(struct inode *ino, u32 barrier);
171bool pnfs_roc_drain(struct inode *ino, u32 *barrier);
148 172
149 173
150static inline int lo_fail_bit(u32 iomode) 174static inline int lo_fail_bit(u32 iomode)
@@ -176,6 +200,28 @@ pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx,
176 return NULL; 200 return NULL;
177} 201}
178 202
203static inline bool
204pnfs_roc(struct inode *ino)
205{
206 return false;
207}
208
209static inline void
210pnfs_roc_release(struct inode *ino)
211{
212}
213
214static inline void
215pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
216{
217}
218
219static inline bool
220pnfs_roc_drain(struct inode *ino, u32 *barrier)
221{
222 return false;
223}
224
179static inline void set_pnfs_layoutdriver(struct nfs_server *s, u32 id) 225static inline void set_pnfs_layoutdriver(struct nfs_server *s, u32 id)
180{ 226{
181} 227}
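These empty inlines follow the usual kernel pattern for config-gated interfaces: real prototypes under CONFIG_NFS_V4_1, no-op static inline fallbacks otherwise, so call sites compile unconditionally and the dead branches fold away. A generic sketch of the pattern, with an invented feature name:

	/* sketch of a config-gated API header; CONFIG_EXAMPLE_FEATURE is invented */
	#include <linux/types.h>

	struct inode;

	#ifdef CONFIG_EXAMPLE_FEATURE
	bool example_feature_active(struct inode *inode);
	void example_feature_release(struct inode *inode);
	#else
	static inline bool example_feature_active(struct inode *inode)
	{
		return false;	/* feature compiled out: always inactive */
	}

	static inline void example_feature_release(struct inode *inode)
	{
	}
	#endif /* CONFIG_EXAMPLE_FEATURE */
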
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index 58e7f84fc1fd..77d5e21c4ad6 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -458,7 +458,7 @@ nfs_proc_symlink(struct inode *dir, struct dentry *dentry, struct page *page,
458 fattr = nfs_alloc_fattr(); 458 fattr = nfs_alloc_fattr();
459 status = -ENOMEM; 459 status = -ENOMEM;
460 if (fh == NULL || fattr == NULL) 460 if (fh == NULL || fattr == NULL)
461 goto out; 461 goto out_free;
462 462
463 status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0); 463 status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
464 nfs_mark_for_revalidate(dir); 464 nfs_mark_for_revalidate(dir);
@@ -471,6 +471,7 @@ nfs_proc_symlink(struct inode *dir, struct dentry *dentry, struct page *page,
471 if (status == 0) 471 if (status == 0)
472 status = nfs_instantiate(dentry, fh, fattr); 472 status = nfs_instantiate(dentry, fh, fattr);
473 473
474out_free:
474 nfs_free_fattr(fattr); 475 nfs_free_fattr(fattr);
475 nfs_free_fhandle(fh); 476 nfs_free_fhandle(fh);
476out: 477out:
@@ -731,7 +732,7 @@ const struct nfs_rpc_ops nfs_v2_clientops = {
731 .statfs = nfs_proc_statfs, 732 .statfs = nfs_proc_statfs,
732 .fsinfo = nfs_proc_fsinfo, 733 .fsinfo = nfs_proc_fsinfo,
733 .pathconf = nfs_proc_pathconf, 734 .pathconf = nfs_proc_pathconf,
734 .decode_dirent = nfs_decode_dirent, 735 .decode_dirent = nfs2_decode_dirent,
735 .read_setup = nfs_proc_read_setup, 736 .read_setup = nfs_proc_read_setup,
736 .read_done = nfs_read_done, 737 .read_done = nfs_read_done,
737 .write_setup = nfs_proc_write_setup, 738 .write_setup = nfs_proc_write_setup,
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 4100630c9a5b..0f9ea73e7789 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -598,7 +598,9 @@ static void nfs_show_mountd_options(struct seq_file *m, struct nfs_server *nfss,
598 598
599 if (nfss->mountd_version || showdefaults) 599 if (nfss->mountd_version || showdefaults)
600 seq_printf(m, ",mountvers=%u", nfss->mountd_version); 600 seq_printf(m, ",mountvers=%u", nfss->mountd_version);
601 if (nfss->mountd_port || showdefaults) 601 if ((nfss->mountd_port &&
602 nfss->mountd_port != (unsigned short)NFS_UNSPEC_PORT) ||
603 showdefaults)
602 seq_printf(m, ",mountport=%u", nfss->mountd_port); 604 seq_printf(m, ",mountport=%u", nfss->mountd_port);
603 605
604 nfs_show_mountd_netid(m, nfss, showdefaults); 606 nfs_show_mountd_netid(m, nfss, showdefaults);
@@ -2494,7 +2496,13 @@ static void nfs4_clone_super(struct super_block *sb,
2494 sb->s_maxbytes = old_sb->s_maxbytes; 2496 sb->s_maxbytes = old_sb->s_maxbytes;
2495 sb->s_time_gran = 1; 2497 sb->s_time_gran = 1;
2496 sb->s_op = old_sb->s_op; 2498 sb->s_op = old_sb->s_op;
2497 nfs_initialise_sb(sb); 2499 /*
2500 * The VFS shouldn't apply the umask to mode bits. We will do
2501 * so ourselves when necessary.
2502 */
2503 sb->s_flags |= MS_POSIXACL;
2504 sb->s_xattr = old_sb->s_xattr;
2505 nfs_initialise_sb(sb);
2498} 2506}
2499 2507
2500/* 2508/*
@@ -2504,6 +2512,12 @@ static void nfs4_fill_super(struct super_block *sb)
2504{ 2512{
2505 sb->s_time_gran = 1; 2513 sb->s_time_gran = 1;
2506 sb->s_op = &nfs4_sops; 2514 sb->s_op = &nfs4_sops;
2515 /*
2516 * The VFS shouldn't apply the umask to mode bits. We will do
2517 * so ourselves when necessary.
2518 */
2519 sb->s_flags |= MS_POSIXACL;
2520 sb->s_xattr = nfs4_xattr_handlers;
2507 nfs_initialise_sb(sb); 2521 nfs_initialise_sb(sb);
2508} 2522}
2509 2523
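Setting MS_POSIXACL matters here because the VFS applies the process umask to newly created inodes only on superblocks without that flag; an ACL-capable filesystem is expected to apply the umask itself when no default ACL overrides it. The VFS-side check being bypassed looks roughly like this (a sketch of contemporary fs/namei.c behaviour, not code from this patch):

	#include <linux/fs.h>

	/* sketch: how the VFS decides whether to mask mode bits itself */
	static umode_t example_apply_umask(const struct inode *dir, umode_t mode)
	{
		if (!IS_POSIXACL(dir))		/* sb lacks MS_POSIXACL */
			mode &= ~current_umask();
		return mode;
	}

With the flag set, the NFSv4 client can defer to the server's ACL inheritance rules instead of clobbering mode bits locally.
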
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
index 8fe9eb47a97f..e313a51acdd1 100644
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
@@ -429,7 +429,7 @@ nfs_async_rename(struct inode *old_dir, struct inode *new_dir,
429 data = kzalloc(sizeof(*data), GFP_KERNEL); 429 data = kzalloc(sizeof(*data), GFP_KERNEL);
430 if (data == NULL) 430 if (data == NULL)
431 return ERR_PTR(-ENOMEM); 431 return ERR_PTR(-ENOMEM);
432 task_setup_data.callback_data = data, 432 task_setup_data.callback_data = data;
433 433
434 data->cred = rpc_lookup_cred(); 434 data->cred = rpc_lookup_cred();
435 if (IS_ERR(data->cred)) { 435 if (IS_ERR(data->cred)) {
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 143da2eecd7b..21a63da305ff 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -50,11 +50,6 @@ enum {
50 NFSPROC4_CLNT_CB_SEQUENCE, 50 NFSPROC4_CLNT_CB_SEQUENCE,
51}; 51};
52 52
53enum nfs_cb_opnum4 {
54 OP_CB_RECALL = 4,
55 OP_CB_SEQUENCE = 11,
56};
57
58#define NFS4_MAXTAGLEN 20 53#define NFS4_MAXTAGLEN 20
59 54
60#define NFS4_enc_cb_null_sz 0 55#define NFS4_enc_cb_null_sz 0
@@ -79,61 +74,6 @@ enum nfs_cb_opnum4 {
79 cb_sequence_dec_sz + \ 74 cb_sequence_dec_sz + \
80 op_dec_sz) 75 op_dec_sz)
81 76
82/*
83* Generic encode routines from fs/nfs/nfs4xdr.c
84*/
85static inline __be32 *
86xdr_writemem(__be32 *p, const void *ptr, int nbytes)
87{
88 int tmp = XDR_QUADLEN(nbytes);
89 if (!tmp)
90 return p;
91 p[tmp-1] = 0;
92 memcpy(p, ptr, nbytes);
93 return p + tmp;
94}
95
96#define WRITE32(n) *p++ = htonl(n)
97#define WRITEMEM(ptr,nbytes) do { \
98 p = xdr_writemem(p, ptr, nbytes); \
99} while (0)
100#define RESERVE_SPACE(nbytes) do { \
101 p = xdr_reserve_space(xdr, nbytes); \
102 if (!p) dprintk("NFSD: RESERVE_SPACE(%d) failed in function %s\n", (int) (nbytes), __func__); \
103 BUG_ON(!p); \
104} while (0)
105
106/*
107 * Generic decode routines from fs/nfs/nfs4xdr.c
108 */
109#define DECODE_TAIL \
110 status = 0; \
111out: \
112 return status; \
113xdr_error: \
114 dprintk("NFSD: xdr error! (%s:%d)\n", __FILE__, __LINE__); \
115 status = -EIO; \
116 goto out
117
118#define READ32(x) (x) = ntohl(*p++)
119#define READ64(x) do { \
120 (x) = (u64)ntohl(*p++) << 32; \
121 (x) |= ntohl(*p++); \
122} while (0)
123#define READTIME(x) do { \
124 p++; \
125 (x.tv_sec) = ntohl(*p++); \
126 (x.tv_nsec) = ntohl(*p++); \
127} while (0)
128#define READ_BUF(nbytes) do { \
129 p = xdr_inline_decode(xdr, nbytes); \
130 if (!p) { \
131 dprintk("NFSD: %s: reply buffer overflowed in line %d.\n", \
132 __func__, __LINE__); \
133 return -EIO; \
134 } \
135} while (0)
136
137struct nfs4_cb_compound_hdr { 77struct nfs4_cb_compound_hdr {
138 /* args */ 78 /* args */
139 u32 ident; /* minorversion 0 only */ 79 u32 ident; /* minorversion 0 only */
@@ -144,295 +84,513 @@ struct nfs4_cb_compound_hdr {
144 int status; 84 int status;
145}; 85};
146 86
147static struct {
148int stat;
149int errno;
150} nfs_cb_errtbl[] = {
151	{ NFS4_OK, 0 },
152	{ NFS4ERR_PERM, EPERM },
153	{ NFS4ERR_NOENT, ENOENT },
154	{ NFS4ERR_IO, EIO },
155	{ NFS4ERR_NXIO, ENXIO },
156	{ NFS4ERR_ACCESS, EACCES },
157	{ NFS4ERR_EXIST, EEXIST },
158	{ NFS4ERR_XDEV, EXDEV },
159	{ NFS4ERR_NOTDIR, ENOTDIR },
160	{ NFS4ERR_ISDIR, EISDIR },
161	{ NFS4ERR_INVAL, EINVAL },
162	{ NFS4ERR_FBIG, EFBIG },
163	{ NFS4ERR_NOSPC, ENOSPC },
164	{ NFS4ERR_ROFS, EROFS },
165	{ NFS4ERR_MLINK, EMLINK },
166	{ NFS4ERR_NAMETOOLONG, ENAMETOOLONG },
167	{ NFS4ERR_NOTEMPTY, ENOTEMPTY },
168	{ NFS4ERR_DQUOT, EDQUOT },
169	{ NFS4ERR_STALE, ESTALE },
170	{ NFS4ERR_BADHANDLE, EBADHANDLE },
171	{ NFS4ERR_BAD_COOKIE, EBADCOOKIE },
172	{ NFS4ERR_NOTSUPP, ENOTSUPP },
173	{ NFS4ERR_TOOSMALL, ETOOSMALL },
174	{ NFS4ERR_SERVERFAULT, ESERVERFAULT },
175	{ NFS4ERR_BADTYPE, EBADTYPE },
176	{ NFS4ERR_LOCKED, EAGAIN },
177	{ NFS4ERR_RESOURCE, EREMOTEIO },
178	{ NFS4ERR_SYMLINK, ELOOP },
179	{ NFS4ERR_OP_ILLEGAL, EOPNOTSUPP },
180	{ NFS4ERR_DEADLOCK, EDEADLK },
181	{ -1, EIO }
182};
183
184static int
185nfs_cb_stat_to_errno(int stat)
186{
187	int i;
188	for (i = 0; nfs_cb_errtbl[i].stat != -1; i++) {
189		if (nfs_cb_errtbl[i].stat == stat)
190			return nfs_cb_errtbl[i].errno;
191	}
192	/* If we cannot translate the error, the recovery routines should
193	 * handle it.
194	 * Note: remaining NFSv4 error codes have values > 10000, so should
195	 * not conflict with native Linux error codes.
196	 */
197	return stat;
198}
199
 87/*
 88 * Handle decode buffer overflows out-of-line.
 89 */
 90static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
 91{
 92	dprintk("NFS: %s prematurely hit the end of our receive buffer. "
 93		"Remaining buffer length is %tu words.\n",
 94		func, xdr->end - xdr->p);
 95}
 96
 97static __be32 *xdr_encode_empty_array(__be32 *p)
 98{
 99	*p++ = xdr_zero;
100	return p;
101}
102
200/* 103/*
201 * XDR encode 104 * Encode/decode NFSv4 CB basic data types
105 *
106 * Basic NFSv4 callback data types are defined in section 15 of RFC
107 * 3530: "Network File System (NFS) version 4 Protocol" and section
108 * 20 of RFC 5661: "Network File System (NFS) Version 4 Minor Version
109 * 1 Protocol"
110 */
111
112/*
113 * nfs_cb_opnum4
114 *
115 * enum nfs_cb_opnum4 {
116 * OP_CB_GETATTR = 3,
117 * ...
118 * };
202 */ 119 */
120enum nfs_cb_opnum4 {
121 OP_CB_GETATTR = 3,
122 OP_CB_RECALL = 4,
123 OP_CB_LAYOUTRECALL = 5,
124 OP_CB_NOTIFY = 6,
125 OP_CB_PUSH_DELEG = 7,
126 OP_CB_RECALL_ANY = 8,
127 OP_CB_RECALLABLE_OBJ_AVAIL = 9,
128 OP_CB_RECALL_SLOT = 10,
129 OP_CB_SEQUENCE = 11,
130 OP_CB_WANTS_CANCELLED = 12,
131 OP_CB_NOTIFY_LOCK = 13,
132 OP_CB_NOTIFY_DEVICEID = 14,
133 OP_CB_ILLEGAL = 10044
134};
203 135
204static void
205encode_stateid(struct xdr_stream *xdr, stateid_t *sid)
206{
207	__be32 *p;
208
209	RESERVE_SPACE(sizeof(stateid_t));
210	WRITE32(sid->si_generation);
211	WRITEMEM(&sid->si_opaque, sizeof(stateid_opaque_t));
212}
136static void encode_nfs_cb_opnum4(struct xdr_stream *xdr, enum nfs_cb_opnum4 op)
137{
138	__be32 *p;
139
140	p = xdr_reserve_space(xdr, 4);
141	*p = cpu_to_be32(op);
142}
213 143
214static void
215encode_cb_compound_hdr(struct xdr_stream *xdr, struct nfs4_cb_compound_hdr *hdr)
216{
217	__be32 * p;
218
219	RESERVE_SPACE(16);
220	WRITE32(0); /* tag length is always 0 */
221	WRITE32(hdr->minorversion);
222	WRITE32(hdr->ident);
223	hdr->nops_p = p;
224	WRITE32(hdr->nops);
225}
144/*
145 * nfs_fh4
146 *
147 * typedef opaque nfs_fh4<NFS4_FHSIZE>;
148 */
149static void encode_nfs_fh4(struct xdr_stream *xdr, const struct knfsd_fh *fh)
150{
151	u32 length = fh->fh_size;
152	__be32 *p;
153
154	BUG_ON(length > NFS4_FHSIZE);
155	p = xdr_reserve_space(xdr, 4 + length);
156	xdr_encode_opaque(p, &fh->fh_base, length);
157}
226 158
227static void encode_cb_nops(struct nfs4_cb_compound_hdr *hdr) 159/*
160 * stateid4
161 *
162 * struct stateid4 {
163 * uint32_t seqid;
164 * opaque other[12];
165 * };
166 */
167static void encode_stateid4(struct xdr_stream *xdr, const stateid_t *sid)
228{ 168{
229 *hdr->nops_p = htonl(hdr->nops); 169 __be32 *p;
170
171 p = xdr_reserve_space(xdr, NFS4_STATEID_SIZE);
172 *p++ = cpu_to_be32(sid->si_generation);
173 xdr_encode_opaque_fixed(p, &sid->si_opaque, NFS4_STATEID_OTHER_SIZE);
230} 174}
231 175
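encode_stateid4() above shows the reserve-then-fill shape used by every encoder in this rewrite: ask xdr_reserve_space() for exactly the bytes the fixed-size XDR object occupies, then store each field in network byte order. A hypothetical encoder for an invented 12-byte object would follow the same outline:

	#include <linux/sunrpc/xdr.h>

	/* Illustrative only: example_obj is not a type from this patch. */
	struct example_obj {
		u32 kind;
		u8 cookie[8];
	};

	static void encode_example_obj(struct xdr_stream *xdr,
				       const struct example_obj *obj)
	{
		__be32 *p;

		/* 4 bytes for kind + 8 opaque bytes; the caller is expected
		 * to have sized the send buffer, as in the code above. */
		p = xdr_reserve_space(xdr, 4 + 8);
		*p++ = cpu_to_be32(obj->kind);
		xdr_encode_opaque_fixed(p, obj->cookie, 8);
	}
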
232static void 176/*
233encode_cb_recall(struct xdr_stream *xdr, struct nfs4_delegation *dp, 177 * sessionid4
234 struct nfs4_cb_compound_hdr *hdr) 178 *
179 * typedef opaque sessionid4[NFS4_SESSIONID_SIZE];
180 */
181static void encode_sessionid4(struct xdr_stream *xdr,
182 const struct nfsd4_session *session)
235{ 183{
236 __be32 *p; 184 __be32 *p;
237 int len = dp->dl_fh.fh_size; 185
238 186 p = xdr_reserve_space(xdr, NFS4_MAX_SESSIONID_LEN);
239 RESERVE_SPACE(4); 187 xdr_encode_opaque_fixed(p, session->se_sessionid.data,
240 WRITE32(OP_CB_RECALL); 188 NFS4_MAX_SESSIONID_LEN);
241 encode_stateid(xdr, &dp->dl_stateid);
242 RESERVE_SPACE(8 + (XDR_QUADLEN(len) << 2));
243 WRITE32(0); /* truncate optimization not implemented */
244 WRITE32(len);
245 WRITEMEM(&dp->dl_fh.fh_base, len);
246 hdr->nops++;
247} 189}
248 190
249static void 191/*
250encode_cb_sequence(struct xdr_stream *xdr, struct nfsd4_callback *cb, 192 * nfsstat4
251 struct nfs4_cb_compound_hdr *hdr) 193 */
252{ 194static const struct {
253 __be32 *p; 195 int stat;
254 struct nfsd4_session *ses = cb->cb_clp->cl_cb_session; 196 int errno;
197} nfs_cb_errtbl[] = {
198 { NFS4_OK, 0 },
199 { NFS4ERR_PERM, -EPERM },
200 { NFS4ERR_NOENT, -ENOENT },
201 { NFS4ERR_IO, -EIO },
202 { NFS4ERR_NXIO, -ENXIO },
203 { NFS4ERR_ACCESS, -EACCES },
204 { NFS4ERR_EXIST, -EEXIST },
205 { NFS4ERR_XDEV, -EXDEV },
206 { NFS4ERR_NOTDIR, -ENOTDIR },
207 { NFS4ERR_ISDIR, -EISDIR },
208 { NFS4ERR_INVAL, -EINVAL },
209 { NFS4ERR_FBIG, -EFBIG },
210 { NFS4ERR_NOSPC, -ENOSPC },
211 { NFS4ERR_ROFS, -EROFS },
212 { NFS4ERR_MLINK, -EMLINK },
213 { NFS4ERR_NAMETOOLONG, -ENAMETOOLONG },
214 { NFS4ERR_NOTEMPTY, -ENOTEMPTY },
215 { NFS4ERR_DQUOT, -EDQUOT },
216 { NFS4ERR_STALE, -ESTALE },
217 { NFS4ERR_BADHANDLE, -EBADHANDLE },
218 { NFS4ERR_BAD_COOKIE, -EBADCOOKIE },
219 { NFS4ERR_NOTSUPP, -ENOTSUPP },
220 { NFS4ERR_TOOSMALL, -ETOOSMALL },
221 { NFS4ERR_SERVERFAULT, -ESERVERFAULT },
222 { NFS4ERR_BADTYPE, -EBADTYPE },
223 { NFS4ERR_LOCKED, -EAGAIN },
224 { NFS4ERR_RESOURCE, -EREMOTEIO },
225 { NFS4ERR_SYMLINK, -ELOOP },
226 { NFS4ERR_OP_ILLEGAL, -EOPNOTSUPP },
227 { NFS4ERR_DEADLOCK, -EDEADLK },
228 { -1, -EIO }
229};
255 230
256 if (hdr->minorversion == 0) 231/*
257 return; 232 * If we cannot translate the error, the recovery routines should
233 * handle it.
234 *
235 * Note: remaining NFSv4 error codes have values > 10000, so should
236 * not conflict with native Linux error codes.
237 */
238static int nfs_cb_stat_to_errno(int status)
239{
240 int i;
258 241
259 RESERVE_SPACE(1 + NFS4_MAX_SESSIONID_LEN + 20); 242 for (i = 0; nfs_cb_errtbl[i].stat != -1; i++) {
243 if (nfs_cb_errtbl[i].stat == status)
244 return nfs_cb_errtbl[i].errno;
245 }
260 246
261 WRITE32(OP_CB_SEQUENCE); 247 dprintk("NFSD: Unrecognized NFS CB status value: %u\n", status);
262 WRITEMEM(ses->se_sessionid.data, NFS4_MAX_SESSIONID_LEN); 248 return -status;
263 WRITE32(ses->se_cb_seq_nr);
264 WRITE32(0); /* slotid, always 0 */
265 WRITE32(0); /* highest slotid always 0 */
266 WRITE32(0); /* cachethis always 0 */
267 WRITE32(0); /* FIXME: support referring_call_lists */
268 hdr->nops++;
269} 249}
270 250
271static int
272nfs4_xdr_enc_cb_null(struct rpc_rqst *req, __be32 *p)
273{
274	struct xdr_stream xdrs, *xdr = &xdrs;
275
276	xdr_init_encode(&xdrs, &req->rq_snd_buf, p);
277	RESERVE_SPACE(0);
278	return 0;
279}
251static int decode_cb_op_status(struct xdr_stream *xdr, enum nfs_opnum4 expected,
252			enum nfsstat4 *status)
253{
254	__be32 *p;
255	u32 op;
256
257	p = xdr_inline_decode(xdr, 4 + 4);
258	if (unlikely(p == NULL))
259		goto out_overflow;
260	op = be32_to_cpup(p++);
261	if (unlikely(op != expected))
262		goto out_unexpected;
263	*status = be32_to_cpup(p);
264	return 0;
265out_overflow:
266	print_overflow_msg(__func__, xdr);
267	return -EIO;
268out_unexpected:
269	dprintk("NFSD: Callback server returned operation %d but "
270		"we issued a request for %d\n", op, expected);
271	return -EIO;
272}
280 273
281static int 274/*
282nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, __be32 *p, 275 * CB_COMPOUND4args
283 struct nfsd4_callback *cb) 276 *
277 * struct CB_COMPOUND4args {
278 * utf8str_cs tag;
279 * uint32_t minorversion;
280 * uint32_t callback_ident;
281 * nfs_cb_argop4 argarray<>;
282 * };
283*/
284static void encode_cb_compound4args(struct xdr_stream *xdr,
285 struct nfs4_cb_compound_hdr *hdr)
284{ 286{
285 struct xdr_stream xdr; 287 __be32 * p;
286 struct nfs4_delegation *args = cb->cb_op;
287 struct nfs4_cb_compound_hdr hdr = {
288 .ident = cb->cb_clp->cl_cb_ident,
289 .minorversion = cb->cb_minorversion,
290 };
291 288
292 xdr_init_encode(&xdr, &req->rq_snd_buf, p); 289 p = xdr_reserve_space(xdr, 4 + 4 + 4 + 4);
293 encode_cb_compound_hdr(&xdr, &hdr); 290 p = xdr_encode_empty_array(p); /* empty tag */
294 encode_cb_sequence(&xdr, cb, &hdr); 291 *p++ = cpu_to_be32(hdr->minorversion);
295 encode_cb_recall(&xdr, args, &hdr); 292 *p++ = cpu_to_be32(hdr->ident);
296 encode_cb_nops(&hdr); 293
294 hdr->nops_p = p;
295 *p = cpu_to_be32(hdr->nops); /* argarray element count */
296}
297
298/*
299 * Update argarray element count
300 */
301static void encode_cb_nops(struct nfs4_cb_compound_hdr *hdr)
302{
303 BUG_ON(hdr->nops > NFS4_MAX_BACK_CHANNEL_OPS);
304 *hdr->nops_p = cpu_to_be32(hdr->nops);
305}
306
307/*
308 * CB_COMPOUND4res
309 *
310 * struct CB_COMPOUND4res {
311 * nfsstat4 status;
312 * utf8str_cs tag;
313 * nfs_cb_resop4 resarray<>;
314 * };
315 */
316static int decode_cb_compound4res(struct xdr_stream *xdr,
317 struct nfs4_cb_compound_hdr *hdr)
318{
319 u32 length;
320 __be32 *p;
321
322 p = xdr_inline_decode(xdr, 4 + 4);
323 if (unlikely(p == NULL))
324 goto out_overflow;
325 hdr->status = be32_to_cpup(p++);
326 /* Ignore the tag */
327 length = be32_to_cpup(p++);
328 p = xdr_inline_decode(xdr, length + 4);
329 if (unlikely(p == NULL))
330 goto out_overflow;
331 hdr->nops = be32_to_cpup(p);
297 return 0; 332 return 0;
333out_overflow:
334 print_overflow_msg(__func__, xdr);
335 return -EIO;
298} 336}
299 337
338/*
339 * CB_RECALL4args
340 *
341 * struct CB_RECALL4args {
342 * stateid4 stateid;
343 * bool truncate;
344 * nfs_fh4 fh;
345 * };
346 */
347static void encode_cb_recall4args(struct xdr_stream *xdr,
348 const struct nfs4_delegation *dp,
349 struct nfs4_cb_compound_hdr *hdr)
350{
351 __be32 *p;
352
353 encode_nfs_cb_opnum4(xdr, OP_CB_RECALL);
354 encode_stateid4(xdr, &dp->dl_stateid);
355
356 p = xdr_reserve_space(xdr, 4);
357 *p++ = xdr_zero; /* truncate */
300 358
301static int 359 encode_nfs_fh4(xdr, &dp->dl_fh);
302decode_cb_compound_hdr(struct xdr_stream *xdr, struct nfs4_cb_compound_hdr *hdr){
303 __be32 *p;
304 u32 taglen;
305 360
306 READ_BUF(8); 361 hdr->nops++;
307 READ32(hdr->status);
308 /* We've got no use for the tag; ignore it: */
309 READ32(taglen);
310 READ_BUF(taglen + 4);
311 p += XDR_QUADLEN(taglen);
312 READ32(hdr->nops);
313 return 0;
314} 362}
315 363
316static int 364/*
317decode_cb_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected) 365 * CB_SEQUENCE4args
366 *
367 * struct CB_SEQUENCE4args {
368 * sessionid4 csa_sessionid;
369 * sequenceid4 csa_sequenceid;
370 * slotid4 csa_slotid;
371 * slotid4 csa_highest_slotid;
372 * bool csa_cachethis;
373 * referring_call_list4 csa_referring_call_lists<>;
374 * };
375 */
376static void encode_cb_sequence4args(struct xdr_stream *xdr,
377 const struct nfsd4_callback *cb,
378 struct nfs4_cb_compound_hdr *hdr)
318{ 379{
380 struct nfsd4_session *session = cb->cb_clp->cl_cb_session;
319 __be32 *p; 381 __be32 *p;
320 u32 op; 382
321 int32_t nfserr; 383 if (hdr->minorversion == 0)
322 384 return;
323 READ_BUF(8); 385
324 READ32(op); 386 encode_nfs_cb_opnum4(xdr, OP_CB_SEQUENCE);
325 if (op != expected) { 387 encode_sessionid4(xdr, session);
326 dprintk("NFSD: decode_cb_op_hdr: Callback server returned " 388
327 " operation %d but we issued a request for %d\n", 389 p = xdr_reserve_space(xdr, 4 + 4 + 4 + 4 + 4);
328 op, expected); 390 *p++ = cpu_to_be32(session->se_cb_seq_nr); /* csa_sequenceid */
329 return -EIO; 391 *p++ = xdr_zero; /* csa_slotid */
330 } 392 *p++ = xdr_zero; /* csa_highest_slotid */
331 READ32(nfserr); 393 *p++ = xdr_zero; /* csa_cachethis */
332 if (nfserr != NFS_OK) 394 xdr_encode_empty_array(p); /* csa_referring_call_lists */
333 return -nfs_cb_stat_to_errno(nfserr); 395
334 return 0; 396 hdr->nops++;
335} 397}
336 398
337/* 399/*
400 * CB_SEQUENCE4resok
401 *
402 * struct CB_SEQUENCE4resok {
403 * sessionid4 csr_sessionid;
404 * sequenceid4 csr_sequenceid;
405 * slotid4 csr_slotid;
406 * slotid4 csr_highest_slotid;
407 * slotid4 csr_target_highest_slotid;
408 * };
409 *
410 * union CB_SEQUENCE4res switch (nfsstat4 csr_status) {
411 * case NFS4_OK:
412 * CB_SEQUENCE4resok csr_resok4;
413 * default:
414 * void;
415 * };
416 *
338 * Our current back channel implementation supports a single backchannel 417 * Our current back channel implementation supports a single backchannel
339 * with a single slot. 418 * with a single slot.
340 */ 419 */
341static int 420static int decode_cb_sequence4resok(struct xdr_stream *xdr,
342decode_cb_sequence(struct xdr_stream *xdr, struct nfsd4_callback *cb, 421 struct nfsd4_callback *cb)
343 struct rpc_rqst *rqstp)
344{ 422{
345 struct nfsd4_session *ses = cb->cb_clp->cl_cb_session; 423 struct nfsd4_session *session = cb->cb_clp->cl_cb_session;
346 struct nfs4_sessionid id; 424 struct nfs4_sessionid id;
347 int status; 425 int status;
348 u32 dummy;
349 __be32 *p; 426 __be32 *p;
427 u32 dummy;
350 428
351 if (cb->cb_minorversion == 0) 429 status = -ESERVERFAULT;
352 return 0;
353
354 status = decode_cb_op_hdr(xdr, OP_CB_SEQUENCE);
355 if (status)
356 return status;
357 430
358 /* 431 /*
359 * If the server returns different values for sessionID, slotID or 432 * If the server returns different values for sessionID, slotID or
360 * sequence number, the server is looney tunes. 433 * sequence number, the server is looney tunes.
361 */ 434 */
362 status = -ESERVERFAULT; 435 p = xdr_inline_decode(xdr, NFS4_MAX_SESSIONID_LEN + 4 + 4);
363 436 if (unlikely(p == NULL))
364 READ_BUF(NFS4_MAX_SESSIONID_LEN + 16); 437 goto out_overflow;
365 memcpy(id.data, p, NFS4_MAX_SESSIONID_LEN); 438 memcpy(id.data, p, NFS4_MAX_SESSIONID_LEN);
366 p += XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN); 439 if (memcmp(id.data, session->se_sessionid.data,
367 if (memcmp(id.data, ses->se_sessionid.data, NFS4_MAX_SESSIONID_LEN)) { 440 NFS4_MAX_SESSIONID_LEN) != 0) {
368 dprintk("%s Invalid session id\n", __func__); 441 dprintk("NFS: %s Invalid session id\n", __func__);
369 goto out; 442 goto out;
370 } 443 }
371 READ32(dummy); 444 p += XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN);
372 if (dummy != ses->se_cb_seq_nr) { 445
373 dprintk("%s Invalid sequence number\n", __func__); 446 dummy = be32_to_cpup(p++);
447 if (dummy != session->se_cb_seq_nr) {
448 dprintk("NFS: %s Invalid sequence number\n", __func__);
374 goto out; 449 goto out;
375 } 450 }
376 READ32(dummy); /* slotid must be 0 */ 451
452 dummy = be32_to_cpup(p++);
377 if (dummy != 0) { 453 if (dummy != 0) {
378 dprintk("%s Invalid slotid\n", __func__); 454 dprintk("NFS: %s Invalid slotid\n", __func__);
379 goto out; 455 goto out;
380 } 456 }
381 /* FIXME: process highest slotid and target highest slotid */ 457
458 /*
459 * FIXME: process highest slotid and target highest slotid
460 */
382 status = 0; 461 status = 0;
383out: 462out:
384 return status; 463 return status;
464out_overflow:
465 print_overflow_msg(__func__, xdr);
466 return -EIO;
385} 467}
386 468
469static int decode_cb_sequence4res(struct xdr_stream *xdr,
470 struct nfsd4_callback *cb)
471{
472 enum nfsstat4 nfserr;
473 int status;
474
475 if (cb->cb_minorversion == 0)
476 return 0;
477
478 status = decode_cb_op_status(xdr, OP_CB_SEQUENCE, &nfserr);
479 if (unlikely(status))
480 goto out;
481 if (unlikely(nfserr != NFS4_OK))
482 goto out_default;
483 status = decode_cb_sequence4resok(xdr, cb);
484out:
485 return status;
486out_default:
487 return nfs_cb_stat_to_errno(nfserr);
488}
387 489
388static int 490/*
389nfs4_xdr_dec_cb_null(struct rpc_rqst *req, __be32 *p) 491 * NFSv4.0 and NFSv4.1 XDR encode functions
492 *
493 * NFSv4.0 callback argument types are defined in section 15 of RFC
494 * 3530: "Network File System (NFS) version 4 Protocol" and section 20
495 * of RFC 5661: "Network File System (NFS) Version 4 Minor Version 1
496 * Protocol".
497 */
498
499/*
500 * NB: Without this zero space reservation, callbacks over krb5p fail
501 */
502static void nfs4_xdr_enc_cb_null(struct rpc_rqst *req, struct xdr_stream *xdr,
503 void *__unused)
504{
505 xdr_reserve_space(xdr, 0);
506}
507
508/*
509 * 20.2. Operation 4: CB_RECALL - Recall a Delegation
510 */
511static void nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, struct xdr_stream *xdr,
512 const struct nfsd4_callback *cb)
513{
514 const struct nfs4_delegation *args = cb->cb_op;
515 struct nfs4_cb_compound_hdr hdr = {
516 .ident = cb->cb_clp->cl_cb_ident,
517 .minorversion = cb->cb_minorversion,
518 };
519
520 encode_cb_compound4args(xdr, &hdr);
521 encode_cb_sequence4args(xdr, cb, &hdr);
522 encode_cb_recall4args(xdr, args, &hdr);
523 encode_cb_nops(&hdr);
524}
525
526
527/*
528 * NFSv4.0 and NFSv4.1 XDR decode functions
529 *
530 * NFSv4.0 callback result types are defined in section 15 of RFC
531 * 3530: "Network File System (NFS) version 4 Protocol" and section 20
532 * of RFC 5661: "Network File System (NFS) Version 4 Minor Version 1
533 * Protocol".
534 */
535
536static int nfs4_xdr_dec_cb_null(struct rpc_rqst *req, struct xdr_stream *xdr,
537 void *__unused)
390{ 538{
391 return 0; 539 return 0;
392} 540}
393 541
394static int 542/*
395nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, __be32 *p, 543 * 20.2. Operation 4: CB_RECALL - Recall a Delegation
396 struct nfsd4_callback *cb) 544 */
545static int nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp,
546 struct xdr_stream *xdr,
547 struct nfsd4_callback *cb)
397{ 548{
398 struct xdr_stream xdr;
399 struct nfs4_cb_compound_hdr hdr; 549 struct nfs4_cb_compound_hdr hdr;
550 enum nfsstat4 nfserr;
400 int status; 551 int status;
401 552
402 xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); 553 status = decode_cb_compound4res(xdr, &hdr);
403 status = decode_cb_compound_hdr(&xdr, &hdr); 554 if (unlikely(status))
404 if (status)
405 goto out; 555 goto out;
406 if (cb) { 556
407 status = decode_cb_sequence(&xdr, cb, rqstp); 557 if (cb != NULL) {
408 if (status) 558 status = decode_cb_sequence4res(xdr, cb);
559 if (unlikely(status))
409 goto out; 560 goto out;
410 } 561 }
411 status = decode_cb_op_hdr(&xdr, OP_CB_RECALL); 562
563 status = decode_cb_op_status(xdr, OP_CB_RECALL, &nfserr);
564 if (unlikely(status))
565 goto out;
566 if (unlikely(nfserr != NFS4_OK))
567 goto out_default;
412out: 568out:
413 return status; 569 return status;
570out_default:
571 return nfs_cb_stat_to_errno(nfserr);
414} 572}
415 573
416/* 574/*
417 * RPC procedure tables 575 * RPC procedure tables
418 */ 576 */
419#define PROC(proc, call, argtype, restype) \ 577#define PROC(proc, call, argtype, restype) \
420[NFSPROC4_CLNT_##proc] = { \ 578[NFSPROC4_CLNT_##proc] = { \
421 .p_proc = NFSPROC4_CB_##call, \ 579 .p_proc = NFSPROC4_CB_##call, \
422 .p_encode = (kxdrproc_t) nfs4_xdr_##argtype, \ 580 .p_encode = (kxdreproc_t)nfs4_xdr_enc_##argtype, \
423 .p_decode = (kxdrproc_t) nfs4_xdr_##restype, \ 581 .p_decode = (kxdrdproc_t)nfs4_xdr_dec_##restype, \
424 .p_arglen = NFS4_##argtype##_sz, \ 582 .p_arglen = NFS4_enc_##argtype##_sz, \
425 .p_replen = NFS4_##restype##_sz, \ 583 .p_replen = NFS4_dec_##restype##_sz, \
426 .p_statidx = NFSPROC4_CB_##call, \ 584 .p_statidx = NFSPROC4_CB_##call, \
427 .p_name = #proc, \ 585 .p_name = #proc, \
428} 586}
429 587
430static struct rpc_procinfo nfs4_cb_procedures[] = { 588static struct rpc_procinfo nfs4_cb_procedures[] = {
431 PROC(CB_NULL, NULL, enc_cb_null, dec_cb_null), 589 PROC(CB_NULL, NULL, cb_null, cb_null),
432 PROC(CB_RECALL, COMPOUND, enc_cb_recall, dec_cb_recall), 590 PROC(CB_RECALL, COMPOUND, cb_recall, cb_recall),
433}; 591};
434 592
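The reworked PROC() macro registers encode and decode routines through the split kxdreproc_t/kxdrdproc_t typedefs rather than the old catch-all kxdrproc_t, so each table entry's cast matches a direction-specific signature. Reduced to a sketch with invented names, a typed dispatch table of that shape looks like:

	/* sketch only: the example_* names are invented for illustration */
	struct rpc_rqst;
	struct xdr_stream;

	typedef void (*example_encode_t)(struct rpc_rqst *req,
					 struct xdr_stream *xdr,
					 const void *obj);
	typedef int (*example_decode_t)(struct rpc_rqst *req,
					struct xdr_stream *xdr,
					void *obj);

	static void example_enc_null(struct rpc_rqst *req,
				     struct xdr_stream *xdr, const void *obj)
	{
	}

	static int example_dec_null(struct rpc_rqst *req,
				    struct xdr_stream *xdr, void *obj)
	{
		return 0;
	}

	static const struct {
		example_encode_t p_encode;
		example_decode_t p_decode;
	} example_procs[] = {
		{ example_enc_null, example_dec_null },	/* no casts needed */
	};
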
435static struct rpc_version nfs_cb_version4 = { 593static struct rpc_version nfs_cb_version4 = {
436/* 594/*
437 * Note on the callback rpc program version number: despite language in rfc 595 * Note on the callback rpc program version number: despite language in rfc
438 * 5661 section 18.36.3 requiring servers to use 4 in this field, the 596 * 5661 section 18.36.3 requiring servers to use 4 in this field, the
@@ -440,29 +598,29 @@ static struct rpc_version nfs_cb_version4 = {
440 * in practice that appears to be what implementations use. The section 598 * in practice that appears to be what implementations use. The section
441 * 18.36.3 language is expected to be fixed in an erratum. 599 * 18.36.3 language is expected to be fixed in an erratum.
442 */ 600 */
443 .number = 1, 601 .number = 1,
444 .nrprocs = ARRAY_SIZE(nfs4_cb_procedures), 602 .nrprocs = ARRAY_SIZE(nfs4_cb_procedures),
445 .procs = nfs4_cb_procedures 603 .procs = nfs4_cb_procedures
446}; 604};
447 605
448static struct rpc_version * nfs_cb_version[] = { 606static struct rpc_version *nfs_cb_version[] = {
449 &nfs_cb_version4, 607 &nfs_cb_version4,
450}; 608};
451 609
452static struct rpc_program cb_program; 610static struct rpc_program cb_program;
453 611
454static struct rpc_stat cb_stats = { 612static struct rpc_stat cb_stats = {
455 .program = &cb_program 613 .program = &cb_program
456}; 614};
457 615
458#define NFS4_CALLBACK 0x40000000 616#define NFS4_CALLBACK 0x40000000
459static struct rpc_program cb_program = { 617static struct rpc_program cb_program = {
460 .name = "nfs4_cb", 618 .name = "nfs4_cb",
461 .number = NFS4_CALLBACK, 619 .number = NFS4_CALLBACK,
462 .nrvers = ARRAY_SIZE(nfs_cb_version), 620 .nrvers = ARRAY_SIZE(nfs_cb_version),
463 .version = nfs_cb_version, 621 .version = nfs_cb_version,
464 .stats = &cb_stats, 622 .stats = &cb_stats,
465 .pipe_dir_name = "/nfsd4_cb", 623 .pipe_dir_name = "/nfsd4_cb",
466}; 624};
467 625
468static int max_cb_time(void) 626static int max_cb_time(void)
diff --git a/fs/ocfs2/Kconfig b/fs/ocfs2/Kconfig
index 0d840669698e..ab152c00cd3a 100644
--- a/fs/ocfs2/Kconfig
+++ b/fs/ocfs2/Kconfig
@@ -51,7 +51,7 @@ config OCFS2_FS_USERSPACE_CLUSTER
51 51
52config OCFS2_FS_STATS 52config OCFS2_FS_STATS
53 bool "OCFS2 statistics" 53 bool "OCFS2 statistics"
54 depends on OCFS2_FS 54 depends on OCFS2_FS && DEBUG_FS
55 default y 55 default y
56 help 56 help
57 This option allows some fs statistics to be captured. Enabling 57 This option allows some fs statistics to be captured. Enabling
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 592fae5007d1..e4984e259cb6 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -565,7 +565,6 @@ static inline int ocfs2_et_sanity_check(struct ocfs2_extent_tree *et)
565 return ret; 565 return ret;
566} 566}
567 567
568static void ocfs2_free_truncate_context(struct ocfs2_truncate_context *tc);
569static int ocfs2_cache_extent_block_free(struct ocfs2_cached_dealloc_ctxt *ctxt, 568static int ocfs2_cache_extent_block_free(struct ocfs2_cached_dealloc_ctxt *ctxt,
570 struct ocfs2_extent_block *eb); 569 struct ocfs2_extent_block *eb);
571static void ocfs2_adjust_rightmost_records(handle_t *handle, 570static void ocfs2_adjust_rightmost_records(handle_t *handle,
@@ -5858,6 +5857,7 @@ int ocfs2_truncate_log_append(struct ocfs2_super *osb,
5858 5857
5859 ocfs2_journal_dirty(handle, tl_bh); 5858 ocfs2_journal_dirty(handle, tl_bh);
5860 5859
5860 osb->truncated_clusters += num_clusters;
5861bail: 5861bail:
5862 mlog_exit(status); 5862 mlog_exit(status);
5863 return status; 5863 return status;
@@ -5929,6 +5929,8 @@ static int ocfs2_replay_truncate_records(struct ocfs2_super *osb,
5929 i--; 5929 i--;
5930 } 5930 }
5931 5931
5932 osb->truncated_clusters = 0;
5933
5932bail: 5934bail:
5933 mlog_exit(status); 5935 mlog_exit(status);
5934 return status; 5936 return status;
@@ -7139,64 +7141,6 @@ bail:
7139} 7141}
7140 7142
7141/* 7143/*
7142 * Expects the inode to already be locked.
7143 */
7144int ocfs2_prepare_truncate(struct ocfs2_super *osb,
7145 struct inode *inode,
7146 struct buffer_head *fe_bh,
7147 struct ocfs2_truncate_context **tc)
7148{
7149 int status;
7150 unsigned int new_i_clusters;
7151 struct ocfs2_dinode *fe;
7152 struct ocfs2_extent_block *eb;
7153 struct buffer_head *last_eb_bh = NULL;
7154
7155 mlog_entry_void();
7156
7157 *tc = NULL;
7158
7159 new_i_clusters = ocfs2_clusters_for_bytes(osb->sb,
7160 i_size_read(inode));
7161 fe = (struct ocfs2_dinode *) fe_bh->b_data;
7162
7163 mlog(0, "fe->i_clusters = %u, new_i_clusters = %u, fe->i_size ="
7164 "%llu\n", le32_to_cpu(fe->i_clusters), new_i_clusters,
7165 (unsigned long long)le64_to_cpu(fe->i_size));
7166
7167 *tc = kzalloc(sizeof(struct ocfs2_truncate_context), GFP_KERNEL);
7168 if (!(*tc)) {
7169 status = -ENOMEM;
7170 mlog_errno(status);
7171 goto bail;
7172 }
7173 ocfs2_init_dealloc_ctxt(&(*tc)->tc_dealloc);
7174
7175 if (fe->id2.i_list.l_tree_depth) {
7176 status = ocfs2_read_extent_block(INODE_CACHE(inode),
7177 le64_to_cpu(fe->i_last_eb_blk),
7178 &last_eb_bh);
7179 if (status < 0) {
7180 mlog_errno(status);
7181 goto bail;
7182 }
7183 eb = (struct ocfs2_extent_block *) last_eb_bh->b_data;
7184 }
7185
7186 (*tc)->tc_last_eb_bh = last_eb_bh;
7187
7188 status = 0;
7189bail:
7190 if (status < 0) {
7191 if (*tc)
7192 ocfs2_free_truncate_context(*tc);
7193 *tc = NULL;
7194 }
7195 mlog_exit_void();
7196 return status;
7197}
7198
7199/*
7200 * 'start' is inclusive, 'end' is not. 7144 * 'start' is inclusive, 'end' is not.
7201 */ 7145 */
7202int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh, 7146int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh,
@@ -7270,18 +7214,3 @@ out_commit:
7270out: 7214out:
7271 return ret; 7215 return ret;
7272} 7216}
7273
7274static void ocfs2_free_truncate_context(struct ocfs2_truncate_context *tc)
7275{
7276 /*
7277 * The caller is responsible for completing deallocation
7278 * before freeing the context.
7279 */
7280 if (tc->tc_dealloc.c_first_suballocator != NULL)
7281 mlog(ML_NOTICE,
7282 "Truncate completion has non-empty dealloc context\n");
7283
7284 brelse(tc->tc_last_eb_bh);
7285
7286 kfree(tc);
7287}
diff --git a/fs/ocfs2/alloc.h b/fs/ocfs2/alloc.h
index 55762b554b99..3bd08a03251c 100644
--- a/fs/ocfs2/alloc.h
+++ b/fs/ocfs2/alloc.h
@@ -228,10 +228,6 @@ struct ocfs2_truncate_context {
228 228
229int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle, 229int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle,
230 u64 range_start, u64 range_end); 230 u64 range_start, u64 range_end);
231int ocfs2_prepare_truncate(struct ocfs2_super *osb,
232 struct inode *inode,
233 struct buffer_head *fe_bh,
234 struct ocfs2_truncate_context **tc);
235int ocfs2_commit_truncate(struct ocfs2_super *osb, 231int ocfs2_commit_truncate(struct ocfs2_super *osb,
236 struct inode *inode, 232 struct inode *inode,
237 struct buffer_head *di_bh); 233 struct buffer_head *di_bh);
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 0d7c5540ad66..1fbb0e20131b 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -1630,6 +1630,43 @@ static int ocfs2_zero_tail(struct inode *inode, struct buffer_head *di_bh,
1630 return ret; 1630 return ret;
1631} 1631}
1632 1632
1633/*
1634 * Try to flush the truncate log if doing so can free enough clusters.
1635 * Return value: "< 0" means error, "0" means no space was freed, and "1"
1636 * means enough space was freed for the caller to retry the allocation.
1637 */
1638static int ocfs2_try_to_free_truncate_log(struct ocfs2_super *osb,
1639 unsigned int needed)
1640{
1641 tid_t target;
1642 int ret = 0;
1643 unsigned int truncated_clusters;
1644
1645 mutex_lock(&osb->osb_tl_inode->i_mutex);
1646 truncated_clusters = osb->truncated_clusters;
1647 mutex_unlock(&osb->osb_tl_inode->i_mutex);
1648
1649 /*
1650 * Check whether we can succeed in allocating if we free
1651 * the truncate log.
1652 */
1653 if (truncated_clusters < needed)
1654 goto out;
1655
1656 ret = ocfs2_flush_truncate_log(osb);
1657 if (ret) {
1658 mlog_errno(ret);
1659 goto out;
1660 }
1661
1662 if (jbd2_journal_start_commit(osb->journal->j_journal, &target)) {
1663 jbd2_log_wait_commit(osb->journal->j_journal, target);
1664 ret = 1;
1665 }
1666out:
1667 return ret;
1668}
1669
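ocfs2_write_begin_nolock() below consumes this helper in a retry-once shape: on -ENOSPC it clears a try_free flag, asks the truncate log for clusters, and jumps back to the top at most one time. The skeleton of that control flow, with hypothetical names:

	#include <errno.h>

	struct example_ctx;
	int example_allocate(struct example_ctx *ctx, unsigned int need);
	int example_reclaim(struct example_ctx *ctx, unsigned int need);

	/* sketch of the retry-once-after-reclaim pattern used below */
	static int example_write_begin(struct example_ctx *ctx, unsigned int need)
	{
		int ret, try_free = 1;

	try_again:
		ret = example_allocate(ctx, need);
		if (ret == -ENOSPC && try_free) {
			try_free = 0;			/* retry at most once */
			if (example_reclaim(ctx, need) == 1)
				goto try_again;		/* enough was freed */
		}
		return ret;
	}

Bounding the retry to a single pass keeps the write path from livelocking when the truncate log has nothing left to give.
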
1633int ocfs2_write_begin_nolock(struct file *filp, 1670int ocfs2_write_begin_nolock(struct file *filp,
1634 struct address_space *mapping, 1671 struct address_space *mapping,
1635 loff_t pos, unsigned len, unsigned flags, 1672 loff_t pos, unsigned len, unsigned flags,
@@ -1637,7 +1674,7 @@ int ocfs2_write_begin_nolock(struct file *filp,
1637 struct buffer_head *di_bh, struct page *mmap_page) 1674 struct buffer_head *di_bh, struct page *mmap_page)
1638{ 1675{
1639 int ret, cluster_of_pages, credits = OCFS2_INODE_UPDATE_CREDITS; 1676 int ret, cluster_of_pages, credits = OCFS2_INODE_UPDATE_CREDITS;
1640 unsigned int clusters_to_alloc, extents_to_split; 1677 unsigned int clusters_to_alloc, extents_to_split, clusters_need = 0;
1641 struct ocfs2_write_ctxt *wc; 1678 struct ocfs2_write_ctxt *wc;
1642 struct inode *inode = mapping->host; 1679 struct inode *inode = mapping->host;
1643 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 1680 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
@@ -1646,7 +1683,9 @@ int ocfs2_write_begin_nolock(struct file *filp,
1646 struct ocfs2_alloc_context *meta_ac = NULL; 1683 struct ocfs2_alloc_context *meta_ac = NULL;
1647 handle_t *handle; 1684 handle_t *handle;
1648 struct ocfs2_extent_tree et; 1685 struct ocfs2_extent_tree et;
1686 int try_free = 1, ret1;
1649 1687
1688try_again:
1650 ret = ocfs2_alloc_write_ctxt(&wc, osb, pos, len, di_bh); 1689 ret = ocfs2_alloc_write_ctxt(&wc, osb, pos, len, di_bh);
1651 if (ret) { 1690 if (ret) {
1652 mlog_errno(ret); 1691 mlog_errno(ret);
@@ -1681,6 +1720,7 @@ int ocfs2_write_begin_nolock(struct file *filp,
1681 mlog_errno(ret); 1720 mlog_errno(ret);
1682 goto out; 1721 goto out;
1683 } else if (ret == 1) { 1722 } else if (ret == 1) {
1723 clusters_need = wc->w_clen;
1684 ret = ocfs2_refcount_cow(inode, filp, di_bh, 1724 ret = ocfs2_refcount_cow(inode, filp, di_bh,
1685 wc->w_cpos, wc->w_clen, UINT_MAX); 1725 wc->w_cpos, wc->w_clen, UINT_MAX);
1686 if (ret) { 1726 if (ret) {
@@ -1695,6 +1735,7 @@ int ocfs2_write_begin_nolock(struct file *filp,
1695 mlog_errno(ret); 1735 mlog_errno(ret);
1696 goto out; 1736 goto out;
1697 } 1737 }
1738 clusters_need += clusters_to_alloc;
1698 1739
1699 di = (struct ocfs2_dinode *)wc->w_di_bh->b_data; 1740 di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
1700 1741
@@ -1817,6 +1858,22 @@ out:
1817 ocfs2_free_alloc_context(data_ac); 1858 ocfs2_free_alloc_context(data_ac);
1818 if (meta_ac) 1859 if (meta_ac)
1819 ocfs2_free_alloc_context(meta_ac); 1860 ocfs2_free_alloc_context(meta_ac);
1861
1862 if (ret == -ENOSPC && try_free) {
1863 /*
1864 * Try to free some truncate log so that we can have enough
1865 * clusters to allocate.
1866 */
1867 try_free = 0;
1868
1869 ret1 = ocfs2_try_to_free_truncate_log(osb, clusters_need);
1870 if (ret1 == 1)
1871 goto try_again;
1872
1873 if (ret1 < 0)
1874 mlog_errno(ret1);
1875 }
1876
1820 return ret; 1877 return ret;
1821} 1878}
1822 1879
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 9e3d45bcb5fd..a6cc05302e9f 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -82,6 +82,7 @@ static unsigned long o2hb_failed_region_bitmap[BITS_TO_LONGS(O2NM_MAX_REGIONS)];
82#define O2HB_DB_TYPE_REGION_LIVENODES 4 82#define O2HB_DB_TYPE_REGION_LIVENODES 4
83#define O2HB_DB_TYPE_REGION_NUMBER 5 83#define O2HB_DB_TYPE_REGION_NUMBER 5
84#define O2HB_DB_TYPE_REGION_ELAPSED_TIME 6 84#define O2HB_DB_TYPE_REGION_ELAPSED_TIME 6
85#define O2HB_DB_TYPE_REGION_PINNED 7
85struct o2hb_debug_buf { 86struct o2hb_debug_buf {
86 int db_type; 87 int db_type;
87 int db_size; 88 int db_size;
@@ -101,6 +102,7 @@ static struct o2hb_debug_buf *o2hb_db_failedregions;
101#define O2HB_DEBUG_FAILEDREGIONS "failed_regions" 102#define O2HB_DEBUG_FAILEDREGIONS "failed_regions"
102#define O2HB_DEBUG_REGION_NUMBER "num" 103#define O2HB_DEBUG_REGION_NUMBER "num"
103#define O2HB_DEBUG_REGION_ELAPSED_TIME "elapsed_time_in_ms" 104#define O2HB_DEBUG_REGION_ELAPSED_TIME "elapsed_time_in_ms"
105#define O2HB_DEBUG_REGION_PINNED "pinned"
104 106
105static struct dentry *o2hb_debug_dir; 107static struct dentry *o2hb_debug_dir;
106static struct dentry *o2hb_debug_livenodes; 108static struct dentry *o2hb_debug_livenodes;
@@ -132,6 +134,33 @@ char *o2hb_heartbeat_mode_desc[O2HB_HEARTBEAT_NUM_MODES] = {
132unsigned int o2hb_dead_threshold = O2HB_DEFAULT_DEAD_THRESHOLD; 134unsigned int o2hb_dead_threshold = O2HB_DEFAULT_DEAD_THRESHOLD;
133unsigned int o2hb_heartbeat_mode = O2HB_HEARTBEAT_LOCAL; 135unsigned int o2hb_heartbeat_mode = O2HB_HEARTBEAT_LOCAL;
134 136
137/*
138 * o2hb_dependent_users tracks the number of registered callbacks that depend
139 * on heartbeat. o2net and o2dlm are two entities that register this callback.
 140 * However, only o2dlm depends on the heartbeat. It does not want the heartbeat
141 * to stop while a dlm domain is still active.
142 */
143unsigned int o2hb_dependent_users;
144
145/*
146 * In global heartbeat mode, all regions are pinned if there are one or more
147 * dependent users and the quorum region count is <= O2HB_PIN_CUT_OFF. All
 148 * regions are unpinned if the region count exceeds the cutoff or the number
149 * of dependent users falls to zero.
150 */
151#define O2HB_PIN_CUT_OFF 3
152
153/*
154 * In local heartbeat mode, we assume the dlm domain name to be the same as
155 * region uuid. This is true for domains created for the file system but not
156 * necessarily true for userdlm domains. This is a known limitation.
157 *
158 * In global heartbeat mode, we pin/unpin all o2hb regions. This solution
159 * works for both file system and userdlm domains.
160 */
161static int o2hb_region_pin(const char *region_uuid);
162static void o2hb_region_unpin(const char *region_uuid);
163
135/* Only sets a new threshold if there are no active regions. 164/* Only sets a new threshold if there are no active regions.
136 * 165 *
137 * No locking or otherwise interesting code is required for reading 166 * No locking or otherwise interesting code is required for reading
@@ -186,7 +215,9 @@ struct o2hb_region {
186 struct config_item hr_item; 215 struct config_item hr_item;
187 216
188 struct list_head hr_all_item; 217 struct list_head hr_all_item;
189 unsigned hr_unclean_stop:1; 218 unsigned hr_unclean_stop:1,
219 hr_item_pinned:1,
220 hr_item_dropped:1;
190 221
191 /* protected by the hr_callback_sem */ 222 /* protected by the hr_callback_sem */
192 struct task_struct *hr_task; 223 struct task_struct *hr_task;
@@ -212,9 +243,11 @@ struct o2hb_region {
212 struct dentry *hr_debug_livenodes; 243 struct dentry *hr_debug_livenodes;
213 struct dentry *hr_debug_regnum; 244 struct dentry *hr_debug_regnum;
214 struct dentry *hr_debug_elapsed_time; 245 struct dentry *hr_debug_elapsed_time;
246 struct dentry *hr_debug_pinned;
215 struct o2hb_debug_buf *hr_db_livenodes; 247 struct o2hb_debug_buf *hr_db_livenodes;
216 struct o2hb_debug_buf *hr_db_regnum; 248 struct o2hb_debug_buf *hr_db_regnum;
217 struct o2hb_debug_buf *hr_db_elapsed_time; 249 struct o2hb_debug_buf *hr_db_elapsed_time;
250 struct o2hb_debug_buf *hr_db_pinned;
218 251
219 /* let the person setting up hb wait for it to return until it 252 /* let the person setting up hb wait for it to return until it
220 * has reached a 'steady' state. This will be fixed when we have 253 * has reached a 'steady' state. This will be fixed when we have
@@ -701,6 +734,14 @@ static void o2hb_set_quorum_device(struct o2hb_region *reg,
701 config_item_name(&reg->hr_item)); 734 config_item_name(&reg->hr_item));
702 735
703 set_bit(reg->hr_region_num, o2hb_quorum_region_bitmap); 736 set_bit(reg->hr_region_num, o2hb_quorum_region_bitmap);
737
738 /*
 739 * If global heartbeat is active, unpin all regions if the
 740 * quorum region count exceeds O2HB_PIN_CUT_OFF
741 */
742 if (o2hb_pop_count(&o2hb_quorum_region_bitmap,
743 O2NM_MAX_REGIONS) > O2HB_PIN_CUT_OFF)
744 o2hb_region_unpin(NULL);
704} 745}
705 746
706static int o2hb_check_slot(struct o2hb_region *reg, 747static int o2hb_check_slot(struct o2hb_region *reg,
@@ -1041,6 +1082,9 @@ static int o2hb_thread(void *data)
1041 1082
1042 set_user_nice(current, -20); 1083 set_user_nice(current, -20);
1043 1084
1085 /* Pin node */
1086 o2nm_depend_this_node();
1087
1044 while (!kthread_should_stop() && !reg->hr_unclean_stop) { 1088 while (!kthread_should_stop() && !reg->hr_unclean_stop) {
1045 /* We track the time spent inside 1089 /* We track the time spent inside
1046 * o2hb_do_disk_heartbeat so that we avoid more than 1090 * o2hb_do_disk_heartbeat so that we avoid more than
@@ -1090,6 +1134,9 @@ static int o2hb_thread(void *data)
1090 mlog_errno(ret); 1134 mlog_errno(ret);
1091 } 1135 }
1092 1136
1137 /* Unpin node */
1138 o2nm_undepend_this_node();
1139
1093 mlog(ML_HEARTBEAT|ML_KTHREAD, "hb thread exiting\n"); 1140 mlog(ML_HEARTBEAT|ML_KTHREAD, "hb thread exiting\n");
1094 1141
1095 return 0; 1142 return 0;
@@ -1142,6 +1189,12 @@ static int o2hb_debug_open(struct inode *inode, struct file *file)
1142 reg->hr_last_timeout_start)); 1189 reg->hr_last_timeout_start));
1143 goto done; 1190 goto done;
1144 1191
1192 case O2HB_DB_TYPE_REGION_PINNED:
1193 reg = (struct o2hb_region *)db->db_data;
1194 out += snprintf(buf + out, PAGE_SIZE - out, "%u\n",
1195 !!reg->hr_item_pinned);
1196 goto done;
1197
1145 default: 1198 default:
1146 goto done; 1199 goto done;
1147 } 1200 }
@@ -1315,6 +1368,8 @@ int o2hb_init(void)
1315 memset(o2hb_quorum_region_bitmap, 0, sizeof(o2hb_quorum_region_bitmap)); 1368 memset(o2hb_quorum_region_bitmap, 0, sizeof(o2hb_quorum_region_bitmap));
1316 memset(o2hb_failed_region_bitmap, 0, sizeof(o2hb_failed_region_bitmap)); 1369 memset(o2hb_failed_region_bitmap, 0, sizeof(o2hb_failed_region_bitmap));
1317 1370
1371 o2hb_dependent_users = 0;
1372
1318 return o2hb_debug_init(); 1373 return o2hb_debug_init();
1319} 1374}
1320 1375
@@ -1384,6 +1439,7 @@ static void o2hb_region_release(struct config_item *item)
1384 debugfs_remove(reg->hr_debug_livenodes); 1439 debugfs_remove(reg->hr_debug_livenodes);
1385 debugfs_remove(reg->hr_debug_regnum); 1440 debugfs_remove(reg->hr_debug_regnum);
1386 debugfs_remove(reg->hr_debug_elapsed_time); 1441 debugfs_remove(reg->hr_debug_elapsed_time);
1442 debugfs_remove(reg->hr_debug_pinned);
1387 debugfs_remove(reg->hr_debug_dir); 1443 debugfs_remove(reg->hr_debug_dir);
1388 1444
1389 spin_lock(&o2hb_live_lock); 1445 spin_lock(&o2hb_live_lock);
@@ -1948,6 +2004,18 @@ static int o2hb_debug_region_init(struct o2hb_region *reg, struct dentry *dir)
1948 goto bail; 2004 goto bail;
1949 } 2005 }
1950 2006
2007 reg->hr_debug_pinned =
2008 o2hb_debug_create(O2HB_DEBUG_REGION_PINNED,
2009 reg->hr_debug_dir,
2010 &(reg->hr_db_pinned),
2011 sizeof(*(reg->hr_db_pinned)),
2012 O2HB_DB_TYPE_REGION_PINNED,
2013 0, 0, reg);
2014 if (!reg->hr_debug_pinned) {
2015 mlog_errno(ret);
2016 goto bail;
2017 }
2018
1951 ret = 0; 2019 ret = 0;
1952bail: 2020bail:
1953 return ret; 2021 return ret;
@@ -2002,15 +2070,20 @@ static void o2hb_heartbeat_group_drop_item(struct config_group *group,
2002{ 2070{
2003 struct task_struct *hb_task; 2071 struct task_struct *hb_task;
2004 struct o2hb_region *reg = to_o2hb_region(item); 2072 struct o2hb_region *reg = to_o2hb_region(item);
2073 int quorum_region = 0;
2005 2074
2006 /* stop the thread when the user removes the region dir */ 2075 /* stop the thread when the user removes the region dir */
2007 spin_lock(&o2hb_live_lock); 2076 spin_lock(&o2hb_live_lock);
2008 if (o2hb_global_heartbeat_active()) { 2077 if (o2hb_global_heartbeat_active()) {
2009 clear_bit(reg->hr_region_num, o2hb_region_bitmap); 2078 clear_bit(reg->hr_region_num, o2hb_region_bitmap);
2010 clear_bit(reg->hr_region_num, o2hb_live_region_bitmap); 2079 clear_bit(reg->hr_region_num, o2hb_live_region_bitmap);
2080 if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap))
2081 quorum_region = 1;
2082 clear_bit(reg->hr_region_num, o2hb_quorum_region_bitmap);
2011 } 2083 }
2012 hb_task = reg->hr_task; 2084 hb_task = reg->hr_task;
2013 reg->hr_task = NULL; 2085 reg->hr_task = NULL;
2086 reg->hr_item_dropped = 1;
2014 spin_unlock(&o2hb_live_lock); 2087 spin_unlock(&o2hb_live_lock);
2015 2088
2016 if (hb_task) 2089 if (hb_task)
@@ -2028,7 +2101,27 @@ static void o2hb_heartbeat_group_drop_item(struct config_group *group,
2028 if (o2hb_global_heartbeat_active()) 2101 if (o2hb_global_heartbeat_active())
2029 printk(KERN_NOTICE "o2hb: Heartbeat stopped on region %s\n", 2102 printk(KERN_NOTICE "o2hb: Heartbeat stopped on region %s\n",
2030 config_item_name(&reg->hr_item)); 2103 config_item_name(&reg->hr_item));
2104
2031 config_item_put(item); 2105 config_item_put(item);
2106
2107 if (!o2hb_global_heartbeat_active() || !quorum_region)
2108 return;
2109
2110 /*
 2111 * If global heartbeat is active and there are dependent users,
 2112 * pin all regions if the quorum region count is <= O2HB_PIN_CUT_OFF
2113 */
2114 spin_lock(&o2hb_live_lock);
2115
2116 if (!o2hb_dependent_users)
2117 goto unlock;
2118
2119 if (o2hb_pop_count(&o2hb_quorum_region_bitmap,
2120 O2NM_MAX_REGIONS) <= O2HB_PIN_CUT_OFF)
2121 o2hb_region_pin(NULL);
2122
2123unlock:
2124 spin_unlock(&o2hb_live_lock);
2032} 2125}
2033 2126
2034struct o2hb_heartbeat_group_attribute { 2127struct o2hb_heartbeat_group_attribute {
@@ -2214,63 +2307,138 @@ void o2hb_setup_callback(struct o2hb_callback_func *hc,
2214} 2307}
2215EXPORT_SYMBOL_GPL(o2hb_setup_callback); 2308EXPORT_SYMBOL_GPL(o2hb_setup_callback);
2216 2309
2217static struct o2hb_region *o2hb_find_region(const char *region_uuid) 2310/*
2311 * In local heartbeat mode, region_uuid passed matches the dlm domain name.
2312 * In global heartbeat mode, region_uuid passed is NULL.
2313 *
2314 * In local, we only pin the matching region. In global we pin all the active
2315 * regions.
2316 */
2317static int o2hb_region_pin(const char *region_uuid)
2218{ 2318{
2219 struct o2hb_region *p, *reg = NULL; 2319 int ret = 0, found = 0;
2320 struct o2hb_region *reg;
2321 char *uuid;
2220 2322
2221 assert_spin_locked(&o2hb_live_lock); 2323 assert_spin_locked(&o2hb_live_lock);
2222 2324
2223 list_for_each_entry(p, &o2hb_all_regions, hr_all_item) { 2325 list_for_each_entry(reg, &o2hb_all_regions, hr_all_item) {
2224 if (!strcmp(region_uuid, config_item_name(&p->hr_item))) { 2326 uuid = config_item_name(&reg->hr_item);
2225 reg = p; 2327
2226 break; 2328 /* local heartbeat */
2329 if (region_uuid) {
2330 if (strcmp(region_uuid, uuid))
2331 continue;
2332 found = 1;
2333 }
2334
2335 if (reg->hr_item_pinned || reg->hr_item_dropped)
2336 goto skip_pin;
2337
2338 /* Ignore ENOENT only for local hb (userdlm domain) */
2339 ret = o2nm_depend_item(&reg->hr_item);
2340 if (!ret) {
2341 mlog(ML_CLUSTER, "Pin region %s\n", uuid);
2342 reg->hr_item_pinned = 1;
2343 } else {
2344 if (ret == -ENOENT && found)
2345 ret = 0;
2346 else {
2347 mlog(ML_ERROR, "Pin region %s fails with %d\n",
2348 uuid, ret);
2349 break;
2350 }
2227 } 2351 }
2352skip_pin:
2353 if (found)
2354 break;
2228 } 2355 }
2229 2356
2230 return reg; 2357 return ret;
2231} 2358}
2232 2359
2233static int o2hb_region_get(const char *region_uuid) 2360/*
2361 * In local heartbeat mode, region_uuid passed matches the dlm domain name.
2362 * In global heartbeat mode, region_uuid passed is NULL.
2363 *
2364 * In local, we only unpin the matching region. In global we unpin all the
2365 * active regions.
2366 */
2367static void o2hb_region_unpin(const char *region_uuid)
2234{ 2368{
2235 int ret = 0;
2236 struct o2hb_region *reg; 2369 struct o2hb_region *reg;
2370 char *uuid;
2371 int found = 0;
2237 2372
2238 spin_lock(&o2hb_live_lock); 2373 assert_spin_locked(&o2hb_live_lock);
2239 2374
2240 reg = o2hb_find_region(region_uuid); 2375 list_for_each_entry(reg, &o2hb_all_regions, hr_all_item) {
2241 if (!reg) 2376 uuid = config_item_name(&reg->hr_item);
2242 ret = -ENOENT; 2377 if (region_uuid) {
2243 spin_unlock(&o2hb_live_lock); 2378 if (strcmp(region_uuid, uuid))
2379 continue;
2380 found = 1;
2381 }
2244 2382
2245 if (ret) 2383 if (reg->hr_item_pinned) {
2246 goto out; 2384 mlog(ML_CLUSTER, "Unpin region %s\n", uuid);
2385 o2nm_undepend_item(&reg->hr_item);
2386 reg->hr_item_pinned = 0;
2387 }
2388 if (found)
2389 break;
2390 }
2391}
2247 2392
2248 ret = o2nm_depend_this_node(); 2393static int o2hb_region_inc_user(const char *region_uuid)
2249 if (ret) 2394{
2250 goto out; 2395 int ret = 0;
2251 2396
2252 ret = o2nm_depend_item(&reg->hr_item); 2397 spin_lock(&o2hb_live_lock);
2253 if (ret)
2254 o2nm_undepend_this_node();
2255 2398
2256out: 2399 /* local heartbeat */
2400 if (!o2hb_global_heartbeat_active()) {
2401 ret = o2hb_region_pin(region_uuid);
2402 goto unlock;
2403 }
2404
2405 /*
 2406 * If global heartbeat is active and this is the first dependent user,
 2407 * pin all regions if the quorum region count is <= O2HB_PIN_CUT_OFF
2408 */
2409 o2hb_dependent_users++;
2410 if (o2hb_dependent_users > 1)
2411 goto unlock;
2412
2413 if (o2hb_pop_count(&o2hb_quorum_region_bitmap,
2414 O2NM_MAX_REGIONS) <= O2HB_PIN_CUT_OFF)
2415 ret = o2hb_region_pin(NULL);
2416
2417unlock:
2418 spin_unlock(&o2hb_live_lock);
2257 return ret; 2419 return ret;
2258} 2420}
2259 2421
2260static void o2hb_region_put(const char *region_uuid) 2422void o2hb_region_dec_user(const char *region_uuid)
2261{ 2423{
2262 struct o2hb_region *reg;
2263
2264 spin_lock(&o2hb_live_lock); 2424 spin_lock(&o2hb_live_lock);
2265 2425
2266 reg = o2hb_find_region(region_uuid); 2426 /* local heartbeat */
2427 if (!o2hb_global_heartbeat_active()) {
2428 o2hb_region_unpin(region_uuid);
2429 goto unlock;
2430 }
2267 2431
2268 spin_unlock(&o2hb_live_lock); 2432 /*
 2433 * If global heartbeat is active and there are no dependent users,
 2434 * unpin all quorum regions
2435 */
2436 o2hb_dependent_users--;
2437 if (!o2hb_dependent_users)
2438 o2hb_region_unpin(NULL);
2269 2439
2270 if (reg) { 2440unlock:
2271 o2nm_undepend_item(&reg->hr_item); 2441 spin_unlock(&o2hb_live_lock);
2272 o2nm_undepend_this_node();
2273 }
2274} 2442}
2275 2443
2276int o2hb_register_callback(const char *region_uuid, 2444int o2hb_register_callback(const char *region_uuid,
@@ -2291,9 +2459,11 @@ int o2hb_register_callback(const char *region_uuid,
2291 } 2459 }
2292 2460
2293 if (region_uuid) { 2461 if (region_uuid) {
2294 ret = o2hb_region_get(region_uuid); 2462 ret = o2hb_region_inc_user(region_uuid);
2295 if (ret) 2463 if (ret) {
2464 mlog_errno(ret);
2296 goto out; 2465 goto out;
2466 }
2297 } 2467 }
2298 2468
2299 down_write(&o2hb_callback_sem); 2469 down_write(&o2hb_callback_sem);
@@ -2311,7 +2481,7 @@ int o2hb_register_callback(const char *region_uuid,
2311 up_write(&o2hb_callback_sem); 2481 up_write(&o2hb_callback_sem);
2312 ret = 0; 2482 ret = 0;
2313out: 2483out:
2314 mlog(ML_HEARTBEAT, "returning %d on behalf of %p for funcs %p\n", 2484 mlog(ML_CLUSTER, "returning %d on behalf of %p for funcs %p\n",
2315 ret, __builtin_return_address(0), hc); 2485 ret, __builtin_return_address(0), hc);
2316 return ret; 2486 return ret;
2317} 2487}
@@ -2322,7 +2492,7 @@ void o2hb_unregister_callback(const char *region_uuid,
2322{ 2492{
2323 BUG_ON(hc->hc_magic != O2HB_CB_MAGIC); 2493 BUG_ON(hc->hc_magic != O2HB_CB_MAGIC);
2324 2494
2325 mlog(ML_HEARTBEAT, "on behalf of %p for funcs %p\n", 2495 mlog(ML_CLUSTER, "on behalf of %p for funcs %p\n",
2326 __builtin_return_address(0), hc); 2496 __builtin_return_address(0), hc);
2327 2497
2328 /* XXX Can this happen _with_ a region reference? */ 2498 /* XXX Can this happen _with_ a region reference? */
@@ -2330,7 +2500,7 @@ void o2hb_unregister_callback(const char *region_uuid,
2330 return; 2500 return;
2331 2501
2332 if (region_uuid) 2502 if (region_uuid)
2333 o2hb_region_put(region_uuid); 2503 o2hb_region_dec_user(region_uuid);
2334 2504
2335 down_write(&o2hb_callback_sem); 2505 down_write(&o2hb_callback_sem);
2336 2506
diff --git a/fs/ocfs2/cluster/netdebug.c b/fs/ocfs2/cluster/netdebug.c
index a3f150e52b02..3a5835904b3d 100644
--- a/fs/ocfs2/cluster/netdebug.c
+++ b/fs/ocfs2/cluster/netdebug.c
@@ -46,10 +46,15 @@
46#define O2NET_DEBUG_DIR "o2net" 46#define O2NET_DEBUG_DIR "o2net"
47#define SC_DEBUG_NAME "sock_containers" 47#define SC_DEBUG_NAME "sock_containers"
48#define NST_DEBUG_NAME "send_tracking" 48#define NST_DEBUG_NAME "send_tracking"
49#define STATS_DEBUG_NAME "stats"
50
51#define SHOW_SOCK_CONTAINERS 0
52#define SHOW_SOCK_STATS 1
49 53
50static struct dentry *o2net_dentry; 54static struct dentry *o2net_dentry;
51static struct dentry *sc_dentry; 55static struct dentry *sc_dentry;
52static struct dentry *nst_dentry; 56static struct dentry *nst_dentry;
57static struct dentry *stats_dentry;
53 58
54static DEFINE_SPINLOCK(o2net_debug_lock); 59static DEFINE_SPINLOCK(o2net_debug_lock);
55 60
@@ -123,37 +128,42 @@ static void *nst_seq_next(struct seq_file *seq, void *v, loff_t *pos)
123static int nst_seq_show(struct seq_file *seq, void *v) 128static int nst_seq_show(struct seq_file *seq, void *v)
124{ 129{
125 struct o2net_send_tracking *nst, *dummy_nst = seq->private; 130 struct o2net_send_tracking *nst, *dummy_nst = seq->private;
131 ktime_t now;
132 s64 sock, send, status;
126 133
127 spin_lock(&o2net_debug_lock); 134 spin_lock(&o2net_debug_lock);
128 nst = next_nst(dummy_nst); 135 nst = next_nst(dummy_nst);
136 if (!nst)
137 goto out;
129 138
130 if (nst != NULL) { 139 now = ktime_get();
131 /* get_task_comm isn't exported. oh well. */ 140 sock = ktime_to_us(ktime_sub(now, nst->st_sock_time));
132 seq_printf(seq, "%p:\n" 141 send = ktime_to_us(ktime_sub(now, nst->st_send_time));
133 " pid: %lu\n" 142 status = ktime_to_us(ktime_sub(now, nst->st_status_time));
134 " tgid: %lu\n" 143
135 " process name: %s\n" 144 /* get_task_comm isn't exported. oh well. */
136 " node: %u\n" 145 seq_printf(seq, "%p:\n"
137 " sc: %p\n" 146 " pid: %lu\n"
138 " message id: %d\n" 147 " tgid: %lu\n"
139 " message type: %u\n" 148 " process name: %s\n"
140 " message key: 0x%08x\n" 149 " node: %u\n"
141 " sock acquiry: %lu.%ld\n" 150 " sc: %p\n"
142 " send start: %lu.%ld\n" 151 " message id: %d\n"
143 " wait start: %lu.%ld\n", 152 " message type: %u\n"
144 nst, (unsigned long)nst->st_task->pid, 153 " message key: 0x%08x\n"
145 (unsigned long)nst->st_task->tgid, 154 " sock acquiry: %lld usecs ago\n"
146 nst->st_task->comm, nst->st_node, 155 " send start: %lld usecs ago\n"
147 nst->st_sc, nst->st_id, nst->st_msg_type, 156 " wait start: %lld usecs ago\n",
148 nst->st_msg_key, 157 nst, (unsigned long)task_pid_nr(nst->st_task),
149 nst->st_sock_time.tv_sec, 158 (unsigned long)nst->st_task->tgid,
150 (long)nst->st_sock_time.tv_usec, 159 nst->st_task->comm, nst->st_node,
151 nst->st_send_time.tv_sec, 160 nst->st_sc, nst->st_id, nst->st_msg_type,
152 (long)nst->st_send_time.tv_usec, 161 nst->st_msg_key,
153 nst->st_status_time.tv_sec, 162 (long long)sock,
154 (long)nst->st_status_time.tv_usec); 163 (long long)send,
155 } 164 (long long)status);
156 165
166out:
157 spin_unlock(&o2net_debug_lock); 167 spin_unlock(&o2net_debug_lock);
158 168
159 return 0; 169 return 0;
@@ -228,6 +238,11 @@ void o2net_debug_del_sc(struct o2net_sock_container *sc)
228 spin_unlock(&o2net_debug_lock); 238 spin_unlock(&o2net_debug_lock);
229} 239}
230 240
241struct o2net_sock_debug {
242 int dbg_ctxt;
243 struct o2net_sock_container *dbg_sock;
244};
245
231static struct o2net_sock_container 246static struct o2net_sock_container
232 *next_sc(struct o2net_sock_container *sc_start) 247 *next_sc(struct o2net_sock_container *sc_start)
233{ 248{
@@ -253,7 +268,8 @@ static struct o2net_sock_container
253 268
254static void *sc_seq_start(struct seq_file *seq, loff_t *pos) 269static void *sc_seq_start(struct seq_file *seq, loff_t *pos)
255{ 270{
256 struct o2net_sock_container *sc, *dummy_sc = seq->private; 271 struct o2net_sock_debug *sd = seq->private;
272 struct o2net_sock_container *sc, *dummy_sc = sd->dbg_sock;
257 273
258 spin_lock(&o2net_debug_lock); 274 spin_lock(&o2net_debug_lock);
259 sc = next_sc(dummy_sc); 275 sc = next_sc(dummy_sc);
@@ -264,7 +280,8 @@ static void *sc_seq_start(struct seq_file *seq, loff_t *pos)
264 280
265static void *sc_seq_next(struct seq_file *seq, void *v, loff_t *pos) 281static void *sc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
266{ 282{
267 struct o2net_sock_container *sc, *dummy_sc = seq->private; 283 struct o2net_sock_debug *sd = seq->private;
284 struct o2net_sock_container *sc, *dummy_sc = sd->dbg_sock;
268 285
269 spin_lock(&o2net_debug_lock); 286 spin_lock(&o2net_debug_lock);
270 sc = next_sc(dummy_sc); 287 sc = next_sc(dummy_sc);
@@ -276,65 +293,107 @@ static void *sc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
276 return sc; /* unused, just needs to be null when done */ 293 return sc; /* unused, just needs to be null when done */
277} 294}
278 295
279#define TV_SEC_USEC(TV) TV.tv_sec, (long)TV.tv_usec 296#ifdef CONFIG_OCFS2_FS_STATS
297# define sc_send_count(_s) ((_s)->sc_send_count)
298# define sc_recv_count(_s) ((_s)->sc_recv_count)
299# define sc_tv_acquiry_total_ns(_s) (ktime_to_ns((_s)->sc_tv_acquiry_total))
300# define sc_tv_send_total_ns(_s) (ktime_to_ns((_s)->sc_tv_send_total))
301# define sc_tv_status_total_ns(_s) (ktime_to_ns((_s)->sc_tv_status_total))
302# define sc_tv_process_total_ns(_s) (ktime_to_ns((_s)->sc_tv_process_total))
303#else
304# define sc_send_count(_s) (0U)
305# define sc_recv_count(_s) (0U)
306# define sc_tv_acquiry_total_ns(_s) (0LL)
307# define sc_tv_send_total_ns(_s) (0LL)
308# define sc_tv_status_total_ns(_s) (0LL)
309# define sc_tv_process_total_ns(_s) (0LL)
310#endif
311
312/* So that debugfs.ocfs2 can determine which format is being used */
313#define O2NET_STATS_STR_VERSION 1
314static void sc_show_sock_stats(struct seq_file *seq,
315 struct o2net_sock_container *sc)
316{
317 if (!sc)
318 return;
319
320 seq_printf(seq, "%d,%u,%lu,%lld,%lld,%lld,%lu,%lld\n", O2NET_STATS_STR_VERSION,
321 sc->sc_node->nd_num, (unsigned long)sc_send_count(sc),
322 (long long)sc_tv_acquiry_total_ns(sc),
323 (long long)sc_tv_send_total_ns(sc),
324 (long long)sc_tv_status_total_ns(sc),
325 (unsigned long)sc_recv_count(sc),
326 (long long)sc_tv_process_total_ns(sc));
327}
328
329static void sc_show_sock_container(struct seq_file *seq,
330 struct o2net_sock_container *sc)
331{
332 struct inet_sock *inet = NULL;
333 __be32 saddr = 0, daddr = 0;
334 __be16 sport = 0, dport = 0;
335
336 if (!sc)
337 return;
338
339 if (sc->sc_sock) {
340 inet = inet_sk(sc->sc_sock->sk);
341 /* the stack's structs aren't sparse endian clean */
342 saddr = (__force __be32)inet->inet_saddr;
343 daddr = (__force __be32)inet->inet_daddr;
344 sport = (__force __be16)inet->inet_sport;
345 dport = (__force __be16)inet->inet_dport;
346 }
347
348 /* XXX sigh, inet-> doesn't have sparse annotation so any
349 * use of it here generates a warning with -Wbitwise */
350 seq_printf(seq, "%p:\n"
351 " krefs: %d\n"
352 " sock: %pI4:%u -> "
353 "%pI4:%u\n"
354 " remote node: %s\n"
355 " page off: %zu\n"
356 " handshake ok: %u\n"
357 " timer: %lld usecs\n"
358 " data ready: %lld usecs\n"
359 " advance start: %lld usecs\n"
360 " advance stop: %lld usecs\n"
361 " func start: %lld usecs\n"
362 " func stop: %lld usecs\n"
363 " func key: 0x%08x\n"
364 " func type: %u\n",
365 sc,
366 atomic_read(&sc->sc_kref.refcount),
367 &saddr, inet ? ntohs(sport) : 0,
368 &daddr, inet ? ntohs(dport) : 0,
369 sc->sc_node->nd_name,
370 sc->sc_page_off,
371 sc->sc_handshake_ok,
372 (long long)ktime_to_us(sc->sc_tv_timer),
373 (long long)ktime_to_us(sc->sc_tv_data_ready),
374 (long long)ktime_to_us(sc->sc_tv_advance_start),
375 (long long)ktime_to_us(sc->sc_tv_advance_stop),
376 (long long)ktime_to_us(sc->sc_tv_func_start),
377 (long long)ktime_to_us(sc->sc_tv_func_stop),
378 sc->sc_msg_key,
379 sc->sc_msg_type);
380}
280 381
281static int sc_seq_show(struct seq_file *seq, void *v) 382static int sc_seq_show(struct seq_file *seq, void *v)
282{ 383{
283 struct o2net_sock_container *sc, *dummy_sc = seq->private; 384 struct o2net_sock_debug *sd = seq->private;
385 struct o2net_sock_container *sc, *dummy_sc = sd->dbg_sock;
284 386
285 spin_lock(&o2net_debug_lock); 387 spin_lock(&o2net_debug_lock);
286 sc = next_sc(dummy_sc); 388 sc = next_sc(dummy_sc);
287 389
288 if (sc != NULL) { 390 if (sc) {
289 struct inet_sock *inet = NULL; 391 if (sd->dbg_ctxt == SHOW_SOCK_CONTAINERS)
290 392 sc_show_sock_container(seq, sc);
291 __be32 saddr = 0, daddr = 0; 393 else
292 __be16 sport = 0, dport = 0; 394 sc_show_sock_stats(seq, sc);
293
294 if (sc->sc_sock) {
295 inet = inet_sk(sc->sc_sock->sk);
296 /* the stack's structs aren't sparse endian clean */
297 saddr = (__force __be32)inet->inet_saddr;
298 daddr = (__force __be32)inet->inet_daddr;
299 sport = (__force __be16)inet->inet_sport;
300 dport = (__force __be16)inet->inet_dport;
301 }
302
303 /* XXX sigh, inet-> doesn't have sparse annotation so any
304 * use of it here generates a warning with -Wbitwise */
305 seq_printf(seq, "%p:\n"
306 " krefs: %d\n"
307 " sock: %pI4:%u -> "
308 "%pI4:%u\n"
309 " remote node: %s\n"
310 " page off: %zu\n"
311 " handshake ok: %u\n"
312 " timer: %lu.%ld\n"
313 " data ready: %lu.%ld\n"
314 " advance start: %lu.%ld\n"
315 " advance stop: %lu.%ld\n"
316 " func start: %lu.%ld\n"
317 " func stop: %lu.%ld\n"
318 " func key: %u\n"
319 " func type: %u\n",
320 sc,
321 atomic_read(&sc->sc_kref.refcount),
322 &saddr, inet ? ntohs(sport) : 0,
323 &daddr, inet ? ntohs(dport) : 0,
324 sc->sc_node->nd_name,
325 sc->sc_page_off,
326 sc->sc_handshake_ok,
327 TV_SEC_USEC(sc->sc_tv_timer),
328 TV_SEC_USEC(sc->sc_tv_data_ready),
329 TV_SEC_USEC(sc->sc_tv_advance_start),
330 TV_SEC_USEC(sc->sc_tv_advance_stop),
331 TV_SEC_USEC(sc->sc_tv_func_start),
332 TV_SEC_USEC(sc->sc_tv_func_stop),
333 sc->sc_msg_key,
334 sc->sc_msg_type);
335 } 395 }
336 396
337
338 spin_unlock(&o2net_debug_lock); 397 spin_unlock(&o2net_debug_lock);
339 398
340 return 0; 399 return 0;
@@ -351,7 +410,7 @@ static const struct seq_operations sc_seq_ops = {
351 .show = sc_seq_show, 410 .show = sc_seq_show,
352}; 411};
353 412
354static int sc_fop_open(struct inode *inode, struct file *file) 413static int sc_common_open(struct file *file, struct o2net_sock_debug *sd)
355{ 414{
356 struct o2net_sock_container *dummy_sc; 415 struct o2net_sock_container *dummy_sc;
357 struct seq_file *seq; 416 struct seq_file *seq;
@@ -369,7 +428,8 @@ static int sc_fop_open(struct inode *inode, struct file *file)
369 goto out; 428 goto out;
370 429
371 seq = file->private_data; 430 seq = file->private_data;
372 seq->private = dummy_sc; 431 seq->private = sd;
432 sd->dbg_sock = dummy_sc;
373 o2net_debug_add_sc(dummy_sc); 433 o2net_debug_add_sc(dummy_sc);
374 434
375 dummy_sc = NULL; 435 dummy_sc = NULL;
@@ -382,12 +442,48 @@ out:
382static int sc_fop_release(struct inode *inode, struct file *file) 442static int sc_fop_release(struct inode *inode, struct file *file)
383{ 443{
384 struct seq_file *seq = file->private_data; 444 struct seq_file *seq = file->private_data;
385 struct o2net_sock_container *dummy_sc = seq->private; 445 struct o2net_sock_debug *sd = seq->private;
446 struct o2net_sock_container *dummy_sc = sd->dbg_sock;
386 447
387 o2net_debug_del_sc(dummy_sc); 448 o2net_debug_del_sc(dummy_sc);
388 return seq_release_private(inode, file); 449 return seq_release_private(inode, file);
389} 450}
390 451
452static int stats_fop_open(struct inode *inode, struct file *file)
453{
454 struct o2net_sock_debug *sd;
455
456 sd = kmalloc(sizeof(struct o2net_sock_debug), GFP_KERNEL);
457 if (sd == NULL)
458 return -ENOMEM;
459
460 sd->dbg_ctxt = SHOW_SOCK_STATS;
461 sd->dbg_sock = NULL;
462
463 return sc_common_open(file, sd);
464}
465
466static const struct file_operations stats_seq_fops = {
467 .open = stats_fop_open,
468 .read = seq_read,
469 .llseek = seq_lseek,
470 .release = sc_fop_release,
471};
472
473static int sc_fop_open(struct inode *inode, struct file *file)
474{
475 struct o2net_sock_debug *sd;
476
477 sd = kmalloc(sizeof(struct o2net_sock_debug), GFP_KERNEL);
478 if (sd == NULL)
479 return -ENOMEM;
480
481 sd->dbg_ctxt = SHOW_SOCK_CONTAINERS;
482 sd->dbg_sock = NULL;
483
484 return sc_common_open(file, sd);
485}
486
391static const struct file_operations sc_seq_fops = { 487static const struct file_operations sc_seq_fops = {
392 .open = sc_fop_open, 488 .open = sc_fop_open,
393 .read = seq_read, 489 .read = seq_read,
@@ -419,25 +515,29 @@ int o2net_debugfs_init(void)
419 goto bail; 515 goto bail;
420 } 516 }
421 517
518 stats_dentry = debugfs_create_file(STATS_DEBUG_NAME, S_IFREG|S_IRUSR,
519 o2net_dentry, NULL,
520 &stats_seq_fops);
521 if (!stats_dentry) {
522 mlog_errno(-ENOMEM);
523 goto bail;
524 }
525
422 return 0; 526 return 0;
423bail: 527bail:
424 if (sc_dentry) 528 debugfs_remove(stats_dentry);
425 debugfs_remove(sc_dentry); 529 debugfs_remove(sc_dentry);
426 if (nst_dentry) 530 debugfs_remove(nst_dentry);
427 debugfs_remove(nst_dentry); 531 debugfs_remove(o2net_dentry);
428 if (o2net_dentry)
429 debugfs_remove(o2net_dentry);
430 return -ENOMEM; 532 return -ENOMEM;
431} 533}
432 534
433void o2net_debugfs_exit(void) 535void o2net_debugfs_exit(void)
434{ 536{
435 if (sc_dentry) 537 debugfs_remove(stats_dentry);
436 debugfs_remove(sc_dentry); 538 debugfs_remove(sc_dentry);
437 if (nst_dentry) 539 debugfs_remove(nst_dentry);
438 debugfs_remove(nst_dentry); 540 debugfs_remove(o2net_dentry);
439 if (o2net_dentry)
440 debugfs_remove(o2net_dentry);
441} 541}
442 542
443#endif /* CONFIG_DEBUG_FS */ 543#endif /* CONFIG_DEBUG_FS */
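netdebug.c now serves both sock_containers and the new stats file from one seq_file iterator: each open() allocates a struct o2net_sock_debug recording which view was requested, and sc_seq_show() dispatches on it. A stripped-down sketch of that per-open-context pattern, with plain integers standing in for the socket containers:

#include <stdio.h>

enum { SHOW_CONTAINERS, SHOW_STATS };

struct sock_debug {
	int ctxt;	/* which view this open file wants */
};

static void show_container(int sc)	{ printf("container %d\n", sc); }
static void show_stats(int sc)		{ printf("stats %d\n", sc); }

/* mirrors sc_seq_show(): one iterator, two output formats */
static void seq_show(const struct sock_debug *sd, int sc)
{
	if (sd->ctxt == SHOW_CONTAINERS)
		show_container(sc);
	else
		show_stats(sc);
}

int main(void)
{
	struct sock_debug containers = { SHOW_CONTAINERS };
	struct sock_debug stats = { SHOW_STATS };

	seq_show(&containers, 1);
	seq_show(&stats, 1);
	return 0;
}

The payoff is that next_sc(), the dummy-container cursor trick, and the locking are written once in sc_common_open()/sc_fop_release() and shared by both files.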
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 9aa426e42123..3b11cb1e38fc 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -153,63 +153,114 @@ static void o2net_init_nst(struct o2net_send_tracking *nst, u32 msgtype,
153 nst->st_node = node; 153 nst->st_node = node;
154} 154}
155 155
156static void o2net_set_nst_sock_time(struct o2net_send_tracking *nst) 156static inline void o2net_set_nst_sock_time(struct o2net_send_tracking *nst)
157{ 157{
158 do_gettimeofday(&nst->st_sock_time); 158 nst->st_sock_time = ktime_get();
159} 159}
160 160
161static void o2net_set_nst_send_time(struct o2net_send_tracking *nst) 161static inline void o2net_set_nst_send_time(struct o2net_send_tracking *nst)
162{ 162{
163 do_gettimeofday(&nst->st_send_time); 163 nst->st_send_time = ktime_get();
164} 164}
165 165
166static void o2net_set_nst_status_time(struct o2net_send_tracking *nst) 166static inline void o2net_set_nst_status_time(struct o2net_send_tracking *nst)
167{ 167{
168 do_gettimeofday(&nst->st_status_time); 168 nst->st_status_time = ktime_get();
169} 169}
170 170
171static void o2net_set_nst_sock_container(struct o2net_send_tracking *nst, 171static inline void o2net_set_nst_sock_container(struct o2net_send_tracking *nst,
172 struct o2net_sock_container *sc) 172 struct o2net_sock_container *sc)
173{ 173{
174 nst->st_sc = sc; 174 nst->st_sc = sc;
175} 175}
176 176
177static void o2net_set_nst_msg_id(struct o2net_send_tracking *nst, u32 msg_id) 177static inline void o2net_set_nst_msg_id(struct o2net_send_tracking *nst,
178 u32 msg_id)
178{ 179{
179 nst->st_id = msg_id; 180 nst->st_id = msg_id;
180} 181}
181 182
182#else /* CONFIG_DEBUG_FS */ 183static inline void o2net_set_sock_timer(struct o2net_sock_container *sc)
183
184static inline void o2net_init_nst(struct o2net_send_tracking *nst, u32 msgtype,
185 u32 msgkey, struct task_struct *task, u8 node)
186{ 184{
185 sc->sc_tv_timer = ktime_get();
187} 186}
188 187
189static inline void o2net_set_nst_sock_time(struct o2net_send_tracking *nst) 188static inline void o2net_set_data_ready_time(struct o2net_sock_container *sc)
190{ 189{
190 sc->sc_tv_data_ready = ktime_get();
191} 191}
192 192
193static inline void o2net_set_nst_send_time(struct o2net_send_tracking *nst) 193static inline void o2net_set_advance_start_time(struct o2net_sock_container *sc)
194{ 194{
195 sc->sc_tv_advance_start = ktime_get();
195} 196}
196 197
197static inline void o2net_set_nst_status_time(struct o2net_send_tracking *nst) 198static inline void o2net_set_advance_stop_time(struct o2net_sock_container *sc)
198{ 199{
200 sc->sc_tv_advance_stop = ktime_get();
199} 201}
200 202
201static inline void o2net_set_nst_sock_container(struct o2net_send_tracking *nst, 203static inline void o2net_set_func_start_time(struct o2net_sock_container *sc)
202 struct o2net_sock_container *sc)
203{ 204{
205 sc->sc_tv_func_start = ktime_get();
204} 206}
205 207
206static inline void o2net_set_nst_msg_id(struct o2net_send_tracking *nst, 208static inline void o2net_set_func_stop_time(struct o2net_sock_container *sc)
207 u32 msg_id)
208{ 209{
210 sc->sc_tv_func_stop = ktime_get();
209} 211}
210 212
213static ktime_t o2net_get_func_run_time(struct o2net_sock_container *sc)
214{
215 return ktime_sub(sc->sc_tv_func_stop, sc->sc_tv_func_start);
216}
217#else /* CONFIG_DEBUG_FS */
218# define o2net_init_nst(a, b, c, d, e)
219# define o2net_set_nst_sock_time(a)
220# define o2net_set_nst_send_time(a)
221# define o2net_set_nst_status_time(a)
222# define o2net_set_nst_sock_container(a, b)
223# define o2net_set_nst_msg_id(a, b)
224# define o2net_set_sock_timer(a)
225# define o2net_set_data_ready_time(a)
226# define o2net_set_advance_start_time(a)
227# define o2net_set_advance_stop_time(a)
228# define o2net_set_func_start_time(a)
229# define o2net_set_func_stop_time(a)
230# define o2net_get_func_run_time(a) (ktime_t)0
211#endif /* CONFIG_DEBUG_FS */ 231#endif /* CONFIG_DEBUG_FS */
212 232
233#ifdef CONFIG_OCFS2_FS_STATS
234static void o2net_update_send_stats(struct o2net_send_tracking *nst,
235 struct o2net_sock_container *sc)
236{
237 sc->sc_tv_status_total = ktime_add(sc->sc_tv_status_total,
238 ktime_sub(ktime_get(),
239 nst->st_status_time));
240 sc->sc_tv_send_total = ktime_add(sc->sc_tv_send_total,
241 ktime_sub(nst->st_status_time,
242 nst->st_send_time));
243 sc->sc_tv_acquiry_total = ktime_add(sc->sc_tv_acquiry_total,
244 ktime_sub(nst->st_send_time,
245 nst->st_sock_time));
246 sc->sc_send_count++;
247}
248
249static void o2net_update_recv_stats(struct o2net_sock_container *sc)
250{
251 sc->sc_tv_process_total = ktime_add(sc->sc_tv_process_total,
252 o2net_get_func_run_time(sc));
253 sc->sc_recv_count++;
254}
255
256#else
257
258# define o2net_update_send_stats(a, b)
259
260# define o2net_update_recv_stats(sc)
261
262#endif /* CONFIG_OCFS2_FS_STATS */
263
213static inline int o2net_reconnect_delay(void) 264static inline int o2net_reconnect_delay(void)
214{ 265{
215 return o2nm_single_cluster->cl_reconnect_delay_ms; 266 return o2nm_single_cluster->cl_reconnect_delay_ms;
@@ -355,6 +406,7 @@ static void sc_kref_release(struct kref *kref)
355 sc->sc_sock = NULL; 406 sc->sc_sock = NULL;
356 } 407 }
357 408
409 o2nm_undepend_item(&sc->sc_node->nd_item);
358 o2nm_node_put(sc->sc_node); 410 o2nm_node_put(sc->sc_node);
359 sc->sc_node = NULL; 411 sc->sc_node = NULL;
360 412
@@ -376,6 +428,7 @@ static struct o2net_sock_container *sc_alloc(struct o2nm_node *node)
376{ 428{
377 struct o2net_sock_container *sc, *ret = NULL; 429 struct o2net_sock_container *sc, *ret = NULL;
378 struct page *page = NULL; 430 struct page *page = NULL;
431 int status = 0;
379 432
380 page = alloc_page(GFP_NOFS); 433 page = alloc_page(GFP_NOFS);
381 sc = kzalloc(sizeof(*sc), GFP_NOFS); 434 sc = kzalloc(sizeof(*sc), GFP_NOFS);
@@ -386,6 +439,13 @@ static struct o2net_sock_container *sc_alloc(struct o2nm_node *node)
386 o2nm_node_get(node); 439 o2nm_node_get(node);
387 sc->sc_node = node; 440 sc->sc_node = node;
388 441
442 /* pin the node item of the remote node */
443 status = o2nm_depend_item(&node->nd_item);
444 if (status) {
445 mlog_errno(status);
446 o2nm_node_put(node);
447 goto out;
448 }
389 INIT_WORK(&sc->sc_connect_work, o2net_sc_connect_completed); 449 INIT_WORK(&sc->sc_connect_work, o2net_sc_connect_completed);
390 INIT_WORK(&sc->sc_rx_work, o2net_rx_until_empty); 450 INIT_WORK(&sc->sc_rx_work, o2net_rx_until_empty);
391 INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc); 451 INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc);
@@ -546,7 +606,7 @@ static void o2net_data_ready(struct sock *sk, int bytes)
546 if (sk->sk_user_data) { 606 if (sk->sk_user_data) {
547 struct o2net_sock_container *sc = sk->sk_user_data; 607 struct o2net_sock_container *sc = sk->sk_user_data;
548 sclog(sc, "data_ready hit\n"); 608 sclog(sc, "data_ready hit\n");
549 do_gettimeofday(&sc->sc_tv_data_ready); 609 o2net_set_data_ready_time(sc);
550 o2net_sc_queue_work(sc, &sc->sc_rx_work); 610 o2net_sc_queue_work(sc, &sc->sc_rx_work);
551 ready = sc->sc_data_ready; 611 ready = sc->sc_data_ready;
552 } else { 612 } else {
@@ -1070,6 +1130,8 @@ int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec,
1070 o2net_set_nst_status_time(&nst); 1130 o2net_set_nst_status_time(&nst);
1071 wait_event(nsw.ns_wq, o2net_nsw_completed(nn, &nsw)); 1131 wait_event(nsw.ns_wq, o2net_nsw_completed(nn, &nsw));
1072 1132
1133 o2net_update_send_stats(&nst, sc);
1134
1073 /* Note that we avoid overwriting the callers status return 1135 /* Note that we avoid overwriting the callers status return
1074 * variable if a system error was reported on the other 1136 * variable if a system error was reported on the other
1075 * side. Callers beware. */ 1137 * side. Callers beware. */
@@ -1183,13 +1245,15 @@ static int o2net_process_message(struct o2net_sock_container *sc,
1183 if (syserr != O2NET_ERR_NONE) 1245 if (syserr != O2NET_ERR_NONE)
1184 goto out_respond; 1246 goto out_respond;
1185 1247
1186 do_gettimeofday(&sc->sc_tv_func_start); 1248 o2net_set_func_start_time(sc);
1187 sc->sc_msg_key = be32_to_cpu(hdr->key); 1249 sc->sc_msg_key = be32_to_cpu(hdr->key);
1188 sc->sc_msg_type = be16_to_cpu(hdr->msg_type); 1250 sc->sc_msg_type = be16_to_cpu(hdr->msg_type);
1189 handler_status = (nmh->nh_func)(hdr, sizeof(struct o2net_msg) + 1251 handler_status = (nmh->nh_func)(hdr, sizeof(struct o2net_msg) +
1190 be16_to_cpu(hdr->data_len), 1252 be16_to_cpu(hdr->data_len),
1191 nmh->nh_func_data, &ret_data); 1253 nmh->nh_func_data, &ret_data);
1192 do_gettimeofday(&sc->sc_tv_func_stop); 1254 o2net_set_func_stop_time(sc);
1255
1256 o2net_update_recv_stats(sc);
1193 1257
1194out_respond: 1258out_respond:
1195 /* this destroys the hdr, so don't use it after this */ 1259 /* this destroys the hdr, so don't use it after this */
@@ -1300,7 +1364,7 @@ static int o2net_advance_rx(struct o2net_sock_container *sc)
1300 size_t datalen; 1364 size_t datalen;
1301 1365
1302 sclog(sc, "receiving\n"); 1366 sclog(sc, "receiving\n");
1303 do_gettimeofday(&sc->sc_tv_advance_start); 1367 o2net_set_advance_start_time(sc);
1304 1368
1305 if (unlikely(sc->sc_handshake_ok == 0)) { 1369 if (unlikely(sc->sc_handshake_ok == 0)) {
1306 if(sc->sc_page_off < sizeof(struct o2net_handshake)) { 1370 if(sc->sc_page_off < sizeof(struct o2net_handshake)) {
@@ -1375,7 +1439,7 @@ static int o2net_advance_rx(struct o2net_sock_container *sc)
1375 1439
1376out: 1440out:
1377 sclog(sc, "ret = %d\n", ret); 1441 sclog(sc, "ret = %d\n", ret);
1378 do_gettimeofday(&sc->sc_tv_advance_stop); 1442 o2net_set_advance_stop_time(sc);
1379 return ret; 1443 return ret;
1380} 1444}
1381 1445
@@ -1475,27 +1539,28 @@ static void o2net_idle_timer(unsigned long data)
1475{ 1539{
1476 struct o2net_sock_container *sc = (struct o2net_sock_container *)data; 1540 struct o2net_sock_container *sc = (struct o2net_sock_container *)data;
1477 struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num); 1541 struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
1478 struct timeval now;
1479 1542
1480 do_gettimeofday(&now); 1543#ifdef CONFIG_DEBUG_FS
1544 ktime_t now = ktime_get();
1545#endif
1481 1546
1482 printk(KERN_NOTICE "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u " 1547 printk(KERN_NOTICE "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u "
1483 "seconds, shutting it down.\n", SC_NODEF_ARGS(sc), 1548 "seconds, shutting it down.\n", SC_NODEF_ARGS(sc),
1484 o2net_idle_timeout() / 1000, 1549 o2net_idle_timeout() / 1000,
1485 o2net_idle_timeout() % 1000); 1550 o2net_idle_timeout() % 1000);
1486 mlog(ML_NOTICE, "here are some times that might help debug the " 1551
1487 "situation: (tmr %ld.%ld now %ld.%ld dr %ld.%ld adv " 1552#ifdef CONFIG_DEBUG_FS
1488 "%ld.%ld:%ld.%ld func (%08x:%u) %ld.%ld:%ld.%ld)\n", 1553 mlog(ML_NOTICE, "Here are some times that might help debug the "
1489 sc->sc_tv_timer.tv_sec, (long) sc->sc_tv_timer.tv_usec, 1554 "situation: (Timer: %lld, Now %lld, DataReady %lld, Advance %lld-%lld, "
1490 now.tv_sec, (long) now.tv_usec, 1555 "Key 0x%08x, Func %u, FuncTime %lld-%lld)\n",
1491 sc->sc_tv_data_ready.tv_sec, (long) sc->sc_tv_data_ready.tv_usec, 1556 (long long)ktime_to_us(sc->sc_tv_timer), (long long)ktime_to_us(now),
1492 sc->sc_tv_advance_start.tv_sec, 1557 (long long)ktime_to_us(sc->sc_tv_data_ready),
1493 (long) sc->sc_tv_advance_start.tv_usec, 1558 (long long)ktime_to_us(sc->sc_tv_advance_start),
1494 sc->sc_tv_advance_stop.tv_sec, 1559 (long long)ktime_to_us(sc->sc_tv_advance_stop),
1495 (long) sc->sc_tv_advance_stop.tv_usec,
1496 sc->sc_msg_key, sc->sc_msg_type, 1560 sc->sc_msg_key, sc->sc_msg_type,
1497 sc->sc_tv_func_start.tv_sec, (long) sc->sc_tv_func_start.tv_usec, 1561 (long long)ktime_to_us(sc->sc_tv_func_start),
1498 sc->sc_tv_func_stop.tv_sec, (long) sc->sc_tv_func_stop.tv_usec); 1562 (long long)ktime_to_us(sc->sc_tv_func_stop));
1563#endif
1499 1564
1500 /* 1565 /*
1501 * Initialize the nn_timeout so that the next connection attempt 1566 * Initialize the nn_timeout so that the next connection attempt
@@ -1511,7 +1576,7 @@ static void o2net_sc_reset_idle_timer(struct o2net_sock_container *sc)
1511 o2net_sc_cancel_delayed_work(sc, &sc->sc_keepalive_work); 1576 o2net_sc_cancel_delayed_work(sc, &sc->sc_keepalive_work);
1512 o2net_sc_queue_delayed_work(sc, &sc->sc_keepalive_work, 1577 o2net_sc_queue_delayed_work(sc, &sc->sc_keepalive_work,
1513 msecs_to_jiffies(o2net_keepalive_delay())); 1578 msecs_to_jiffies(o2net_keepalive_delay()));
1514 do_gettimeofday(&sc->sc_tv_timer); 1579 o2net_set_sock_timer(sc);
1515 mod_timer(&sc->sc_idle_timeout, 1580 mod_timer(&sc->sc_idle_timeout,
1516 jiffies + msecs_to_jiffies(o2net_idle_timeout())); 1581 jiffies + msecs_to_jiffies(o2net_idle_timeout()));
1517} 1582}
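Under CONFIG_OCFS2_FS_STATS each completed send is split into three phases by differencing the nst timestamps: socket acquiry (sock -> send), the send itself (send -> status), and the status wait (status -> now), with each delta added to a per-socket running total. A userspace sketch of the arithmetic in o2net_update_send_stats(), using a signed 64-bit nanosecond count as the ktime_t stand-in:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

typedef int64_t ktime_ns;	/* stand-in for ktime_t */

static ktime_ns now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (ktime_ns)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

struct send_stats {
	ktime_ns acquiry_total, send_total, status_total;
	uint32_t send_count;
};

static void update_send_stats(struct send_stats *st, ktime_ns sock_t,
			      ktime_ns send_t, ktime_ns status_t)
{
	st->status_total  += now_ns() - status_t;	/* status wait */
	st->send_total    += status_t - send_t;		/* wire time */
	st->acquiry_total += send_t - sock_t;		/* socket acquiry */
	st->send_count++;
}

int main(void)
{
	struct send_stats st = { 0, 0, 0, 0 };
	ktime_ns t0 = now_ns();

	update_send_stats(&st, t0, t0 + 10, t0 + 30);
	printf("sends=%u acquiry=%lld send=%lld\n", st.send_count,
	       (long long)st.acquiry_total, (long long)st.send_total);
	return 0;
}

(Compile with -lrt on older glibc.)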
diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h
index 15fdbdf9eb4b..4cbcb65784a3 100644
--- a/fs/ocfs2/cluster/tcp_internal.h
+++ b/fs/ocfs2/cluster/tcp_internal.h
@@ -166,18 +166,27 @@ struct o2net_sock_container {
166 /* original handlers for the sockets */ 166 /* original handlers for the sockets */
167 void (*sc_state_change)(struct sock *sk); 167 void (*sc_state_change)(struct sock *sk);
168 void (*sc_data_ready)(struct sock *sk, int bytes); 168 void (*sc_data_ready)(struct sock *sk, int bytes);
169#ifdef CONFIG_DEBUG_FS 169
170 struct list_head sc_net_debug_item;
171#endif
172 struct timeval sc_tv_timer;
173 struct timeval sc_tv_data_ready;
174 struct timeval sc_tv_advance_start;
175 struct timeval sc_tv_advance_stop;
176 struct timeval sc_tv_func_start;
177 struct timeval sc_tv_func_stop;
178 u32 sc_msg_key; 170 u32 sc_msg_key;
179 u16 sc_msg_type; 171 u16 sc_msg_type;
180 172
173#ifdef CONFIG_DEBUG_FS
174 struct list_head sc_net_debug_item;
175 ktime_t sc_tv_timer;
176 ktime_t sc_tv_data_ready;
177 ktime_t sc_tv_advance_start;
178 ktime_t sc_tv_advance_stop;
179 ktime_t sc_tv_func_start;
180 ktime_t sc_tv_func_stop;
181#endif
182#ifdef CONFIG_OCFS2_FS_STATS
183 ktime_t sc_tv_acquiry_total;
184 ktime_t sc_tv_send_total;
185 ktime_t sc_tv_status_total;
186 u32 sc_send_count;
187 u32 sc_recv_count;
188 ktime_t sc_tv_process_total;
189#endif
181 struct mutex sc_send_lock; 190 struct mutex sc_send_lock;
182}; 191};
183 192
@@ -220,9 +229,9 @@ struct o2net_send_tracking {
220 u32 st_msg_type; 229 u32 st_msg_type;
221 u32 st_msg_key; 230 u32 st_msg_key;
222 u8 st_node; 231 u8 st_node;
223 struct timeval st_sock_time; 232 ktime_t st_sock_time;
224 struct timeval st_send_time; 233 ktime_t st_send_time;
225 struct timeval st_status_time; 234 ktime_t st_status_time;
226}; 235};
227#else 236#else
228struct o2net_send_tracking { 237struct o2net_send_tracking {
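The struct change above is what lets the debug output print relative times: a ktime_t from ktime_get() is a monotonic nanosecond stamp, so "usecs ago" is just ktime_to_us(ktime_sub(now, stamp)), as nst_seq_show() now does. A userspace sketch of the same computation, assuming CLOCK_MONOTONIC as the ktime_get() stand-in:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

typedef int64_t ktime_ns;

static ktime_ns ktime_get_sketch(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (ktime_ns)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

static long long ktime_to_us_sketch(ktime_ns t)
{
	return t / 1000;
}

int main(void)
{
	ktime_ns stamp = ktime_get_sketch();	/* e.g. st_sock_time */
	/* ... work happens here ... */
	ktime_ns now = ktime_get_sketch();

	printf("sock acquiry: %lld usecs ago\n",
	       ktime_to_us_sketch(now - stamp));
	return 0;
}

A monotonic clock also keeps the deltas meaningful across settimeofday/NTP steps, which could make the old do_gettimeofday() wall-clock stamps jump.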
diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c
index f44999156839..3a3ed4bb794b 100644
--- a/fs/ocfs2/dlm/dlmast.c
+++ b/fs/ocfs2/dlm/dlmast.c
@@ -90,19 +90,29 @@ static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
90 90
91void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock) 91void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
92{ 92{
93 mlog_entry_void(); 93 struct dlm_lock_resource *res;
94 94
95 BUG_ON(!dlm); 95 BUG_ON(!dlm);
96 BUG_ON(!lock); 96 BUG_ON(!lock);
97 97
98 res = lock->lockres;
99
98 assert_spin_locked(&dlm->ast_lock); 100 assert_spin_locked(&dlm->ast_lock);
101
99 if (!list_empty(&lock->ast_list)) { 102 if (!list_empty(&lock->ast_list)) {
100 mlog(ML_ERROR, "ast list not empty!! pending=%d, newlevel=%d\n", 103 mlog(ML_ERROR, "%s: res %.*s, lock %u:%llu, "
104 "AST list not empty, pending %d, newlevel %d\n",
105 dlm->name, res->lockname.len, res->lockname.name,
106 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
107 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
101 lock->ast_pending, lock->ml.type); 108 lock->ast_pending, lock->ml.type);
102 BUG(); 109 BUG();
103 } 110 }
104 if (lock->ast_pending) 111 if (lock->ast_pending)
105 mlog(0, "lock has an ast getting flushed right now\n"); 112 mlog(0, "%s: res %.*s, lock %u:%llu, AST getting flushed\n",
113 dlm->name, res->lockname.len, res->lockname.name,
114 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
115 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));
106 116
107 /* putting lock on list, add a ref */ 117 /* putting lock on list, add a ref */
108 dlm_lock_get(lock); 118 dlm_lock_get(lock);
@@ -110,9 +120,10 @@ void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
110 120
111 /* check to see if this ast obsoletes the bast */ 121 /* check to see if this ast obsoletes the bast */
112 if (dlm_should_cancel_bast(dlm, lock)) { 122 if (dlm_should_cancel_bast(dlm, lock)) {
113 struct dlm_lock_resource *res = lock->lockres; 123 mlog(0, "%s: res %.*s, lock %u:%llu, Cancelling BAST\n",
114 mlog(0, "%s: cancelling bast for %.*s\n", 124 dlm->name, res->lockname.len, res->lockname.name,
115 dlm->name, res->lockname.len, res->lockname.name); 125 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
126 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));
116 lock->bast_pending = 0; 127 lock->bast_pending = 0;
117 list_del_init(&lock->bast_list); 128 list_del_init(&lock->bast_list);
118 lock->ml.highest_blocked = LKM_IVMODE; 129 lock->ml.highest_blocked = LKM_IVMODE;
@@ -134,8 +145,6 @@ void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
134 145
135void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock) 146void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
136{ 147{
137 mlog_entry_void();
138
139 BUG_ON(!dlm); 148 BUG_ON(!dlm);
140 BUG_ON(!lock); 149 BUG_ON(!lock);
141 150
@@ -147,15 +156,21 @@ void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
147 156
148void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock) 157void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
149{ 158{
150 mlog_entry_void(); 159 struct dlm_lock_resource *res;
151 160
152 BUG_ON(!dlm); 161 BUG_ON(!dlm);
153 BUG_ON(!lock); 162 BUG_ON(!lock);
163
154 assert_spin_locked(&dlm->ast_lock); 164 assert_spin_locked(&dlm->ast_lock);
155 165
166 res = lock->lockres;
167
156 BUG_ON(!list_empty(&lock->bast_list)); 168 BUG_ON(!list_empty(&lock->bast_list));
157 if (lock->bast_pending) 169 if (lock->bast_pending)
158 mlog(0, "lock has a bast getting flushed right now\n"); 170 mlog(0, "%s: res %.*s, lock %u:%llu, BAST getting flushed\n",
171 dlm->name, res->lockname.len, res->lockname.name,
172 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
173 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));
159 174
160 /* putting lock on list, add a ref */ 175 /* putting lock on list, add a ref */
161 dlm_lock_get(lock); 176 dlm_lock_get(lock);
@@ -167,8 +182,6 @@ void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
167 182
168void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock) 183void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
169{ 184{
170 mlog_entry_void();
171
172 BUG_ON(!dlm); 185 BUG_ON(!dlm);
173 BUG_ON(!lock); 186 BUG_ON(!lock);
174 187
@@ -213,7 +226,10 @@ void dlm_do_local_ast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
213 dlm_astlockfunc_t *fn; 226 dlm_astlockfunc_t *fn;
214 struct dlm_lockstatus *lksb; 227 struct dlm_lockstatus *lksb;
215 228
216 mlog_entry_void(); 229 mlog(0, "%s: res %.*s, lock %u:%llu, Local AST\n", dlm->name,
230 res->lockname.len, res->lockname.name,
231 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
232 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));
217 233
218 lksb = lock->lksb; 234 lksb = lock->lksb;
219 fn = lock->ast; 235 fn = lock->ast;
@@ -231,7 +247,10 @@ int dlm_do_remote_ast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
231 struct dlm_lockstatus *lksb; 247 struct dlm_lockstatus *lksb;
232 int lksbflags; 248 int lksbflags;
233 249
234 mlog_entry_void(); 250 mlog(0, "%s: res %.*s, lock %u:%llu, Remote AST\n", dlm->name,
251 res->lockname.len, res->lockname.name,
252 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
253 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));
235 254
236 lksb = lock->lksb; 255 lksb = lock->lksb;
237 BUG_ON(lock->ml.node == dlm->node_num); 256 BUG_ON(lock->ml.node == dlm->node_num);
@@ -250,9 +269,14 @@ void dlm_do_local_bast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
250{ 269{
251 dlm_bastlockfunc_t *fn = lock->bast; 270 dlm_bastlockfunc_t *fn = lock->bast;
252 271
253 mlog_entry_void();
254 BUG_ON(lock->ml.node != dlm->node_num); 272 BUG_ON(lock->ml.node != dlm->node_num);
255 273
274 mlog(0, "%s: res %.*s, lock %u:%llu, Local BAST, blocked %d\n",
275 dlm->name, res->lockname.len, res->lockname.name,
276 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
277 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
278 blocked_type);
279
256 (*fn)(lock->astdata, blocked_type); 280 (*fn)(lock->astdata, blocked_type);
257} 281}
258 282
@@ -332,7 +356,8 @@ int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data,
332 /* cannot get a proxy ast message if this node owns it */ 356 /* cannot get a proxy ast message if this node owns it */
333 BUG_ON(res->owner == dlm->node_num); 357 BUG_ON(res->owner == dlm->node_num);
334 358
335 mlog(0, "lockres %.*s\n", res->lockname.len, res->lockname.name); 359 mlog(0, "%s: res %.*s\n", dlm->name, res->lockname.len,
360 res->lockname.name);
336 361
337 spin_lock(&res->spinlock); 362 spin_lock(&res->spinlock);
338 if (res->state & DLM_LOCK_RES_RECOVERING) { 363 if (res->state & DLM_LOCK_RES_RECOVERING) {
@@ -382,8 +407,12 @@ do_ast:
382 if (past->type == DLM_AST) { 407 if (past->type == DLM_AST) {
383 /* do not alter lock refcount. switching lists. */ 408 /* do not alter lock refcount. switching lists. */
384 list_move_tail(&lock->list, &res->granted); 409 list_move_tail(&lock->list, &res->granted);
385 mlog(0, "ast: Adding to granted list... type=%d, " 410 mlog(0, "%s: res %.*s, lock %u:%llu, Granted type %d => %d\n",
386 "convert_type=%d\n", lock->ml.type, lock->ml.convert_type); 411 dlm->name, res->lockname.len, res->lockname.name,
412 dlm_get_lock_cookie_node(be64_to_cpu(cookie)),
413 dlm_get_lock_cookie_seq(be64_to_cpu(cookie)),
414 lock->ml.type, lock->ml.convert_type);
415
387 if (lock->ml.convert_type != LKM_IVMODE) { 416 if (lock->ml.convert_type != LKM_IVMODE) {
388 lock->ml.type = lock->ml.convert_type; 417 lock->ml.type = lock->ml.convert_type;
389 lock->ml.convert_type = LKM_IVMODE; 418 lock->ml.convert_type = LKM_IVMODE;
@@ -426,9 +455,9 @@ int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
426 size_t veclen = 1; 455 size_t veclen = 1;
427 int status; 456 int status;
428 457
429 mlog_entry("res %.*s, to=%u, type=%d, blocked_type=%d\n", 458 mlog(0, "%s: res %.*s, to %u, type %d, blocked_type %d\n", dlm->name,
430 res->lockname.len, res->lockname.name, lock->ml.node, 459 res->lockname.len, res->lockname.name, lock->ml.node, msg_type,
431 msg_type, blocked_type); 460 blocked_type);
432 461
433 memset(&past, 0, sizeof(struct dlm_proxy_ast)); 462 memset(&past, 0, sizeof(struct dlm_proxy_ast));
434 past.node_idx = dlm->node_num; 463 past.node_idx = dlm->node_num;
@@ -441,7 +470,6 @@ int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
441 vec[0].iov_len = sizeof(struct dlm_proxy_ast); 470 vec[0].iov_len = sizeof(struct dlm_proxy_ast);
442 vec[0].iov_base = &past; 471 vec[0].iov_base = &past;
443 if (flags & DLM_LKSB_GET_LVB) { 472 if (flags & DLM_LKSB_GET_LVB) {
444 mlog(0, "returning requested LVB data\n");
445 be32_add_cpu(&past.flags, LKM_GET_LVB); 473 be32_add_cpu(&past.flags, LKM_GET_LVB);
446 vec[1].iov_len = DLM_LVB_LEN; 474 vec[1].iov_len = DLM_LVB_LEN;
447 vec[1].iov_base = lock->lksb->lvb; 475 vec[1].iov_base = lock->lksb->lvb;
@@ -451,8 +479,8 @@ int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
451 ret = o2net_send_message_vec(DLM_PROXY_AST_MSG, dlm->key, vec, veclen, 479 ret = o2net_send_message_vec(DLM_PROXY_AST_MSG, dlm->key, vec, veclen,
452 lock->ml.node, &status); 480 lock->ml.node, &status);
453 if (ret < 0) 481 if (ret < 0)
454 mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to " 482 mlog(ML_ERROR, "%s: res %.*s, error %d send AST to node %u\n",
455 "node %u\n", ret, DLM_PROXY_AST_MSG, dlm->key, 483 dlm->name, res->lockname.len, res->lockname.name, ret,
456 lock->ml.node); 484 lock->ml.node);
457 else { 485 else {
458 if (status == DLM_RECOVERING) { 486 if (status == DLM_RECOVERING) {
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index b36d0bf77a5a..4bdf7baee344 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -50,10 +50,10 @@
 #define dlm_lockid_hash(_n, _l) full_name_hash(_n, _l)
 
 enum dlm_mle_type {
-	DLM_MLE_BLOCK,
-	DLM_MLE_MASTER,
-	DLM_MLE_MIGRATION,
-	DLM_MLE_NUM_TYPES
+	DLM_MLE_BLOCK = 0,
+	DLM_MLE_MASTER = 1,
+	DLM_MLE_MIGRATION = 2,
+	DLM_MLE_NUM_TYPES = 3,
 };
 
 struct dlm_master_list_entry {
@@ -82,8 +82,8 @@ struct dlm_master_list_entry {
 
 enum dlm_ast_type {
 	DLM_AST = 0,
-	DLM_BAST,
-	DLM_ASTUNLOCK
+	DLM_BAST = 1,
+	DLM_ASTUNLOCK = 2,
 };
 
 
@@ -119,9 +119,9 @@ struct dlm_recovery_ctxt
 
 enum dlm_ctxt_state {
 	DLM_CTXT_NEW = 0,
-	DLM_CTXT_JOINED,
-	DLM_CTXT_IN_SHUTDOWN,
-	DLM_CTXT_LEAVING,
+	DLM_CTXT_JOINED = 1,
+	DLM_CTXT_IN_SHUTDOWN = 2,
+	DLM_CTXT_LEAVING = 3,
 };
 
 struct dlm_ctxt
@@ -388,8 +388,8 @@ struct dlm_lock
 
 enum dlm_lockres_list {
 	DLM_GRANTED_LIST = 0,
-	DLM_CONVERTING_LIST,
-	DLM_BLOCKED_LIST
+	DLM_CONVERTING_LIST = 1,
+	DLM_BLOCKED_LIST = 2,
 };
 
 static inline int dlm_lvb_is_empty(char *lvb)
@@ -427,27 +427,27 @@ struct dlm_node_iter
 
 
 enum {
 	DLM_MASTER_REQUEST_MSG = 500,
-	DLM_UNUSED_MSG1,		/* 501 */
-	DLM_ASSERT_MASTER_MSG,		/* 502 */
-	DLM_CREATE_LOCK_MSG,		/* 503 */
-	DLM_CONVERT_LOCK_MSG,		/* 504 */
-	DLM_PROXY_AST_MSG,		/* 505 */
-	DLM_UNLOCK_LOCK_MSG,		/* 506 */
-	DLM_DEREF_LOCKRES_MSG,		/* 507 */
-	DLM_MIGRATE_REQUEST_MSG,	/* 508 */
-	DLM_MIG_LOCKRES_MSG,		/* 509 */
-	DLM_QUERY_JOIN_MSG,		/* 510 */
-	DLM_ASSERT_JOINED_MSG,		/* 511 */
-	DLM_CANCEL_JOIN_MSG,		/* 512 */
-	DLM_EXIT_DOMAIN_MSG,		/* 513 */
-	DLM_MASTER_REQUERY_MSG,		/* 514 */
-	DLM_LOCK_REQUEST_MSG,		/* 515 */
-	DLM_RECO_DATA_DONE_MSG,		/* 516 */
-	DLM_BEGIN_RECO_MSG,		/* 517 */
-	DLM_FINALIZE_RECO_MSG,		/* 518 */
-	DLM_QUERY_REGION,		/* 519 */
-	DLM_QUERY_NODEINFO,		/* 520 */
+	DLM_UNUSED_MSG1 = 501,
+	DLM_ASSERT_MASTER_MSG = 502,
+	DLM_CREATE_LOCK_MSG = 503,
+	DLM_CONVERT_LOCK_MSG = 504,
+	DLM_PROXY_AST_MSG = 505,
+	DLM_UNLOCK_LOCK_MSG = 506,
+	DLM_DEREF_LOCKRES_MSG = 507,
+	DLM_MIGRATE_REQUEST_MSG = 508,
+	DLM_MIG_LOCKRES_MSG = 509,
+	DLM_QUERY_JOIN_MSG = 510,
+	DLM_ASSERT_JOINED_MSG = 511,
+	DLM_CANCEL_JOIN_MSG = 512,
+	DLM_EXIT_DOMAIN_MSG = 513,
+	DLM_MASTER_REQUERY_MSG = 514,
+	DLM_LOCK_REQUEST_MSG = 515,
+	DLM_RECO_DATA_DONE_MSG = 516,
+	DLM_BEGIN_RECO_MSG = 517,
+	DLM_FINALIZE_RECO_MSG = 518,
+	DLM_QUERY_REGION = 519,
+	DLM_QUERY_NODEINFO = 520,
 };
 
 struct dlm_reco_node_data
@@ -460,19 +460,19 @@ struct dlm_reco_node_data
 enum {
 	DLM_RECO_NODE_DATA_DEAD = -1,
 	DLM_RECO_NODE_DATA_INIT = 0,
-	DLM_RECO_NODE_DATA_REQUESTING,
-	DLM_RECO_NODE_DATA_REQUESTED,
-	DLM_RECO_NODE_DATA_RECEIVING,
-	DLM_RECO_NODE_DATA_DONE,
-	DLM_RECO_NODE_DATA_FINALIZE_SENT,
+	DLM_RECO_NODE_DATA_REQUESTING = 1,
+	DLM_RECO_NODE_DATA_REQUESTED = 2,
+	DLM_RECO_NODE_DATA_RECEIVING = 3,
+	DLM_RECO_NODE_DATA_DONE = 4,
+	DLM_RECO_NODE_DATA_FINALIZE_SENT = 5,
 };
 
 
 enum {
 	DLM_MASTER_RESP_NO = 0,
-	DLM_MASTER_RESP_YES,
-	DLM_MASTER_RESP_MAYBE,
-	DLM_MASTER_RESP_ERROR
+	DLM_MASTER_RESP_YES = 1,
+	DLM_MASTER_RESP_MAYBE = 2,
+	DLM_MASTER_RESP_ERROR = 3,
 };
 
 
@@ -649,9 +649,9 @@ struct dlm_proxy_ast
 #define DLM_MOD_KEY (0x666c6172)
 enum dlm_query_join_response_code {
 	JOIN_DISALLOW = 0,
-	JOIN_OK,
-	JOIN_OK_NO_MAP,
-	JOIN_PROTOCOL_MISMATCH,
+	JOIN_OK = 1,
+	JOIN_OK_NO_MAP = 2,
+	JOIN_PROTOCOL_MISMATCH = 3,
 };
 
 struct dlm_query_join_packet {
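
Note on the dlmcommon.h hunks: these constants name on-wire message types and shared states, so the patch pins each enumerator to an explicit value rather than relying on implicit numbering; inserting a new entry mid-enum can then never silently renumber its neighbors across a mixed-version cluster. A minimal, self-contained sketch of the failure mode the initializers prevent (the names here are hypothetical, not from the patch):

	#include <stdio.h>

	/* Hypothetical wire-protocol enum, written in the fixed-value
	 * style the patch adopts. The gap at 501 is deliberate: a
	 * retired message keeps its number reserved. */
	enum wire_msg {
		WIRE_MASTER_REQUEST = 500,
		WIRE_ASSERT_MASTER  = 502,
		WIRE_CREATE_LOCK    = 503,
	};

	int main(void)
	{
		/* Old and new builds agree on every numeric value, even
		 * after entries are added or retired. */
		printf("WIRE_ASSERT_MASTER = %d\n", WIRE_ASSERT_MASTER);
		return 0;
	}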
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
index 272ec8631a51..04a32be0aeb9 100644
--- a/fs/ocfs2/dlm/dlmdebug.c
+++ b/fs/ocfs2/dlm/dlmdebug.c
@@ -370,92 +370,46 @@ static void dlm_debug_get(struct dlm_debug_ctxt *dc)
 	kref_get(&dc->debug_refcnt);
 }
 
-static struct debug_buffer *debug_buffer_allocate(void)
+static int debug_release(struct inode *inode, struct file *file)
 {
-	struct debug_buffer *db = NULL;
-
-	db = kzalloc(sizeof(struct debug_buffer), GFP_KERNEL);
-	if (!db)
-		goto bail;
-
-	db->len = PAGE_SIZE;
-	db->buf = kmalloc(db->len, GFP_KERNEL);
-	if (!db->buf)
-		goto bail;
-
-	return db;
-bail:
-	kfree(db);
-	return NULL;
-}
-
-static ssize_t debug_buffer_read(struct file *file, char __user *buf,
-				 size_t nbytes, loff_t *ppos)
-{
-	struct debug_buffer *db = file->private_data;
-
-	return simple_read_from_buffer(buf, nbytes, ppos, db->buf, db->len);
-}
-
-static loff_t debug_buffer_llseek(struct file *file, loff_t off, int whence)
-{
-	struct debug_buffer *db = file->private_data;
-	loff_t new = -1;
-
-	switch (whence) {
-	case 0:
-		new = off;
-		break;
-	case 1:
-		new = file->f_pos + off;
-		break;
-	}
-
-	if (new < 0 || new > db->len)
-		return -EINVAL;
-
-	return (file->f_pos = new);
+	free_page((unsigned long)file->private_data);
+	return 0;
 }
 
-static int debug_buffer_release(struct inode *inode, struct file *file)
+static ssize_t debug_read(struct file *file, char __user *buf,
+			  size_t nbytes, loff_t *ppos)
 {
-	struct debug_buffer *db = file->private_data;
-
-	if (db)
-		kfree(db->buf);
-	kfree(db);
-
-	return 0;
+	return simple_read_from_buffer(buf, nbytes, ppos, file->private_data,
+				       i_size_read(file->f_mapping->host));
 }
 /* end - util funcs */
 
 /* begin - purge list funcs */
-static int debug_purgelist_print(struct dlm_ctxt *dlm, struct debug_buffer *db)
+static int debug_purgelist_print(struct dlm_ctxt *dlm, char *buf, int len)
 {
 	struct dlm_lock_resource *res;
 	int out = 0;
 	unsigned long total = 0;
 
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"Dumping Purgelist for Domain: %s\n", dlm->name);
 
 	spin_lock(&dlm->spinlock);
 	list_for_each_entry(res, &dlm->purge_list, purge) {
 		++total;
-		if (db->len - out < 100)
+		if (len - out < 100)
 			continue;
 		spin_lock(&res->spinlock);
 		out += stringify_lockname(res->lockname.name,
 					  res->lockname.len,
-					  db->buf + out, db->len - out);
-		out += snprintf(db->buf + out, db->len - out, "\t%ld\n",
+					  buf + out, len - out);
+		out += snprintf(buf + out, len - out, "\t%ld\n",
 				(jiffies - res->last_used)/HZ);
 		spin_unlock(&res->spinlock);
 	}
 	spin_unlock(&dlm->spinlock);
 
-	out += snprintf(db->buf + out, db->len - out,
-			"Total on list: %ld\n", total);
+	out += snprintf(buf + out, len - out, "Total on list: %ld\n", total);
 
 	return out;
 }
@@ -463,15 +417,15 @@ static int debug_purgelist_print(struct dlm_ctxt *dlm, struct debug_buffer *db)
 static int debug_purgelist_open(struct inode *inode, struct file *file)
 {
 	struct dlm_ctxt *dlm = inode->i_private;
-	struct debug_buffer *db;
+	char *buf = NULL;
 
-	db = debug_buffer_allocate();
-	if (!db)
+	buf = (char *) get_zeroed_page(GFP_NOFS);
+	if (!buf)
 		goto bail;
 
-	db->len = debug_purgelist_print(dlm, db);
+	i_size_write(inode, debug_purgelist_print(dlm, buf, PAGE_SIZE - 1));
 
-	file->private_data = db;
+	file->private_data = buf;
 
 	return 0;
 bail:
@@ -480,14 +434,14 @@ bail:
 
 static const struct file_operations debug_purgelist_fops = {
 	.open = debug_purgelist_open,
-	.release = debug_buffer_release,
-	.read = debug_buffer_read,
-	.llseek = debug_buffer_llseek,
+	.release = debug_release,
+	.read = debug_read,
+	.llseek = generic_file_llseek,
 };
 /* end - purge list funcs */
 
 /* begin - debug mle funcs */
-static int debug_mle_print(struct dlm_ctxt *dlm, struct debug_buffer *db)
+static int debug_mle_print(struct dlm_ctxt *dlm, char *buf, int len)
 {
 	struct dlm_master_list_entry *mle;
 	struct hlist_head *bucket;
@@ -495,7 +449,7 @@ static int debug_mle_print(struct dlm_ctxt *dlm, struct debug_buffer *db)
 	int i, out = 0;
 	unsigned long total = 0, longest = 0, bucket_count = 0;
 
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"Dumping MLEs for Domain: %s\n", dlm->name);
 
 	spin_lock(&dlm->master_lock);
@@ -506,16 +460,16 @@ static int debug_mle_print(struct dlm_ctxt *dlm, struct debug_buffer *db)
 				  master_hash_node);
 			++total;
 			++bucket_count;
-			if (db->len - out < 200)
+			if (len - out < 200)
 				continue;
-			out += dump_mle(mle, db->buf + out, db->len - out);
+			out += dump_mle(mle, buf + out, len - out);
 		}
 		longest = max(longest, bucket_count);
 		bucket_count = 0;
 	}
 	spin_unlock(&dlm->master_lock);
 
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"Total: %ld, Longest: %ld\n", total, longest);
 	return out;
 }
@@ -523,15 +477,15 @@ static int debug_mle_print(struct dlm_ctxt *dlm, struct debug_buffer *db)
 static int debug_mle_open(struct inode *inode, struct file *file)
 {
 	struct dlm_ctxt *dlm = inode->i_private;
-	struct debug_buffer *db;
+	char *buf = NULL;
 
-	db = debug_buffer_allocate();
-	if (!db)
+	buf = (char *) get_zeroed_page(GFP_NOFS);
+	if (!buf)
 		goto bail;
 
-	db->len = debug_mle_print(dlm, db);
+	i_size_write(inode, debug_mle_print(dlm, buf, PAGE_SIZE - 1));
 
-	file->private_data = db;
+	file->private_data = buf;
 
 	return 0;
 bail:
@@ -540,9 +494,9 @@ bail:
 
 static const struct file_operations debug_mle_fops = {
 	.open = debug_mle_open,
-	.release = debug_buffer_release,
-	.read = debug_buffer_read,
-	.llseek = debug_buffer_llseek,
+	.release = debug_release,
+	.read = debug_read,
+	.llseek = generic_file_llseek,
 };
 
 /* end - debug mle funcs */
@@ -757,7 +711,7 @@ static const struct file_operations debug_lockres_fops = {
 /* end - debug lockres funcs */
 
 /* begin - debug state funcs */
-static int debug_state_print(struct dlm_ctxt *dlm, struct debug_buffer *db)
+static int debug_state_print(struct dlm_ctxt *dlm, char *buf, int len)
 {
 	int out = 0;
 	struct dlm_reco_node_data *node;
@@ -781,35 +735,35 @@ static int debug_state_print(struct dlm_ctxt *dlm, struct debug_buffer *db)
 	}
 
 	/* Domain: xxxxxxxxxx Key: 0xdfbac769 */
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"Domain: %s Key: 0x%08x Protocol: %d.%d\n",
 			dlm->name, dlm->key, dlm->dlm_locking_proto.pv_major,
 			dlm->dlm_locking_proto.pv_minor);
 
 	/* Thread Pid: xxx Node: xxx State: xxxxx */
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"Thread Pid: %d Node: %d State: %s\n",
-			dlm->dlm_thread_task->pid, dlm->node_num, state);
+			task_pid_nr(dlm->dlm_thread_task), dlm->node_num, state);
 
 	/* Number of Joins: xxx Joining Node: xxx */
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"Number of Joins: %d Joining Node: %d\n",
 			dlm->num_joins, dlm->joining_node);
 
 	/* Domain Map: xx xx xx */
-	out += snprintf(db->buf + out, db->len - out, "Domain Map: ");
+	out += snprintf(buf + out, len - out, "Domain Map: ");
 	out += stringify_nodemap(dlm->domain_map, O2NM_MAX_NODES,
-				 db->buf + out, db->len - out);
-	out += snprintf(db->buf + out, db->len - out, "\n");
+				 buf + out, len - out);
+	out += snprintf(buf + out, len - out, "\n");
 
 	/* Live Map: xx xx xx */
-	out += snprintf(db->buf + out, db->len - out, "Live Map: ");
+	out += snprintf(buf + out, len - out, "Live Map: ");
 	out += stringify_nodemap(dlm->live_nodes_map, O2NM_MAX_NODES,
-				 db->buf + out, db->len - out);
-	out += snprintf(db->buf + out, db->len - out, "\n");
+				 buf + out, len - out);
+	out += snprintf(buf + out, len - out, "\n");
 
 	/* Lock Resources: xxx (xxx) */
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"Lock Resources: %d (%d)\n",
 			atomic_read(&dlm->res_cur_count),
 			atomic_read(&dlm->res_tot_count));
@@ -821,29 +775,29 @@ static int debug_state_print(struct dlm_ctxt *dlm, struct debug_buffer *db)
 		cur_mles += atomic_read(&dlm->mle_cur_count[i]);
 
 	/* MLEs: xxx (xxx) */
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"MLEs: %d (%d)\n", cur_mles, tot_mles);
 
 	/*  Blocking: xxx (xxx) */
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"  Blocking: %d (%d)\n",
 			atomic_read(&dlm->mle_cur_count[DLM_MLE_BLOCK]),
 			atomic_read(&dlm->mle_tot_count[DLM_MLE_BLOCK]));
 
 	/*  Mastery: xxx (xxx) */
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"  Mastery: %d (%d)\n",
 			atomic_read(&dlm->mle_cur_count[DLM_MLE_MASTER]),
 			atomic_read(&dlm->mle_tot_count[DLM_MLE_MASTER]));
 
 	/*  Migration: xxx (xxx) */
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"  Migration: %d (%d)\n",
 			atomic_read(&dlm->mle_cur_count[DLM_MLE_MIGRATION]),
 			atomic_read(&dlm->mle_tot_count[DLM_MLE_MIGRATION]));
 
 	/* Lists: Dirty=Empty Purge=InUse PendingASTs=Empty ... */
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"Lists: Dirty=%s Purge=%s PendingASTs=%s "
 			"PendingBASTs=%s\n",
 			(list_empty(&dlm->dirty_list) ? "Empty" : "InUse"),
@@ -852,12 +806,12 @@ static int debug_state_print(struct dlm_ctxt *dlm, struct debug_buffer *db)
 			(list_empty(&dlm->pending_basts) ? "Empty" : "InUse"));
 
 	/* Purge Count: xxx Refs: xxx */
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"Purge Count: %d Refs: %d\n", dlm->purge_count,
 			atomic_read(&dlm->dlm_refs.refcount));
 
 	/* Dead Node: xxx */
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"Dead Node: %d\n", dlm->reco.dead_node);
 
 	/* What about DLM_RECO_STATE_FINALIZE? */
@@ -867,19 +821,19 @@ static int debug_state_print(struct dlm_ctxt *dlm, struct debug_buffer *db)
 		state = "INACTIVE";
 
 	/* Recovery Pid: xxxx Master: xxx State: xxxx */
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"Recovery Pid: %d Master: %d State: %s\n",
-			dlm->dlm_reco_thread_task->pid,
+			task_pid_nr(dlm->dlm_reco_thread_task),
 			dlm->reco.new_master, state);
 
 	/* Recovery Map: xx xx */
-	out += snprintf(db->buf + out, db->len - out, "Recovery Map: ");
+	out += snprintf(buf + out, len - out, "Recovery Map: ");
 	out += stringify_nodemap(dlm->recovery_map, O2NM_MAX_NODES,
-				 db->buf + out, db->len - out);
-	out += snprintf(db->buf + out, db->len - out, "\n");
+				 buf + out, len - out);
+	out += snprintf(buf + out, len - out, "\n");
 
 	/* Recovery Node State: */
-	out += snprintf(db->buf + out, db->len - out, "Recovery Node State:\n");
+	out += snprintf(buf + out, len - out, "Recovery Node State:\n");
 	list_for_each_entry(node, &dlm->reco.node_data, list) {
 		switch (node->state) {
 		case DLM_RECO_NODE_DATA_INIT:
@@ -907,7 +861,7 @@ static int debug_state_print(struct dlm_ctxt *dlm, struct debug_buffer *db)
 			state = "BAD";
 			break;
 		}
-		out += snprintf(db->buf + out, db->len - out, "\t%u - %s\n",
+		out += snprintf(buf + out, len - out, "\t%u - %s\n",
 				node->node_num, state);
 	}
 
@@ -919,15 +873,15 @@ static int debug_state_print(struct dlm_ctxt *dlm, struct debug_buffer *db)
 static int debug_state_open(struct inode *inode, struct file *file)
 {
 	struct dlm_ctxt *dlm = inode->i_private;
-	struct debug_buffer *db = NULL;
+	char *buf = NULL;
 
-	db = debug_buffer_allocate();
-	if (!db)
+	buf = (char *) get_zeroed_page(GFP_NOFS);
+	if (!buf)
 		goto bail;
 
-	db->len = debug_state_print(dlm, db);
+	i_size_write(inode, debug_state_print(dlm, buf, PAGE_SIZE - 1));
 
-	file->private_data = db;
+	file->private_data = buf;
 
 	return 0;
 bail:
@@ -936,9 +890,9 @@ bail:
 
 static const struct file_operations debug_state_fops = {
 	.open = debug_state_open,
-	.release = debug_buffer_release,
-	.read = debug_buffer_read,
-	.llseek = debug_buffer_llseek,
+	.release = debug_release,
+	.read = debug_read,
+	.llseek = generic_file_llseek,
 };
 /* end - debug state funcs */
 
@@ -1002,14 +956,10 @@ void dlm_debug_shutdown(struct dlm_ctxt *dlm)
 	struct dlm_debug_ctxt *dc = dlm->dlm_debug_ctxt;
 
 	if (dc) {
-		if (dc->debug_purgelist_dentry)
-			debugfs_remove(dc->debug_purgelist_dentry);
-		if (dc->debug_mle_dentry)
-			debugfs_remove(dc->debug_mle_dentry);
-		if (dc->debug_lockres_dentry)
-			debugfs_remove(dc->debug_lockres_dentry);
-		if (dc->debug_state_dentry)
-			debugfs_remove(dc->debug_state_dentry);
+		debugfs_remove(dc->debug_purgelist_dentry);
+		debugfs_remove(dc->debug_mle_dentry);
+		debugfs_remove(dc->debug_lockres_dentry);
+		debugfs_remove(dc->debug_state_dentry);
 		dlm_debug_put(dc);
 	}
 }
@@ -1040,8 +990,7 @@ bail:
 
 void dlm_destroy_debugfs_subroot(struct dlm_ctxt *dlm)
 {
-	if (dlm->dlm_debugfs_subroot)
-		debugfs_remove(dlm->dlm_debugfs_subroot);
+	debugfs_remove(dlm->dlm_debugfs_subroot);
 }
 
 /* debugfs root */
@@ -1057,7 +1006,6 @@ int dlm_create_debugfs_root(void)
 
 void dlm_destroy_debugfs_root(void)
 {
-	if (dlm_debugfs_root)
-		debugfs_remove(dlm_debugfs_root);
+	debugfs_remove(dlm_debugfs_root);
 }
 #endif	/* CONFIG_DEBUG_FS */
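
Note on the dlmdebug.c rework: all four debugfs files converge on one idiom, namely render the whole report into a single zeroed page at open(), publish the rendered length as the inode size via i_size_write(), and then let stock VFS helpers do the read, llseek, and release work. A condensed sketch of that idiom, using only the calls visible in the diff; my_open, my_print, and my_fops are illustrative names, and this is kernel-side code, not a standalone program:

	/* Hypothetical renderer: fills buf, returns the byte count. */
	static int my_print(void *priv, char *buf, int len);

	static int my_open(struct inode *inode, struct file *file)
	{
		char *buf = (char *) get_zeroed_page(GFP_NOFS);

		if (!buf)
			return -ENOMEM;
		/* Record the rendered length so generic_file_llseek()
		 * and debug_read() can bound their offsets. */
		i_size_write(inode, my_print(inode->i_private, buf,
					     PAGE_SIZE - 1));
		file->private_data = buf;
		return 0;
	}

	static int my_release(struct inode *inode, struct file *file)
	{
		free_page((unsigned long)file->private_data);
		return 0;
	}

	static ssize_t my_read(struct file *file, char __user *ubuf,
			       size_t nbytes, loff_t *ppos)
	{
		return simple_read_from_buffer(ubuf, nbytes, ppos,
					file->private_data,
					i_size_read(file->f_mapping->host));
	}

	static const struct file_operations my_fops = {
		.open = my_open,
		.release = my_release,
		.read = my_read,
		.llseek = generic_file_llseek,
	};

The design point is that the hand-rolled debug_buffer struct and its bespoke llseek carried no information the inode itself could not carry, so roughly 46 lines of helpers collapse into two trivial functions.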
diff --git a/fs/ocfs2/dlm/dlmdebug.h b/fs/ocfs2/dlm/dlmdebug.h
index 8c686d22f9c7..1f27c4812d1a 100644
--- a/fs/ocfs2/dlm/dlmdebug.h
+++ b/fs/ocfs2/dlm/dlmdebug.h
@@ -37,11 +37,6 @@ struct dlm_debug_ctxt {
 	struct dentry *debug_purgelist_dentry;
 };
 
-struct debug_buffer {
-	int len;
-	char *buf;
-};
-
 struct debug_lockres {
 	int dl_len;
 	char *dl_buf;
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index cc2aaa96cfe5..7e38a072d720 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -460,8 +460,6 @@ redo_bucket:
 		}
 		cond_resched_lock(&dlm->spinlock);
 		num += n;
-		mlog(0, "%s: touched %d lockreses in bucket %d "
-		     "(tot=%d)\n", dlm->name, n, i, num);
 	}
 	spin_unlock(&dlm->spinlock);
 	wake_up(&dlm->dlm_thread_wq);
@@ -1661,8 +1659,8 @@ bail:
 
 static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm)
 {
-	o2hb_unregister_callback(NULL, &dlm->dlm_hb_up);
-	o2hb_unregister_callback(NULL, &dlm->dlm_hb_down);
+	o2hb_unregister_callback(dlm->name, &dlm->dlm_hb_up);
+	o2hb_unregister_callback(dlm->name, &dlm->dlm_hb_down);
 	o2net_unregister_handler_list(&dlm->dlm_domain_handlers);
 }
 
@@ -1674,13 +1672,13 @@ static int dlm_register_domain_handlers(struct dlm_ctxt *dlm)
 
 	o2hb_setup_callback(&dlm->dlm_hb_down, O2HB_NODE_DOWN_CB,
 			    dlm_hb_node_down_cb, dlm, DLM_HB_NODE_DOWN_PRI);
-	status = o2hb_register_callback(NULL, &dlm->dlm_hb_down);
+	status = o2hb_register_callback(dlm->name, &dlm->dlm_hb_down);
 	if (status)
 		goto bail;
 
 	o2hb_setup_callback(&dlm->dlm_hb_up, O2HB_NODE_UP_CB,
 			    dlm_hb_node_up_cb, dlm, DLM_HB_NODE_UP_PRI);
-	status = o2hb_register_callback(NULL, &dlm->dlm_hb_up);
+	status = o2hb_register_callback(dlm->name, &dlm->dlm_hb_up);
 	if (status)
 		goto bail;
 
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c
index 69cf369961c4..7009292aac5a 100644
--- a/fs/ocfs2/dlm/dlmlock.c
+++ b/fs/ocfs2/dlm/dlmlock.c
@@ -106,6 +106,9 @@ static int dlm_can_grant_new_lock(struct dlm_lock_resource *res,
 
 		if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
 			return 0;
+		if (!dlm_lock_compatible(tmplock->ml.convert_type,
+					 lock->ml.type))
+			return 0;
 	}
 
 	return 1;
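
Note on the dlmlock.c fix: the added check closes a hole where a new lock could be granted even though an existing lock was already queued to convert to a conflicting mode; compatibility with the granted mode alone is not enough. A hedged sketch of the resulting test, with example_can_grant as an illustrative name (the real code walks every lock on the queues with dlm_lock_compatible()):

	static int example_can_grant(int granted_type, int convert_type,
				     int want)
	{
		/* Must not conflict with the currently granted mode. */
		if (!dlm_lock_compatible(granted_type, want))
			return 0;
		/* New with this patch: must not conflict with a pending
		 * convert target either. */
		if (!dlm_lock_compatible(convert_type, want))
			return 0;
		return 1;
	}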
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
index 2211acf33d9b..1d6d1d22c471 100644
--- a/fs/ocfs2/dlm/dlmthread.c
+++ b/fs/ocfs2/dlm/dlmthread.c
@@ -122,15 +122,13 @@ int __dlm_lockres_unused(struct dlm_lock_resource *res)
 void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
 			      struct dlm_lock_resource *res)
 {
-	mlog_entry("%.*s\n", res->lockname.len, res->lockname.name);
-
 	assert_spin_locked(&dlm->spinlock);
 	assert_spin_locked(&res->spinlock);
 
 	if (__dlm_lockres_unused(res)){
 		if (list_empty(&res->purge)) {
-			mlog(0, "putting lockres %.*s:%p onto purge list\n",
-			     res->lockname.len, res->lockname.name, res);
+			mlog(0, "%s: Adding res %.*s to purge list\n",
+			     dlm->name, res->lockname.len, res->lockname.name);
 
 			res->last_used = jiffies;
 			dlm_lockres_get(res);
@@ -138,8 +136,8 @@ void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
 			dlm->purge_count++;
 		}
 	} else if (!list_empty(&res->purge)) {
-		mlog(0, "removing lockres %.*s:%p from purge list, owner=%u\n",
-		     res->lockname.len, res->lockname.name, res, res->owner);
+		mlog(0, "%s: Removing res %.*s from purge list\n",
+		     dlm->name, res->lockname.len, res->lockname.name);
 
 		list_del_init(&res->purge);
 		dlm_lockres_put(res);
@@ -150,7 +148,6 @@ void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
 void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
 			    struct dlm_lock_resource *res)
 {
-	mlog_entry("%.*s\n", res->lockname.len, res->lockname.name);
 	spin_lock(&dlm->spinlock);
 	spin_lock(&res->spinlock);
 
@@ -171,9 +168,8 @@ static void dlm_purge_lockres(struct dlm_ctxt *dlm,
 
 	master = (res->owner == dlm->node_num);
 
-
-	mlog(0, "purging lockres %.*s, master = %d\n", res->lockname.len,
-	     res->lockname.name, master);
+	mlog(0, "%s: Purging res %.*s, master %d\n", dlm->name,
+	     res->lockname.len, res->lockname.name, master);
 
 	if (!master) {
 		res->state |= DLM_LOCK_RES_DROPPING_REF;
@@ -189,27 +185,25 @@ static void dlm_purge_lockres(struct dlm_ctxt *dlm,
 		/* clear our bit from the master's refmap, ignore errors */
 		ret = dlm_drop_lockres_ref(dlm, res);
 		if (ret < 0) {
-			mlog_errno(ret);
+			mlog(ML_ERROR, "%s: deref %.*s failed %d\n", dlm->name,
+			     res->lockname.len, res->lockname.name, ret);
 			if (!dlm_is_host_down(ret))
 				BUG();
 		}
-		mlog(0, "%s:%.*s: dlm_deref_lockres returned %d\n",
-		     dlm->name, res->lockname.len, res->lockname.name, ret);
 		spin_lock(&dlm->spinlock);
 		spin_lock(&res->spinlock);
 	}
 
 	if (!list_empty(&res->purge)) {
-		mlog(0, "removing lockres %.*s:%p from purgelist, "
-		     "master = %d\n", res->lockname.len, res->lockname.name,
-		     res, master);
+		mlog(0, "%s: Removing res %.*s from purgelist, master %d\n",
+		     dlm->name, res->lockname.len, res->lockname.name, master);
 		list_del_init(&res->purge);
 		dlm_lockres_put(res);
 		dlm->purge_count--;
 	}
 
 	if (!__dlm_lockres_unused(res)) {
-		mlog(ML_ERROR, "found lockres %s:%.*s: in use after deref\n",
+		mlog(ML_ERROR, "%s: res %.*s in use after deref\n",
 		     dlm->name, res->lockname.len, res->lockname.name);
 		__dlm_print_one_lock_resource(res);
 		BUG();
@@ -266,10 +260,10 @@ static void dlm_run_purge_list(struct dlm_ctxt *dlm,
 		unused = __dlm_lockres_unused(lockres);
 		if (!unused ||
 		    (lockres->state & DLM_LOCK_RES_MIGRATING)) {
-			mlog(0, "lockres %s:%.*s: is in use or "
-			     "being remastered, used %d, state %d\n",
-			     dlm->name, lockres->lockname.len,
-			     lockres->lockname.name, !unused, lockres->state);
+			mlog(0, "%s: res %.*s is in use or being remastered, "
+			     "used %d, state %d\n", dlm->name,
+			     lockres->lockname.len, lockres->lockname.name,
+			     !unused, lockres->state);
 			list_move_tail(&dlm->purge_list, &lockres->purge);
 			spin_unlock(&lockres->spinlock);
 			continue;
@@ -296,15 +290,12 @@ static void dlm_shuffle_lists(struct dlm_ctxt *dlm,
 	struct list_head *head;
 	int can_grant = 1;
 
-	//mlog(0, "res->lockname.len=%d\n", res->lockname.len);
-	//mlog(0, "res->lockname.name=%p\n", res->lockname.name);
-	//mlog(0, "shuffle res %.*s\n", res->lockname.len,
-	//	  res->lockname.name);
-
-	/* because this function is called with the lockres
+	/*
+	 * Because this function is called with the lockres
 	 * spinlock, and because we know that it is not migrating/
 	 * recovering/in-progress, it is fine to reserve asts and
-	 * basts right before queueing them all throughout */
+	 * basts right before queueing them all throughout
+	 */
 	assert_spin_locked(&dlm->ast_lock);
 	assert_spin_locked(&res->spinlock);
 	BUG_ON((res->state & (DLM_LOCK_RES_MIGRATING|
@@ -314,13 +305,13 @@ static void dlm_shuffle_lists(struct dlm_ctxt *dlm,
 converting:
 	if (list_empty(&res->converting))
 		goto blocked;
-	mlog(0, "res %.*s has locks on a convert queue\n", res->lockname.len,
-	     res->lockname.name);
+	mlog(0, "%s: res %.*s has locks on the convert queue\n", dlm->name,
+	     res->lockname.len, res->lockname.name);
 
 	target = list_entry(res->converting.next, struct dlm_lock, list);
 	if (target->ml.convert_type == LKM_IVMODE) {
-		mlog(ML_ERROR, "%.*s: converting a lock with no "
-		     "convert_type!\n", res->lockname.len, res->lockname.name);
+		mlog(ML_ERROR, "%s: res %.*s converting lock to invalid mode\n",
+		     dlm->name, res->lockname.len, res->lockname.name);
 		BUG();
 	}
 	head = &res->granted;
@@ -365,9 +356,12 @@ converting:
 		spin_lock(&target->spinlock);
 		BUG_ON(target->ml.highest_blocked != LKM_IVMODE);
 
-		mlog(0, "calling ast for converting lock: %.*s, have: %d, "
-		     "granting: %d, node: %u\n", res->lockname.len,
-		     res->lockname.name, target->ml.type,
+		mlog(0, "%s: res %.*s, AST for Converting lock %u:%llu, type "
+		     "%d => %d, node %u\n", dlm->name, res->lockname.len,
+		     res->lockname.name,
+		     dlm_get_lock_cookie_node(be64_to_cpu(target->ml.cookie)),
+		     dlm_get_lock_cookie_seq(be64_to_cpu(target->ml.cookie)),
+		     target->ml.type,
 		     target->ml.convert_type, target->ml.node);
 
 		target->ml.type = target->ml.convert_type;
@@ -428,11 +422,14 @@ blocked:
 		spin_lock(&target->spinlock);
 		BUG_ON(target->ml.highest_blocked != LKM_IVMODE);
 
-		mlog(0, "calling ast for blocked lock: %.*s, granting: %d, "
-		     "node: %u\n", res->lockname.len, res->lockname.name,
+		mlog(0, "%s: res %.*s, AST for Blocked lock %u:%llu, type %d, "
+		     "node %u\n", dlm->name, res->lockname.len,
+		     res->lockname.name,
+		     dlm_get_lock_cookie_node(be64_to_cpu(target->ml.cookie)),
+		     dlm_get_lock_cookie_seq(be64_to_cpu(target->ml.cookie)),
 		     target->ml.type, target->ml.node);
 
-		// target->ml.type is already correct
+		/* target->ml.type is already correct */
 		list_move_tail(&target->list, &res->granted);
 
 		BUG_ON(!target->lksb);
@@ -453,7 +450,6 @@ leave:
 /* must have NO locks when calling this with res !=NULL * */
 void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
 {
-	mlog_entry("dlm=%p, res=%p\n", dlm, res);
 	if (res) {
 		spin_lock(&dlm->spinlock);
 		spin_lock(&res->spinlock);
@@ -466,8 +462,6 @@ void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
 
 void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
 {
-	mlog_entry("dlm=%p, res=%p\n", dlm, res);
-
 	assert_spin_locked(&dlm->spinlock);
 	assert_spin_locked(&res->spinlock);
 
@@ -484,13 +478,16 @@ void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
 			res->state |= DLM_LOCK_RES_DIRTY;
 		}
 	}
+
+	mlog(0, "%s: res %.*s\n", dlm->name, res->lockname.len,
+	     res->lockname.name);
 }
 
 
 /* Launch the NM thread for the mounted volume */
 int dlm_launch_thread(struct dlm_ctxt *dlm)
 {
-	mlog(0, "starting dlm thread...\n");
+	mlog(0, "Starting dlm_thread...\n");
 
 	dlm->dlm_thread_task = kthread_run(dlm_thread, dlm, "dlm_thread");
 	if (IS_ERR(dlm->dlm_thread_task)) {
@@ -505,7 +502,7 @@ int dlm_launch_thread(struct dlm_ctxt *dlm)
 void dlm_complete_thread(struct dlm_ctxt *dlm)
 {
 	if (dlm->dlm_thread_task) {
-		mlog(ML_KTHREAD, "waiting for dlm thread to exit\n");
+		mlog(ML_KTHREAD, "Waiting for dlm thread to exit\n");
 		kthread_stop(dlm->dlm_thread_task);
 		dlm->dlm_thread_task = NULL;
 	}
@@ -536,7 +533,12 @@ static void dlm_flush_asts(struct dlm_ctxt *dlm)
 		/* get an extra ref on lock */
 		dlm_lock_get(lock);
 		res = lock->lockres;
-		mlog(0, "delivering an ast for this lockres\n");
+		mlog(0, "%s: res %.*s, Flush AST for lock %u:%llu, type %d, "
+		     "node %u\n", dlm->name, res->lockname.len,
+		     res->lockname.name,
+		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
+		     lock->ml.type, lock->ml.node);
 
 		BUG_ON(!lock->ast_pending);
 
@@ -557,9 +559,9 @@ static void dlm_flush_asts(struct dlm_ctxt *dlm)
 		/* possible that another ast was queued while
 		 * we were delivering the last one */
 		if (!list_empty(&lock->ast_list)) {
-			mlog(0, "aha another ast got queued while "
-			     "we were finishing the last one. will "
-			     "keep the ast_pending flag set.\n");
+			mlog(0, "%s: res %.*s, AST queued while flushing last "
+			     "one\n", dlm->name, res->lockname.len,
+			     res->lockname.name);
 		} else
 			lock->ast_pending = 0;
 
@@ -590,8 +592,12 @@ static void dlm_flush_asts(struct dlm_ctxt *dlm)
 		dlm_lock_put(lock);
 		spin_unlock(&dlm->ast_lock);
 
-		mlog(0, "delivering a bast for this lockres "
-		     "(blocked = %d\n", hi);
+		mlog(0, "%s: res %.*s, Flush BAST for lock %u:%llu, "
+		     "blocked %d, node %u\n",
+		     dlm->name, res->lockname.len, res->lockname.name,
+		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
+		     hi, lock->ml.node);
 
 		if (lock->ml.node != dlm->node_num) {
 			ret = dlm_send_proxy_bast(dlm, res, lock, hi);
@@ -605,9 +611,9 @@ static void dlm_flush_asts(struct dlm_ctxt *dlm)
 		/* possible that another bast was queued while
 		 * we were delivering the last one */
 		if (!list_empty(&lock->bast_list)) {
-			mlog(0, "aha another bast got queued while "
-			     "we were finishing the last one. will "
-			     "keep the bast_pending flag set.\n");
+			mlog(0, "%s: res %.*s, BAST queued while flushing last "
+			     "one\n", dlm->name, res->lockname.len,
+			     res->lockname.name);
 		} else
 			lock->bast_pending = 0;
 
@@ -675,11 +681,12 @@ static int dlm_thread(void *data)
 			spin_lock(&res->spinlock);
 			if (res->owner != dlm->node_num) {
 				__dlm_print_one_lock_resource(res);
-				mlog(ML_ERROR, "inprog:%s, mig:%s, reco:%s, dirty:%s\n",
-				     res->state & DLM_LOCK_RES_IN_PROGRESS ? "yes" : "no",
-				     res->state & DLM_LOCK_RES_MIGRATING ? "yes" : "no",
-				     res->state & DLM_LOCK_RES_RECOVERING ? "yes" : "no",
-				     res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no");
+				mlog(ML_ERROR, "%s: inprog %d, mig %d, reco %d,"
+				     " dirty %d\n", dlm->name,
+				     !!(res->state & DLM_LOCK_RES_IN_PROGRESS),
+				     !!(res->state & DLM_LOCK_RES_MIGRATING),
+				     !!(res->state & DLM_LOCK_RES_RECOVERING),
+				     !!(res->state & DLM_LOCK_RES_DIRTY));
 			}
 			BUG_ON(res->owner != dlm->node_num);
 
@@ -693,8 +700,8 @@ static int dlm_thread(void *data)
 				res->state &= ~DLM_LOCK_RES_DIRTY;
 				spin_unlock(&res->spinlock);
 				spin_unlock(&dlm->ast_lock);
-				mlog(0, "delaying list shuffling for in-"
-				     "progress lockres %.*s, state=%d\n",
+				mlog(0, "%s: res %.*s, inprogress, delay list "
+				     "shuffle, state %d\n", dlm->name,
 				     res->lockname.len, res->lockname.name,
 				     res->state);
 				delay = 1;
@@ -706,10 +713,6 @@ static int dlm_thread(void *data)
 			 * spinlock and do NOT have the dlm lock.
 			 * safe to reserve/queue asts and run the lists. */
 
-			mlog(0, "calling dlm_shuffle_lists with dlm=%s, "
-			     "res=%.*s\n", dlm->name,
-			     res->lockname.len, res->lockname.name);
-
 			/* called while holding lockres lock */
 			dlm_shuffle_lists(dlm, res);
 			res->state &= ~DLM_LOCK_RES_DIRTY;
@@ -733,7 +736,8 @@ in_progress:
 			/* unlikely, but we may need to give time to
 			 * other tasks */
 			if (!--n) {
-				mlog(0, "throttling dlm_thread\n");
+				mlog(0, "%s: Throttling dlm thread\n",
+				     dlm->name);
 				break;
 			}
 		}
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index d14cad6e2e41..30c523144452 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -1017,8 +1017,11 @@ static int ocfs2_double_lock(struct ocfs2_super *osb,
 	 * An error return must mean that no cluster locks
 	 * were held on function exit.
 	 */
-	if (oi1->ip_blkno != oi2->ip_blkno)
+	if (oi1->ip_blkno != oi2->ip_blkno) {
 		ocfs2_inode_unlock(inode2, 1);
+		brelse(*bh2);
+		*bh2 = NULL;
+	}
 
 	if (status != -ENOENT)
 		mlog_errno(status);
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 70dd3b1798f1..51cd6898e7f1 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -420,6 +420,11 @@ struct ocfs2_super
 	struct inode *osb_tl_inode;
 	struct buffer_head *osb_tl_bh;
 	struct delayed_work osb_truncate_log_wq;
+	/*
+	 * How many clusters in our truncate log.
+	 * It must be protected by osb_tl_inode->i_mutex.
+	 */
+	unsigned int truncated_clusters;
 
 	struct ocfs2_node_map osb_recovering_orphan_dirs;
 	unsigned int *osb_orphan_wipes;
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 0fed41e6efcd..84becd3e4772 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -133,16 +133,20 @@ __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
 EXPORT_SYMBOL(dq_data_lock);
 
 void __quota_error(struct super_block *sb, const char *func,
 		   const char *fmt, ...)
 {
-	va_list args;
-
 	if (printk_ratelimit()) {
+		va_list args;
+		struct va_format vaf;
+
 		va_start(args, fmt);
-		printk(KERN_ERR "Quota error (device %s): %s: ",
-		       sb->s_id, func);
-		vprintk(fmt, args);
-		printk("\n");
+
+		vaf.fmt = fmt;
+		vaf.va = &args;
+
+		printk(KERN_ERR "Quota error (device %s): %s: %pV\n",
+		       sb->s_id, func, &vaf);
+
 		va_end(args);
 	}
 }
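
Note on the __quota_error() rewrite: the old printk/vprintk/printk triple could be interleaved by concurrent printk traffic, splitting one quota error across unrelated log lines. The new code uses the kernel's %pV extension, wrapping the caller's format string and va_list in a struct va_format so the whole message is emitted by a single printk(). The same idiom in isolation, with my_dev_warn as a made-up name:

	/* Kernel-side sketch of the %pV idiom adopted above. */
	static void my_dev_warn(const char *prefix, const char *fmt, ...)
	{
		struct va_format vaf;
		va_list args;

		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		/* One printk() call, so the line cannot be torn apart. */
		printk(KERN_WARNING "%s: %pV\n", prefix, &vaf);
		va_end(args);
	}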
diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c
index 9e48874eabcc..e41c1becf096 100644
--- a/fs/quota/quota_tree.c
+++ b/fs/quota/quota_tree.c
@@ -468,8 +468,8 @@ static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
 		return -ENOMEM;
 	ret = read_blk(info, *blk, buf);
 	if (ret < 0) {
-		quota_error(dquot->dq_sb, "Can't read quota data "
-			    "block %u", blk);
+		quota_error(dquot->dq_sb, "Can't read quota data block %u",
+			    *blk);
 		goto out_buf;
 	}
 	newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
@@ -493,8 +493,9 @@ static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
 	} else {
 		ret = write_blk(info, *blk, buf);
 		if (ret < 0)
-			quota_error(dquot->dq_sb, "Can't write quota "
-				    "tree block %u", blk);
+			quota_error(dquot->dq_sb,
+				    "Can't write quota tree block %u",
+				    *blk);
 	}
 out_buf:
diff --git a/fs/udf/Kconfig b/fs/udf/Kconfig
index f8def3c8ea4c..0e0e99bd6bce 100644
--- a/fs/udf/Kconfig
+++ b/fs/udf/Kconfig
@@ -1,6 +1,5 @@
 config UDF_FS
 	tristate "UDF file system support"
-	depends on BKL # needs serious work to remove
 	select CRC_ITU_T
 	help
 	  This is the new file system used on some CD-ROMs and DVDs. Say Y if
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index b608efaa4cee..306ee39ef2c3 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -157,10 +157,9 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
157 udf_debug("bit %ld already set\n", bit + i); 157 udf_debug("bit %ld already set\n", bit + i);
158 udf_debug("byte=%2x\n", 158 udf_debug("byte=%2x\n",
159 ((char *)bh->b_data)[(bit + i) >> 3]); 159 ((char *)bh->b_data)[(bit + i) >> 3]);
160 } else {
161 udf_add_free_space(sb, sbi->s_partition, 1);
162 } 160 }
163 } 161 }
162 udf_add_free_space(sb, sbi->s_partition, count);
164 mark_buffer_dirty(bh); 163 mark_buffer_dirty(bh);
165 if (overflow) { 164 if (overflow) {
166 block += count; 165 block += count;
diff --git a/fs/udf/dir.c b/fs/udf/dir.c
index 51552bf50225..eb8bfe2b89a5 100644
--- a/fs/udf/dir.c
+++ b/fs/udf/dir.c
@@ -30,7 +30,6 @@
 #include <linux/errno.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
-#include <linux/smp_lock.h>
 #include <linux/buffer_head.h>
 
 #include "udf_i.h"
@@ -190,18 +189,14 @@ static int udf_readdir(struct file *filp, void *dirent, filldir_t filldir)
 	struct inode *dir = filp->f_path.dentry->d_inode;
 	int result;
 
-	lock_kernel();
-
 	if (filp->f_pos == 0) {
 		if (filldir(dirent, ".", 1, filp->f_pos, dir->i_ino, DT_DIR) < 0) {
-			unlock_kernel();
 			return 0;
 		}
 		filp->f_pos++;
 	}
 
 	result = do_udf_readdir(dir, filp, filldir, dirent);
-	unlock_kernel();
 	return result;
 }
 
diff --git a/fs/udf/file.c b/fs/udf/file.c
index 66b9e7e7e4c5..89c78486cbbe 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -32,7 +32,6 @@
 #include <linux/string.h> /* memset */
 #include <linux/capability.h>
 #include <linux/errno.h>
-#include <linux/smp_lock.h>
 #include <linux/pagemap.h>
 #include <linux/buffer_head.h>
 #include <linux/aio.h>
@@ -114,6 +113,7 @@ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 	size_t count = iocb->ki_left;
 	struct udf_inode_info *iinfo = UDF_I(inode);
 
+	down_write(&iinfo->i_data_sem);
 	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
 		if (file->f_flags & O_APPEND)
 			pos = inode->i_size;
@@ -126,6 +126,7 @@ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 		udf_expand_file_adinicb(inode, pos + count, &err);
 		if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
 			udf_debug("udf_expand_adinicb: err=%d\n", err);
+			up_write(&iinfo->i_data_sem);
 			return err;
 		}
 	} else {
@@ -135,6 +136,7 @@ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 			iinfo->i_lenAlloc = inode->i_size;
 		}
 	}
+	up_write(&iinfo->i_data_sem);
 
 	retval = generic_file_aio_write(iocb, iov, nr_segs, ppos);
 	if (retval > 0)
@@ -149,8 +151,6 @@ long udf_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	long old_block, new_block;
 	int result = -EINVAL;
 
-	lock_kernel();
-
 	if (file_permission(filp, MAY_READ) != 0) {
 		udf_debug("no permission to access inode %lu\n", inode->i_ino);
 		result = -EPERM;
@@ -196,7 +196,6 @@ long udf_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	}
 
 out:
-	unlock_kernel();
 	return result;
 }
 
@@ -204,10 +203,10 @@ static int udf_release_file(struct inode *inode, struct file *filp)
 {
 	if (filp->f_mode & FMODE_WRITE) {
 		mutex_lock(&inode->i_mutex);
-		lock_kernel();
+		down_write(&UDF_I(inode)->i_data_sem);
 		udf_discard_prealloc(inode);
 		udf_truncate_tail_extent(inode);
-		unlock_kernel();
+		up_write(&UDF_I(inode)->i_data_sem);
 		mutex_unlock(&inode->i_mutex);
 	}
 	return 0;
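
Note on the UDF locking conversion: across these hunks the Big Kernel Lock pairs (lock_kernel()/unlock_kernel()) give way to a per-inode rwsem, i_data_sem, taken for write wherever in-ICB data or the extent tree is modified and for read on lookup paths such as udf_block_map(). A bare-bones sketch of the pattern under those assumptions (udf_example_update is a hypothetical function, not part of the patch):

	static void udf_example_update(struct inode *inode)
	{
		struct udf_inode_info *iinfo = UDF_I(inode);

		/* Writers are exclusive with both readers and writers. */
		down_write(&iinfo->i_data_sem);
		/* ... modify iinfo->i_lenAlloc or extent data here ... */
		up_write(&iinfo->i_data_sem);

		/* Readers of the same inode may run concurrently. */
		down_read(&iinfo->i_data_sem);
		/* ... walk extents without mutating them ... */
		up_read(&iinfo->i_data_sem);
	}

Unlike the BKL, the rwsem only serializes accesses to one inode's mapping data, so unrelated inodes and unrelated filesystems no longer contend on a single global lock.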
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c
index 75d9304d0dc3..6fb7e0adcda0 100644
--- a/fs/udf/ialloc.c
+++ b/fs/udf/ialloc.c
@@ -92,28 +92,19 @@ struct inode *udf_new_inode(struct inode *dir, int mode, int *err)
 		return NULL;
 	}
 
-	mutex_lock(&sbi->s_alloc_mutex);
 	if (sbi->s_lvid_bh) {
-		struct logicalVolIntegrityDesc *lvid =
-			(struct logicalVolIntegrityDesc *)
-			sbi->s_lvid_bh->b_data;
-		struct logicalVolIntegrityDescImpUse *lvidiu =
-			udf_sb_lvidiu(sbi);
-		struct logicalVolHeaderDesc *lvhd;
-		uint64_t uniqueID;
-		lvhd = (struct logicalVolHeaderDesc *)
-			(lvid->logicalVolContentsUse);
+		struct logicalVolIntegrityDescImpUse *lvidiu;
+
+		iinfo->i_unique = lvid_get_unique_id(sb);
+		mutex_lock(&sbi->s_alloc_mutex);
+		lvidiu = udf_sb_lvidiu(sbi);
 		if (S_ISDIR(mode))
 			le32_add_cpu(&lvidiu->numDirs, 1);
 		else
 			le32_add_cpu(&lvidiu->numFiles, 1);
-		iinfo->i_unique = uniqueID = le64_to_cpu(lvhd->uniqueID);
-		if (!(++uniqueID & 0x00000000FFFFFFFFUL))
-			uniqueID += 16;
-		lvhd->uniqueID = cpu_to_le64(uniqueID);
 		udf_updated_lvid(sb);
+		mutex_unlock(&sbi->s_alloc_mutex);
 	}
-	mutex_unlock(&sbi->s_alloc_mutex);
 
 	inode_init_owner(inode, dir, mode);
 
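
Note on the ialloc.c hunk: the open-coded uniqueID bookkeeping deleted above moves behind lvid_get_unique_id(), whose body is elsewhere in the patch series. Reconstructed purely from the deleted lines, the helper plausibly does something like the following; this is a sketch under that assumption, not the patch's actual implementation, which also supplies its own locking and LVID lookup:

	static uint64_t example_lvid_get_unique_id(
					struct logicalVolHeaderDesc *lvhd)
	{
		uint64_t uniqueID = le64_to_cpu(lvhd->uniqueID);
		uint64_t ret = uniqueID;

		/* The low 32 bits of the next ID must not wrap into the
		 * reserved range 0..15, so skip past it on wraparound. */
		if (!(++uniqueID & 0x00000000FFFFFFFFUL))
			uniqueID += 16;
		lvhd->uniqueID = cpu_to_le64(uniqueID);
		return ret;
	}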
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index fc48f37aa2dd..c6a2e782b97b 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -31,7 +31,6 @@
31 31
32#include "udfdecl.h" 32#include "udfdecl.h"
33#include <linux/mm.h> 33#include <linux/mm.h>
34#include <linux/smp_lock.h>
35#include <linux/module.h> 34#include <linux/module.h>
36#include <linux/pagemap.h> 35#include <linux/pagemap.h>
37#include <linux/buffer_head.h> 36#include <linux/buffer_head.h>
@@ -51,6 +50,7 @@ MODULE_LICENSE("GPL");
51static mode_t udf_convert_permissions(struct fileEntry *); 50static mode_t udf_convert_permissions(struct fileEntry *);
52static int udf_update_inode(struct inode *, int); 51static int udf_update_inode(struct inode *, int);
53static void udf_fill_inode(struct inode *, struct buffer_head *); 52static void udf_fill_inode(struct inode *, struct buffer_head *);
53static int udf_sync_inode(struct inode *inode);
54static int udf_alloc_i_data(struct inode *inode, size_t size); 54static int udf_alloc_i_data(struct inode *inode, size_t size);
55static struct buffer_head *inode_getblk(struct inode *, sector_t, int *, 55static struct buffer_head *inode_getblk(struct inode *, sector_t, int *,
56 sector_t *, int *); 56 sector_t *, int *);
@@ -79,9 +79,7 @@ void udf_evict_inode(struct inode *inode)
79 want_delete = 1; 79 want_delete = 1;
80 inode->i_size = 0; 80 inode->i_size = 0;
81 udf_truncate(inode); 81 udf_truncate(inode);
82 lock_kernel();
83 udf_update_inode(inode, IS_SYNC(inode)); 82 udf_update_inode(inode, IS_SYNC(inode));
84 unlock_kernel();
85 } 83 }
86 invalidate_inode_buffers(inode); 84 invalidate_inode_buffers(inode);
87 end_writeback(inode); 85 end_writeback(inode);
@@ -97,9 +95,7 @@ void udf_evict_inode(struct inode *inode)
97 kfree(iinfo->i_ext.i_data); 95 kfree(iinfo->i_ext.i_data);
98 iinfo->i_ext.i_data = NULL; 96 iinfo->i_ext.i_data = NULL;
99 if (want_delete) { 97 if (want_delete) {
100 lock_kernel();
101 udf_free_inode(inode); 98 udf_free_inode(inode);
102 unlock_kernel();
103 } 99 }
104} 100}
105 101
@@ -302,10 +298,9 @@ static int udf_get_block(struct inode *inode, sector_t block,
302 err = -EIO; 298 err = -EIO;
303 new = 0; 299 new = 0;
304 bh = NULL; 300 bh = NULL;
305
306 lock_kernel();
307
308 iinfo = UDF_I(inode); 301 iinfo = UDF_I(inode);
302
303 down_write(&iinfo->i_data_sem);
309 if (block == iinfo->i_next_alloc_block + 1) { 304 if (block == iinfo->i_next_alloc_block + 1) {
310 iinfo->i_next_alloc_block++; 305 iinfo->i_next_alloc_block++;
311 iinfo->i_next_alloc_goal++; 306 iinfo->i_next_alloc_goal++;
@@ -324,7 +319,7 @@ static int udf_get_block(struct inode *inode, sector_t block,
324 map_bh(bh_result, inode->i_sb, phys); 319 map_bh(bh_result, inode->i_sb, phys);
325 320
326abort: 321abort:
327 unlock_kernel(); 322 up_write(&iinfo->i_data_sem);
328 return err; 323 return err;
329} 324}
330 325
@@ -1022,16 +1017,16 @@ void udf_truncate(struct inode *inode)
1022 if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) 1017 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
1023 return; 1018 return;
1024 1019
1025 lock_kernel();
1026 iinfo = UDF_I(inode); 1020 iinfo = UDF_I(inode);
1027 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { 1021 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
1022 down_write(&iinfo->i_data_sem);
1028 if (inode->i_sb->s_blocksize < 1023 if (inode->i_sb->s_blocksize <
1029 (udf_file_entry_alloc_offset(inode) + 1024 (udf_file_entry_alloc_offset(inode) +
1030 inode->i_size)) { 1025 inode->i_size)) {
1031 udf_expand_file_adinicb(inode, inode->i_size, &err); 1026 udf_expand_file_adinicb(inode, inode->i_size, &err);
1032 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { 1027 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
1033 inode->i_size = iinfo->i_lenAlloc; 1028 inode->i_size = iinfo->i_lenAlloc;
1034 unlock_kernel(); 1029 up_write(&iinfo->i_data_sem);
1035 return; 1030 return;
1036 } else 1031 } else
1037 udf_truncate_extents(inode); 1032 udf_truncate_extents(inode);
@@ -1042,10 +1037,13 @@ void udf_truncate(struct inode *inode)
1042 offset - udf_file_entry_alloc_offset(inode)); 1037 offset - udf_file_entry_alloc_offset(inode));
1043 iinfo->i_lenAlloc = inode->i_size; 1038 iinfo->i_lenAlloc = inode->i_size;
1044 } 1039 }
1040 up_write(&iinfo->i_data_sem);
1045 } else { 1041 } else {
1046 block_truncate_page(inode->i_mapping, inode->i_size, 1042 block_truncate_page(inode->i_mapping, inode->i_size,
1047 udf_get_block); 1043 udf_get_block);
1044 down_write(&iinfo->i_data_sem);
1048 udf_truncate_extents(inode); 1045 udf_truncate_extents(inode);
1046 up_write(&iinfo->i_data_sem);
1049 } 1047 }
1050 1048
1051 inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb); 1049 inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb);
@@ -1053,7 +1051,6 @@ void udf_truncate(struct inode *inode)
1053 udf_sync_inode(inode); 1051 udf_sync_inode(inode);
1054 else 1052 else
1055 mark_inode_dirty(inode); 1053 mark_inode_dirty(inode);
1056 unlock_kernel();
1057} 1054}
1058 1055
1059static void __udf_read_inode(struct inode *inode) 1056static void __udf_read_inode(struct inode *inode)
@@ -1202,6 +1199,7 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
1202 return; 1199 return;
1203 } 1200 }
1204 1201
1202 read_lock(&sbi->s_cred_lock);
1205 inode->i_uid = le32_to_cpu(fe->uid); 1203 inode->i_uid = le32_to_cpu(fe->uid);
1206 if (inode->i_uid == -1 || 1204 if (inode->i_uid == -1 ||
1207 UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_IGNORE) || 1205 UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_IGNORE) ||
@@ -1214,13 +1212,6 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
 	    UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_SET))
 		inode->i_gid = UDF_SB(inode->i_sb)->s_gid;
 
-	inode->i_nlink = le16_to_cpu(fe->fileLinkCount);
-	if (!inode->i_nlink)
-		inode->i_nlink = 1;
-
-	inode->i_size = le64_to_cpu(fe->informationLength);
-	iinfo->i_lenExtents = inode->i_size;
-
 	if (fe->icbTag.fileType != ICBTAG_FILE_TYPE_DIRECTORY &&
 	    sbi->s_fmode != UDF_INVALID_MODE)
 		inode->i_mode = sbi->s_fmode;
@@ -1230,6 +1221,14 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
 	else
 		inode->i_mode = udf_convert_permissions(fe);
 	inode->i_mode &= ~sbi->s_umask;
+	read_unlock(&sbi->s_cred_lock);
+
+	inode->i_nlink = le16_to_cpu(fe->fileLinkCount);
+	if (!inode->i_nlink)
+		inode->i_nlink = 1;
+
+	inode->i_size = le64_to_cpu(fe->informationLength);
+	iinfo->i_lenExtents = inode->i_size;
 
 	if (iinfo->i_efe == 0) {
 		inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
@@ -1373,16 +1372,10 @@ static mode_t udf_convert_permissions(struct fileEntry *fe)
 
 int udf_write_inode(struct inode *inode, struct writeback_control *wbc)
 {
-	int ret;
-
-	lock_kernel();
-	ret = udf_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
-	unlock_kernel();
-
-	return ret;
+	return udf_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
 }
 
-int udf_sync_inode(struct inode *inode)
+static int udf_sync_inode(struct inode *inode)
 {
 	return udf_update_inode(inode, 1);
 }
@@ -2048,7 +2041,7 @@ long udf_block_map(struct inode *inode, sector_t block)
 	struct extent_position epos = {};
 	int ret;
 
-	lock_kernel();
+	down_read(&UDF_I(inode)->i_data_sem);
 
 	if (inode_bmap(inode, block, &epos, &eloc, &elen, &offset) ==
 			(EXT_RECORDED_ALLOCATED >> 30))
@@ -2056,7 +2049,7 @@ long udf_block_map(struct inode *inode, sector_t block)
 	else
 		ret = 0;
 
-	unlock_kernel();
+	up_read(&UDF_I(inode)->i_data_sem);
 	brelse(epos.bh);
 
 	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV))
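
Note on the fs/udf/inode.c changes above: the BKL is replaced by the new per-inode rw_semaphore, with lookup-only paths such as udf_block_map() taking i_data_sem shared and paths that may change extent state, such as udf_get_block(), taking it exclusive. A minimal sketch of that reader/writer split, assuming only the standard kernel rwsem API (the helper names here are illustrative, not from the patch):

#include <linux/rwsem.h>

/* Readers of extent state may run concurrently. */
static void lookup_extents(struct rw_semaphore *i_data_sem)
{
	down_read(i_data_sem);
	/* ... inode_bmap()-style extent walk would go here ... */
	up_read(i_data_sem);
}

/* Writers (allocation, truncation) must exclude everyone. */
static void modify_extents(struct rw_semaphore *i_data_sem)
{
	down_write(i_data_sem);
	/* ... extent allocation / truncation would go here ... */
	up_write(i_data_sem);
}
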
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index 6d8dc02baebb..2be0f9eb86d2 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -27,7 +27,6 @@
 #include <linux/errno.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
-#include <linux/smp_lock.h>
 #include <linux/buffer_head.h>
 #include <linux/sched.h>
 #include <linux/crc-itu-t.h>
@@ -228,10 +227,8 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
 		}
 
 		if ((cfi->fileCharacteristics & FID_FILE_CHAR_PARENT) &&
-		    isdotdot) {
-			brelse(epos.bh);
-			return fi;
-		}
+		    isdotdot)
+			goto out_ok;
 
 		if (!lfi)
 			continue;
@@ -263,7 +260,6 @@ static struct dentry *udf_lookup(struct inode *dir, struct dentry *dentry,
 	if (dentry->d_name.len > UDF_NAME_LEN - 2)
 		return ERR_PTR(-ENAMETOOLONG);
 
-	lock_kernel();
 #ifdef UDF_RECOVERY
 	/* temporary shorthand for specifying files by inode number */
 	if (!strncmp(dentry->d_name.name, ".B=", 3)) {
@@ -275,7 +271,6 @@ static struct dentry *udf_lookup(struct inode *dir, struct dentry *dentry,
 		};
 		inode = udf_iget(dir->i_sb, lb);
 		if (!inode) {
-			unlock_kernel();
 			return ERR_PTR(-EACCES);
 		}
 	} else
@@ -291,11 +286,9 @@ static struct dentry *udf_lookup(struct inode *dir, struct dentry *dentry,
 		loc = lelb_to_cpu(cfi.icb.extLocation);
 		inode = udf_iget(dir->i_sb, &loc);
 		if (!inode) {
-			unlock_kernel();
 			return ERR_PTR(-EACCES);
 		}
 	}
-	unlock_kernel();
 
 	return d_splice_alias(inode, dentry);
 }
@@ -476,15 +469,19 @@ add:
 				f_pos >> dir->i_sb->s_blocksize_bits, 1, err);
 		if (!fibh->ebh)
 			goto out_err;
+		/* Extents could have been merged, invalidate our position */
+		brelse(epos.bh);
+		epos.bh = NULL;
+		epos.block = dinfo->i_location;
+		epos.offset = udf_file_entry_alloc_offset(dir);
 
 		if (!fibh->soffset) {
-			if (udf_next_aext(dir, &epos, &eloc, &elen, 1) ==
-			    (EXT_RECORDED_ALLOCATED >> 30)) {
-				block = eloc.logicalBlockNum + ((elen - 1) >>
+			/* Find the freshly allocated block */
+			while (udf_next_aext(dir, &epos, &eloc, &elen, 1) ==
+				(EXT_RECORDED_ALLOCATED >> 30))
+				;
+			block = eloc.logicalBlockNum + ((elen - 1) >>
 					dir->i_sb->s_blocksize_bits);
-			} else
-				block++;
-
 			brelse(fibh->sbh);
 			fibh->sbh = fibh->ebh;
 			fi = (struct fileIdentDesc *)(fibh->sbh->b_data);
@@ -562,10 +559,8 @@ static int udf_create(struct inode *dir, struct dentry *dentry, int mode,
 	int err;
 	struct udf_inode_info *iinfo;
 
-	lock_kernel();
 	inode = udf_new_inode(dir, mode, &err);
 	if (!inode) {
-		unlock_kernel();
 		return err;
 	}
 
@@ -583,7 +578,6 @@ static int udf_create(struct inode *dir, struct dentry *dentry, int mode,
 		inode->i_nlink--;
 		mark_inode_dirty(inode);
 		iput(inode);
-		unlock_kernel();
 		return err;
 	}
 	cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
@@ -596,7 +590,6 @@ static int udf_create(struct inode *dir, struct dentry *dentry, int mode,
 	if (fibh.sbh != fibh.ebh)
 		brelse(fibh.ebh);
 	brelse(fibh.sbh);
-	unlock_kernel();
 	d_instantiate(dentry, inode);
 
 	return 0;
@@ -614,7 +607,6 @@ static int udf_mknod(struct inode *dir, struct dentry *dentry, int mode,
 	if (!old_valid_dev(rdev))
 		return -EINVAL;
 
-	lock_kernel();
 	err = -EIO;
 	inode = udf_new_inode(dir, mode, &err);
 	if (!inode)
@@ -627,7 +619,6 @@ static int udf_mknod(struct inode *dir, struct dentry *dentry, int mode,
 		inode->i_nlink--;
 		mark_inode_dirty(inode);
 		iput(inode);
-		unlock_kernel();
 		return err;
 	}
 	cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
@@ -646,7 +637,6 @@ static int udf_mknod(struct inode *dir, struct dentry *dentry, int mode,
 	err = 0;
 
 out:
-	unlock_kernel();
 	return err;
 }
 
@@ -659,7 +649,6 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, int mode)
 	struct udf_inode_info *dinfo = UDF_I(dir);
 	struct udf_inode_info *iinfo;
 
-	lock_kernel();
 	err = -EMLINK;
 	if (dir->i_nlink >= (256 << sizeof(dir->i_nlink)) - 1)
 		goto out;
@@ -712,7 +701,6 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, int mode)
 	err = 0;
 
 out:
-	unlock_kernel();
 	return err;
 }
 
@@ -794,7 +782,6 @@ static int udf_rmdir(struct inode *dir, struct dentry *dentry)
 	struct kernel_lb_addr tloc;
 
 	retval = -ENOENT;
-	lock_kernel();
 	fi = udf_find_entry(dir, &dentry->d_name, &fibh, &cfi);
 	if (!fi)
 		goto out;
@@ -826,7 +813,6 @@ end_rmdir:
 	brelse(fibh.sbh);
 
 out:
-	unlock_kernel();
 	return retval;
 }
 
@@ -840,7 +826,6 @@ static int udf_unlink(struct inode *dir, struct dentry *dentry)
 	struct kernel_lb_addr tloc;
 
 	retval = -ENOENT;
-	lock_kernel();
 	fi = udf_find_entry(dir, &dentry->d_name, &fibh, &cfi);
 	if (!fi)
 		goto out;
@@ -870,7 +855,6 @@ end_unlink:
 	brelse(fibh.sbh);
 
 out:
-	unlock_kernel();
 	return retval;
 }
 
@@ -890,21 +874,21 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
 	int block;
 	unsigned char *name = NULL;
 	int namelen;
-	struct buffer_head *bh;
 	struct udf_inode_info *iinfo;
+	struct super_block *sb = dir->i_sb;
 
-	lock_kernel();
 	inode = udf_new_inode(dir, S_IFLNK | S_IRWXUGO, &err);
 	if (!inode)
 		goto out;
 
+	iinfo = UDF_I(inode);
+	down_write(&iinfo->i_data_sem);
 	name = kmalloc(UDF_NAME_LEN, GFP_NOFS);
 	if (!name) {
 		err = -ENOMEM;
 		goto out_no_entry;
 	}
 
-	iinfo = UDF_I(inode);
 	inode->i_data.a_ops = &udf_symlink_aops;
 	inode->i_op = &udf_symlink_inode_operations;
 
@@ -912,7 +896,7 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
 		struct kernel_lb_addr eloc;
 		uint32_t bsize;
 
-		block = udf_new_block(inode->i_sb, inode,
+		block = udf_new_block(sb, inode,
 				iinfo->i_location.partitionReferenceNum,
 				iinfo->i_location.logicalBlockNum, &err);
 		if (!block)
@@ -923,17 +907,17 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
 		eloc.logicalBlockNum = block;
 		eloc.partitionReferenceNum =
 				iinfo->i_location.partitionReferenceNum;
-		bsize = inode->i_sb->s_blocksize;
+		bsize = sb->s_blocksize;
 		iinfo->i_lenExtents = bsize;
 		udf_add_aext(inode, &epos, &eloc, bsize, 0);
 		brelse(epos.bh);
 
-		block = udf_get_pblock(inode->i_sb, block,
+		block = udf_get_pblock(sb, block,
 				iinfo->i_location.partitionReferenceNum,
 				0);
-		epos.bh = udf_tgetblk(inode->i_sb, block);
+		epos.bh = udf_tgetblk(sb, block);
 		lock_buffer(epos.bh);
-		memset(epos.bh->b_data, 0x00, inode->i_sb->s_blocksize);
+		memset(epos.bh->b_data, 0x00, bsize);
 		set_buffer_uptodate(epos.bh);
 		unlock_buffer(epos.bh);
 		mark_buffer_dirty_inode(epos.bh, inode);
@@ -941,7 +925,7 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
 	} else
 		ea = iinfo->i_ext.i_data + iinfo->i_lenEAttr;
 
-	eoffset = inode->i_sb->s_blocksize - udf_ext0_offset(inode);
+	eoffset = sb->s_blocksize - udf_ext0_offset(inode);
 	pc = (struct pathComponent *)ea;
 
 	if (*symname == '/') {
@@ -981,7 +965,7 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
 	}
 
 	if (pc->componentType == 5) {
-		namelen = udf_put_filename(inode->i_sb, compstart, name,
+		namelen = udf_put_filename(sb, compstart, name,
 					   symname - compstart);
 		if (!namelen)
 			goto out_no_entry;
@@ -1015,27 +999,16 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
 	fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err);
 	if (!fi)
 		goto out_no_entry;
-	cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
+	cfi.icb.extLength = cpu_to_le32(sb->s_blocksize);
 	cfi.icb.extLocation = cpu_to_lelb(iinfo->i_location);
-	bh = UDF_SB(inode->i_sb)->s_lvid_bh;
-	if (bh) {
-		struct logicalVolIntegrityDesc *lvid =
-			(struct logicalVolIntegrityDesc *)bh->b_data;
-		struct logicalVolHeaderDesc *lvhd;
-		uint64_t uniqueID;
-		lvhd = (struct logicalVolHeaderDesc *)
-				lvid->logicalVolContentsUse;
-		uniqueID = le64_to_cpu(lvhd->uniqueID);
+	if (UDF_SB(inode->i_sb)->s_lvid_bh) {
 		*(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
-			cpu_to_le32(uniqueID & 0x00000000FFFFFFFFUL);
-		if (!(++uniqueID & 0x00000000FFFFFFFFUL))
-			uniqueID += 16;
-		lvhd->uniqueID = cpu_to_le64(uniqueID);
-		mark_buffer_dirty(bh);
+			cpu_to_le32(lvid_get_unique_id(sb));
 	}
 	udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
 	if (UDF_I(dir)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
 		mark_inode_dirty(dir);
+	up_write(&iinfo->i_data_sem);
 	if (fibh.sbh != fibh.ebh)
 		brelse(fibh.ebh);
 	brelse(fibh.sbh);
@@ -1044,10 +1017,10 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
 
 out:
 	kfree(name);
-	unlock_kernel();
 	return err;
 
 out_no_entry:
+	up_write(&iinfo->i_data_sem);
 	inode_dec_link_count(inode);
 	iput(inode);
 	goto out;
@@ -1060,36 +1033,20 @@ static int udf_link(struct dentry *old_dentry, struct inode *dir,
 	struct udf_fileident_bh fibh;
 	struct fileIdentDesc cfi, *fi;
 	int err;
-	struct buffer_head *bh;
 
-	lock_kernel();
 	if (inode->i_nlink >= (256 << sizeof(inode->i_nlink)) - 1) {
-		unlock_kernel();
 		return -EMLINK;
 	}
 
 	fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err);
 	if (!fi) {
-		unlock_kernel();
 		return err;
 	}
 	cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
 	cfi.icb.extLocation = cpu_to_lelb(UDF_I(inode)->i_location);
-	bh = UDF_SB(inode->i_sb)->s_lvid_bh;
-	if (bh) {
-		struct logicalVolIntegrityDesc *lvid =
-			(struct logicalVolIntegrityDesc *)bh->b_data;
-		struct logicalVolHeaderDesc *lvhd;
-		uint64_t uniqueID;
-		lvhd = (struct logicalVolHeaderDesc *)
-			(lvid->logicalVolContentsUse);
-		uniqueID = le64_to_cpu(lvhd->uniqueID);
+	if (UDF_SB(inode->i_sb)->s_lvid_bh) {
 		*(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
-			cpu_to_le32(uniqueID & 0x00000000FFFFFFFFUL);
-		if (!(++uniqueID & 0x00000000FFFFFFFFUL))
-			uniqueID += 16;
-		lvhd->uniqueID = cpu_to_le64(uniqueID);
-		mark_buffer_dirty(bh);
+			cpu_to_le32(lvid_get_unique_id(inode->i_sb));
 	}
 	udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
 	if (UDF_I(dir)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
@@ -1103,7 +1060,6 @@ static int udf_link(struct dentry *old_dentry, struct inode *dir,
 	mark_inode_dirty(inode);
 	ihold(inode);
 	d_instantiate(dentry, inode);
-	unlock_kernel();
 
 	return 0;
 }
@@ -1124,7 +1080,6 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry,
 	struct kernel_lb_addr tloc;
 	struct udf_inode_info *old_iinfo = UDF_I(old_inode);
 
-	lock_kernel();
 	ofi = udf_find_entry(old_dir, &old_dentry->d_name, &ofibh, &ocfi);
 	if (ofi) {
 		if (ofibh.sbh != ofibh.ebh)
@@ -1248,7 +1203,6 @@ end_rename:
 		brelse(nfibh.ebh);
 		brelse(nfibh.sbh);
 	}
-	unlock_kernel();
 
 	return retval;
 }
@@ -1261,7 +1215,6 @@ static struct dentry *udf_get_parent(struct dentry *child)
 	struct fileIdentDesc cfi;
 	struct udf_fileident_bh fibh;
 
-	lock_kernel();
 	if (!udf_find_entry(child->d_inode, &dotdot, &fibh, &cfi))
 		goto out_unlock;
 
@@ -1273,11 +1226,9 @@ static struct dentry *udf_get_parent(struct dentry *child)
 	inode = udf_iget(child->d_inode->i_sb, &tloc);
 	if (!inode)
 		goto out_unlock;
-	unlock_kernel();
 
 	return d_obtain_alias(inode);
 out_unlock:
-	unlock_kernel();
 	return ERR_PTR(-EACCES);
 }
 
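
Note on the fs/udf/namei.c changes above: lock_kernel() can simply disappear from the directory operations because the VFS already serializes them; ->create, ->mknod, ->mkdir, ->rmdir, ->unlink and ->rename are invoked with the parent directory's i_mutex held (both parents for rename). A hedged sketch of that caller-held-lock invariant; the WARN_ON check is illustrative only and is not part of the patch:

#include <linux/fs.h>
#include <linux/mutex.h>

/* Illustrative: directory ops may rely on the i_mutex the VFS took. */
static int dir_op_sketch(struct inode *dir)
{
	WARN_ON(!mutex_is_locked(&dir->i_mutex));	/* caller locked it */
	/* ... manipulate directory entries without a global lock ... */
	return 0;
}
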
diff --git a/fs/udf/partition.c b/fs/udf/partition.c
index 745eb209be0c..a71090ea0e07 100644
--- a/fs/udf/partition.c
+++ b/fs/udf/partition.c
@@ -25,6 +25,7 @@
 #include <linux/fs.h>
 #include <linux/string.h>
 #include <linux/buffer_head.h>
+#include <linux/mutex.h>
 
 uint32_t udf_get_pblock(struct super_block *sb, uint32_t block,
 			uint16_t partition, uint32_t offset)
@@ -159,7 +160,9 @@ int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block)
 	struct udf_sb_info *sbi = UDF_SB(sb);
 	u16 reallocationTableLen;
 	struct buffer_head *bh;
+	int ret = 0;
 
+	mutex_lock(&sbi->s_alloc_mutex);
 	for (i = 0; i < sbi->s_partitions; i++) {
 		struct udf_part_map *map = &sbi->s_partmaps[i];
 		if (old_block > map->s_partition_root &&
@@ -175,8 +178,10 @@ int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block)
 					break;
 			}
 
-			if (!st)
-				return 1;
+			if (!st) {
+				ret = 1;
+				goto out;
+			}
 
 			reallocationTableLen =
 					le16_to_cpu(st->reallocationTableLen);
@@ -207,14 +212,16 @@ int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block)
 						((old_block -
 							map->s_partition_root) &
 							(sdata->s_packet_len - 1));
-					return 0;
+					ret = 0;
+					goto out;
 				} else if (origLoc == packet) {
 					*new_block = le32_to_cpu(
 						entry->mappedLocation) +
 						((old_block -
 							map->s_partition_root) &
 							(sdata->s_packet_len - 1));
-					return 0;
+					ret = 0;
+					goto out;
 				} else if (origLoc > packet)
 					break;
 			}
@@ -251,20 +258,24 @@ int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block)
 					st->mapEntry[k].mappedLocation) +
 					((old_block - map->s_partition_root) &
 						(sdata->s_packet_len - 1));
-				return 0;
+				ret = 0;
+				goto out;
 			}
 
-			return 1;
+			ret = 1;
+			goto out;
 		} /* if old_block */
 	}
 
 	if (i == sbi->s_partitions) {
 		/* outside of partitions */
 		/* for now, fail =) */
-		return 1;
+		ret = 1;
 	}
 
-	return 0;
+out:
+	mutex_unlock(&sbi->s_alloc_mutex);
+	return ret;
 }
 
 static uint32_t udf_try_read_meta(struct inode *inode, uint32_t block,
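
Note on the fs/udf/partition.c changes above: every early return in udf_relocate_blocks() becomes "ret = ...; goto out;" so the newly taken s_alloc_mutex is released on exactly one exit path. The idiom, as a minimal self-contained sketch:

#include <linux/mutex.h>

static int relocate_sketch(struct mutex *lock, int want_fail)
{
	int ret = 0;

	mutex_lock(lock);
	if (want_fail) {
		ret = 1;	/* a bare "return 1;" here would leak the lock */
		goto out;
	}
	/* ... sparing-table lookup and remapping work ... */
out:
	mutex_unlock(lock);	/* every path unlocks exactly once */
	return ret;
}
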
diff --git a/fs/udf/super.c b/fs/udf/super.c
index b539d53320fb..7b27b063ff6d 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -48,7 +48,6 @@
 #include <linux/stat.h>
 #include <linux/cdrom.h>
 #include <linux/nls.h>
-#include <linux/smp_lock.h>
 #include <linux/buffer_head.h>
 #include <linux/vfs.h>
 #include <linux/vmalloc.h>
@@ -135,6 +134,7 @@ static struct inode *udf_alloc_inode(struct super_block *sb)
 	ei->i_next_alloc_block = 0;
 	ei->i_next_alloc_goal = 0;
 	ei->i_strat4096 = 0;
+	init_rwsem(&ei->i_data_sem);
 
 	return &ei->vfs_inode;
 }
@@ -574,13 +574,14 @@ static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
 	if (!udf_parse_options(options, &uopt, true))
 		return -EINVAL;
 
-	lock_kernel();
+	write_lock(&sbi->s_cred_lock);
 	sbi->s_flags = uopt.flags;
 	sbi->s_uid = uopt.uid;
 	sbi->s_gid = uopt.gid;
 	sbi->s_umask = uopt.umask;
 	sbi->s_fmode = uopt.fmode;
 	sbi->s_dmode = uopt.dmode;
+	write_unlock(&sbi->s_cred_lock);
 
 	if (sbi->s_lvid_bh) {
 		int write_rev = le16_to_cpu(udf_sb_lvidiu(sbi)->minUDFWriteRev);
@@ -597,7 +598,6 @@ static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
 		udf_open_lvid(sb);
 
 out_unlock:
-	unlock_kernel();
 	return error;
 }
 
@@ -966,9 +966,9 @@ static struct udf_bitmap *udf_sb_alloc_bitmap(struct super_block *sb, u32 index)
 			(sizeof(struct buffer_head *) * nr_groups);
 
 	if (size <= PAGE_SIZE)
-		bitmap = kmalloc(size, GFP_KERNEL);
+		bitmap = kzalloc(size, GFP_KERNEL);
 	else
-		bitmap = vmalloc(size); /* TODO: get rid of vmalloc */
+		bitmap = vzalloc(size); /* TODO: get rid of vzalloc */
 
 	if (bitmap == NULL) {
 		udf_error(sb, __func__,
@@ -977,7 +977,6 @@ static struct udf_bitmap *udf_sb_alloc_bitmap(struct super_block *sb, u32 index)
 		return NULL;
 	}
 
-	memset(bitmap, 0x00, size);
 	bitmap->s_block_bitmap = (struct buffer_head **)(bitmap + 1);
 	bitmap->s_nr_groups = nr_groups;
 	return bitmap;
@@ -1781,6 +1780,8 @@ static void udf_open_lvid(struct super_block *sb)
 
 	if (!bh)
 		return;
+
+	mutex_lock(&sbi->s_alloc_mutex);
 	lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
 	lvidiu = udf_sb_lvidiu(sbi);
 
@@ -1797,6 +1798,7 @@ static void udf_open_lvid(struct super_block *sb)
 	lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
 	mark_buffer_dirty(bh);
 	sbi->s_lvid_dirty = 0;
+	mutex_unlock(&sbi->s_alloc_mutex);
 }
 
 static void udf_close_lvid(struct super_block *sb)
@@ -1809,6 +1811,7 @@ static void udf_close_lvid(struct super_block *sb)
 	if (!bh)
 		return;
 
+	mutex_lock(&sbi->s_alloc_mutex);
 	lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
 	lvidiu = udf_sb_lvidiu(sbi);
 	lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
@@ -1829,6 +1832,34 @@ static void udf_close_lvid(struct super_block *sb)
 	lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
 	mark_buffer_dirty(bh);
 	sbi->s_lvid_dirty = 0;
+	mutex_unlock(&sbi->s_alloc_mutex);
+}
+
+u64 lvid_get_unique_id(struct super_block *sb)
+{
+	struct buffer_head *bh;
+	struct udf_sb_info *sbi = UDF_SB(sb);
+	struct logicalVolIntegrityDesc *lvid;
+	struct logicalVolHeaderDesc *lvhd;
+	u64 uniqueID;
+	u64 ret;
+
+	bh = sbi->s_lvid_bh;
+	if (!bh)
+		return 0;
+
+	lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
+	lvhd = (struct logicalVolHeaderDesc *)lvid->logicalVolContentsUse;
+
+	mutex_lock(&sbi->s_alloc_mutex);
+	ret = uniqueID = le64_to_cpu(lvhd->uniqueID);
+	if (!(++uniqueID & 0xFFFFFFFF))
+		uniqueID += 16;
+	lvhd->uniqueID = cpu_to_le64(uniqueID);
+	mutex_unlock(&sbi->s_alloc_mutex);
+	mark_buffer_dirty(bh);
+
+	return ret;
 }
 
 static void udf_sb_free_bitmap(struct udf_bitmap *bitmap)
@@ -1886,8 +1917,6 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
 	struct kernel_lb_addr rootdir, fileset;
 	struct udf_sb_info *sbi;
 
-	lock_kernel();
-
 	uopt.flags = (1 << UDF_FLAG_USE_AD_IN_ICB) | (1 << UDF_FLAG_STRICT);
 	uopt.uid = -1;
 	uopt.gid = -1;
@@ -1896,10 +1925,8 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
 	uopt.dmode = UDF_INVALID_MODE;
 
 	sbi = kzalloc(sizeof(struct udf_sb_info), GFP_KERNEL);
-	if (!sbi) {
-		unlock_kernel();
+	if (!sbi)
 		return -ENOMEM;
-	}
 
 	sb->s_fs_info = sbi;
 
@@ -1936,6 +1963,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
 	sbi->s_fmode = uopt.fmode;
 	sbi->s_dmode = uopt.dmode;
 	sbi->s_nls_map = uopt.nls_map;
+	rwlock_init(&sbi->s_cred_lock);
 
 	if (uopt.session == 0xFFFFFFFF)
 		sbi->s_session = udf_get_last_session(sb);
@@ -2045,7 +2073,6 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
 		goto error_out;
 	}
 	sb->s_maxbytes = MAX_LFS_FILESIZE;
-	unlock_kernel();
 	return 0;
 
 error_out:
@@ -2066,7 +2093,6 @@ error_out:
 	kfree(sbi);
 	sb->s_fs_info = NULL;
 
-	unlock_kernel();
 	return -EINVAL;
 }
 
@@ -2105,8 +2131,6 @@ static void udf_put_super(struct super_block *sb)
 
 	sbi = UDF_SB(sb);
 
-	lock_kernel();
-
 	if (sbi->s_vat_inode)
 		iput(sbi->s_vat_inode);
 	if (sbi->s_partitions)
@@ -2122,8 +2146,6 @@ static void udf_put_super(struct super_block *sb)
 	kfree(sbi->s_partmaps);
 	kfree(sb->s_fs_info);
 	sb->s_fs_info = NULL;
-
-	unlock_kernel();
 }
 
 static int udf_sync_fs(struct super_block *sb, int wait)
@@ -2186,8 +2208,6 @@ static unsigned int udf_count_free_bitmap(struct super_block *sb,
 	uint16_t ident;
 	struct spaceBitmapDesc *bm;
 
-	lock_kernel();
-
 	loc.logicalBlockNum = bitmap->s_extPosition;
 	loc.partitionReferenceNum = UDF_SB(sb)->s_partition;
 	bh = udf_read_ptagged(sb, &loc, 0, &ident);
@@ -2224,10 +2244,7 @@ static unsigned int udf_count_free_bitmap(struct super_block *sb,
 		}
 	}
 	brelse(bh);
-
 out:
-	unlock_kernel();
-
 	return accum;
 }
 
@@ -2240,8 +2257,7 @@ static unsigned int udf_count_free_table(struct super_block *sb,
 	int8_t etype;
 	struct extent_position epos;
 
-	lock_kernel();
-
+	mutex_lock(&UDF_SB(sb)->s_alloc_mutex);
 	epos.block = UDF_I(table)->i_location;
 	epos.offset = sizeof(struct unallocSpaceEntry);
 	epos.bh = NULL;
@@ -2250,8 +2266,7 @@ static unsigned int udf_count_free_table(struct super_block *sb,
 		accum += (elen >> table->i_sb->s_blocksize_bits);
 
 	brelse(epos.bh);
-
-	unlock_kernel();
+	mutex_unlock(&UDF_SB(sb)->s_alloc_mutex);
 
 	return accum;
 }
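
Note on the new lvid_get_unique_id() above: it centralizes the uniqueID bumping that udf_symlink() and udf_link() used to open-code. The caller receives the current 64-bit value; the stored counter is then incremented, and whenever its low 32 bits wrap to zero they skip ahead to 16, presumably because the low values are reserved (the root directory conventionally uses uniqueID 0). A standalone rendering of just that arithmetic, with endianness handling omitted:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the counter update performed in lvid_get_unique_id(). */
static uint64_t next_unique_id(uint64_t *stored)
{
	uint64_t ret = *stored;
	uint64_t next = *stored + 1;

	if (!(next & 0xFFFFFFFF))	/* low 32 bits wrapped to 0 */
		next += 16;		/* skip the reserved low IDs */
	*stored = next;
	return ret;
}

int main(void)
{
	uint64_t id = 0xFFFFFFFF;	/* low half about to wrap */

	printf("handed out:  %#llx\n", (unsigned long long)next_unique_id(&id));
	printf("stored next: %#llx\n", (unsigned long long)id); /* 0x100000010 */
	return 0;
}
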
diff --git a/fs/udf/symlink.c b/fs/udf/symlink.c
index 16064787d2b7..b1d4488b0f14 100644
--- a/fs/udf/symlink.c
+++ b/fs/udf/symlink.c
@@ -27,7 +27,6 @@
 #include <linux/mm.h>
 #include <linux/stat.h>
 #include <linux/pagemap.h>
-#include <linux/smp_lock.h>
 #include <linux/buffer_head.h>
 #include "udf_i.h"
 
@@ -78,13 +77,16 @@ static int udf_symlink_filler(struct file *file, struct page *page)
 	int err = -EIO;
 	unsigned char *p = kmap(page);
 	struct udf_inode_info *iinfo;
+	uint32_t pos;
 
-	lock_kernel();
 	iinfo = UDF_I(inode);
+	pos = udf_block_map(inode, 0);
+
+	down_read(&iinfo->i_data_sem);
 	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
 		symlink = iinfo->i_ext.i_data + iinfo->i_lenEAttr;
 	} else {
-		bh = sb_bread(inode->i_sb, udf_block_map(inode, 0));
+		bh = sb_bread(inode->i_sb, pos);
 
 		if (!bh)
 			goto out;
@@ -95,14 +97,14 @@ static int udf_symlink_filler(struct file *file, struct page *page)
 	udf_pc_to_char(inode->i_sb, symlink, inode->i_size, p);
 	brelse(bh);
 
-	unlock_kernel();
+	up_read(&iinfo->i_data_sem);
 	SetPageUptodate(page);
 	kunmap(page);
 	unlock_page(page);
 	return 0;
 
 out:
-	unlock_kernel();
+	up_read(&iinfo->i_data_sem);
 	SetPageError(page);
 	kunmap(page);
 	unlock_page(page);
diff --git a/fs/udf/udf_i.h b/fs/udf/udf_i.h
index e58d1de41073..d1bd31ea724e 100644
--- a/fs/udf/udf_i.h
+++ b/fs/udf/udf_i.h
@@ -1,6 +1,18 @@
 #ifndef _UDF_I_H
 #define _UDF_I_H
 
+/*
+ * The i_data_sem and i_mutex serve for protection of allocation information
+ * of regular files and symlinks. This includes all extents belonging to
+ * the file/symlink, whether the data is stored in-inode or in external data
+ * blocks, preallocation, goal block information... When extents are read,
+ * i_mutex or i_data_sem must be held (reading is enough in the case of
+ * i_data_sem). When extents are changed, i_data_sem must be held for writing
+ * and i_mutex must also be held.
+ *
+ * For directories, i_mutex is used for all the necessary protection.
+ */
+
 struct udf_inode_info {
 	struct timespec i_crtime;
 	/* Physical address of inode */
@@ -21,6 +33,7 @@ struct udf_inode_info {
 		struct long_ad *i_lad;
 		__u8 *i_data;
 	} i_ext;
+	struct rw_semaphore i_data_sem;
 	struct inode vfs_inode;
 };
 
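
The comment added above fixes the locking rules the rest of this series implements: reading extent state requires i_mutex or a shared i_data_sem, while changing it requires i_data_sem held exclusive with i_mutex also held. A minimal sketch of the two legal access patterns, assuming only the standard kernel locking API (the struct and helper names are illustrative):

#include <linux/mutex.h>
#include <linux/rwsem.h>

struct inode_sketch {
	struct mutex		i_mutex;	/* VFS-level inode lock */
	struct rw_semaphore	i_data_sem;	/* protects extent state */
};

static void read_extents(struct inode_sketch *i)
{
	down_read(&i->i_data_sem);	/* shared mode suffices for readers */
	/* ... walk extents ... */
	up_read(&i->i_data_sem);
}

static void change_extents(struct inode_sketch *i)
{
	mutex_lock(&i->i_mutex);	/* writers need both locks */
	down_write(&i->i_data_sem);
	/* ... truncate or allocate extents ... */
	up_write(&i->i_data_sem);
	mutex_unlock(&i->i_mutex);
}
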
diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h
index d113b72c2768..4858c191242b 100644
--- a/fs/udf/udf_sb.h
+++ b/fs/udf/udf_sb.h
@@ -2,6 +2,7 @@
 #define __LINUX_UDF_SB_H
 
 #include <linux/mutex.h>
+#include <linux/bitops.h>
 
 /* Since UDF 2.01 is ISO 13346 based... */
 #define UDF_SUPER_MAGIC	0x15013346
@@ -128,6 +129,8 @@ struct udf_sb_info {
 	uid_t s_uid;
 	mode_t s_fmode;
 	mode_t s_dmode;
+	/* Lock protecting consistency of above permission settings */
+	rwlock_t s_cred_lock;
 
 	/* Root Info */
 	struct timespec s_record_time;
@@ -139,7 +142,7 @@ struct udf_sb_info {
 	__u16 s_udfrev;
 
 	/* Miscellaneous flags */
-	__u32 s_flags;
+	unsigned long s_flags;
 
 	/* Encoding info */
 	struct nls_table *s_nls_map;
@@ -161,8 +164,19 @@ struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct udf_sb_info *sbi);
 
 int udf_compute_nr_groups(struct super_block *sb, u32 partition);
 
-#define UDF_QUERY_FLAG(X,Y)	( UDF_SB(X)->s_flags & ( 1 << (Y) ) )
-#define UDF_SET_FLAG(X,Y)	( UDF_SB(X)->s_flags |= ( 1 << (Y) ) )
-#define UDF_CLEAR_FLAG(X,Y)	( UDF_SB(X)->s_flags &= ~( 1 << (Y) ) )
+static inline int UDF_QUERY_FLAG(struct super_block *sb, int flag)
+{
+	return test_bit(flag, &UDF_SB(sb)->s_flags);
+}
+
+static inline void UDF_SET_FLAG(struct super_block *sb, int flag)
+{
+	set_bit(flag, &UDF_SB(sb)->s_flags);
+}
+
+static inline void UDF_CLEAR_FLAG(struct super_block *sb, int flag)
+{
+	clear_bit(flag, &UDF_SB(sb)->s_flags);
+}
 
 #endif /* __LINUX_UDF_SB_H */
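
Note on the fs/udf/udf_sb.h changes above: converting the UDF_*_FLAG macros to test_bit()/set_bit()/clear_bit() is what forces s_flags from __u32 to unsigned long, since the kernel's atomic bitops operate on unsigned long words; in exchange, flag updates no longer need an external lock. A compressed sketch of the same pattern, with an illustrative flag bit:

#include <linux/bitops.h>

#define SKETCH_FLAG_STRICT	4	/* illustrative bit number */

static unsigned long sketch_flags;	/* must be unsigned long for bitops */

static inline int sketch_query(int flag)
{
	return test_bit(flag, &sketch_flags);
}

static inline void sketch_set(int flag)
{
	set_bit(flag, &sketch_flags);	/* atomic, no extra locking needed */
}
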
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
index 6995ab1f4305..eba48209f9f3 100644
--- a/fs/udf/udfdecl.h
+++ b/fs/udf/udfdecl.h
@@ -111,6 +111,8 @@ struct extent_position {
 };
 
 /* super.c */
+
+__attribute__((format(printf, 3, 4)))
 extern void udf_warning(struct super_block *, const char *, const char *, ...);
 static inline void udf_updated_lvid(struct super_block *sb)
 {
@@ -123,6 +125,7 @@ static inline void udf_updated_lvid(struct super_block *sb)
 	sb->s_dirt = 1;
 	UDF_SB(sb)->s_lvid_dirty = 1;
 }
+extern u64 lvid_get_unique_id(struct super_block *sb);
 
 /* namei.c */
 extern int udf_write_fi(struct inode *inode, struct fileIdentDesc *,
@@ -133,7 +136,6 @@ extern int udf_write_fi(struct inode *inode, struct fileIdentDesc *,
 extern long udf_ioctl(struct file *, unsigned int, unsigned long);
 /* inode.c */
 extern struct inode *udf_iget(struct super_block *, struct kernel_lb_addr *);
-extern int udf_sync_inode(struct inode *);
 extern void udf_expand_file_adinicb(struct inode *, int, int *);
 extern struct buffer_head *udf_expand_dir_adinicb(struct inode *, int *, int *);
 extern struct buffer_head *udf_bread(struct inode *, int, int, int *);
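
Note on the udf_warning() annotation above: __attribute__((format(printf, 3, 4))) tells the compiler to type-check the format string (argument 3) against the variadic arguments (argument 4 onward). A self-contained illustration with the attribute on arguments 1 and 2:

#include <stdio.h>
#include <stdarg.h>

__attribute__((format(printf, 1, 2)))
static void warn_sketch(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	va_end(ap);
}

int main(void)
{
	warn_sketch("block %d bad\n", 42);	/* fine */
	/* warn_sketch("block %d bad\n", "42"); would now warn at build time */
	return 0;
}
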
diff --git a/fs/xfs/linux-2.6/sv.h b/fs/xfs/linux-2.6/sv.h
deleted file mode 100644
index 4dfc7c370819..000000000000
--- a/fs/xfs/linux-2.6/sv.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_SUPPORT_SV_H__
-#define __XFS_SUPPORT_SV_H__
-
-#include <linux/wait.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-
-/*
- * Synchronisation variables.
- *
- * (Parameters "pri", "svf" and "rts" are not implemented)
- */
-
-typedef struct sv_s {
-	wait_queue_head_t waiters;
-} sv_t;
-
-static inline void _sv_wait(sv_t *sv, spinlock_t *lock)
-{
-	DECLARE_WAITQUEUE(wait, current);
-
-	add_wait_queue_exclusive(&sv->waiters, &wait);
-	__set_current_state(TASK_UNINTERRUPTIBLE);
-	spin_unlock(lock);
-
-	schedule();
-
-	remove_wait_queue(&sv->waiters, &wait);
-}
-
-#define sv_init(sv,flag,name) \
-	init_waitqueue_head(&(sv)->waiters)
-#define sv_destroy(sv) \
-	/*NOTHING*/
-#define sv_wait(sv, pri, lock, s) \
-	_sv_wait(sv, lock)
-#define sv_signal(sv) \
-	wake_up(&(sv)->waiters)
-#define sv_broadcast(sv) \
-	wake_up_all(&(sv)->waiters)
-
-#endif /* __XFS_SUPPORT_SV_H__ */
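
Note on the sv.h deletion above: sv_t is only a thin wrapper around a wait queue, which is presumably why it can be removed once its last XFS users are converted (the conversion itself is outside this hunk). Ignoring the spinlock handoff that _sv_wait() performs, the same sleep/wake pair open-coded with wait queues looks roughly like this sketch:

#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(sketch_wq);
static int sketch_done;

static void sketch_wait(void)
{
	/* rough equivalent of sv_wait(): sleep until woken */
	wait_event(sketch_wq, sketch_done);
}

static void sketch_broadcast(void)
{
	sketch_done = 1;
	wake_up_all(&sketch_wq);	/* rough equivalent of sv_broadcast() */
}
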
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 691f61223ed6..ec7bbb5645b6 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -38,15 +38,6 @@
 #include <linux/pagevec.h>
 #include <linux/writeback.h>
 
-/*
- * Types of I/O for bmap clustering and I/O completion tracking.
- */
-enum {
-	IO_READ,	/* mapping for a read */
-	IO_DELAY,	/* mapping covers delalloc region */
-	IO_UNWRITTEN,	/* mapping covers allocated but uninitialized data */
-	IO_NEW		/* just allocated */
-};
 
 /*
  * Prime number of hash buckets since address is used as the key.
@@ -182,9 +173,6 @@ xfs_setfilesize(
 	xfs_inode_t		*ip = XFS_I(ioend->io_inode);
 	xfs_fsize_t		isize;
 
-	ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
-	ASSERT(ioend->io_type != IO_READ);
-
 	if (unlikely(ioend->io_error))
 		return 0;
 
@@ -244,10 +232,8 @@ xfs_end_io(
 	 * We might have to update the on-disk file size after extending
 	 * writes.
 	 */
-	if (ioend->io_type != IO_READ) {
-		error = xfs_setfilesize(ioend);
-		ASSERT(!error || error == EAGAIN);
-	}
+	error = xfs_setfilesize(ioend);
+	ASSERT(!error || error == EAGAIN);
 
 	/*
 	 * If we didn't complete processing of the ioend, requeue it to the
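
Note on the enum removal above: the IO_* classification does not go away. Later hunks in this file use IO_UNWRITTEN, IO_DELALLOC and IO_OVERWRITE, so the enum is evidently relocated out of xfs_aops.c (presumably into a header not shown in this diff), with IO_READ dropped and IO_DELAY/IO_NEW renamed. Its implied shape, inferred only from the uses below (order and values are assumptions):

/* Inferred sketch; the actual header location and ordering are assumed. */
enum {
	IO_DELALLOC,	/* mapping covers a delayed-allocation region */
	IO_UNWRITTEN,	/* mapping covers allocated but uninitialized data */
	IO_OVERWRITE,	/* mapping covers already-written, allocated blocks */
};
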
@@ -318,14 +304,63 @@ STATIC int
 xfs_map_blocks(
 	struct inode		*inode,
 	loff_t			offset,
-	ssize_t			count,
 	struct xfs_bmbt_irec	*imap,
-	int			flags)
+	int			type,
+	int			nonblocking)
 {
-	int			nmaps = 1;
-	int			new = 0;
+	struct xfs_inode	*ip = XFS_I(inode);
+	struct xfs_mount	*mp = ip->i_mount;
+	ssize_t			count = 1 << inode->i_blkbits;
+	xfs_fileoff_t		offset_fsb, end_fsb;
+	int			error = 0;
+	int			bmapi_flags = XFS_BMAPI_ENTIRE;
+	int			nimaps = 1;
+
+	if (XFS_FORCED_SHUTDOWN(mp))
+		return -XFS_ERROR(EIO);
+
+	if (type == IO_UNWRITTEN)
+		bmapi_flags |= XFS_BMAPI_IGSTATE;
+
+	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
+		if (nonblocking)
+			return -XFS_ERROR(EAGAIN);
+		xfs_ilock(ip, XFS_ILOCK_SHARED);
+	}
 
-	return -xfs_iomap(XFS_I(inode), offset, count, flags, imap, &nmaps, &new);
+	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
+	       (ip->i_df.if_flags & XFS_IFEXTENTS));
+	ASSERT(offset <= mp->m_maxioffset);
+
+	if (offset + count > mp->m_maxioffset)
+		count = mp->m_maxioffset - offset;
+	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
+	offset_fsb = XFS_B_TO_FSBT(mp, offset);
+	error = xfs_bmapi(NULL, ip, offset_fsb, end_fsb - offset_fsb,
+			  bmapi_flags, NULL, 0, imap, &nimaps, NULL);
+	xfs_iunlock(ip, XFS_ILOCK_SHARED);
+
+	if (error)
+		return -XFS_ERROR(error);
+
+	if (type == IO_DELALLOC &&
+	    (!nimaps || isnullstartblock(imap->br_startblock))) {
+		error = xfs_iomap_write_allocate(ip, offset, count, imap);
+		if (!error)
+			trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
+		return -XFS_ERROR(error);
+	}
+
+#ifdef DEBUG
+	if (type == IO_UNWRITTEN) {
+		ASSERT(nimaps);
+		ASSERT(imap->br_startblock != HOLESTARTBLOCK);
+		ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
+	}
+#endif
+	if (nimaps)
+		trace_xfs_map_blocks_found(ip, offset, count, type, imap);
+	return 0;
 }
 
 STATIC int
@@ -380,26 +415,18 @@ xfs_submit_ioend_bio(
 
 	submit_bio(wbc->sync_mode == WB_SYNC_ALL ?
 		   WRITE_SYNC_PLUG : WRITE, bio);
-	ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
-	bio_put(bio);
 }
 
 STATIC struct bio *
 xfs_alloc_ioend_bio(
 	struct buffer_head	*bh)
 {
-	struct bio		*bio;
 	int			nvecs = bio_get_nr_vecs(bh->b_bdev);
-
-	do {
-		bio = bio_alloc(GFP_NOIO, nvecs);
-		nvecs >>= 1;
-	} while (!bio);
+	struct bio		*bio = bio_alloc(GFP_NOIO, nvecs);
 
 	ASSERT(bio->bi_private == NULL);
 	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
 	bio->bi_bdev = bh->b_bdev;
-	bio_get(bio);
 	return bio;
 }
 
@@ -470,9 +497,8 @@ xfs_submit_ioend(
 	/* Pass 1 - start writeback */
 	do {
 		next = ioend->io_list;
-		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
+		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private)
 			xfs_start_buffer_writeback(bh);
-		}
 	} while ((ioend = next) != NULL);
 
 	/* Pass 2 - submit I/O */
@@ -600,117 +626,13 @@ xfs_map_at_offset(
 	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
 	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
 
-	lock_buffer(bh);
 	xfs_map_buffer(inode, bh, imap, offset);
-	bh->b_bdev = xfs_find_bdev_for_inode(inode);
 	set_buffer_mapped(bh);
 	clear_buffer_delay(bh);
 	clear_buffer_unwritten(bh);
 }
 
 /*
- * Look for a page at index that is suitable for clustering.
- */
-STATIC unsigned int
-xfs_probe_page(
-	struct page		*page,
-	unsigned int		pg_offset)
-{
-	struct buffer_head	*bh, *head;
-	int			ret = 0;
-
-	if (PageWriteback(page))
-		return 0;
-	if (!PageDirty(page))
-		return 0;
-	if (!page->mapping)
-		return 0;
-	if (!page_has_buffers(page))
-		return 0;
-
-	bh = head = page_buffers(page);
-	do {
-		if (!buffer_uptodate(bh))
-			break;
-		if (!buffer_mapped(bh))
-			break;
-		ret += bh->b_size;
-		if (ret >= pg_offset)
-			break;
-	} while ((bh = bh->b_this_page) != head);
-
-	return ret;
-}
-
-STATIC size_t
-xfs_probe_cluster(
-	struct inode		*inode,
-	struct page		*startpage,
-	struct buffer_head	*bh,
-	struct buffer_head	*head)
-{
-	struct pagevec		pvec;
-	pgoff_t			tindex, tlast, tloff;
-	size_t			total = 0;
-	int			done = 0, i;
-
-	/* First sum forwards in this page */
-	do {
-		if (!buffer_uptodate(bh) || !buffer_mapped(bh))
-			return total;
-		total += bh->b_size;
-	} while ((bh = bh->b_this_page) != head);
-
-	/* if we reached the end of the page, sum forwards in following pages */
-	tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
-	tindex = startpage->index + 1;
-
-	/* Prune this back to avoid pathological behavior */
-	tloff = min(tlast, startpage->index + 64);
-
-	pagevec_init(&pvec, 0);
-	while (!done && tindex <= tloff) {
-		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);
-
-		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
-			break;
-
-		for (i = 0; i < pagevec_count(&pvec); i++) {
-			struct page *page = pvec.pages[i];
-			size_t pg_offset, pg_len = 0;
-
-			if (tindex == tlast) {
-				pg_offset =
-				    i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
-				if (!pg_offset) {
-					done = 1;
-					break;
-				}
-			} else
-				pg_offset = PAGE_CACHE_SIZE;
-
-			if (page->index == tindex && trylock_page(page)) {
-				pg_len = xfs_probe_page(page, pg_offset);
-				unlock_page(page);
-			}
-
-			if (!pg_len) {
-				done = 1;
-				break;
-			}
-
-			total += pg_len;
-			tindex++;
-		}
-
-		pagevec_release(&pvec);
-		cond_resched();
-	}
-
-	return total;
-}
-
-/*
  * Test if a given page is suitable for writing as part of an unwritten
  * or delayed allocate extent.
  */
@@ -731,9 +653,9 @@ xfs_is_delayed_page(
 		if (buffer_unwritten(bh))
 			acceptable = (type == IO_UNWRITTEN);
 		else if (buffer_delay(bh))
-			acceptable = (type == IO_DELAY);
+			acceptable = (type == IO_DELALLOC);
 		else if (buffer_dirty(bh) && buffer_mapped(bh))
-			acceptable = (type == IO_NEW);
+			acceptable = (type == IO_OVERWRITE);
 		else
 			break;
 	} while ((bh = bh->b_this_page) != head);
@@ -758,8 +680,7 @@ xfs_convert_page(
 	loff_t			tindex,
 	struct xfs_bmbt_irec	*imap,
 	xfs_ioend_t		**ioendp,
-	struct writeback_control *wbc,
-	int			all_bh)
+	struct writeback_control *wbc)
 {
 	struct buffer_head	*bh, *head;
 	xfs_off_t		end_offset;
@@ -814,37 +735,30 @@ xfs_convert_page(
 			continue;
 		}
 
-		if (buffer_unwritten(bh) || buffer_delay(bh)) {
+		if (buffer_unwritten(bh) || buffer_delay(bh) ||
+		    buffer_mapped(bh)) {
 			if (buffer_unwritten(bh))
 				type = IO_UNWRITTEN;
+			else if (buffer_delay(bh))
+				type = IO_DELALLOC;
 			else
-				type = IO_DELAY;
+				type = IO_OVERWRITE;
 
 			if (!xfs_imap_valid(inode, imap, offset)) {
 				done = 1;
 				continue;
 			}
 
-			ASSERT(imap->br_startblock != HOLESTARTBLOCK);
-			ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
-
-			xfs_map_at_offset(inode, bh, imap, offset);
+			lock_buffer(bh);
+			if (type != IO_OVERWRITE)
+				xfs_map_at_offset(inode, bh, imap, offset);
 			xfs_add_to_ioend(inode, bh, offset, type,
 					 ioendp, done);
 
 			page_dirty--;
 			count++;
 		} else {
-			type = IO_NEW;
-			if (buffer_mapped(bh) && all_bh) {
-				lock_buffer(bh);
-				xfs_add_to_ioend(inode, bh, offset,
-						 type, ioendp, done);
-				count++;
-				page_dirty--;
-			} else {
-				done = 1;
-			}
+			done = 1;
 		}
 	} while (offset += len, (bh = bh->b_this_page) != head);
 
@@ -876,7 +790,6 @@ xfs_cluster_write(
 	struct xfs_bmbt_irec	*imap,
 	xfs_ioend_t		**ioendp,
 	struct writeback_control *wbc,
-	int			all_bh,
 	pgoff_t			tlast)
 {
 	struct pagevec		pvec;
@@ -891,7 +804,7 @@ xfs_cluster_write(
 
 	for (i = 0; i < pagevec_count(&pvec); i++) {
 		done = xfs_convert_page(inode, pvec.pages[i], tindex++,
-					imap, ioendp, wbc, all_bh);
+					imap, ioendp, wbc);
 		if (done)
 			break;
 	}
@@ -935,7 +848,7 @@ xfs_aops_discard_page(
 	struct buffer_head	*bh, *head;
 	loff_t			offset = page_offset(page);
 
-	if (!xfs_is_delayed_page(page, IO_DELAY))
+	if (!xfs_is_delayed_page(page, IO_DELALLOC))
 		goto out_invalidate;
 
 	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
@@ -1002,10 +915,10 @@ xfs_vm_writepage(
 	unsigned int		type;
 	__uint64_t		end_offset;
 	pgoff_t			end_index, last_index;
-	ssize_t			size, len;
-	int			flags, err, imap_valid = 0, uptodate = 1;
+	ssize_t			len;
+	int			err, imap_valid = 0, uptodate = 1;
 	int			count = 0;
-	int			all_bh = 0;
+	int			nonblocking = 0;
 
 	trace_xfs_writepage(inode, page, 0);
 
@@ -1056,10 +969,14 @@ xfs_vm_writepage(
 
 	bh = head = page_buffers(page);
 	offset = page_offset(page);
-	flags = BMAPI_READ;
-	type = IO_NEW;
+	type = IO_OVERWRITE;
+
+	if (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking)
+		nonblocking = 1;
 
 	do {
+		int new_ioend = 0;
+
 		if (offset >= end_offset)
 			break;
 		if (!buffer_uptodate(bh))
@@ -1076,90 +993,54 @@ xfs_vm_writepage(
 			continue;
 		}
 
-		if (imap_valid)
-			imap_valid = xfs_imap_valid(inode, &imap, offset);
-
-		if (buffer_unwritten(bh) || buffer_delay(bh)) {
-			int new_ioend = 0;
-
-			/*
-			 * Make sure we don't use a read-only iomap
-			 */
-			if (flags == BMAPI_READ)
-				imap_valid = 0;
-
-			if (buffer_unwritten(bh)) {
+		if (buffer_unwritten(bh)) {
+			if (type != IO_UNWRITTEN) {
 				type = IO_UNWRITTEN;
-				flags = BMAPI_WRITE | BMAPI_IGNSTATE;
-			} else if (buffer_delay(bh)) {
-				type = IO_DELAY;
-				flags = BMAPI_ALLOCATE;
-
-				if (wbc->sync_mode == WB_SYNC_NONE)
-					flags |= BMAPI_TRYLOCK;
-			}
-
-			if (!imap_valid) {
-				/*
-				 * If we didn't have a valid mapping then we
-				 * need to ensure that we put the new mapping
-				 * in a new ioend structure. This needs to be
-				 * done to ensure that the ioends correctly
-				 * reflect the block mappings at io completion
-				 * for unwritten extent conversion.
-				 */
-				new_ioend = 1;
-				err = xfs_map_blocks(inode, offset, len,
-						&imap, flags);
-				if (err)
-					goto error;
-				imap_valid = xfs_imap_valid(inode, &imap,
-							offset);
+				imap_valid = 0;
 			}
-			if (imap_valid) {
-				xfs_map_at_offset(inode, bh, &imap, offset);
-				xfs_add_to_ioend(inode, bh, offset, type,
-						 &ioend, new_ioend);
-				count++;
+		} else if (buffer_delay(bh)) {
+			if (type != IO_DELALLOC) {
+				type = IO_DELALLOC;
+				imap_valid = 0;
 			}
 		} else if (buffer_uptodate(bh)) {
-			/*
-			 * we got here because the buffer is already mapped.
-			 * That means it must already have extents allocated
-			 * underneath it. Map the extent by reading it.
-			 */
-			if (!imap_valid || flags != BMAPI_READ) {
-				flags = BMAPI_READ;
-				size = xfs_probe_cluster(inode, page, bh, head);
-				err = xfs_map_blocks(inode, offset, size,
-						&imap, flags);
-				if (err)
-					goto error;
-				imap_valid = xfs_imap_valid(inode, &imap,
-							offset);
+			if (type != IO_OVERWRITE) {
+				type = IO_OVERWRITE;
+				imap_valid = 0;
 			}
+		} else {
+			if (PageUptodate(page)) {
+				ASSERT(buffer_mapped(bh));
+				imap_valid = 0;
+			}
+			continue;
+		}
 
+		if (imap_valid)
+			imap_valid = xfs_imap_valid(inode, &imap, offset);
+		if (!imap_valid) {
 			/*
-			 * We set the type to IO_NEW in case we are doing a
-			 * small write at EOF that is extending the file but
-			 * without needing an allocation. We need to update the
-			 * file size on I/O completion in this case so it is
-			 * the same case as having just allocated a new extent
-			 * that we are writing into for the first time.
+			 * If we didn't have a valid mapping then we need to
+			 * put the new mapping into a separate ioend structure.
+			 * This ensures non-contiguous extents always have
+			 * separate ioends, which is particularly important
+			 * for unwritten extent conversion at I/O completion
+			 * time.
 			 */
-			type = IO_NEW;
-			if (trylock_buffer(bh)) {
-				if (imap_valid)
-					all_bh = 1;
-				xfs_add_to_ioend(inode, bh, offset, type,
-						&ioend, !imap_valid);
-				count++;
-			} else {
-				imap_valid = 0;
-			}
-		} else if (PageUptodate(page)) {
-			ASSERT(buffer_mapped(bh));
-			imap_valid = 0;
+			new_ioend = 1;
+			err = xfs_map_blocks(inode, offset, &imap, type,
+					     nonblocking);
+			if (err)
+				goto error;
+			imap_valid = xfs_imap_valid(inode, &imap, offset);
+		}
+		if (imap_valid) {
+			lock_buffer(bh);
+			if (type != IO_OVERWRITE)
+				xfs_map_at_offset(inode, bh, &imap, offset);
+			xfs_add_to_ioend(inode, bh, offset, type, &ioend,
+					 new_ioend);
+			count++;
 		}
 
 		if (!iohead)
@@ -1188,7 +1069,7 @@ xfs_vm_writepage(
 			end_index = last_index;
 
 		xfs_cluster_write(inode, page->index + 1, &imap, &ioend,
-				  wbc, all_bh, end_index);
+				  wbc, end_index);
 	}
 
 	if (iohead)
@@ -1257,13 +1138,19 @@ __xfs_get_blocks(
 	int			create,
 	int			direct)
 {
-	int			flags = create ? BMAPI_WRITE : BMAPI_READ;
+	struct xfs_inode	*ip = XFS_I(inode);
+	struct xfs_mount	*mp = ip->i_mount;
+	xfs_fileoff_t		offset_fsb, end_fsb;
+	int			error = 0;
+	int			lockmode = 0;
 	struct xfs_bmbt_irec	imap;
+	int			nimaps = 1;
1262 xfs_off_t offset; 1148 xfs_off_t offset;
1263 ssize_t size; 1149 ssize_t size;
1264 int nimap = 1;
1265 int new = 0; 1150 int new = 0;
1266 int error; 1151
1152 if (XFS_FORCED_SHUTDOWN(mp))
1153 return -XFS_ERROR(EIO);
1267 1154
1268 offset = (xfs_off_t)iblock << inode->i_blkbits; 1155 offset = (xfs_off_t)iblock << inode->i_blkbits;
1269 ASSERT(bh_result->b_size >= (1 << inode->i_blkbits)); 1156 ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
@@ -1272,15 +1159,45 @@ __xfs_get_blocks(
1272 if (!create && direct && offset >= i_size_read(inode)) 1159 if (!create && direct && offset >= i_size_read(inode))
1273 return 0; 1160 return 0;
1274 1161
1275 if (direct && create) 1162 if (create) {
1276 flags |= BMAPI_DIRECT; 1163 lockmode = XFS_ILOCK_EXCL;
1164 xfs_ilock(ip, lockmode);
1165 } else {
1166 lockmode = xfs_ilock_map_shared(ip);
1167 }
1168
1169 ASSERT(offset <= mp->m_maxioffset);
1170 if (offset + size > mp->m_maxioffset)
1171 size = mp->m_maxioffset - offset;
1172 end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
1173 offset_fsb = XFS_B_TO_FSBT(mp, offset);
1277 1174
1278 error = xfs_iomap(XFS_I(inode), offset, size, flags, &imap, &nimap, 1175 error = xfs_bmapi(NULL, ip, offset_fsb, end_fsb - offset_fsb,
1279 &new); 1176 XFS_BMAPI_ENTIRE, NULL, 0, &imap, &nimaps, NULL);
1280 if (error) 1177 if (error)
1281 return -error; 1178 goto out_unlock;
1282 if (nimap == 0) 1179
1283 return 0; 1180 if (create &&
1181 (!nimaps ||
1182 (imap.br_startblock == HOLESTARTBLOCK ||
1183 imap.br_startblock == DELAYSTARTBLOCK))) {
1184 if (direct) {
1185 error = xfs_iomap_write_direct(ip, offset, size,
1186 &imap, nimaps);
1187 } else {
1188 error = xfs_iomap_write_delay(ip, offset, size, &imap);
1189 }
1190 if (error)
1191 goto out_unlock;
1192
1193 trace_xfs_get_blocks_alloc(ip, offset, size, 0, &imap);
1194 } else if (nimaps) {
1195 trace_xfs_get_blocks_found(ip, offset, size, 0, &imap);
1196 } else {
1197 trace_xfs_get_blocks_notfound(ip, offset, size);
1198 goto out_unlock;
1199 }
1200 xfs_iunlock(ip, lockmode);
1284 1201
1285 if (imap.br_startblock != HOLESTARTBLOCK && 1202 if (imap.br_startblock != HOLESTARTBLOCK &&
1286 imap.br_startblock != DELAYSTARTBLOCK) { 1203 imap.br_startblock != DELAYSTARTBLOCK) {
@@ -1347,6 +1264,10 @@ __xfs_get_blocks(
1347 } 1264 }
1348 1265
1349 return 0; 1266 return 0;
1267
1268out_unlock:
1269 xfs_iunlock(ip, lockmode);
1270 return -error;
1350} 1271}
1351 1272
1352int 1273int
@@ -1434,7 +1355,7 @@ xfs_vm_direct_IO(
1434 ssize_t ret; 1355 ssize_t ret;
1435 1356
1436 if (rw & WRITE) { 1357 if (rw & WRITE) {
1437 iocb->private = xfs_alloc_ioend(inode, IO_NEW); 1358 iocb->private = xfs_alloc_ioend(inode, IO_DIRECT);
1438 1359
1439 ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov, 1360 ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
1440 offset, nr_segs, 1361 offset, nr_segs,
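The xfs_aops.c hunks above replace the BMAPI flag juggling in xfs_vm_writepage() with a per-buffer I/O type: when the current buffer's type differs from the one the cached mapping was obtained for, imap_valid is cleared, so the next mapping lookup starts a new ioend and each ioend covers buffers of a single type. A minimal userspace sketch of that loop, with hypothetical stand-ins for the buffer states and the ioend/mapping bookkeeping (not the kernel code):

#include <stdio.h>

enum io_type { IO_DELALLOC, IO_UNWRITTEN, IO_OVERWRITE };

static const char *io_name[] = { "delalloc", "unwritten", "overwrite" };

int main(void)
{
	/*
	 * Per-buffer state of one page, as buffer_delay() and
	 * buffer_unwritten() would report it.
	 */
	enum io_type buf[] = { IO_OVERWRITE, IO_OVERWRITE, IO_DELALLOC,
			       IO_DELALLOC, IO_UNWRITTEN };
	enum io_type type = IO_OVERWRITE;	/* new initial value in the hunk */
	int imap_valid = 0;
	int nr_ioends = 0;

	for (unsigned int i = 0; i < sizeof(buf) / sizeof(buf[0]); i++) {
		if (buf[i] != type) {
			type = buf[i];
			imap_valid = 0;	/* type change: force a new mapping */
		}
		if (!imap_valid) {
			nr_ioends++;	/* stands in for xfs_map_blocks() plus
					 * a new ioend */
			imap_valid = 1;
		}
		printf("buffer %u: %-9s -> ioend %d\n",
		       i, io_name[type], nr_ioends);
	}
	return 0;
}

Buffers 0-1 land in the first ioend, the delalloc pair in a second, and the unwritten buffer in a third, mirroring how the rewritten loop keeps differently-typed extents in separate ioends.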
diff --git a/fs/xfs/linux-2.6/xfs_aops.h b/fs/xfs/linux-2.6/xfs_aops.h
index c5057fb6237a..71f721e1a71f 100644
--- a/fs/xfs/linux-2.6/xfs_aops.h
+++ b/fs/xfs/linux-2.6/xfs_aops.h
@@ -23,6 +23,22 @@ extern struct workqueue_struct *xfsconvertd_workqueue;
23extern mempool_t *xfs_ioend_pool; 23extern mempool_t *xfs_ioend_pool;
24 24
25/* 25/*
26 * Types of I/O for bmap clustering and I/O completion tracking.
27 */
28enum {
29 IO_DIRECT = 0, /* special case for direct I/O ioends */
30 IO_DELALLOC, /* mapping covers delalloc region */
31 IO_UNWRITTEN, /* mapping covers allocated but uninitialized data */
32 IO_OVERWRITE, /* mapping covers already allocated extent */
33};
34
35#define XFS_IO_TYPES \
36 { 0, "" }, \
37 { IO_DELALLOC, "delalloc" }, \
38 { IO_UNWRITTEN, "unwritten" }, \
39 { IO_OVERWRITE, "overwrite" }
40
41/*
26 * xfs_ioend struct manages large extent writes for XFS. 42 * xfs_ioend struct manages large extent writes for XFS.
27 * It can manage several multi-page bio's at once. 43 * It can manage several multi-page bio's at once.
28 */ 44 */
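The XFS_IO_TYPES table added above pairs each I/O type with a display string for __print_symbolic() in the tracepoints. A tiny userspace stand-in for that lookup, assuming the usual value/name-pair semantics (the real lookup lives in the tracing core, not in XFS):

#include <stdio.h>

enum { IO_DIRECT = 0, IO_DELALLOC, IO_UNWRITTEN, IO_OVERWRITE };

static const struct { int value; const char *name; } io_types[] = {
	{ 0, "" },			/* IO_DIRECT prints as empty */
	{ IO_DELALLOC,	"delalloc" },
	{ IO_UNWRITTEN,	"unwritten" },
	{ IO_OVERWRITE,	"overwrite" },
};

static const char *print_symbolic(int type)
{
	for (unsigned int i = 0; i < sizeof(io_types) / sizeof(io_types[0]); i++)
		if (io_types[i].value == type)
			return io_types[i].name;
	return "unknown";
}

int main(void)
{
	printf("%s\n", print_symbolic(IO_DELALLOC));	/* delalloc */
	return 0;
}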
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 4c5deb6e9e31..92f1f2acc6ab 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -44,12 +44,7 @@
44 44
45static kmem_zone_t *xfs_buf_zone; 45static kmem_zone_t *xfs_buf_zone;
46STATIC int xfsbufd(void *); 46STATIC int xfsbufd(void *);
47STATIC int xfsbufd_wakeup(struct shrinker *, int, gfp_t);
48STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int); 47STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
49static struct shrinker xfs_buf_shake = {
50 .shrink = xfsbufd_wakeup,
51 .seeks = DEFAULT_SEEKS,
52};
53 48
54static struct workqueue_struct *xfslogd_workqueue; 49static struct workqueue_struct *xfslogd_workqueue;
55struct workqueue_struct *xfsdatad_workqueue; 50struct workqueue_struct *xfsdatad_workqueue;
@@ -168,8 +163,79 @@ test_page_region(
168} 163}
169 164
170/* 165/*
171 * Internal xfs_buf_t object manipulation 166 * xfs_buf_lru_add - add a buffer to the LRU.
167 *
168 * The LRU takes a new reference to the buffer so that it will only be freed
169 * once the shrinker takes the buffer off the LRU.
172 */ 170 */
171STATIC void
172xfs_buf_lru_add(
173 struct xfs_buf *bp)
174{
175 struct xfs_buftarg *btp = bp->b_target;
176
177 spin_lock(&btp->bt_lru_lock);
178 if (list_empty(&bp->b_lru)) {
179 atomic_inc(&bp->b_hold);
180 list_add_tail(&bp->b_lru, &btp->bt_lru);
181 btp->bt_lru_nr++;
182 }
183 spin_unlock(&btp->bt_lru_lock);
184}
185
186/*
187 * xfs_buf_lru_del - remove a buffer from the LRU
188 *
 189 * The unlocked check is safe here because it only occurs when there are no
 190 * b_lru_ref counts left on the buffer under the pag->pag_buf_lock. It is
 191 * there to optimise the shrinker removing the buffer from the LRU and
 192 * calling xfs_buf_free(), i.e. it removes an unnecessary round trip on the
 193 * bt_lru_lock.
194 */
195STATIC void
196xfs_buf_lru_del(
197 struct xfs_buf *bp)
198{
199 struct xfs_buftarg *btp = bp->b_target;
200
201 if (list_empty(&bp->b_lru))
202 return;
203
204 spin_lock(&btp->bt_lru_lock);
205 if (!list_empty(&bp->b_lru)) {
206 list_del_init(&bp->b_lru);
207 btp->bt_lru_nr--;
208 }
209 spin_unlock(&btp->bt_lru_lock);
210}
211
212/*
213 * When we mark a buffer stale, we remove the buffer from the LRU and clear the
214 * b_lru_ref count so that the buffer is freed immediately when the buffer
215 * reference count falls to zero. If the buffer is already on the LRU, we need
216 * to remove the reference that LRU holds on the buffer.
217 *
218 * This prevents build-up of stale buffers on the LRU.
219 */
220void
221xfs_buf_stale(
222 struct xfs_buf *bp)
223{
224 bp->b_flags |= XBF_STALE;
225 atomic_set(&(bp)->b_lru_ref, 0);
226 if (!list_empty(&bp->b_lru)) {
227 struct xfs_buftarg *btp = bp->b_target;
228
229 spin_lock(&btp->bt_lru_lock);
230 if (!list_empty(&bp->b_lru)) {
231 list_del_init(&bp->b_lru);
232 btp->bt_lru_nr--;
233 atomic_dec(&bp->b_hold);
234 }
235 spin_unlock(&btp->bt_lru_lock);
236 }
237 ASSERT(atomic_read(&bp->b_hold) >= 1);
238}
173 239
174STATIC void 240STATIC void
175_xfs_buf_initialize( 241_xfs_buf_initialize(
@@ -186,7 +252,9 @@ _xfs_buf_initialize(
186 252
187 memset(bp, 0, sizeof(xfs_buf_t)); 253 memset(bp, 0, sizeof(xfs_buf_t));
188 atomic_set(&bp->b_hold, 1); 254 atomic_set(&bp->b_hold, 1);
255 atomic_set(&bp->b_lru_ref, 1);
189 init_completion(&bp->b_iowait); 256 init_completion(&bp->b_iowait);
257 INIT_LIST_HEAD(&bp->b_lru);
190 INIT_LIST_HEAD(&bp->b_list); 258 INIT_LIST_HEAD(&bp->b_list);
191 RB_CLEAR_NODE(&bp->b_rbnode); 259 RB_CLEAR_NODE(&bp->b_rbnode);
192 sema_init(&bp->b_sema, 0); /* held, no waiters */ 260 sema_init(&bp->b_sema, 0); /* held, no waiters */
@@ -262,6 +330,8 @@ xfs_buf_free(
262{ 330{
263 trace_xfs_buf_free(bp, _RET_IP_); 331 trace_xfs_buf_free(bp, _RET_IP_);
264 332
333 ASSERT(list_empty(&bp->b_lru));
334
265 if (bp->b_flags & (_XBF_PAGE_CACHE|_XBF_PAGES)) { 335 if (bp->b_flags & (_XBF_PAGE_CACHE|_XBF_PAGES)) {
266 uint i; 336 uint i;
267 337
@@ -337,7 +407,6 @@ _xfs_buf_lookup_pages(
337 __func__, gfp_mask); 407 __func__, gfp_mask);
338 408
339 XFS_STATS_INC(xb_page_retries); 409 XFS_STATS_INC(xb_page_retries);
340 xfsbufd_wakeup(NULL, 0, gfp_mask);
341 congestion_wait(BLK_RW_ASYNC, HZ/50); 410 congestion_wait(BLK_RW_ASYNC, HZ/50);
342 goto retry; 411 goto retry;
343 } 412 }
@@ -828,6 +897,7 @@ xfs_buf_rele(
828 897
829 if (!pag) { 898 if (!pag) {
830 ASSERT(!bp->b_relse); 899 ASSERT(!bp->b_relse);
900 ASSERT(list_empty(&bp->b_lru));
831 ASSERT(RB_EMPTY_NODE(&bp->b_rbnode)); 901 ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
832 if (atomic_dec_and_test(&bp->b_hold)) 902 if (atomic_dec_and_test(&bp->b_hold))
833 xfs_buf_free(bp); 903 xfs_buf_free(bp);
@@ -835,13 +905,19 @@ xfs_buf_rele(
835 } 905 }
836 906
837 ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode)); 907 ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));
908
838 ASSERT(atomic_read(&bp->b_hold) > 0); 909 ASSERT(atomic_read(&bp->b_hold) > 0);
839 if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) { 910 if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
840 if (bp->b_relse) { 911 if (bp->b_relse) {
841 atomic_inc(&bp->b_hold); 912 atomic_inc(&bp->b_hold);
842 spin_unlock(&pag->pag_buf_lock); 913 spin_unlock(&pag->pag_buf_lock);
843 bp->b_relse(bp); 914 bp->b_relse(bp);
915 } else if (!(bp->b_flags & XBF_STALE) &&
916 atomic_read(&bp->b_lru_ref)) {
917 xfs_buf_lru_add(bp);
918 spin_unlock(&pag->pag_buf_lock);
844 } else { 919 } else {
920 xfs_buf_lru_del(bp);
845 ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q))); 921 ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
846 rb_erase(&bp->b_rbnode, &pag->pag_buf_tree); 922 rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
847 spin_unlock(&pag->pag_buf_lock); 923 spin_unlock(&pag->pag_buf_lock);
@@ -1438,51 +1514,84 @@ xfs_buf_iomove(
1438 */ 1514 */
1439 1515
1440/* 1516/*
1441 * Wait for any bufs with callbacks that have been submitted but 1517 * Wait for any bufs with callbacks that have been submitted but have not yet
1442 * have not yet returned... walk the hash list for the target. 1518 * returned. These buffers will have an elevated hold count, so wait on those
1519 * while freeing all the buffers only held by the LRU.
1443 */ 1520 */
1444void 1521void
1445xfs_wait_buftarg( 1522xfs_wait_buftarg(
1446 struct xfs_buftarg *btp) 1523 struct xfs_buftarg *btp)
1447{ 1524{
1448 struct xfs_perag *pag; 1525 struct xfs_buf *bp;
1449 uint i;
1450 1526
1451 for (i = 0; i < btp->bt_mount->m_sb.sb_agcount; i++) { 1527restart:
1452 pag = xfs_perag_get(btp->bt_mount, i); 1528 spin_lock(&btp->bt_lru_lock);
1453 spin_lock(&pag->pag_buf_lock); 1529 while (!list_empty(&btp->bt_lru)) {
1454 while (rb_first(&pag->pag_buf_tree)) { 1530 bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
1455 spin_unlock(&pag->pag_buf_lock); 1531 if (atomic_read(&bp->b_hold) > 1) {
1532 spin_unlock(&btp->bt_lru_lock);
1456 delay(100); 1533 delay(100);
1457 spin_lock(&pag->pag_buf_lock); 1534 goto restart;
1458 } 1535 }
1459 spin_unlock(&pag->pag_buf_lock); 1536 /*
1460 xfs_perag_put(pag); 1537 * clear the LRU reference count so the bufer doesn't get
1538 * ignored in xfs_buf_rele().
1539 */
1540 atomic_set(&bp->b_lru_ref, 0);
1541 spin_unlock(&btp->bt_lru_lock);
1542 xfs_buf_rele(bp);
1543 spin_lock(&btp->bt_lru_lock);
1461 } 1544 }
1545 spin_unlock(&btp->bt_lru_lock);
1462} 1546}
1463 1547
1464/* 1548int
1465 * buftarg list for delwrite queue processing 1549xfs_buftarg_shrink(
1466 */ 1550 struct shrinker *shrink,
1467static LIST_HEAD(xfs_buftarg_list); 1551 int nr_to_scan,
1468static DEFINE_SPINLOCK(xfs_buftarg_lock); 1552 gfp_t mask)
1469
1470STATIC void
1471xfs_register_buftarg(
1472 xfs_buftarg_t *btp)
1473{ 1553{
1474 spin_lock(&xfs_buftarg_lock); 1554 struct xfs_buftarg *btp = container_of(shrink,
1475 list_add(&btp->bt_list, &xfs_buftarg_list); 1555 struct xfs_buftarg, bt_shrinker);
1476 spin_unlock(&xfs_buftarg_lock); 1556 struct xfs_buf *bp;
1477} 1557 LIST_HEAD(dispose);
1478 1558
1479STATIC void 1559 if (!nr_to_scan)
1480xfs_unregister_buftarg( 1560 return btp->bt_lru_nr;
1481 xfs_buftarg_t *btp) 1561
1482{ 1562 spin_lock(&btp->bt_lru_lock);
1483 spin_lock(&xfs_buftarg_lock); 1563 while (!list_empty(&btp->bt_lru)) {
1484 list_del(&btp->bt_list); 1564 if (nr_to_scan-- <= 0)
1485 spin_unlock(&xfs_buftarg_lock); 1565 break;
1566
1567 bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
1568
1569 /*
1570 * Decrement the b_lru_ref count unless the value is already
1571 * zero. If the value is already zero, we need to reclaim the
1572 * buffer, otherwise it gets another trip through the LRU.
1573 */
1574 if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
1575 list_move_tail(&bp->b_lru, &btp->bt_lru);
1576 continue;
1577 }
1578
1579 /*
1580 * remove the buffer from the LRU now to avoid needing another
1581 * lock round trip inside xfs_buf_rele().
1582 */
1583 list_move(&bp->b_lru, &dispose);
1584 btp->bt_lru_nr--;
1585 }
1586 spin_unlock(&btp->bt_lru_lock);
1587
1588 while (!list_empty(&dispose)) {
1589 bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
1590 list_del_init(&bp->b_lru);
1591 xfs_buf_rele(bp);
1592 }
1593
1594 return btp->bt_lru_nr;
1486} 1595}
1487 1596
1488void 1597void
@@ -1490,17 +1599,14 @@ xfs_free_buftarg(
1490 struct xfs_mount *mp, 1599 struct xfs_mount *mp,
1491 struct xfs_buftarg *btp) 1600 struct xfs_buftarg *btp)
1492{ 1601{
1602 unregister_shrinker(&btp->bt_shrinker);
1603
1493 xfs_flush_buftarg(btp, 1); 1604 xfs_flush_buftarg(btp, 1);
1494 if (mp->m_flags & XFS_MOUNT_BARRIER) 1605 if (mp->m_flags & XFS_MOUNT_BARRIER)
1495 xfs_blkdev_issue_flush(btp); 1606 xfs_blkdev_issue_flush(btp);
1496 iput(btp->bt_mapping->host); 1607 iput(btp->bt_mapping->host);
1497 1608
1498 /* Unregister the buftarg first so that we don't get a
1499 * wakeup finding a non-existent task
1500 */
1501 xfs_unregister_buftarg(btp);
1502 kthread_stop(btp->bt_task); 1609 kthread_stop(btp->bt_task);
1503
1504 kmem_free(btp); 1610 kmem_free(btp);
1505} 1611}
1506 1612
@@ -1597,20 +1703,13 @@ xfs_alloc_delwrite_queue(
1597 xfs_buftarg_t *btp, 1703 xfs_buftarg_t *btp,
1598 const char *fsname) 1704 const char *fsname)
1599{ 1705{
1600 int error = 0;
1601
1602 INIT_LIST_HEAD(&btp->bt_list);
1603 INIT_LIST_HEAD(&btp->bt_delwrite_queue); 1706 INIT_LIST_HEAD(&btp->bt_delwrite_queue);
1604 spin_lock_init(&btp->bt_delwrite_lock); 1707 spin_lock_init(&btp->bt_delwrite_lock);
1605 btp->bt_flags = 0; 1708 btp->bt_flags = 0;
1606 btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd/%s", fsname); 1709 btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd/%s", fsname);
1607 if (IS_ERR(btp->bt_task)) { 1710 if (IS_ERR(btp->bt_task))
1608 error = PTR_ERR(btp->bt_task); 1711 return PTR_ERR(btp->bt_task);
1609 goto out_error; 1712 return 0;
1610 }
1611 xfs_register_buftarg(btp);
1612out_error:
1613 return error;
1614} 1713}
1615 1714
1616xfs_buftarg_t * 1715xfs_buftarg_t *
@@ -1627,12 +1726,17 @@ xfs_alloc_buftarg(
1627 btp->bt_mount = mp; 1726 btp->bt_mount = mp;
1628 btp->bt_dev = bdev->bd_dev; 1727 btp->bt_dev = bdev->bd_dev;
1629 btp->bt_bdev = bdev; 1728 btp->bt_bdev = bdev;
1729 INIT_LIST_HEAD(&btp->bt_lru);
1730 spin_lock_init(&btp->bt_lru_lock);
1630 if (xfs_setsize_buftarg_early(btp, bdev)) 1731 if (xfs_setsize_buftarg_early(btp, bdev))
1631 goto error; 1732 goto error;
1632 if (xfs_mapping_buftarg(btp, bdev)) 1733 if (xfs_mapping_buftarg(btp, bdev))
1633 goto error; 1734 goto error;
1634 if (xfs_alloc_delwrite_queue(btp, fsname)) 1735 if (xfs_alloc_delwrite_queue(btp, fsname))
1635 goto error; 1736 goto error;
1737 btp->bt_shrinker.shrink = xfs_buftarg_shrink;
1738 btp->bt_shrinker.seeks = DEFAULT_SEEKS;
1739 register_shrinker(&btp->bt_shrinker);
1636 return btp; 1740 return btp;
1637 1741
1638error: 1742error:
@@ -1737,27 +1841,6 @@ xfs_buf_runall_queues(
1737 flush_workqueue(queue); 1841 flush_workqueue(queue);
1738} 1842}
1739 1843
1740STATIC int
1741xfsbufd_wakeup(
1742 struct shrinker *shrink,
1743 int priority,
1744 gfp_t mask)
1745{
1746 xfs_buftarg_t *btp;
1747
1748 spin_lock(&xfs_buftarg_lock);
1749 list_for_each_entry(btp, &xfs_buftarg_list, bt_list) {
1750 if (test_bit(XBT_FORCE_SLEEP, &btp->bt_flags))
1751 continue;
1752 if (list_empty(&btp->bt_delwrite_queue))
1753 continue;
1754 set_bit(XBT_FORCE_FLUSH, &btp->bt_flags);
1755 wake_up_process(btp->bt_task);
1756 }
1757 spin_unlock(&xfs_buftarg_lock);
1758 return 0;
1759}
1760
1761/* 1844/*
1762 * Move as many buffers as specified to the supplied list 1845 * Move as many buffers as specified to the supplied list
1763 * indicating if we skipped any buffers to prevent deadlocks. 1846 * indicating if we skipped any buffers to prevent deadlocks.
@@ -1952,7 +2035,6 @@ xfs_buf_init(void)
1952 if (!xfsconvertd_workqueue) 2035 if (!xfsconvertd_workqueue)
1953 goto out_destroy_xfsdatad_workqueue; 2036 goto out_destroy_xfsdatad_workqueue;
1954 2037
1955 register_shrinker(&xfs_buf_shake);
1956 return 0; 2038 return 0;
1957 2039
1958 out_destroy_xfsdatad_workqueue: 2040 out_destroy_xfsdatad_workqueue:
@@ -1968,7 +2050,6 @@ xfs_buf_init(void)
1968void 2050void
1969xfs_buf_terminate(void) 2051xfs_buf_terminate(void)
1970{ 2052{
1971 unregister_shrinker(&xfs_buf_shake);
1972 destroy_workqueue(xfsconvertd_workqueue); 2053 destroy_workqueue(xfsconvertd_workqueue);
1973 destroy_workqueue(xfsdatad_workqueue); 2054 destroy_workqueue(xfsdatad_workqueue);
1974 destroy_workqueue(xfslogd_workqueue); 2055 destroy_workqueue(xfslogd_workqueue);
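The xfs_buf.c changes above replace the global xfsbufd wakeup shrinker with a per-buftarg LRU whose entries carry a b_lru_ref count, so frequently referenced buffers earn extra trips around the list before reclaim. A simplified single-threaded model of that second-chance scan, where a plain linked list and int stand in for bt_lru_lock and the atomics (not the kernel code):

#include <stdio.h>
#include <stdlib.h>

struct buf {
	int id;
	int lru_ref;		/* models atomic_t b_lru_ref */
	struct buf *next;
};

static struct buf *lru;		/* head = coldest buffer, tail = hottest */

static void lru_add_tail(struct buf *bp)
{
	struct buf **p = &lru;

	bp->next = NULL;
	while (*p)
		p = &(*p)->next;
	*p = bp;
}

static void lru_new(int id, int ref)
{
	struct buf *bp = malloc(sizeof(*bp));

	bp->id = id;
	bp->lru_ref = ref;
	lru_add_tail(bp);
}

static void shrink(int nr_to_scan)
{
	while (nr_to_scan-- > 0 && lru) {
		struct buf *bp = lru;

		lru = bp->next;
		if (bp->lru_ref > 0) {
			/*
			 * atomic_add_unless(&bp->b_lru_ref, -1, 0): the count
			 * was nonzero, so decrement it and give the buffer
			 * another trip through the LRU (list_move_tail()).
			 */
			bp->lru_ref--;
			lru_add_tail(bp);
			continue;
		}
		/* count already zero: dispose list, then release */
		printf("reclaiming buffer %d\n", bp->id);
		free(bp);
	}
}

int main(void)
{
	lru_new(1, 0);	/* cold buffer, reclaimed on the first scan */
	lru_new(2, 2);	/* e.g. seeded via XFS_BUF_SET_VTYPE_REF */

	shrink(2);	/* frees buffer 1; buffer 2 drops to ref 1 */
	shrink(2);	/* buffer 2 drops to 0, then is reclaimed */
	return 0;
}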
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
index 383a3f37cf98..a76c2428faff 100644
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -128,10 +128,15 @@ typedef struct xfs_buftarg {
128 128
129 /* per device delwri queue */ 129 /* per device delwri queue */
130 struct task_struct *bt_task; 130 struct task_struct *bt_task;
131 struct list_head bt_list;
132 struct list_head bt_delwrite_queue; 131 struct list_head bt_delwrite_queue;
133 spinlock_t bt_delwrite_lock; 132 spinlock_t bt_delwrite_lock;
134 unsigned long bt_flags; 133 unsigned long bt_flags;
134
135 /* LRU control structures */
136 struct shrinker bt_shrinker;
137 struct list_head bt_lru;
138 spinlock_t bt_lru_lock;
139 unsigned int bt_lru_nr;
135} xfs_buftarg_t; 140} xfs_buftarg_t;
136 141
137/* 142/*
@@ -164,9 +169,11 @@ typedef struct xfs_buf {
164 xfs_off_t b_file_offset; /* offset in file */ 169 xfs_off_t b_file_offset; /* offset in file */
165 size_t b_buffer_length;/* size of buffer in bytes */ 170 size_t b_buffer_length;/* size of buffer in bytes */
166 atomic_t b_hold; /* reference count */ 171 atomic_t b_hold; /* reference count */
172 atomic_t b_lru_ref; /* lru reclaim ref count */
167 xfs_buf_flags_t b_flags; /* status flags */ 173 xfs_buf_flags_t b_flags; /* status flags */
168 struct semaphore b_sema; /* semaphore for lockables */ 174 struct semaphore b_sema; /* semaphore for lockables */
169 175
176 struct list_head b_lru; /* lru list */
170 wait_queue_head_t b_waiters; /* unpin waiters */ 177 wait_queue_head_t b_waiters; /* unpin waiters */
171 struct list_head b_list; 178 struct list_head b_list;
172 struct xfs_perag *b_pag; /* contains rbtree root */ 179 struct xfs_perag *b_pag; /* contains rbtree root */
@@ -264,7 +271,8 @@ extern void xfs_buf_terminate(void);
264#define XFS_BUF_ZEROFLAGS(bp) ((bp)->b_flags &= \ 271#define XFS_BUF_ZEROFLAGS(bp) ((bp)->b_flags &= \
265 ~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI|XBF_ORDERED)) 272 ~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI|XBF_ORDERED))
266 273
267#define XFS_BUF_STALE(bp) ((bp)->b_flags |= XBF_STALE) 274void xfs_buf_stale(struct xfs_buf *bp);
275#define XFS_BUF_STALE(bp) xfs_buf_stale(bp);
268#define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XBF_STALE) 276#define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XBF_STALE)
269#define XFS_BUF_ISSTALE(bp) ((bp)->b_flags & XBF_STALE) 277#define XFS_BUF_ISSTALE(bp) ((bp)->b_flags & XBF_STALE)
270#define XFS_BUF_SUPER_STALE(bp) do { \ 278#define XFS_BUF_SUPER_STALE(bp) do { \
@@ -328,9 +336,15 @@ extern void xfs_buf_terminate(void);
328#define XFS_BUF_SIZE(bp) ((bp)->b_buffer_length) 336#define XFS_BUF_SIZE(bp) ((bp)->b_buffer_length)
329#define XFS_BUF_SET_SIZE(bp, cnt) ((bp)->b_buffer_length = (cnt)) 337#define XFS_BUF_SET_SIZE(bp, cnt) ((bp)->b_buffer_length = (cnt))
330 338
331#define XFS_BUF_SET_VTYPE_REF(bp, type, ref) do { } while (0) 339static inline void
340xfs_buf_set_ref(
341 struct xfs_buf *bp,
342 int lru_ref)
343{
344 atomic_set(&bp->b_lru_ref, lru_ref);
345}
346#define XFS_BUF_SET_VTYPE_REF(bp, type, ref) xfs_buf_set_ref(bp, ref)
332#define XFS_BUF_SET_VTYPE(bp, type) do { } while (0) 347#define XFS_BUF_SET_VTYPE(bp, type) do { } while (0)
333#define XFS_BUF_SET_REF(bp, ref) do { } while (0)
334 348
335#define XFS_BUF_ISPINNED(bp) atomic_read(&((bp)->b_pin_count)) 349#define XFS_BUF_ISPINNED(bp) atomic_read(&((bp)->b_pin_count))
336 350
diff --git a/fs/xfs/linux-2.6/xfs_export.c b/fs/xfs/linux-2.6/xfs_export.c
index 3764d74790ec..fc0114da7fdd 100644
--- a/fs/xfs/linux-2.6/xfs_export.c
+++ b/fs/xfs/linux-2.6/xfs_export.c
@@ -70,8 +70,16 @@ xfs_fs_encode_fh(
70 else 70 else
71 fileid_type = FILEID_INO32_GEN_PARENT; 71 fileid_type = FILEID_INO32_GEN_PARENT;
72 72
73 /* filesystem may contain 64bit inode numbers */ 73 /*
74 if (!(XFS_M(inode->i_sb)->m_flags & XFS_MOUNT_SMALL_INUMS)) 74 * If the filesystem may contain 64bit inode numbers, we need
75 * to use larger file handles that can represent them.
76 *
 77 * While the inode32 option makes us allocate only inode numbers that
 78 * fit into 32 bits, any large enough filesystem may still contain
 79 * 64-bit inodes, thus the slightly confusing looking conditional below.
80 */
81 if (!(XFS_M(inode->i_sb)->m_flags & XFS_MOUNT_SMALL_INUMS) ||
82 (XFS_M(inode->i_sb)->m_flags & XFS_MOUNT_32BITINODES))
75 fileid_type |= XFS_FILEID_TYPE_64FLAG; 83 fileid_type |= XFS_FILEID_TYPE_64FLAG;
76 84
77 /* 85 /*
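The xfs_export.c fix above widens the file handle type whenever the filesystem may hold 64-bit inode numbers, even if the inode32 allocator restriction is currently in effect. A tiny model of that decision with the mount flags reduced to plain bits; the flag semantics here are an interpretation for illustration, not verified against the full mount code:

#include <stdio.h>

#define XFS_MOUNT_SMALL_INUMS	(1 << 0)	/* inode32 requested */
#define XFS_MOUNT_32BITINODES	(1 << 1)	/* allocator limited to 32 bits */

static int need_64bit_handle(unsigned int flags)
{
	/*
	 * Large handles are needed unless the filesystem both asked for
	 * small inode numbers and has never been big enough to hold
	 * anything beyond 32 bits.
	 */
	return !(flags & XFS_MOUNT_SMALL_INUMS) ||
		(flags & XFS_MOUNT_32BITINODES);
}

int main(void)
{
	printf("%d\n", need_64bit_handle(0));				/* 1 */
	printf("%d\n", need_64bit_handle(XFS_MOUNT_SMALL_INUMS));	/* 0 */
	printf("%d\n", need_64bit_handle(XFS_MOUNT_SMALL_INUMS |
					 XFS_MOUNT_32BITINODES));	/* 1 */
	return 0;
}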
diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h
index 214ddd71ff79..096494997747 100644
--- a/fs/xfs/linux-2.6/xfs_linux.h
+++ b/fs/xfs/linux-2.6/xfs_linux.h
@@ -37,7 +37,6 @@
37 37
38#include <kmem.h> 38#include <kmem.h>
39#include <mrlock.h> 39#include <mrlock.h>
40#include <sv.h>
41#include <time.h> 40#include <time.h>
42 41
43#include <support/debug.h> 42#include <support/debug.h>
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 064f964d4f3c..c51faaa5e291 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -834,8 +834,11 @@ xfsaild_wakeup(
834 struct xfs_ail *ailp, 834 struct xfs_ail *ailp,
835 xfs_lsn_t threshold_lsn) 835 xfs_lsn_t threshold_lsn)
836{ 836{
837 ailp->xa_target = threshold_lsn; 837 /* only ever move the target forwards */
838 wake_up_process(ailp->xa_task); 838 if (XFS_LSN_CMP(threshold_lsn, ailp->xa_target) > 0) {
839 ailp->xa_target = threshold_lsn;
840 wake_up_process(ailp->xa_task);
841 }
839} 842}
840 843
841STATIC int 844STATIC int
@@ -847,8 +850,17 @@ xfsaild(
847 long tout = 0; /* milliseconds */ 850 long tout = 0; /* milliseconds */
848 851
849 while (!kthread_should_stop()) { 852 while (!kthread_should_stop()) {
850 schedule_timeout_interruptible(tout ? 853 /*
851 msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT); 854 * For short sleeps indicating congestion, don't allow us to
855 * get woken early. Otherwise all we do is bang on the AIL lock
856 * without making progress.
857 */
858 if (tout && tout <= 20)
859 __set_current_state(TASK_KILLABLE);
860 else
861 __set_current_state(TASK_INTERRUPTIBLE);
862 schedule_timeout(tout ?
863 msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT);
852 864
853 /* swsusp */ 865 /* swsusp */
854 try_to_freeze(); 866 try_to_freeze();
@@ -1118,6 +1130,8 @@ xfs_fs_evict_inode(
1118 */ 1130 */
1119 ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock)); 1131 ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
1120 mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino); 1132 mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
1133 lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
1134 &xfs_iolock_reclaimable, "xfs_iolock_reclaimable");
1121 1135
1122 xfs_inactive(ip); 1136 xfs_inactive(ip);
1123} 1137}
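Among the xfs_super.c changes, xfsaild_wakeup() now only ever advances the push target, so wakeups carrying an older LSN are dropped instead of banging on the AIL. A minimal model of that guard, using plain integers in place of XFS_LSN_CMP() on cycle/block pairs (stand-in names, not the kernel code):

#include <stdio.h>

static long long xa_target;	/* models ailp->xa_target */
static int wakeups;

static void aild_wakeup(long long threshold_lsn)
{
	/* only ever move the target forwards: XFS_LSN_CMP(...) > 0 */
	if (threshold_lsn > xa_target) {
		xa_target = threshold_lsn;
		wakeups++;	/* wake_up_process(ailp->xa_task) */
	}
}

int main(void)
{
	aild_wakeup(100);
	aild_wakeup(50);	/* stale target: no wakeup, target unchanged */
	aild_wakeup(150);
	printf("target %lld after %d wakeups\n", xa_target, wakeups);
	return 0;
}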
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index afb0d7cfad1c..a02480de9759 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -53,14 +53,30 @@ xfs_inode_ag_walk_grab(
53{ 53{
54 struct inode *inode = VFS_I(ip); 54 struct inode *inode = VFS_I(ip);
55 55
56 ASSERT(rcu_read_lock_held());
57
58 /*
 59 * Check for a stale RCU-freed inode.
 60 *
 61 * If the inode has been reallocated, it doesn't matter if it's not in
 62 * the AG we are walking - we are walking for writeback, so if it
 63 * passes all the "valid inode" checks and is dirty, then we'll write
 64 * it back anyway. If it has been reallocated and is still being
 65 * initialised, the XFS_INEW check below will catch it.
66 */
67 spin_lock(&ip->i_flags_lock);
68 if (!ip->i_ino)
69 goto out_unlock_noent;
70
71 /* avoid new or reclaimable inodes. Leave for reclaim code to flush */
72 if (__xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
73 goto out_unlock_noent;
74 spin_unlock(&ip->i_flags_lock);
75
56 /* nothing to sync during shutdown */ 76 /* nothing to sync during shutdown */
57 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) 77 if (XFS_FORCED_SHUTDOWN(ip->i_mount))
58 return EFSCORRUPTED; 78 return EFSCORRUPTED;
59 79
60 /* avoid new or reclaimable inodes. Leave for reclaim code to flush */
61 if (xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
62 return ENOENT;
63
64 /* If we can't grab the inode, it must be on its way to reclaim. */ 80 /* If we can't grab the inode, it must be on its way to reclaim. */
65 if (!igrab(inode)) 81 if (!igrab(inode))
66 return ENOENT; 82 return ENOENT;
@@ -72,6 +88,10 @@ xfs_inode_ag_walk_grab(
72 88
73 /* inode is valid */ 89 /* inode is valid */
74 return 0; 90 return 0;
91
92out_unlock_noent:
93 spin_unlock(&ip->i_flags_lock);
94 return ENOENT;
75} 95}
76 96
77STATIC int 97STATIC int
@@ -98,12 +118,12 @@ restart:
98 int error = 0; 118 int error = 0;
99 int i; 119 int i;
100 120
101 read_lock(&pag->pag_ici_lock); 121 rcu_read_lock();
102 nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, 122 nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
103 (void **)batch, first_index, 123 (void **)batch, first_index,
104 XFS_LOOKUP_BATCH); 124 XFS_LOOKUP_BATCH);
105 if (!nr_found) { 125 if (!nr_found) {
106 read_unlock(&pag->pag_ici_lock); 126 rcu_read_unlock();
107 break; 127 break;
108 } 128 }
109 129
@@ -118,18 +138,26 @@ restart:
118 batch[i] = NULL; 138 batch[i] = NULL;
119 139
120 /* 140 /*
121 * Update the index for the next lookup. Catch overflows 141 * Update the index for the next lookup. Catch
122 * into the next AG range which can occur if we have inodes 142 * overflows into the next AG range which can occur if
123 * in the last block of the AG and we are currently 143 * we have inodes in the last block of the AG and we
124 * pointing to the last inode. 144 * are currently pointing to the last inode.
145 *
146 * Because we may see inodes that are from the wrong AG
147 * due to RCU freeing and reallocation, only update the
 148 * index if it lies in this AG. It was a race that led
149 * us to see this inode, so another lookup from the
150 * same index will not find it again.
125 */ 151 */
152 if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
153 continue;
126 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1); 154 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
127 if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) 155 if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
128 done = 1; 156 done = 1;
129 } 157 }
130 158
131 /* unlock now we've grabbed the inodes. */ 159 /* unlock now we've grabbed the inodes. */
132 read_unlock(&pag->pag_ici_lock); 160 rcu_read_unlock();
133 161
134 for (i = 0; i < nr_found; i++) { 162 for (i = 0; i < nr_found; i++) {
135 if (!batch[i]) 163 if (!batch[i])
@@ -592,12 +620,12 @@ xfs_inode_set_reclaim_tag(
592 struct xfs_perag *pag; 620 struct xfs_perag *pag;
593 621
594 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); 622 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
595 write_lock(&pag->pag_ici_lock); 623 spin_lock(&pag->pag_ici_lock);
596 spin_lock(&ip->i_flags_lock); 624 spin_lock(&ip->i_flags_lock);
597 __xfs_inode_set_reclaim_tag(pag, ip); 625 __xfs_inode_set_reclaim_tag(pag, ip);
598 __xfs_iflags_set(ip, XFS_IRECLAIMABLE); 626 __xfs_iflags_set(ip, XFS_IRECLAIMABLE);
599 spin_unlock(&ip->i_flags_lock); 627 spin_unlock(&ip->i_flags_lock);
600 write_unlock(&pag->pag_ici_lock); 628 spin_unlock(&pag->pag_ici_lock);
601 xfs_perag_put(pag); 629 xfs_perag_put(pag);
602} 630}
603 631
@@ -639,9 +667,14 @@ xfs_reclaim_inode_grab(
639 struct xfs_inode *ip, 667 struct xfs_inode *ip,
640 int flags) 668 int flags)
641{ 669{
670 ASSERT(rcu_read_lock_held());
671
 672 /* quick check for a stale RCU-freed inode */
673 if (!ip->i_ino)
674 return 1;
642 675
643 /* 676 /*
644 * do some unlocked checks first to avoid unnecceary lock traffic. 677 * do some unlocked checks first to avoid unnecessary lock traffic.
645 * The first is a flush lock check, the second is an already in reclaim 678 * The first is a flush lock check, the second is an already in reclaim
646 * check. Only do these checks if we are not going to block on locks. 679 * check. Only do these checks if we are not going to block on locks.
647 */ 680 */
@@ -654,11 +687,16 @@ xfs_reclaim_inode_grab(
654 * The radix tree lock here protects a thread in xfs_iget from racing 687 * The radix tree lock here protects a thread in xfs_iget from racing
655 * with us starting reclaim on the inode. Once we have the 688 * with us starting reclaim on the inode. Once we have the
656 * XFS_IRECLAIM flag set it will not touch us. 689 * XFS_IRECLAIM flag set it will not touch us.
690 *
691 * Due to RCU lookup, we may find inodes that have been freed and only
692 * have XFS_IRECLAIM set. Indeed, we may see reallocated inodes that
 693 * aren't candidates for reclaim at all, so we must check that
694 * XFS_IRECLAIMABLE is set first before proceeding to reclaim.
657 */ 695 */
658 spin_lock(&ip->i_flags_lock); 696 spin_lock(&ip->i_flags_lock);
659 ASSERT_ALWAYS(__xfs_iflags_test(ip, XFS_IRECLAIMABLE)); 697 if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
660 if (__xfs_iflags_test(ip, XFS_IRECLAIM)) { 698 __xfs_iflags_test(ip, XFS_IRECLAIM)) {
661 /* ignore as it is already under reclaim */ 699 /* not a reclaim candidate. */
662 spin_unlock(&ip->i_flags_lock); 700 spin_unlock(&ip->i_flags_lock);
663 return 1; 701 return 1;
664 } 702 }
@@ -795,12 +833,12 @@ reclaim:
795 * added to the tree assert that it's been there before to catch 833 * added to the tree assert that it's been there before to catch
796 * problems with the inode life time early on. 834 * problems with the inode life time early on.
797 */ 835 */
798 write_lock(&pag->pag_ici_lock); 836 spin_lock(&pag->pag_ici_lock);
799 if (!radix_tree_delete(&pag->pag_ici_root, 837 if (!radix_tree_delete(&pag->pag_ici_root,
800 XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino))) 838 XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino)))
801 ASSERT(0); 839 ASSERT(0);
802 __xfs_inode_clear_reclaim(pag, ip); 840 __xfs_inode_clear_reclaim(pag, ip);
803 write_unlock(&pag->pag_ici_lock); 841 spin_unlock(&pag->pag_ici_lock);
804 842
805 /* 843 /*
806 * Here we do an (almost) spurious inode lock in order to coordinate 844 * Here we do an (almost) spurious inode lock in order to coordinate
@@ -864,14 +902,14 @@ restart:
864 struct xfs_inode *batch[XFS_LOOKUP_BATCH]; 902 struct xfs_inode *batch[XFS_LOOKUP_BATCH];
865 int i; 903 int i;
866 904
867 write_lock(&pag->pag_ici_lock); 905 rcu_read_lock();
868 nr_found = radix_tree_gang_lookup_tag( 906 nr_found = radix_tree_gang_lookup_tag(
869 &pag->pag_ici_root, 907 &pag->pag_ici_root,
870 (void **)batch, first_index, 908 (void **)batch, first_index,
871 XFS_LOOKUP_BATCH, 909 XFS_LOOKUP_BATCH,
872 XFS_ICI_RECLAIM_TAG); 910 XFS_ICI_RECLAIM_TAG);
873 if (!nr_found) { 911 if (!nr_found) {
874 write_unlock(&pag->pag_ici_lock); 912 rcu_read_unlock();
875 break; 913 break;
876 } 914 }
877 915
@@ -891,14 +929,24 @@ restart:
891 * occur if we have inodes in the last block of 929 * occur if we have inodes in the last block of
892 * the AG and we are currently pointing to the 930 * the AG and we are currently pointing to the
893 * last inode. 931 * last inode.
932 *
933 * Because we may see inodes that are from the
934 * wrong AG due to RCU freeing and
935 * reallocation, only update the index if it
936 * lies in this AG. It was a race that lead us
937 * to see this inode, so another lookup from
938 * the same index will not find it again.
894 */ 939 */
940 if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
941 pag->pag_agno)
942 continue;
895 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1); 943 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
896 if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) 944 if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
897 done = 1; 945 done = 1;
898 } 946 }
899 947
900 /* unlock now we've grabbed the inodes. */ 948 /* unlock now we've grabbed the inodes. */
901 write_unlock(&pag->pag_ici_lock); 949 rcu_read_unlock();
902 950
903 for (i = 0; i < nr_found; i++) { 951 for (i = 0; i < nr_found; i++) {
904 if (!batch[i]) 952 if (!batch[i])
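The xfs_sync.c hunks convert the per-AG inode walks from pag_ici_lock to rcu_read_lock(), which means every inode found in the radix tree must be revalidated under ip->i_flags_lock before use, because the slot may hold an RCU-freed or reallocated inode. A rough userspace sketch of that grab-and-revalidate step; the struct layout and flag value are simplified stand-ins, not the kernel definitions:

#include <pthread.h>
#include <stdio.h>

#define XFS_INEW 0x1	/* simplified flag value */

struct inode_stub {
	pthread_mutex_t i_flags_lock;
	unsigned long	i_ino;		/* 0 => stale RCU-freed slot */
	unsigned int	i_flags;
};

/* Returns 0 if the inode may be used, nonzero if it must be skipped. */
static int ag_walk_grab(struct inode_stub *ip)
{
	int ret = 0;

	pthread_mutex_lock(&ip->i_flags_lock);
	if (!ip->i_ino || (ip->i_flags & XFS_INEW))
		ret = -1;	/* freed, reallocated, or still initialising */
	pthread_mutex_unlock(&ip->i_flags_lock);
	return ret;
}

int main(void)
{
	struct inode_stub live = {
		.i_flags_lock = PTHREAD_MUTEX_INITIALIZER,
		.i_ino = 42,
	};
	struct inode_stub stale = {
		.i_flags_lock = PTHREAD_MUTEX_INITIALIZER,
	};

	printf("live inode: %s\n", ag_walk_grab(&live) ? "skip" : "grab");
	printf("stale inode: %s\n", ag_walk_grab(&stale) ? "skip" : "grab");
	return 0;
}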
diff --git a/fs/xfs/linux-2.6/xfs_trace.h b/fs/xfs/linux-2.6/xfs_trace.h
index acef2e98c594..647af2a2e7aa 100644
--- a/fs/xfs/linux-2.6/xfs_trace.h
+++ b/fs/xfs/linux-2.6/xfs_trace.h
@@ -766,8 +766,8 @@ DECLARE_EVENT_CLASS(xfs_loggrant_class,
766 __field(int, curr_res) 766 __field(int, curr_res)
767 __field(int, unit_res) 767 __field(int, unit_res)
768 __field(unsigned int, flags) 768 __field(unsigned int, flags)
769 __field(void *, reserve_headq) 769 __field(int, reserveq)
770 __field(void *, write_headq) 770 __field(int, writeq)
771 __field(int, grant_reserve_cycle) 771 __field(int, grant_reserve_cycle)
772 __field(int, grant_reserve_bytes) 772 __field(int, grant_reserve_bytes)
773 __field(int, grant_write_cycle) 773 __field(int, grant_write_cycle)
@@ -784,19 +784,21 @@ DECLARE_EVENT_CLASS(xfs_loggrant_class,
784 __entry->curr_res = tic->t_curr_res; 784 __entry->curr_res = tic->t_curr_res;
785 __entry->unit_res = tic->t_unit_res; 785 __entry->unit_res = tic->t_unit_res;
786 __entry->flags = tic->t_flags; 786 __entry->flags = tic->t_flags;
787 __entry->reserve_headq = log->l_reserve_headq; 787 __entry->reserveq = list_empty(&log->l_reserveq);
788 __entry->write_headq = log->l_write_headq; 788 __entry->writeq = list_empty(&log->l_writeq);
789 __entry->grant_reserve_cycle = log->l_grant_reserve_cycle; 789 xlog_crack_grant_head(&log->l_grant_reserve_head,
790 __entry->grant_reserve_bytes = log->l_grant_reserve_bytes; 790 &__entry->grant_reserve_cycle,
791 __entry->grant_write_cycle = log->l_grant_write_cycle; 791 &__entry->grant_reserve_bytes);
792 __entry->grant_write_bytes = log->l_grant_write_bytes; 792 xlog_crack_grant_head(&log->l_grant_write_head,
793 &__entry->grant_write_cycle,
794 &__entry->grant_write_bytes);
793 __entry->curr_cycle = log->l_curr_cycle; 795 __entry->curr_cycle = log->l_curr_cycle;
794 __entry->curr_block = log->l_curr_block; 796 __entry->curr_block = log->l_curr_block;
795 __entry->tail_lsn = log->l_tail_lsn; 797 __entry->tail_lsn = atomic64_read(&log->l_tail_lsn);
796 ), 798 ),
797 TP_printk("dev %d:%d type %s t_ocnt %u t_cnt %u t_curr_res %u " 799 TP_printk("dev %d:%d type %s t_ocnt %u t_cnt %u t_curr_res %u "
798 "t_unit_res %u t_flags %s reserve_headq 0x%p " 800 "t_unit_res %u t_flags %s reserveq %s "
799 "write_headq 0x%p grant_reserve_cycle %d " 801 "writeq %s grant_reserve_cycle %d "
800 "grant_reserve_bytes %d grant_write_cycle %d " 802 "grant_reserve_bytes %d grant_write_cycle %d "
801 "grant_write_bytes %d curr_cycle %d curr_block %d " 803 "grant_write_bytes %d curr_cycle %d curr_block %d "
802 "tail_cycle %d tail_block %d", 804 "tail_cycle %d tail_block %d",
@@ -807,8 +809,8 @@ DECLARE_EVENT_CLASS(xfs_loggrant_class,
807 __entry->curr_res, 809 __entry->curr_res,
808 __entry->unit_res, 810 __entry->unit_res,
809 __print_flags(__entry->flags, "|", XLOG_TIC_FLAGS), 811 __print_flags(__entry->flags, "|", XLOG_TIC_FLAGS),
810 __entry->reserve_headq, 812 __entry->reserveq ? "empty" : "active",
811 __entry->write_headq, 813 __entry->writeq ? "empty" : "active",
812 __entry->grant_reserve_cycle, 814 __entry->grant_reserve_cycle,
813 __entry->grant_reserve_bytes, 815 __entry->grant_reserve_bytes,
814 __entry->grant_write_cycle, 816 __entry->grant_write_cycle,
@@ -835,6 +837,7 @@ DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep1);
835DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake1); 837DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake1);
836DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep2); 838DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep2);
837DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake2); 839DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake2);
840DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake_up);
838DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_enter); 841DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_enter);
839DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_exit); 842DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_exit);
840DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_error); 843DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_error);
@@ -842,6 +845,7 @@ DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep1);
842DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake1); 845DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake1);
843DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep2); 846DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep2);
844DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake2); 847DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake2);
848DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake_up);
845DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_enter); 849DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_enter);
846DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_exit); 850DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_exit);
847DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_sub); 851DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_sub);
@@ -935,10 +939,10 @@ DEFINE_PAGE_EVENT(xfs_writepage);
935DEFINE_PAGE_EVENT(xfs_releasepage); 939DEFINE_PAGE_EVENT(xfs_releasepage);
936DEFINE_PAGE_EVENT(xfs_invalidatepage); 940DEFINE_PAGE_EVENT(xfs_invalidatepage);
937 941
938DECLARE_EVENT_CLASS(xfs_iomap_class, 942DECLARE_EVENT_CLASS(xfs_imap_class,
939 TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count, 943 TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count,
940 int flags, struct xfs_bmbt_irec *irec), 944 int type, struct xfs_bmbt_irec *irec),
941 TP_ARGS(ip, offset, count, flags, irec), 945 TP_ARGS(ip, offset, count, type, irec),
942 TP_STRUCT__entry( 946 TP_STRUCT__entry(
943 __field(dev_t, dev) 947 __field(dev_t, dev)
944 __field(xfs_ino_t, ino) 948 __field(xfs_ino_t, ino)
@@ -946,7 +950,7 @@ DECLARE_EVENT_CLASS(xfs_iomap_class,
946 __field(loff_t, new_size) 950 __field(loff_t, new_size)
947 __field(loff_t, offset) 951 __field(loff_t, offset)
948 __field(size_t, count) 952 __field(size_t, count)
949 __field(int, flags) 953 __field(int, type)
950 __field(xfs_fileoff_t, startoff) 954 __field(xfs_fileoff_t, startoff)
951 __field(xfs_fsblock_t, startblock) 955 __field(xfs_fsblock_t, startblock)
952 __field(xfs_filblks_t, blockcount) 956 __field(xfs_filblks_t, blockcount)
@@ -958,13 +962,13 @@ DECLARE_EVENT_CLASS(xfs_iomap_class,
958 __entry->new_size = ip->i_new_size; 962 __entry->new_size = ip->i_new_size;
959 __entry->offset = offset; 963 __entry->offset = offset;
960 __entry->count = count; 964 __entry->count = count;
961 __entry->flags = flags; 965 __entry->type = type;
962 __entry->startoff = irec ? irec->br_startoff : 0; 966 __entry->startoff = irec ? irec->br_startoff : 0;
963 __entry->startblock = irec ? irec->br_startblock : 0; 967 __entry->startblock = irec ? irec->br_startblock : 0;
964 __entry->blockcount = irec ? irec->br_blockcount : 0; 968 __entry->blockcount = irec ? irec->br_blockcount : 0;
965 ), 969 ),
966 TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx " 970 TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx "
967 "offset 0x%llx count %zd flags %s " 971 "offset 0x%llx count %zd type %s "
968 "startoff 0x%llx startblock %lld blockcount 0x%llx", 972 "startoff 0x%llx startblock %lld blockcount 0x%llx",
969 MAJOR(__entry->dev), MINOR(__entry->dev), 973 MAJOR(__entry->dev), MINOR(__entry->dev),
970 __entry->ino, 974 __entry->ino,
@@ -972,20 +976,21 @@ DECLARE_EVENT_CLASS(xfs_iomap_class,
972 __entry->new_size, 976 __entry->new_size,
973 __entry->offset, 977 __entry->offset,
974 __entry->count, 978 __entry->count,
975 __print_flags(__entry->flags, "|", BMAPI_FLAGS), 979 __print_symbolic(__entry->type, XFS_IO_TYPES),
976 __entry->startoff, 980 __entry->startoff,
977 (__int64_t)__entry->startblock, 981 (__int64_t)__entry->startblock,
978 __entry->blockcount) 982 __entry->blockcount)
979) 983)
980 984
981#define DEFINE_IOMAP_EVENT(name) \ 985#define DEFINE_IOMAP_EVENT(name) \
982DEFINE_EVENT(xfs_iomap_class, name, \ 986DEFINE_EVENT(xfs_imap_class, name, \
983 TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count, \ 987 TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count, \
984 int flags, struct xfs_bmbt_irec *irec), \ 988 int type, struct xfs_bmbt_irec *irec), \
985 TP_ARGS(ip, offset, count, flags, irec)) 989 TP_ARGS(ip, offset, count, type, irec))
986DEFINE_IOMAP_EVENT(xfs_iomap_enter); 990DEFINE_IOMAP_EVENT(xfs_map_blocks_found);
987DEFINE_IOMAP_EVENT(xfs_iomap_found); 991DEFINE_IOMAP_EVENT(xfs_map_blocks_alloc);
988DEFINE_IOMAP_EVENT(xfs_iomap_alloc); 992DEFINE_IOMAP_EVENT(xfs_get_blocks_found);
993DEFINE_IOMAP_EVENT(xfs_get_blocks_alloc);
989 994
990DECLARE_EVENT_CLASS(xfs_simple_io_class, 995DECLARE_EVENT_CLASS(xfs_simple_io_class,
991 TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count), 996 TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count),
@@ -1022,6 +1027,7 @@ DEFINE_EVENT(xfs_simple_io_class, name, \
1022 TP_ARGS(ip, offset, count)) 1027 TP_ARGS(ip, offset, count))
1023DEFINE_SIMPLE_IO_EVENT(xfs_delalloc_enospc); 1028DEFINE_SIMPLE_IO_EVENT(xfs_delalloc_enospc);
1024DEFINE_SIMPLE_IO_EVENT(xfs_unwritten_convert); 1029DEFINE_SIMPLE_IO_EVENT(xfs_unwritten_convert);
1030DEFINE_SIMPLE_IO_EVENT(xfs_get_blocks_notfound);
1025 1031
1026 1032
1027TRACE_EVENT(xfs_itruncate_start, 1033TRACE_EVENT(xfs_itruncate_start,
@@ -1420,6 +1426,7 @@ DEFINE_EVENT(xfs_alloc_class, name, \
1420 TP_PROTO(struct xfs_alloc_arg *args), \ 1426 TP_PROTO(struct xfs_alloc_arg *args), \
1421 TP_ARGS(args)) 1427 TP_ARGS(args))
1422DEFINE_ALLOC_EVENT(xfs_alloc_exact_done); 1428DEFINE_ALLOC_EVENT(xfs_alloc_exact_done);
1429DEFINE_ALLOC_EVENT(xfs_alloc_exact_notfound);
1423DEFINE_ALLOC_EVENT(xfs_alloc_exact_error); 1430DEFINE_ALLOC_EVENT(xfs_alloc_exact_error);
1424DEFINE_ALLOC_EVENT(xfs_alloc_near_nominleft); 1431DEFINE_ALLOC_EVENT(xfs_alloc_near_nominleft);
1425DEFINE_ALLOC_EVENT(xfs_alloc_near_first); 1432DEFINE_ALLOC_EVENT(xfs_alloc_near_first);
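The tracepoint above now samples the grant heads via xlog_crack_grant_head() instead of dereferencing list pointers; the heads were combined into single 64-bit words so they can be read atomically. A sketch of one plausible packing (cycle in the upper 32 bits, byte offset in the lower 32); this layout is an assumption for illustration, not a quote of the kernel helpers:

#include <stdint.h>
#include <stdio.h>

static void crack_grant_head(int64_t head, int *cycle, int *space)
{
	*cycle = (int)(head >> 32);		/* upper half: cycle number */
	*space = (int)(head & 0xffffffff);	/* lower half: bytes into log */
}

static int64_t assign_grant_head(int cycle, int space)
{
	return ((int64_t)cycle << 32) | (uint32_t)space;
}

int main(void)
{
	int cycle, space;

	crack_grant_head(assign_grant_head(7, 4096), &cycle, &space);
	printf("cycle %d space %d\n", cycle, space);	/* cycle 7 space 4096 */
	return 0;
}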
diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c
index faf8e1a83a12..d22aa3103106 100644
--- a/fs/xfs/quota/xfs_dquot.c
+++ b/fs/xfs/quota/xfs_dquot.c
@@ -149,7 +149,6 @@ xfs_qm_dqdestroy(
149 ASSERT(list_empty(&dqp->q_freelist)); 149 ASSERT(list_empty(&dqp->q_freelist));
150 150
151 mutex_destroy(&dqp->q_qlock); 151 mutex_destroy(&dqp->q_qlock);
152 sv_destroy(&dqp->q_pinwait);
153 kmem_zone_free(xfs_Gqm->qm_dqzone, dqp); 152 kmem_zone_free(xfs_Gqm->qm_dqzone, dqp);
154 153
155 atomic_dec(&xfs_Gqm->qm_totaldquots); 154 atomic_dec(&xfs_Gqm->qm_totaldquots);
diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h
index 63c7a1a6c022..58632cc17f2d 100644
--- a/fs/xfs/xfs_ag.h
+++ b/fs/xfs/xfs_ag.h
@@ -227,7 +227,7 @@ typedef struct xfs_perag {
227 227
228 atomic_t pagf_fstrms; /* # of filestreams active in this AG */ 228 atomic_t pagf_fstrms; /* # of filestreams active in this AG */
229 229
230 rwlock_t pag_ici_lock; /* incore inode lock */ 230 spinlock_t pag_ici_lock; /* incore inode cache lock */
231 struct radix_tree_root pag_ici_root; /* incore inode cache root */ 231 struct radix_tree_root pag_ici_root; /* incore inode cache root */
232 int pag_ici_reclaimable; /* reclaimable inodes */ 232 int pag_ici_reclaimable; /* reclaimable inodes */
233 struct mutex pag_ici_reclaim_lock; /* serialisation point */ 233 struct mutex pag_ici_reclaim_lock; /* serialisation point */
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index 112abc439ca5..fa8723f5870a 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -577,61 +577,58 @@ xfs_alloc_ag_vextent_exact(
577 xfs_extlen_t rlen; /* length of returned extent */ 577 xfs_extlen_t rlen; /* length of returned extent */
578 578
579 ASSERT(args->alignment == 1); 579 ASSERT(args->alignment == 1);
580
580 /* 581 /*
581 * Allocate/initialize a cursor for the by-number freespace btree. 582 * Allocate/initialize a cursor for the by-number freespace btree.
582 */ 583 */
583 bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp, 584 bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
584 args->agno, XFS_BTNUM_BNO); 585 args->agno, XFS_BTNUM_BNO);
586
585 /* 587 /*
586 * Lookup bno and minlen in the btree (minlen is irrelevant, really). 588 * Lookup bno and minlen in the btree (minlen is irrelevant, really).
587 * Look for the closest free block <= bno, it must contain bno 589 * Look for the closest free block <= bno, it must contain bno
588 * if any free block does. 590 * if any free block does.
589 */ 591 */
590 if ((error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i))) 592 error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i);
593 if (error)
591 goto error0; 594 goto error0;
592 if (!i) { 595 if (!i)
593 /* 596 goto not_found;
594 * Didn't find it, return null. 597
595 */
596 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
597 args->agbno = NULLAGBLOCK;
598 return 0;
599 }
600 /* 598 /*
601 * Grab the freespace record. 599 * Grab the freespace record.
602 */ 600 */
603 if ((error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i))) 601 error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i);
602 if (error)
604 goto error0; 603 goto error0;
605 XFS_WANT_CORRUPTED_GOTO(i == 1, error0); 604 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
606 ASSERT(fbno <= args->agbno); 605 ASSERT(fbno <= args->agbno);
607 minend = args->agbno + args->minlen; 606 minend = args->agbno + args->minlen;
608 maxend = args->agbno + args->maxlen; 607 maxend = args->agbno + args->maxlen;
609 fend = fbno + flen; 608 fend = fbno + flen;
609
610 /* 610 /*
611 * Give up if the freespace isn't long enough for the minimum request. 611 * Give up if the freespace isn't long enough for the minimum request.
612 */ 612 */
613 if (fend < minend) { 613 if (fend < minend)
614 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR); 614 goto not_found;
615 args->agbno = NULLAGBLOCK; 615
616 return 0;
617 }
618 /* 616 /*
619 * End of extent will be smaller of the freespace end and the 617 * End of extent will be smaller of the freespace end and the
620 * maximal requested end. 618 * maximal requested end.
621 */ 619 *
622 end = XFS_AGBLOCK_MIN(fend, maxend);
623 /*
624 * Fix the length according to mod and prod if given. 620 * Fix the length according to mod and prod if given.
625 */ 621 */
622 end = XFS_AGBLOCK_MIN(fend, maxend);
626 args->len = end - args->agbno; 623 args->len = end - args->agbno;
627 xfs_alloc_fix_len(args); 624 xfs_alloc_fix_len(args);
628 if (!xfs_alloc_fix_minleft(args)) { 625 if (!xfs_alloc_fix_minleft(args))
629 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR); 626 goto not_found;
630 return 0; 627
631 }
632 rlen = args->len; 628 rlen = args->len;
633 ASSERT(args->agbno + rlen <= fend); 629 ASSERT(args->agbno + rlen <= fend);
634 end = args->agbno + rlen; 630 end = args->agbno + rlen;
631
635 /* 632 /*
636 * We are allocating agbno for rlen [agbno .. end] 633 * We are allocating agbno for rlen [agbno .. end]
637 * Allocate/initialize a cursor for the by-size btree. 634 * Allocate/initialize a cursor for the by-size btree.
@@ -640,16 +637,25 @@ xfs_alloc_ag_vextent_exact(
640 args->agno, XFS_BTNUM_CNT); 637 args->agno, XFS_BTNUM_CNT);
641 ASSERT(args->agbno + args->len <= 638 ASSERT(args->agbno + args->len <=
642 be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length)); 639 be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
643 if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, 640 error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno,
644 args->agbno, args->len, XFSA_FIXUP_BNO_OK))) { 641 args->len, XFSA_FIXUP_BNO_OK);
642 if (error) {
645 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR); 643 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
646 goto error0; 644 goto error0;
647 } 645 }
646
648 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR); 647 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
649 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); 648 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
650 649
651 trace_xfs_alloc_exact_done(args);
652 args->wasfromfl = 0; 650 args->wasfromfl = 0;
651 trace_xfs_alloc_exact_done(args);
652 return 0;
653
654not_found:
655 /* Didn't find it, return null. */
656 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
657 args->agbno = NULLAGBLOCK;
658 trace_xfs_alloc_exact_notfound(args);
653 return 0; 659 return 0;
654 660
655error0: 661error0:
@@ -659,6 +665,95 @@ error0:
659} 665}
660 666
661/* 667/*
668 * Search the btree in a given direction via the search cursor and compare
669 * the records found against the good extent we've already found.
670 */
671STATIC int
672xfs_alloc_find_best_extent(
673 struct xfs_alloc_arg *args, /* allocation argument structure */
674 struct xfs_btree_cur **gcur, /* good cursor */
675 struct xfs_btree_cur **scur, /* searching cursor */
676 xfs_agblock_t gdiff, /* difference for search comparison */
677 xfs_agblock_t *sbno, /* extent found by search */
678 xfs_extlen_t *slen,
679 xfs_extlen_t *slena, /* aligned length */
680 int dir) /* 0 = search right, 1 = search left */
681{
682 xfs_agblock_t bno;
683 xfs_agblock_t new;
684 xfs_agblock_t sdiff;
685 int error;
686 int i;
687
688 /* The good extent is perfect, no need to search. */
689 if (!gdiff)
690 goto out_use_good;
691
692 /*
693 * Look until we find a better one, run out of space or run off the end.
694 */
695 do {
696 error = xfs_alloc_get_rec(*scur, sbno, slen, &i);
697 if (error)
698 goto error0;
699 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
700 xfs_alloc_compute_aligned(*sbno, *slen, args->alignment,
701 args->minlen, &bno, slena);
702
703 /*
704 * The good extent is closer than this one.
705 */
706 if (!dir) {
707 if (bno >= args->agbno + gdiff)
708 goto out_use_good;
709 } else {
710 if (bno <= args->agbno - gdiff)
711 goto out_use_good;
712 }
713
714 /*
715 * Same distance, compare length and pick the best.
716 */
717 if (*slena >= args->minlen) {
718 args->len = XFS_EXTLEN_MIN(*slena, args->maxlen);
719 xfs_alloc_fix_len(args);
720
721 sdiff = xfs_alloc_compute_diff(args->agbno, args->len,
722 args->alignment, *sbno,
723 *slen, &new);
724
725 /*
726 * Choose closer size and invalidate other cursor.
727 */
728 if (sdiff < gdiff)
729 goto out_use_search;
730 goto out_use_good;
731 }
732
733 if (!dir)
734 error = xfs_btree_increment(*scur, 0, &i);
735 else
736 error = xfs_btree_decrement(*scur, 0, &i);
737 if (error)
738 goto error0;
739 } while (i);
740
741out_use_good:
742 xfs_btree_del_cursor(*scur, XFS_BTREE_NOERROR);
743 *scur = NULL;
744 return 0;
745
746out_use_search:
747 xfs_btree_del_cursor(*gcur, XFS_BTREE_NOERROR);
748 *gcur = NULL;
749 return 0;
750
751error0:
752 /* caller invalidates cursors */
753 return error;
754}
755
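xfs_alloc_find_best_extent() above factors out the old open-coded left/right scans: starting from a known-good extent at distance gdiff from the target block, it walks candidates in one direction and stops as soon as a candidate's start proves it cannot beat the good extent. A stripped-down model of that early-exit search over a plain array, searching to the right only (hypothetical helper, not the btree code):

#include <stdio.h>
#include <stdlib.h>

/* Returns the index of a candidate closer than gdiff, or -1 to keep good. */
static int find_best(long target, const long *starts, int n, long gdiff)
{
	for (int i = 0; i < n; i++) {
		long sdiff = labs(starts[i] - target);

		if (starts[i] >= target + gdiff)
			break;		/* good extent is closer; stop */
		if (sdiff < gdiff)
			return i;	/* candidate wins; kernel also checks
					 * the aligned length vs minlen */
	}
	return -1;			/* keep the good extent */
}

int main(void)
{
	long starts[] = { 130, 150, 400 };	/* candidates right of target */
	int best = find_best(100, starts, 3, 40);

	printf("best candidate index: %d\n", best);	/* 0 (start 130) */
	return 0;
}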
756/*
662 * Allocate a variable extent near bno in the allocation group agno. 757 * Allocate a variable extent near bno in the allocation group agno.
663 * Extent's length (returned in len) will be between minlen and maxlen, 758 * Extent's length (returned in len) will be between minlen and maxlen,
664 * and of the form k * prod + mod unless there's nothing that large. 759 * and of the form k * prod + mod unless there's nothing that large.
@@ -925,203 +1020,45 @@ xfs_alloc_ag_vextent_near(
925 } 1020 }
926 } 1021 }
927 } while (bno_cur_lt || bno_cur_gt); 1022 } while (bno_cur_lt || bno_cur_gt);
1023
928 /* 1024 /*
929 * Got both cursors still active, need to find better entry. 1025 * Got both cursors still active, need to find better entry.
930 */ 1026 */
931 if (bno_cur_lt && bno_cur_gt) { 1027 if (bno_cur_lt && bno_cur_gt) {
932 /*
933 * Left side is long enough, look for a right side entry.
934 */
935 if (ltlena >= args->minlen) { 1028 if (ltlena >= args->minlen) {
936 /* 1029 /*
937 * Fix up the length. 1030 * Left side is good, look for a right side entry.
938 */ 1031 */
939 args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen); 1032 args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
940 xfs_alloc_fix_len(args); 1033 xfs_alloc_fix_len(args);
941 rlen = args->len; 1034 ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
942 ltdiff = xfs_alloc_compute_diff(args->agbno, rlen,
943 args->alignment, ltbno, ltlen, &ltnew); 1035 args->alignment, ltbno, ltlen, &ltnew);
1036
1037 error = xfs_alloc_find_best_extent(args,
1038 &bno_cur_lt, &bno_cur_gt,
1039 ltdiff, &gtbno, &gtlen, &gtlena,
1040 0 /* search right */);
1041 } else {
1042 ASSERT(gtlena >= args->minlen);
1043
944 /* 1044 /*
945 * Not perfect. 1045 * Right side is good, look for a left side entry.
946 */
947 if (ltdiff) {
948 /*
949 * Look until we find a better one, run out of
950 * space, or run off the end.
951 */
952 while (bno_cur_lt && bno_cur_gt) {
953 if ((error = xfs_alloc_get_rec(
954 bno_cur_gt, &gtbno,
955 &gtlen, &i)))
956 goto error0;
957 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
958 xfs_alloc_compute_aligned(gtbno, gtlen,
959 args->alignment, args->minlen,
960 &gtbnoa, &gtlena);
961 /*
962 * The left one is clearly better.
963 */
964 if (gtbnoa >= args->agbno + ltdiff) {
965 xfs_btree_del_cursor(
966 bno_cur_gt,
967 XFS_BTREE_NOERROR);
968 bno_cur_gt = NULL;
969 break;
970 }
971 /*
972 * If we reach a big enough entry,
973 * compare the two and pick the best.
974 */
975 if (gtlena >= args->minlen) {
976 args->len =
977 XFS_EXTLEN_MIN(gtlena,
978 args->maxlen);
979 xfs_alloc_fix_len(args);
980 rlen = args->len;
981 gtdiff = xfs_alloc_compute_diff(
982 args->agbno, rlen,
983 args->alignment,
984 gtbno, gtlen, &gtnew);
985 /*
986 * Right side is better.
987 */
988 if (gtdiff < ltdiff) {
989 xfs_btree_del_cursor(
990 bno_cur_lt,
991 XFS_BTREE_NOERROR);
992 bno_cur_lt = NULL;
993 }
994 /*
995 * Left side is better.
996 */
997 else {
998 xfs_btree_del_cursor(
999 bno_cur_gt,
1000 XFS_BTREE_NOERROR);
1001 bno_cur_gt = NULL;
1002 }
1003 break;
1004 }
1005 /*
1006 * Fell off the right end.
1007 */
1008 if ((error = xfs_btree_increment(
1009 bno_cur_gt, 0, &i)))
1010 goto error0;
1011 if (!i) {
1012 xfs_btree_del_cursor(
1013 bno_cur_gt,
1014 XFS_BTREE_NOERROR);
1015 bno_cur_gt = NULL;
1016 break;
1017 }
1018 }
1019 }
1020 /*
1021 * The left side is perfect, trash the right side.
1022 */
1023 else {
1024 xfs_btree_del_cursor(bno_cur_gt,
1025 XFS_BTREE_NOERROR);
1026 bno_cur_gt = NULL;
1027 }
1028 }
1029 /*
1030 * It's the right side that was found first, look left.
1031 */
1032 else {
1033 /*
1034 * Fix up the length.
1035 */ 1046 */
1036 args->len = XFS_EXTLEN_MIN(gtlena, args->maxlen); 1047 args->len = XFS_EXTLEN_MIN(gtlena, args->maxlen);
1037 xfs_alloc_fix_len(args); 1048 xfs_alloc_fix_len(args);
1038 rlen = args->len; 1049 gtdiff = xfs_alloc_compute_diff(args->agbno, args->len,
1039 gtdiff = xfs_alloc_compute_diff(args->agbno, rlen,
1040 args->alignment, gtbno, gtlen, &gtnew); 1050 args->alignment, gtbno, gtlen, &gtnew);
1041 /* 1051
1042 * Right side entry isn't perfect. 1052 error = xfs_alloc_find_best_extent(args,
1043 */ 1053 &bno_cur_gt, &bno_cur_lt,
1044 if (gtdiff) { 1054 gtdiff, &ltbno, &ltlen, &ltlena,
1045 /* 1055 1 /* search left */);
1046 * Look until we find a better one, run out of
1047 * space, or run off the end.
1048 */
1049 while (bno_cur_lt && bno_cur_gt) {
1050 if ((error = xfs_alloc_get_rec(
1051 bno_cur_lt, &ltbno,
1052 &ltlen, &i)))
1053 goto error0;
1054 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1055 xfs_alloc_compute_aligned(ltbno, ltlen,
1056 args->alignment, args->minlen,
1057 &ltbnoa, &ltlena);
1058 /*
1059 * The right one is clearly better.
1060 */
1061 if (ltbnoa <= args->agbno - gtdiff) {
1062 xfs_btree_del_cursor(
1063 bno_cur_lt,
1064 XFS_BTREE_NOERROR);
1065 bno_cur_lt = NULL;
1066 break;
1067 }
1068 /*
1069 * If we reach a big enough entry,
1070 * compare the two and pick the best.
1071 */
1072 if (ltlena >= args->minlen) {
1073 args->len = XFS_EXTLEN_MIN(
1074 ltlena, args->maxlen);
1075 xfs_alloc_fix_len(args);
1076 rlen = args->len;
1077 ltdiff = xfs_alloc_compute_diff(
1078 args->agbno, rlen,
1079 args->alignment,
1080 ltbno, ltlen, &ltnew);
1081 /*
1082 * Left side is better.
1083 */
1084 if (ltdiff < gtdiff) {
1085 xfs_btree_del_cursor(
1086 bno_cur_gt,
1087 XFS_BTREE_NOERROR);
1088 bno_cur_gt = NULL;
1089 }
1090 /*
1091 * Right side is better.
1092 */
1093 else {
1094 xfs_btree_del_cursor(
1095 bno_cur_lt,
1096 XFS_BTREE_NOERROR);
1097 bno_cur_lt = NULL;
1098 }
1099 break;
1100 }
1101 /*
1102 * Fell off the left end.
1103 */
1104 if ((error = xfs_btree_decrement(
1105 bno_cur_lt, 0, &i)))
1106 goto error0;
1107 if (!i) {
1108 xfs_btree_del_cursor(bno_cur_lt,
1109 XFS_BTREE_NOERROR);
1110 bno_cur_lt = NULL;
1111 break;
1112 }
1113 }
1114 }
1115 /*
1116 * The right side is perfect, trash the left side.
1117 */
1118 else {
1119 xfs_btree_del_cursor(bno_cur_lt,
1120 XFS_BTREE_NOERROR);
1121 bno_cur_lt = NULL;
1122 }
1123 } 1056 }
1057
1058 if (error)
1059 goto error0;
1124 } 1060 }
1061
1125 /* 1062 /*
1126 * If we couldn't get anything, give up. 1063 * If we couldn't get anything, give up.
1127 */ 1064 */
@@ -1130,6 +1067,7 @@ xfs_alloc_ag_vextent_near(
1130 args->agbno = NULLAGBLOCK; 1067 args->agbno = NULLAGBLOCK;
1131 return 0; 1068 return 0;
1132 } 1069 }
1070
1133 /* 1071 /*
1134 * At this point we have selected a freespace entry, either to the 1072 * At this point we have selected a freespace entry, either to the
1135 * left or to the right. If it's on the right, copy all the 1073 * left or to the right. If it's on the right, copy all the
@@ -1146,6 +1084,7 @@ xfs_alloc_ag_vextent_near(
1146 j = 1; 1084 j = 1;
1147 } else 1085 } else
1148 j = 0; 1086 j = 0;
1087
1149 /* 1088 /*
1150 * Fix up the length and compute the useful address. 1089 * Fix up the length and compute the useful address.
1151 */ 1090 */
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c
index a6cff8edcdb6..71e90dc2aeb1 100644
--- a/fs/xfs/xfs_attr_leaf.c
+++ b/fs/xfs/xfs_attr_leaf.c
@@ -637,7 +637,7 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
637 * It didn't all fit, so we have to sort everything on hashval. 637 * It didn't all fit, so we have to sort everything on hashval.
638 */ 638 */
639 sbsize = sf->hdr.count * sizeof(*sbuf); 639 sbsize = sf->hdr.count * sizeof(*sbuf);
640 sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP); 640 sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP | KM_NOFS);
641 641
642 /* 642 /*
643 * Scan the attribute list for the rest of the entries, storing 643 * Scan the attribute list for the rest of the entries, storing
@@ -2386,7 +2386,7 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context)
2386 args.dp = context->dp; 2386 args.dp = context->dp;
2387 args.whichfork = XFS_ATTR_FORK; 2387 args.whichfork = XFS_ATTR_FORK;
2388 args.valuelen = valuelen; 2388 args.valuelen = valuelen;
2389 args.value = kmem_alloc(valuelen, KM_SLEEP); 2389 args.value = kmem_alloc(valuelen, KM_SLEEP | KM_NOFS);
2390 args.rmtblkno = be32_to_cpu(name_rmt->valueblk); 2390 args.rmtblkno = be32_to_cpu(name_rmt->valueblk);
2391 args.rmtblkcnt = XFS_B_TO_FSB(args.dp->i_mount, valuelen); 2391 args.rmtblkcnt = XFS_B_TO_FSB(args.dp->i_mount, valuelen);
2392 retval = xfs_attr_rmtval_get(&args); 2392 retval = xfs_attr_rmtval_get(&args);
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c
index 04f9cca8da7e..2f9e97c128a0 100644
--- a/fs/xfs/xfs_btree.c
+++ b/fs/xfs/xfs_btree.c
@@ -634,9 +634,8 @@ xfs_btree_read_bufl(
634 return error; 634 return error;
635 } 635 }
636 ASSERT(!bp || !XFS_BUF_GETERROR(bp)); 636 ASSERT(!bp || !XFS_BUF_GETERROR(bp));
637 if (bp != NULL) { 637 if (bp)
638 XFS_BUF_SET_VTYPE_REF(bp, B_FS_MAP, refval); 638 XFS_BUF_SET_VTYPE_REF(bp, B_FS_MAP, refval);
639 }
640 *bpp = bp; 639 *bpp = bp;
641 return 0; 640 return 0;
642} 641}
@@ -944,13 +943,13 @@ xfs_btree_set_refs(
944 switch (cur->bc_btnum) { 943 switch (cur->bc_btnum) {
945 case XFS_BTNUM_BNO: 944 case XFS_BTNUM_BNO:
946 case XFS_BTNUM_CNT: 945 case XFS_BTNUM_CNT:
947 XFS_BUF_SET_VTYPE_REF(*bpp, B_FS_MAP, XFS_ALLOC_BTREE_REF); 946 XFS_BUF_SET_VTYPE_REF(bp, B_FS_MAP, XFS_ALLOC_BTREE_REF);
948 break; 947 break;
949 case XFS_BTNUM_INO: 948 case XFS_BTNUM_INO:
950 XFS_BUF_SET_VTYPE_REF(*bpp, B_FS_INOMAP, XFS_INO_BTREE_REF); 949 XFS_BUF_SET_VTYPE_REF(bp, B_FS_INOMAP, XFS_INO_BTREE_REF);
951 break; 950 break;
952 case XFS_BTNUM_BMAP: 951 case XFS_BTNUM_BMAP:
953 XFS_BUF_SET_VTYPE_REF(*bpp, B_FS_MAP, XFS_BMAP_BTREE_REF); 952 XFS_BUF_SET_VTYPE_REF(bp, B_FS_MAP, XFS_BMAP_BTREE_REF);
954 break; 953 break;
955 default: 954 default:
956 ASSERT(0); 955 ASSERT(0);
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 2686d0d54c5b..ed2b65f3f8b9 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -142,7 +142,7 @@ xfs_buf_item_log_check(
142#endif 142#endif
143 143
144STATIC void xfs_buf_error_relse(xfs_buf_t *bp); 144STATIC void xfs_buf_error_relse(xfs_buf_t *bp);
145STATIC void xfs_buf_do_callbacks(xfs_buf_t *bp, xfs_log_item_t *lip); 145STATIC void xfs_buf_do_callbacks(struct xfs_buf *bp);
146 146
147/* 147/*
148 * This returns the number of log iovecs needed to log the 148 * This returns the number of log iovecs needed to log the
@@ -450,7 +450,7 @@ xfs_buf_item_unpin(
450 * xfs_trans_ail_delete() drops the AIL lock. 450 * xfs_trans_ail_delete() drops the AIL lock.
451 */ 451 */
452 if (bip->bli_flags & XFS_BLI_STALE_INODE) { 452 if (bip->bli_flags & XFS_BLI_STALE_INODE) {
453 xfs_buf_do_callbacks(bp, (xfs_log_item_t *)bip); 453 xfs_buf_do_callbacks(bp);
454 XFS_BUF_SET_FSPRIVATE(bp, NULL); 454 XFS_BUF_SET_FSPRIVATE(bp, NULL);
455 XFS_BUF_CLR_IODONE_FUNC(bp); 455 XFS_BUF_CLR_IODONE_FUNC(bp);
456 } else { 456 } else {
@@ -918,15 +918,26 @@ xfs_buf_attach_iodone(
918 XFS_BUF_SET_IODONE_FUNC(bp, xfs_buf_iodone_callbacks); 918 XFS_BUF_SET_IODONE_FUNC(bp, xfs_buf_iodone_callbacks);
919} 919}
920 920
921/*
922 * We can have many callbacks on a buffer. Running the callbacks individually
923 * can cause a lot of contention on the AIL lock, so we allow for a single
924 * callback to be able to scan the remaining lip->li_bio_list for other items
925 * of the same type and callback to be processed in the first call.
926 *
927 * As a result, the loop walking the callback list below will also modify the
928 * list. It removes the first item from the list and then runs the callback.
929 * The loop then restarts from the new head of the list. This allows the
930 * callback to scan and modify the list attached to the buffer and we don't
931 * have to care about maintaining a next item pointer.
932 */
921STATIC void 933STATIC void
922xfs_buf_do_callbacks( 934xfs_buf_do_callbacks(
923 xfs_buf_t *bp, 935 struct xfs_buf *bp)
924 xfs_log_item_t *lip)
925{ 936{
926 xfs_log_item_t *nlip; 937 struct xfs_log_item *lip;
927 938
928 while (lip != NULL) { 939 while ((lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *)) != NULL) {
929 nlip = lip->li_bio_list; 940 XFS_BUF_SET_FSPRIVATE(bp, lip->li_bio_list);
930 ASSERT(lip->li_cb != NULL); 941 ASSERT(lip->li_cb != NULL);
931 /* 942 /*
932 * Clear the next pointer so we don't have any 943 * Clear the next pointer so we don't have any
@@ -936,7 +947,6 @@ xfs_buf_do_callbacks(
936 */ 947 */
937 lip->li_bio_list = NULL; 948 lip->li_bio_list = NULL;
938 lip->li_cb(bp, lip); 949 lip->li_cb(bp, lip);
939 lip = nlip;
940 } 950 }
941} 951}
942 952
@@ -970,7 +980,7 @@ xfs_buf_iodone_callbacks(
970 ASSERT(XFS_BUF_TARGET(bp) == mp->m_ddev_targp); 980 ASSERT(XFS_BUF_TARGET(bp) == mp->m_ddev_targp);
971 XFS_BUF_SUPER_STALE(bp); 981 XFS_BUF_SUPER_STALE(bp);
972 trace_xfs_buf_item_iodone(bp, _RET_IP_); 982 trace_xfs_buf_item_iodone(bp, _RET_IP_);
973 xfs_buf_do_callbacks(bp, lip); 983 xfs_buf_do_callbacks(bp);
974 XFS_BUF_SET_FSPRIVATE(bp, NULL); 984 XFS_BUF_SET_FSPRIVATE(bp, NULL);
975 XFS_BUF_CLR_IODONE_FUNC(bp); 985 XFS_BUF_CLR_IODONE_FUNC(bp);
976 xfs_buf_ioend(bp, 0); 986 xfs_buf_ioend(bp, 0);
@@ -1029,7 +1039,7 @@ xfs_buf_iodone_callbacks(
1029 return; 1039 return;
1030 } 1040 }
1031 1041
1032 xfs_buf_do_callbacks(bp, lip); 1042 xfs_buf_do_callbacks(bp);
1033 XFS_BUF_SET_FSPRIVATE(bp, NULL); 1043 XFS_BUF_SET_FSPRIVATE(bp, NULL);
1034 XFS_BUF_CLR_IODONE_FUNC(bp); 1044 XFS_BUF_CLR_IODONE_FUNC(bp);
1035 xfs_buf_ioend(bp, 0); 1045 xfs_buf_ioend(bp, 0);
@@ -1063,7 +1073,7 @@ xfs_buf_error_relse(
1063 * We have to unpin the pinned buffers so do the 1073 * We have to unpin the pinned buffers so do the
1064 * callbacks. 1074 * callbacks.
1065 */ 1075 */
1066 xfs_buf_do_callbacks(bp, lip); 1076 xfs_buf_do_callbacks(bp);
1067 XFS_BUF_SET_FSPRIVATE(bp, NULL); 1077 XFS_BUF_SET_FSPRIVATE(bp, NULL);
1068 XFS_BUF_CLR_IODONE_FUNC(bp); 1078 XFS_BUF_CLR_IODONE_FUNC(bp);
1069 XFS_BUF_SET_BRELSE_FUNC(bp,NULL); 1079 XFS_BUF_SET_BRELSE_FUNC(bp,NULL);
diff --git a/fs/xfs/xfs_buf_item.h b/fs/xfs/xfs_buf_item.h
index 0e2ed43f16c7..b6ecd2061e7c 100644
--- a/fs/xfs/xfs_buf_item.h
+++ b/fs/xfs/xfs_buf_item.h
@@ -105,17 +105,6 @@ typedef struct xfs_buf_log_item {
105 xfs_buf_log_format_t bli_format; /* in-log header */ 105 xfs_buf_log_format_t bli_format; /* in-log header */
106} xfs_buf_log_item_t; 106} xfs_buf_log_item_t;
107 107
108/*
109 * This structure is used during recovery to record the buf log
110 * items which have been canceled and should not be replayed.
111 */
112typedef struct xfs_buf_cancel {
113 xfs_daddr_t bc_blkno;
114 uint bc_len;
115 int bc_refcount;
116 struct xfs_buf_cancel *bc_next;
117} xfs_buf_cancel_t;
118
119void xfs_buf_item_init(struct xfs_buf *, struct xfs_mount *); 108void xfs_buf_item_init(struct xfs_buf *, struct xfs_mount *);
120void xfs_buf_item_relse(struct xfs_buf *); 109void xfs_buf_item_relse(struct xfs_buf *);
121void xfs_buf_item_log(xfs_buf_log_item_t *, uint, uint); 110void xfs_buf_item_log(xfs_buf_log_item_t *, uint, uint);
diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c
index a55e687bf562..75f2ef60e579 100644
--- a/fs/xfs/xfs_extfree_item.c
+++ b/fs/xfs/xfs_extfree_item.c
@@ -48,6 +48,28 @@ xfs_efi_item_free(
48} 48}
49 49
50/* 50/*
51 * Freeing the efi requires that we remove it from the AIL if it has already
52 * been placed there. However, the EFI may not yet have been placed in the AIL
53 * when called by xfs_efi_release() from EFD processing due to the ordering of
54 * committed vs unpin operations in bulk insert operations. Hence the
55 * test_and_clear_bit(XFS_EFI_COMMITTED) to ensure only the last caller frees
56 * the EFI.
57 */
58STATIC void
59__xfs_efi_release(
60 struct xfs_efi_log_item *efip)
61{
62 struct xfs_ail *ailp = efip->efi_item.li_ailp;
63
64 if (!test_and_clear_bit(XFS_EFI_COMMITTED, &efip->efi_flags)) {
65 spin_lock(&ailp->xa_lock);
66 /* xfs_trans_ail_delete() drops the AIL lock. */
67 xfs_trans_ail_delete(ailp, &efip->efi_item);
68 xfs_efi_item_free(efip);
69 }
70}
71
72/*
51 * This returns the number of iovecs needed to log the given efi item. 73 * This returns the number of iovecs needed to log the given efi item.
52 * We only need 1 iovec for an efi item. It just logs the efi_log_format 74 * We only need 1 iovec for an efi item. It just logs the efi_log_format
53 * structure. 75 * structure.
@@ -74,7 +96,8 @@ xfs_efi_item_format(
74 struct xfs_efi_log_item *efip = EFI_ITEM(lip); 96 struct xfs_efi_log_item *efip = EFI_ITEM(lip);
75 uint size; 97 uint size;
76 98
77 ASSERT(efip->efi_next_extent == efip->efi_format.efi_nextents); 99 ASSERT(atomic_read(&efip->efi_next_extent) ==
100 efip->efi_format.efi_nextents);
78 101
79 efip->efi_format.efi_type = XFS_LI_EFI; 102 efip->efi_format.efi_type = XFS_LI_EFI;
80 103
@@ -99,10 +122,12 @@ xfs_efi_item_pin(
99} 122}
100 123
101/* 124/*
102 * While EFIs cannot really be pinned, the unpin operation is the 125 * While EFIs cannot really be pinned, the unpin operation is the last place at
103 * last place at which the EFI is manipulated during a transaction. 126 * which the EFI is manipulated during a transaction. If we are being asked to
104 * Here we coordinate with xfs_efi_cancel() to determine who gets to 127 * remove the EFI, it's because the transaction has been cancelled and by
105 * free the EFI. 128 * definition that means the EFI cannot be in the AIL so remove it from the
129 * transaction and free it. Otherwise coordinate with xfs_efi_release() (via
130 * XFS_EFI_COMMITTED) to determine who gets to free the EFI.
106 */ 131 */
107STATIC void 132STATIC void
108xfs_efi_item_unpin( 133xfs_efi_item_unpin(
@@ -110,20 +135,14 @@ xfs_efi_item_unpin(
110 int remove) 135 int remove)
111{ 136{
112 struct xfs_efi_log_item *efip = EFI_ITEM(lip); 137 struct xfs_efi_log_item *efip = EFI_ITEM(lip);
113 struct xfs_ail *ailp = lip->li_ailp;
114
115 spin_lock(&ailp->xa_lock);
116 if (efip->efi_flags & XFS_EFI_CANCELED) {
117 if (remove)
118 xfs_trans_del_item(lip);
119 138
120 /* xfs_trans_ail_delete() drops the AIL lock. */ 139 if (remove) {
121 xfs_trans_ail_delete(ailp, lip); 140 ASSERT(!(lip->li_flags & XFS_LI_IN_AIL));
141 xfs_trans_del_item(lip);
122 xfs_efi_item_free(efip); 142 xfs_efi_item_free(efip);
123 } else { 143 return;
124 efip->efi_flags |= XFS_EFI_COMMITTED;
125 spin_unlock(&ailp->xa_lock);
126 } 144 }
145 __xfs_efi_release(efip);
127} 146}
128 147
129/* 148/*
@@ -152,16 +171,20 @@ xfs_efi_item_unlock(
152} 171}
153 172
154/* 173/*
155 * The EFI is logged only once and cannot be moved in the log, so 174 * The EFI is logged only once and cannot be moved in the log, so simply return
156 * simply return the lsn at which it's been logged. The canceled 175 * the lsn at which it's been logged. For bulk transaction committed
157 * flag is not paid any attention here. Checking for that is delayed 176 * processing, the EFI may be processed but not yet unpinned prior to the EFD
158 * until the EFI is unpinned. 177 * being processed. Set the XFS_EFI_COMMITTED flag so this case can be detected
178 * when processing the EFD.
159 */ 179 */
160STATIC xfs_lsn_t 180STATIC xfs_lsn_t
161xfs_efi_item_committed( 181xfs_efi_item_committed(
162 struct xfs_log_item *lip, 182 struct xfs_log_item *lip,
163 xfs_lsn_t lsn) 183 xfs_lsn_t lsn)
164{ 184{
185 struct xfs_efi_log_item *efip = EFI_ITEM(lip);
186
187 set_bit(XFS_EFI_COMMITTED, &efip->efi_flags);
165 return lsn; 188 return lsn;
166} 189}
167 190
@@ -230,6 +253,7 @@ xfs_efi_init(
230 xfs_log_item_init(mp, &efip->efi_item, XFS_LI_EFI, &xfs_efi_item_ops); 253 xfs_log_item_init(mp, &efip->efi_item, XFS_LI_EFI, &xfs_efi_item_ops);
231 efip->efi_format.efi_nextents = nextents; 254 efip->efi_format.efi_nextents = nextents;
232 efip->efi_format.efi_id = (__psint_t)(void*)efip; 255 efip->efi_format.efi_id = (__psint_t)(void*)efip;
256 atomic_set(&efip->efi_next_extent, 0);
233 257
234 return efip; 258 return efip;
235} 259}
@@ -289,37 +313,18 @@ xfs_efi_copy_format(xfs_log_iovec_t *buf, xfs_efi_log_format_t *dst_efi_fmt)
289} 313}
290 314
291/* 315/*
292 * This is called by the efd item code below to release references to 316 * This is called by the efd item code below to release references to the given
293 * the given efi item. Each efd calls this with the number of 317 * efi item. Each efd calls this with the number of extents that it has
294 * extents that it has logged, and when the sum of these reaches 318 * logged, and when the sum of these reaches the total number of extents logged
295 * the total number of extents logged by this efi item we can free 319 * by this efi item we can free the efi item.
296 * the efi item.
297 *
298 * Freeing the efi item requires that we remove it from the AIL.
299 * We'll use the AIL lock to protect our counters as well as
300 * the removal from the AIL.
301 */ 320 */
302void 321void
303xfs_efi_release(xfs_efi_log_item_t *efip, 322xfs_efi_release(xfs_efi_log_item_t *efip,
304 uint nextents) 323 uint nextents)
305{ 324{
306 struct xfs_ail *ailp = efip->efi_item.li_ailp; 325 ASSERT(atomic_read(&efip->efi_next_extent) >= nextents);
307 int extents_left; 326 if (atomic_sub_and_test(nextents, &efip->efi_next_extent))
308 327 __xfs_efi_release(efip);
309 ASSERT(efip->efi_next_extent > 0);
310 ASSERT(efip->efi_flags & XFS_EFI_COMMITTED);
311
312 spin_lock(&ailp->xa_lock);
313 ASSERT(efip->efi_next_extent >= nextents);
314 efip->efi_next_extent -= nextents;
315 extents_left = efip->efi_next_extent;
316 if (extents_left == 0) {
317 /* xfs_trans_ail_delete() drops the AIL lock. */
318 xfs_trans_ail_delete(ailp, (xfs_log_item_t *)efip);
319 xfs_efi_item_free(efip);
320 } else {
321 spin_unlock(&ailp->xa_lock);
322 }
323} 328}
324 329
325static inline struct xfs_efd_log_item *EFD_ITEM(struct xfs_log_item *lip) 330static inline struct xfs_efd_log_item *EFD_ITEM(struct xfs_log_item *lip)
diff --git a/fs/xfs/xfs_extfree_item.h b/fs/xfs/xfs_extfree_item.h
index 0d22c56fdf64..375f68e42531 100644
--- a/fs/xfs/xfs_extfree_item.h
+++ b/fs/xfs/xfs_extfree_item.h
@@ -111,11 +111,10 @@ typedef struct xfs_efd_log_format_64 {
111#define XFS_EFI_MAX_FAST_EXTENTS 16 111#define XFS_EFI_MAX_FAST_EXTENTS 16
112 112
113/* 113/*
114 * Define EFI flags. 114 * Define EFI flag bits. Manipulated by set/clear/test_bit operators.
115 */ 115 */
116#define XFS_EFI_RECOVERED 0x1 116#define XFS_EFI_RECOVERED 1
117#define XFS_EFI_COMMITTED 0x2 117#define XFS_EFI_COMMITTED 2
118#define XFS_EFI_CANCELED 0x4
119 118
120/* 119/*
121 * This is the "extent free intention" log item. It is used 120 * This is the "extent free intention" log item. It is used
@@ -125,8 +124,8 @@ typedef struct xfs_efd_log_format_64 {
125 */ 124 */
126typedef struct xfs_efi_log_item { 125typedef struct xfs_efi_log_item {
127 xfs_log_item_t efi_item; 126 xfs_log_item_t efi_item;
128 uint efi_flags; /* misc flags */ 127 atomic_t efi_next_extent;
129 uint efi_next_extent; 128 unsigned long efi_flags; /* misc flags */
130 xfs_efi_log_format_t efi_format; 129 xfs_efi_log_format_t efi_format;
131} xfs_efi_log_item_t; 130} xfs_efi_log_item_t;
132 131
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index a7c116e814af..f56d30e8040c 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -374,6 +374,7 @@ xfs_growfs_data_private(
374 mp->m_maxicount = icount << mp->m_sb.sb_inopblog; 374 mp->m_maxicount = icount << mp->m_sb.sb_inopblog;
375 } else 375 } else
376 mp->m_maxicount = 0; 376 mp->m_maxicount = 0;
377 xfs_set_low_space_thresholds(mp);
377 378
378 /* update secondary superblocks. */ 379 /* update secondary superblocks. */
379 for (agno = 1; agno < nagcount; agno++) { 380 for (agno = 1; agno < nagcount; agno++) {
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index d7de5a3f7867..cb9b6d1469f7 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -43,6 +43,17 @@
43 43
44 44
45/* 45/*
46 * Define xfs inode iolock lockdep classes. We need to ensure that all active
47 * inodes are considered the same for lockdep purposes, including inodes that
48 * are recycled through the XFS_IRECLAIMABLE state. This is the only way to
49 * guarantee the locks are considered the same when there are multiple lock
50 * initialisation sites. Also, define a reclaimable inode class so it is
51 * obvious in lockdep reports which class the report is against.
52 */
53static struct lock_class_key xfs_iolock_active;
54struct lock_class_key xfs_iolock_reclaimable;
55
56/*
46 * Allocate and initialise an xfs_inode. 57 * Allocate and initialise an xfs_inode.
47 */ 58 */
48STATIC struct xfs_inode * 59STATIC struct xfs_inode *
@@ -69,8 +80,11 @@ xfs_inode_alloc(
69 ASSERT(atomic_read(&ip->i_pincount) == 0); 80 ASSERT(atomic_read(&ip->i_pincount) == 0);
70 ASSERT(!spin_is_locked(&ip->i_flags_lock)); 81 ASSERT(!spin_is_locked(&ip->i_flags_lock));
71 ASSERT(completion_done(&ip->i_flush)); 82 ASSERT(completion_done(&ip->i_flush));
83 ASSERT(ip->i_ino == 0);
72 84
73 mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino); 85 mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
86 lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
87 &xfs_iolock_active, "xfs_iolock_active");
74 88
75 /* initialise the xfs inode */ 89 /* initialise the xfs inode */
76 ip->i_ino = ino; 90 ip->i_ino = ino;
@@ -85,9 +99,6 @@ xfs_inode_alloc(
85 ip->i_size = 0; 99 ip->i_size = 0;
86 ip->i_new_size = 0; 100 ip->i_new_size = 0;
87 101
88 /* prevent anyone from using this yet */
89 VFS_I(ip)->i_state = I_NEW;
90
91 return ip; 102 return ip;
92} 103}
93 104
@@ -145,7 +156,18 @@ xfs_inode_free(
145 ASSERT(!spin_is_locked(&ip->i_flags_lock)); 156 ASSERT(!spin_is_locked(&ip->i_flags_lock));
146 ASSERT(completion_done(&ip->i_flush)); 157 ASSERT(completion_done(&ip->i_flush));
147 158
148 call_rcu(&ip->i_vnode.i_rcu, xfs_inode_free_callback); 159 /*
160 * Because we use RCU freeing we need to ensure the inode always
161 * appears to be reclaimed with an invalid inode number when in the
162 * free state. The ip->i_flags_lock provides the barrier against lookup
163 * races.
164 */
165 spin_lock(&ip->i_flags_lock);
166 ip->i_flags = XFS_IRECLAIM;
167 ip->i_ino = 0;
168 spin_unlock(&ip->i_flags_lock);
169
170 call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
149} 171}
150 172
151/* 173/*
@@ -155,14 +177,29 @@ static int
155xfs_iget_cache_hit( 177xfs_iget_cache_hit(
156 struct xfs_perag *pag, 178 struct xfs_perag *pag,
157 struct xfs_inode *ip, 179 struct xfs_inode *ip,
180 xfs_ino_t ino,
158 int flags, 181 int flags,
159 int lock_flags) __releases(pag->pag_ici_lock) 182 int lock_flags) __releases(RCU)
160{ 183{
161 struct inode *inode = VFS_I(ip); 184 struct inode *inode = VFS_I(ip);
162 struct xfs_mount *mp = ip->i_mount; 185 struct xfs_mount *mp = ip->i_mount;
163 int error; 186 int error;
164 187
188 /*
189 * check for re-use of an inode within an RCU grace period due to the
190 * radix tree nodes not being updated yet. We monitor for this by
191 * setting the inode number to zero before freeing the inode structure.
192 * If the inode has been reallocated and set up, then the inode number
193 * will not match, so check for that, too.
194 */
165 spin_lock(&ip->i_flags_lock); 195 spin_lock(&ip->i_flags_lock);
196 if (ip->i_ino != ino) {
197 trace_xfs_iget_skip(ip);
198 XFS_STATS_INC(xs_ig_frecycle);
199 error = EAGAIN;
200 goto out_error;
201 }
202
166 203
167 /* 204 /*
168 * If we are racing with another cache hit that is currently 205 * If we are racing with another cache hit that is currently
@@ -205,7 +242,7 @@ xfs_iget_cache_hit(
205 ip->i_flags |= XFS_IRECLAIM; 242 ip->i_flags |= XFS_IRECLAIM;
206 243
207 spin_unlock(&ip->i_flags_lock); 244 spin_unlock(&ip->i_flags_lock);
208 read_unlock(&pag->pag_ici_lock); 245 rcu_read_unlock();
209 246
210 error = -inode_init_always(mp->m_super, inode); 247 error = -inode_init_always(mp->m_super, inode);
211 if (error) { 248 if (error) {
@@ -213,7 +250,7 @@ xfs_iget_cache_hit(
213 * Re-initializing the inode failed, and we are in deep 250 * Re-initializing the inode failed, and we are in deep
214 * trouble. Try to re-add it to the reclaim list. 251 * trouble. Try to re-add it to the reclaim list.
215 */ 252 */
216 read_lock(&pag->pag_ici_lock); 253 rcu_read_lock();
217 spin_lock(&ip->i_flags_lock); 254 spin_lock(&ip->i_flags_lock);
218 255
219 ip->i_flags &= ~XFS_INEW; 256 ip->i_flags &= ~XFS_INEW;
@@ -223,14 +260,20 @@ xfs_iget_cache_hit(
223 goto out_error; 260 goto out_error;
224 } 261 }
225 262
226 write_lock(&pag->pag_ici_lock); 263 spin_lock(&pag->pag_ici_lock);
227 spin_lock(&ip->i_flags_lock); 264 spin_lock(&ip->i_flags_lock);
228 ip->i_flags &= ~(XFS_IRECLAIMABLE | XFS_IRECLAIM); 265 ip->i_flags &= ~(XFS_IRECLAIMABLE | XFS_IRECLAIM);
229 ip->i_flags |= XFS_INEW; 266 ip->i_flags |= XFS_INEW;
230 __xfs_inode_clear_reclaim_tag(mp, pag, ip); 267 __xfs_inode_clear_reclaim_tag(mp, pag, ip);
231 inode->i_state = I_NEW; 268 inode->i_state = I_NEW;
269
270 ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
271 mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
272 lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
273 &xfs_iolock_active, "xfs_iolock_active");
274
232 spin_unlock(&ip->i_flags_lock); 275 spin_unlock(&ip->i_flags_lock);
233 write_unlock(&pag->pag_ici_lock); 276 spin_unlock(&pag->pag_ici_lock);
234 } else { 277 } else {
235 /* If the VFS inode is being torn down, pause and try again. */ 278 /* If the VFS inode is being torn down, pause and try again. */
236 if (!igrab(inode)) { 279 if (!igrab(inode)) {
@@ -241,7 +284,7 @@ xfs_iget_cache_hit(
241 284
242 /* We've got a live one. */ 285 /* We've got a live one. */
243 spin_unlock(&ip->i_flags_lock); 286 spin_unlock(&ip->i_flags_lock);
244 read_unlock(&pag->pag_ici_lock); 287 rcu_read_unlock();
245 trace_xfs_iget_hit(ip); 288 trace_xfs_iget_hit(ip);
246 } 289 }
247 290
@@ -255,7 +298,7 @@ xfs_iget_cache_hit(
255 298
256out_error: 299out_error:
257 spin_unlock(&ip->i_flags_lock); 300 spin_unlock(&ip->i_flags_lock);
258 read_unlock(&pag->pag_ici_lock); 301 rcu_read_unlock();
259 return error; 302 return error;
260} 303}
261 304
@@ -308,7 +351,7 @@ xfs_iget_cache_miss(
308 BUG(); 351 BUG();
309 } 352 }
310 353
311 write_lock(&pag->pag_ici_lock); 354 spin_lock(&pag->pag_ici_lock);
312 355
313 /* insert the new inode */ 356 /* insert the new inode */
314 error = radix_tree_insert(&pag->pag_ici_root, agino, ip); 357 error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
@@ -323,14 +366,14 @@ xfs_iget_cache_miss(
323 ip->i_udquot = ip->i_gdquot = NULL; 366 ip->i_udquot = ip->i_gdquot = NULL;
324 xfs_iflags_set(ip, XFS_INEW); 367 xfs_iflags_set(ip, XFS_INEW);
325 368
326 write_unlock(&pag->pag_ici_lock); 369 spin_unlock(&pag->pag_ici_lock);
327 radix_tree_preload_end(); 370 radix_tree_preload_end();
328 371
329 *ipp = ip; 372 *ipp = ip;
330 return 0; 373 return 0;
331 374
332out_preload_end: 375out_preload_end:
333 write_unlock(&pag->pag_ici_lock); 376 spin_unlock(&pag->pag_ici_lock);
334 radix_tree_preload_end(); 377 radix_tree_preload_end();
335 if (lock_flags) 378 if (lock_flags)
336 xfs_iunlock(ip, lock_flags); 379 xfs_iunlock(ip, lock_flags);
@@ -377,7 +420,7 @@ xfs_iget(
377 xfs_agino_t agino; 420 xfs_agino_t agino;
378 421
379 /* reject inode numbers outside existing AGs */ 422 /* reject inode numbers outside existing AGs */
380 if (XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount) 423 if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
381 return EINVAL; 424 return EINVAL;
382 425
383 /* get the perag structure and ensure that it's inode capable */ 426 /* get the perag structure and ensure that it's inode capable */
@@ -386,15 +429,15 @@ xfs_iget(
386 429
387again: 430again:
388 error = 0; 431 error = 0;
389 read_lock(&pag->pag_ici_lock); 432 rcu_read_lock();
390 ip = radix_tree_lookup(&pag->pag_ici_root, agino); 433 ip = radix_tree_lookup(&pag->pag_ici_root, agino);
391 434
392 if (ip) { 435 if (ip) {
393 error = xfs_iget_cache_hit(pag, ip, flags, lock_flags); 436 error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
394 if (error) 437 if (error)
395 goto out_error_or_again; 438 goto out_error_or_again;
396 } else { 439 } else {
397 read_unlock(&pag->pag_ici_lock); 440 rcu_read_unlock();
398 XFS_STATS_INC(xs_ig_missed); 441 XFS_STATS_INC(xs_ig_missed);
399 442
400 error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip, 443 error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 108c7a085f94..be7cf625421f 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -887,7 +887,7 @@ xfs_iread(
887 * around for a while. This helps to keep recently accessed 887 * around for a while. This helps to keep recently accessed
888 * meta-data in-core longer. 888 * meta-data in-core longer.
889 */ 889 */
890 XFS_BUF_SET_REF(bp, XFS_INO_REF); 890 xfs_buf_set_ref(bp, XFS_INO_REF);
891 891
892 /* 892 /*
893 * Use xfs_trans_brelse() to release the buffer containing the 893 * Use xfs_trans_brelse() to release the buffer containing the
@@ -2000,17 +2000,33 @@ xfs_ifree_cluster(
2000 */ 2000 */
2001 for (i = 0; i < ninodes; i++) { 2001 for (i = 0; i < ninodes; i++) {
2002retry: 2002retry:
2003 read_lock(&pag->pag_ici_lock); 2003 rcu_read_lock();
2004 ip = radix_tree_lookup(&pag->pag_ici_root, 2004 ip = radix_tree_lookup(&pag->pag_ici_root,
2005 XFS_INO_TO_AGINO(mp, (inum + i))); 2005 XFS_INO_TO_AGINO(mp, (inum + i)));
2006 2006
2007 /* Inode not in memory or stale, nothing to do */ 2007 /* Inode not in memory, nothing to do */
2008 if (!ip || xfs_iflags_test(ip, XFS_ISTALE)) { 2008 if (!ip) {
2009 read_unlock(&pag->pag_ici_lock); 2009 rcu_read_unlock();
2010 continue; 2010 continue;
2011 } 2011 }
2012 2012
2013 /* 2013 /*
2014 * because this is an RCU protected lookup, we could
2015 * find a recently freed or even reallocated inode
2016 * during the lookup. We need to check under the
2017 * i_flags_lock for a valid inode here. Skip it if it
2018 * is not valid, the wrong inode or stale.
2019 */
2020 spin_lock(&ip->i_flags_lock);
2021 if (ip->i_ino != inum + i ||
2022 __xfs_iflags_test(ip, XFS_ISTALE)) {
2023 spin_unlock(&ip->i_flags_lock);
2024 rcu_read_unlock();
2025 continue;
2026 }
2027 spin_unlock(&ip->i_flags_lock);
2028
2029 /*
2014 * Don't try to lock/unlock the current inode, but we 2030 * Don't try to lock/unlock the current inode, but we
2015 * _cannot_ skip the other inodes that we did not find 2031 * _cannot_ skip the other inodes that we did not find
2016 * in the list attached to the buffer and are not 2032 * in the list attached to the buffer and are not
@@ -2019,11 +2035,11 @@ retry:
2019 */ 2035 */
2020 if (ip != free_ip && 2036 if (ip != free_ip &&
2021 !xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) { 2037 !xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
2022 read_unlock(&pag->pag_ici_lock); 2038 rcu_read_unlock();
2023 delay(1); 2039 delay(1);
2024 goto retry; 2040 goto retry;
2025 } 2041 }
2026 read_unlock(&pag->pag_ici_lock); 2042 rcu_read_unlock();
2027 2043
2028 xfs_iflock(ip); 2044 xfs_iflock(ip);
2029 xfs_iflags_set(ip, XFS_ISTALE); 2045 xfs_iflags_set(ip, XFS_ISTALE);
@@ -2629,7 +2645,7 @@ xfs_iflush_cluster(
2629 2645
2630 mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1); 2646 mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
2631 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask; 2647 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask;
2632 read_lock(&pag->pag_ici_lock); 2648 rcu_read_lock();
2633 /* really need a gang lookup range call here */ 2649 /* really need a gang lookup range call here */
2634 nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)ilist, 2650 nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)ilist,
2635 first_index, inodes_per_cluster); 2651 first_index, inodes_per_cluster);
@@ -2640,9 +2656,21 @@ xfs_iflush_cluster(
2640 iq = ilist[i]; 2656 iq = ilist[i];
2641 if (iq == ip) 2657 if (iq == ip)
2642 continue; 2658 continue;
2643 /* if the inode lies outside this cluster, we're done. */ 2659
2644 if ((XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) != first_index) 2660 /*
2645 break; 2661 * because this is an RCU protected lookup, we could find a
2662 * recently freed or even reallocated inode during the lookup.
2663 * We need to check under the i_flags_lock for a valid inode
2664 * here. Skip it if it is not valid or the wrong inode.
2665 */
2666 spin_lock(&iq->i_flags_lock);
2667 if (!iq->i_ino ||
2668 (XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) != first_index) {
2669 spin_unlock(&iq->i_flags_lock);
2670 continue;
2671 }
2672 spin_unlock(&iq->i_flags_lock);
2673
2646 /* 2674 /*
2647 * Do an un-protected check to see if the inode is dirty and 2675 * Do an un-protected check to see if the inode is dirty and
2648 * is a candidate for flushing. These checks will be repeated 2676 * is a candidate for flushing. These checks will be repeated
@@ -2692,7 +2720,7 @@ xfs_iflush_cluster(
2692 } 2720 }
2693 2721
2694out_free: 2722out_free:
2695 read_unlock(&pag->pag_ici_lock); 2723 rcu_read_unlock();
2696 kmem_free(ilist); 2724 kmem_free(ilist);
2697out_put: 2725out_put:
2698 xfs_perag_put(pag); 2726 xfs_perag_put(pag);
@@ -2704,7 +2732,7 @@ cluster_corrupt_out:
2704 * Corruption detected in the clustering loop. Invalidate the 2732 * Corruption detected in the clustering loop. Invalidate the
2705 * inode buffer and shut down the filesystem. 2733 * inode buffer and shut down the filesystem.
2706 */ 2734 */
2707 read_unlock(&pag->pag_ici_lock); 2735 rcu_read_unlock();
2708 /* 2736 /*
2709 * Clean up the buffer. If it was B_DELWRI, just release it -- 2737 * Clean up the buffer. If it was B_DELWRI, just release it --
2710 * brelse can handle it with no problems. If not, shut down the 2738 * brelse can handle it with no problems. If not, shut down the
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index fb2ca2e4cdc9..5c95fa8ec11d 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -376,12 +376,13 @@ static inline void xfs_ifunlock(xfs_inode_t *ip)
376/* 376/*
377 * In-core inode flags. 377 * In-core inode flags.
378 */ 378 */
379#define XFS_IRECLAIM 0x0001 /* we have started reclaiming this inode */ 379#define XFS_IRECLAIM 0x0001 /* started reclaiming this inode */
380#define XFS_ISTALE 0x0002 /* inode has been staled */ 380#define XFS_ISTALE 0x0002 /* inode has been staled */
381#define XFS_IRECLAIMABLE 0x0004 /* inode can be reclaimed */ 381#define XFS_IRECLAIMABLE 0x0004 /* inode can be reclaimed */
382#define XFS_INEW 0x0008 /* inode has just been allocated */ 382#define XFS_INEW 0x0008 /* inode has just been allocated */
383#define XFS_IFILESTREAM 0x0010 /* inode is in a filestream directory */ 383#define XFS_IFILESTREAM 0x0010 /* inode is in a filestream directory */
384#define XFS_ITRUNCATED 0x0020 /* truncated down so flush-on-close */ 384#define XFS_ITRUNCATED 0x0020 /* truncated down so flush-on-close */
385#define XFS_IDIRTY_RELEASE 0x0040 /* dirty release already seen */
385 386
386/* 387/*
387 * Flags for inode locking. 388 * Flags for inode locking.
@@ -438,6 +439,8 @@ static inline void xfs_ifunlock(xfs_inode_t *ip)
438#define XFS_IOLOCK_DEP(flags) (((flags) & XFS_IOLOCK_DEP_MASK) >> XFS_IOLOCK_SHIFT) 439#define XFS_IOLOCK_DEP(flags) (((flags) & XFS_IOLOCK_DEP_MASK) >> XFS_IOLOCK_SHIFT)
439#define XFS_ILOCK_DEP(flags) (((flags) & XFS_ILOCK_DEP_MASK) >> XFS_ILOCK_SHIFT) 440#define XFS_ILOCK_DEP(flags) (((flags) & XFS_ILOCK_DEP_MASK) >> XFS_ILOCK_SHIFT)
440 441
442extern struct lock_class_key xfs_iolock_reclaimable;
443
441/* 444/*
442 * Flags for xfs_itruncate_start(). 445 * Flags for xfs_itruncate_start().
443 */ 446 */
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 7c8d30c453c3..fd4f398bd6f1 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -842,15 +842,64 @@ xfs_inode_item_destroy(
842 * flushed to disk. It is responsible for removing the inode item 842 * flushed to disk. It is responsible for removing the inode item
843 * from the AIL if it has not been re-logged, and unlocking the inode's 843 * from the AIL if it has not been re-logged, and unlocking the inode's
844 * flush lock. 844 * flush lock.
845 *
846 * To reduce AIL lock traffic as much as possible, we scan the buffer log item
847 * list for other inodes that will run this function. We remove them from the
848 * buffer list so we can process all the inode IO completions in one AIL lock
849 * traversal.
845 */ 850 */
846void 851void
847xfs_iflush_done( 852xfs_iflush_done(
848 struct xfs_buf *bp, 853 struct xfs_buf *bp,
849 struct xfs_log_item *lip) 854 struct xfs_log_item *lip)
850{ 855{
851 struct xfs_inode_log_item *iip = INODE_ITEM(lip); 856 struct xfs_inode_log_item *iip;
852 xfs_inode_t *ip = iip->ili_inode; 857 struct xfs_log_item *blip;
858 struct xfs_log_item *next;
859 struct xfs_log_item *prev;
853 struct xfs_ail *ailp = lip->li_ailp; 860 struct xfs_ail *ailp = lip->li_ailp;
861 int need_ail = 0;
862
863 /*
864 * Scan the buffer IO completions for other inodes being completed and
865 * attach them to the current inode log item.
866 */
867 blip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
868 prev = NULL;
869 while (blip != NULL) {
870 if (blip->li_cb != xfs_iflush_done) {
871 prev = blip;
872 blip = blip->li_bio_list;
873 continue;
874 }
875
876 /* remove from list */
877 next = blip->li_bio_list;
878 if (!prev) {
879 XFS_BUF_SET_FSPRIVATE(bp, next);
880 } else {
881 prev->li_bio_list = next;
882 }
883
884 /* add to current list */
885 blip->li_bio_list = lip->li_bio_list;
886 lip->li_bio_list = blip;
887
888 /*
889 * while we have the item, do the unlocked check for needing
890 * the AIL lock.
891 */
892 iip = INODE_ITEM(blip);
893 if (iip->ili_logged && blip->li_lsn == iip->ili_flush_lsn)
894 need_ail++;
895
896 blip = next;
897 }
898
899 /* make sure we capture the state of the initial inode. */
900 iip = INODE_ITEM(lip);
901 if (iip->ili_logged && lip->li_lsn == iip->ili_flush_lsn)
902 need_ail++;
854 903
855 /* 904 /*
856 * We only want to pull the item from the AIL if it is 905 * We only want to pull the item from the AIL if it is
@@ -861,28 +910,37 @@ xfs_iflush_done(
861 * the lock since it's cheaper, and then we recheck while 910 * the lock since it's cheaper, and then we recheck while
862 * holding the lock before removing the inode from the AIL. 911 * holding the lock before removing the inode from the AIL.
863 */ 912 */
864 if (iip->ili_logged && lip->li_lsn == iip->ili_flush_lsn) { 913 if (need_ail) {
914 struct xfs_log_item *log_items[need_ail];
915 int i = 0;
865 spin_lock(&ailp->xa_lock); 916 spin_lock(&ailp->xa_lock);
866 if (lip->li_lsn == iip->ili_flush_lsn) { 917 for (blip = lip; blip; blip = blip->li_bio_list) {
867 /* xfs_trans_ail_delete() drops the AIL lock. */ 918 iip = INODE_ITEM(blip);
868 xfs_trans_ail_delete(ailp, lip); 919 if (iip->ili_logged &&
869 } else { 920 blip->li_lsn == iip->ili_flush_lsn) {
870 spin_unlock(&ailp->xa_lock); 921 log_items[i++] = blip;
922 }
923 ASSERT(i <= need_ail);
871 } 924 }
925 /* xfs_trans_ail_delete_bulk() drops the AIL lock. */
926 xfs_trans_ail_delete_bulk(ailp, log_items, i);
872 } 927 }
873 928
874 iip->ili_logged = 0;
875 929
876 /* 930 /*
877 * Clear the ili_last_fields bits now that we know that the 931 * clean up and unlock the flush lock now we are done. We can clear the
878 * data corresponding to them is safely on disk. 932 * ili_last_fields bits now that we know that the data corresponding to
933 * them is safely on disk.
879 */ 934 */
880 iip->ili_last_fields = 0; 935 for (blip = lip; blip; blip = next) {
936 next = blip->li_bio_list;
937 blip->li_bio_list = NULL;
881 938
882 /* 939 iip = INODE_ITEM(blip);
883 * Release the inode's flush lock since we're done with it. 940 iip->ili_logged = 0;
884 */ 941 iip->ili_last_fields = 0;
885 xfs_ifunlock(ip); 942 xfs_ifunlock(iip->ili_inode);
943 }
886} 944}
887 945
888/* 946/*
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 20576146369f..55582bd66659 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -47,127 +47,8 @@
47 47
48#define XFS_WRITEIO_ALIGN(mp,off) (((off) >> mp->m_writeio_log) \ 48#define XFS_WRITEIO_ALIGN(mp,off) (((off) >> mp->m_writeio_log) \
49 << mp->m_writeio_log) 49 << mp->m_writeio_log)
50#define XFS_STRAT_WRITE_IMAPS 2
51#define XFS_WRITE_IMAPS XFS_BMAP_MAX_NMAP 50#define XFS_WRITE_IMAPS XFS_BMAP_MAX_NMAP
52 51
53STATIC int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t,
54 int, struct xfs_bmbt_irec *, int *);
55STATIC int xfs_iomap_write_delay(struct xfs_inode *, xfs_off_t, size_t, int,
56 struct xfs_bmbt_irec *, int *);
57STATIC int xfs_iomap_write_allocate(struct xfs_inode *, xfs_off_t, size_t,
58 struct xfs_bmbt_irec *, int *);
59
60int
61xfs_iomap(
62 struct xfs_inode *ip,
63 xfs_off_t offset,
64 ssize_t count,
65 int flags,
66 struct xfs_bmbt_irec *imap,
67 int *nimaps,
68 int *new)
69{
70 struct xfs_mount *mp = ip->i_mount;
71 xfs_fileoff_t offset_fsb, end_fsb;
72 int error = 0;
73 int lockmode = 0;
74 int bmapi_flags = 0;
75
76 ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
77
78 *new = 0;
79
80 if (XFS_FORCED_SHUTDOWN(mp))
81 return XFS_ERROR(EIO);
82
83 trace_xfs_iomap_enter(ip, offset, count, flags, NULL);
84
85 switch (flags & (BMAPI_READ | BMAPI_WRITE | BMAPI_ALLOCATE)) {
86 case BMAPI_READ:
87 lockmode = xfs_ilock_map_shared(ip);
88 bmapi_flags = XFS_BMAPI_ENTIRE;
89 break;
90 case BMAPI_WRITE:
91 lockmode = XFS_ILOCK_EXCL;
92 if (flags & BMAPI_IGNSTATE)
93 bmapi_flags |= XFS_BMAPI_IGSTATE|XFS_BMAPI_ENTIRE;
94 xfs_ilock(ip, lockmode);
95 break;
96 case BMAPI_ALLOCATE:
97 lockmode = XFS_ILOCK_SHARED;
98 bmapi_flags = XFS_BMAPI_ENTIRE;
99
100 /* Attempt non-blocking lock */
101 if (flags & BMAPI_TRYLOCK) {
102 if (!xfs_ilock_nowait(ip, lockmode))
103 return XFS_ERROR(EAGAIN);
104 } else {
105 xfs_ilock(ip, lockmode);
106 }
107 break;
108 default:
109 BUG();
110 }
111
112 ASSERT(offset <= mp->m_maxioffset);
113 if ((xfs_fsize_t)offset + count > mp->m_maxioffset)
114 count = mp->m_maxioffset - offset;
115 end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
116 offset_fsb = XFS_B_TO_FSBT(mp, offset);
117
118 error = xfs_bmapi(NULL, ip, offset_fsb,
119 (xfs_filblks_t)(end_fsb - offset_fsb),
120 bmapi_flags, NULL, 0, imap,
121 nimaps, NULL);
122
123 if (error)
124 goto out;
125
126 switch (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)) {
127 case BMAPI_WRITE:
128 /* If we found an extent, return it */
129 if (*nimaps &&
130 (imap->br_startblock != HOLESTARTBLOCK) &&
131 (imap->br_startblock != DELAYSTARTBLOCK)) {
132 trace_xfs_iomap_found(ip, offset, count, flags, imap);
133 break;
134 }
135
136 if (flags & BMAPI_DIRECT) {
137 error = xfs_iomap_write_direct(ip, offset, count, flags,
138 imap, nimaps);
139 } else {
140 error = xfs_iomap_write_delay(ip, offset, count, flags,
141 imap, nimaps);
142 }
143 if (!error) {
144 trace_xfs_iomap_alloc(ip, offset, count, flags, imap);
145 }
146 *new = 1;
147 break;
148 case BMAPI_ALLOCATE:
149 /* If we found an extent, return it */
150 xfs_iunlock(ip, lockmode);
151 lockmode = 0;
152
153 if (*nimaps && !isnullstartblock(imap->br_startblock)) {
154 trace_xfs_iomap_found(ip, offset, count, flags, imap);
155 break;
156 }
157
158 error = xfs_iomap_write_allocate(ip, offset, count,
159 imap, nimaps);
160 break;
161 }
162
163 ASSERT(*nimaps <= 1);
164
165out:
166 if (lockmode)
167 xfs_iunlock(ip, lockmode);
168 return XFS_ERROR(error);
169}
170
171STATIC int 52STATIC int
172xfs_iomap_eof_align_last_fsb( 53xfs_iomap_eof_align_last_fsb(
173 xfs_mount_t *mp, 54 xfs_mount_t *mp,
@@ -236,14 +117,13 @@ xfs_cmn_err_fsblock_zero(
236 return EFSCORRUPTED; 117 return EFSCORRUPTED;
237} 118}
238 119
239STATIC int 120int
240xfs_iomap_write_direct( 121xfs_iomap_write_direct(
241 xfs_inode_t *ip, 122 xfs_inode_t *ip,
242 xfs_off_t offset, 123 xfs_off_t offset,
243 size_t count, 124 size_t count,
244 int flags,
245 xfs_bmbt_irec_t *imap, 125 xfs_bmbt_irec_t *imap,
246 int *nmaps) 126 int nmaps)
247{ 127{
248 xfs_mount_t *mp = ip->i_mount; 128 xfs_mount_t *mp = ip->i_mount;
249 xfs_fileoff_t offset_fsb; 129 xfs_fileoff_t offset_fsb;
@@ -279,7 +159,7 @@ xfs_iomap_write_direct(
279 if (error) 159 if (error)
280 goto error_out; 160 goto error_out;
281 } else { 161 } else {
282 if (*nmaps && (imap->br_startblock == HOLESTARTBLOCK)) 162 if (nmaps && (imap->br_startblock == HOLESTARTBLOCK))
283 last_fsb = MIN(last_fsb, (xfs_fileoff_t) 163 last_fsb = MIN(last_fsb, (xfs_fileoff_t)
284 imap->br_blockcount + 164 imap->br_blockcount +
285 imap->br_startoff); 165 imap->br_startoff);
@@ -331,7 +211,7 @@ xfs_iomap_write_direct(
331 xfs_trans_ijoin(tp, ip); 211 xfs_trans_ijoin(tp, ip);
332 212
333 bmapi_flag = XFS_BMAPI_WRITE; 213 bmapi_flag = XFS_BMAPI_WRITE;
334 if ((flags & BMAPI_DIRECT) && (offset < ip->i_size || extsz)) 214 if (offset < ip->i_size || extsz)
335 bmapi_flag |= XFS_BMAPI_PREALLOC; 215 bmapi_flag |= XFS_BMAPI_PREALLOC;
336 216
337 /* 217 /*
@@ -370,7 +250,6 @@ xfs_iomap_write_direct(
370 goto error_out; 250 goto error_out;
371 } 251 }
372 252
373 *nmaps = 1;
374 return 0; 253 return 0;
375 254
376error0: /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */ 255error0: /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
@@ -379,7 +258,6 @@ error0: /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
379 258
380error1: /* Just cancel transaction */ 259error1: /* Just cancel transaction */
381 xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); 260 xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
382 *nmaps = 0; /* nothing set-up here */
383 261
384error_out: 262error_out:
385 return XFS_ERROR(error); 263 return XFS_ERROR(error);
@@ -389,6 +267,9 @@ error_out:
389 * If the caller is doing a write at the end of the file, then extend the 267 * If the caller is doing a write at the end of the file, then extend the
390 * allocation out to the file system's write iosize. We clean up any extra 268 * allocation out to the file system's write iosize. We clean up any extra
391 * space left over when the file is closed in xfs_inactive(). 269 * space left over when the file is closed in xfs_inactive().
270 *
271 * If we find we already have delalloc preallocation beyond EOF, don't do more
272 * preallocation as it is not needed.
392 */ 273 */
393STATIC int 274STATIC int
394xfs_iomap_eof_want_preallocate( 275xfs_iomap_eof_want_preallocate(
@@ -396,7 +277,6 @@ xfs_iomap_eof_want_preallocate(
396 xfs_inode_t *ip, 277 xfs_inode_t *ip,
397 xfs_off_t offset, 278 xfs_off_t offset,
398 size_t count, 279 size_t count,
399 int ioflag,
400 xfs_bmbt_irec_t *imap, 280 xfs_bmbt_irec_t *imap,
401 int nimaps, 281 int nimaps,
402 int *prealloc) 282 int *prealloc)
@@ -405,6 +285,7 @@ xfs_iomap_eof_want_preallocate(
405 xfs_filblks_t count_fsb; 285 xfs_filblks_t count_fsb;
406 xfs_fsblock_t firstblock; 286 xfs_fsblock_t firstblock;
407 int n, error, imaps; 287 int n, error, imaps;
288 int found_delalloc = 0;
408 289
409 *prealloc = 0; 290 *prealloc = 0;
410 if ((offset + count) <= ip->i_size) 291 if ((offset + count) <= ip->i_size)
@@ -429,20 +310,66 @@ xfs_iomap_eof_want_preallocate(
429 return 0; 310 return 0;
430 start_fsb += imap[n].br_blockcount; 311 start_fsb += imap[n].br_blockcount;
431 count_fsb -= imap[n].br_blockcount; 312 count_fsb -= imap[n].br_blockcount;
313
314 if (imap[n].br_startblock == DELAYSTARTBLOCK)
315 found_delalloc = 1;
432 } 316 }
433 } 317 }
434 *prealloc = 1; 318 if (!found_delalloc)
319 *prealloc = 1;
435 return 0; 320 return 0;
436} 321}
437 322
438STATIC int 323/*
324 * If we don't have a user-specified preallocation size, dynamically increase
325 * the preallocation size as the size of the file grows. Cap the maximum size
326 * at a single extent or less if the filesystem is near full. The closer the
327 * filesystem is to full, the smaller the maximum preallocation.
328 */
329STATIC xfs_fsblock_t
330xfs_iomap_prealloc_size(
331 struct xfs_mount *mp,
332 struct xfs_inode *ip)
333{
334 xfs_fsblock_t alloc_blocks = 0;
335
336 if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)) {
337 int shift = 0;
338 int64_t freesp;
339
340 alloc_blocks = XFS_B_TO_FSB(mp, ip->i_size);
341 alloc_blocks = XFS_FILEOFF_MIN(MAXEXTLEN,
342 rounddown_pow_of_two(alloc_blocks));
343
344 xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);
345 freesp = mp->m_sb.sb_fdblocks;
346 if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) {
347 shift = 2;
348 if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT])
349 shift++;
350 if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT])
351 shift++;
352 if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT])
353 shift++;
354 if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT])
355 shift++;
356 }
357 if (shift)
358 alloc_blocks >>= shift;
359 }
360
361 if (alloc_blocks < mp->m_writeio_blocks)
362 alloc_blocks = mp->m_writeio_blocks;
363
364 return alloc_blocks;
365}
366
367int
439xfs_iomap_write_delay( 368xfs_iomap_write_delay(
440 xfs_inode_t *ip, 369 xfs_inode_t *ip,
441 xfs_off_t offset, 370 xfs_off_t offset,
442 size_t count, 371 size_t count,
443 int ioflag, 372 xfs_bmbt_irec_t *ret_imap)
444 xfs_bmbt_irec_t *ret_imap,
445 int *nmaps)
446{ 373{
447 xfs_mount_t *mp = ip->i_mount; 374 xfs_mount_t *mp = ip->i_mount;
448 xfs_fileoff_t offset_fsb; 375 xfs_fileoff_t offset_fsb;
@@ -469,16 +396,19 @@ xfs_iomap_write_delay(
469 extsz = xfs_get_extsz_hint(ip); 396 extsz = xfs_get_extsz_hint(ip);
470 offset_fsb = XFS_B_TO_FSBT(mp, offset); 397 offset_fsb = XFS_B_TO_FSBT(mp, offset);
471 398
399
472 error = xfs_iomap_eof_want_preallocate(mp, ip, offset, count, 400 error = xfs_iomap_eof_want_preallocate(mp, ip, offset, count,
473 ioflag, imap, XFS_WRITE_IMAPS, &prealloc); 401 imap, XFS_WRITE_IMAPS, &prealloc);
474 if (error) 402 if (error)
475 return error; 403 return error;
476 404
477retry: 405retry:
478 if (prealloc) { 406 if (prealloc) {
407 xfs_fsblock_t alloc_blocks = xfs_iomap_prealloc_size(mp, ip);
408
479 aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1)); 409 aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1));
480 ioalign = XFS_B_TO_FSBT(mp, aligned_offset); 410 ioalign = XFS_B_TO_FSBT(mp, aligned_offset);
481 last_fsb = ioalign + mp->m_writeio_blocks; 411 last_fsb = ioalign + alloc_blocks;
482 } else { 412 } else {
483 last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count))); 413 last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
484 } 414 }
@@ -496,22 +426,31 @@ retry:
496 XFS_BMAPI_DELAY | XFS_BMAPI_WRITE | 426 XFS_BMAPI_DELAY | XFS_BMAPI_WRITE |
497 XFS_BMAPI_ENTIRE, &firstblock, 1, imap, 427 XFS_BMAPI_ENTIRE, &firstblock, 1, imap,
498 &nimaps, NULL); 428 &nimaps, NULL);
499 if (error && (error != ENOSPC)) 429 switch (error) {
430 case 0:
431 case ENOSPC:
432 case EDQUOT:
433 break;
434 default:
500 return XFS_ERROR(error); 435 return XFS_ERROR(error);
436 }
501 437
502 /* 438 /*
503 * If bmapi returned us nothing, and if we didn't get back EDQUOT, 439 * If bmapi returned us nothing, we got either ENOSPC or EDQUOT. For
504 * then we must have run out of space - flush all other inodes with 440 * ENOSPC, flush all other inodes with delalloc blocks to free up
505 * delalloc blocks and retry without EOF preallocation. 441 * some of the excess reserved metadata space. For both cases, retry
442 * without EOF preallocation.
506 */ 443 */
507 if (nimaps == 0) { 444 if (nimaps == 0) {
508 trace_xfs_delalloc_enospc(ip, offset, count); 445 trace_xfs_delalloc_enospc(ip, offset, count);
509 if (flushed) 446 if (flushed)
510 return XFS_ERROR(ENOSPC); 447 return XFS_ERROR(error ? error : ENOSPC);
511 448
512 xfs_iunlock(ip, XFS_ILOCK_EXCL); 449 if (error == ENOSPC) {
513 xfs_flush_inodes(ip); 450 xfs_iunlock(ip, XFS_ILOCK_EXCL);
514 xfs_ilock(ip, XFS_ILOCK_EXCL); 451 xfs_flush_inodes(ip);
452 xfs_ilock(ip, XFS_ILOCK_EXCL);
453 }
515 454
516 flushed = 1; 455 flushed = 1;
517 error = 0; 456 error = 0;
@@ -523,8 +462,6 @@ retry:
523 return xfs_cmn_err_fsblock_zero(ip, &imap[0]); 462 return xfs_cmn_err_fsblock_zero(ip, &imap[0]);
524 463
525 *ret_imap = imap[0]; 464 *ret_imap = imap[0];
526 *nmaps = 1;
527
528 return 0; 465 return 0;
529} 466}
530 467
@@ -538,13 +475,12 @@ retry:
538 * We no longer bother to look at the incoming map - all we have to 475 * We no longer bother to look at the incoming map - all we have to
539 * guarantee is that whatever we allocate fills the required range. 476 * guarantee is that whatever we allocate fills the required range.
540 */ 477 */
541STATIC int 478int
542xfs_iomap_write_allocate( 479xfs_iomap_write_allocate(
543 xfs_inode_t *ip, 480 xfs_inode_t *ip,
544 xfs_off_t offset, 481 xfs_off_t offset,
545 size_t count, 482 size_t count,
546 xfs_bmbt_irec_t *imap, 483 xfs_bmbt_irec_t *imap)
547 int *retmap)
548{ 484{
549 xfs_mount_t *mp = ip->i_mount; 485 xfs_mount_t *mp = ip->i_mount;
550 xfs_fileoff_t offset_fsb, last_block; 486 xfs_fileoff_t offset_fsb, last_block;
@@ -557,8 +493,6 @@ xfs_iomap_write_allocate(
557 int error = 0; 493 int error = 0;
558 int nres; 494 int nres;
559 495
560 *retmap = 0;
561
562 /* 496 /*
563 * Make sure that the dquots are there. 497 * Make sure that the dquots are there.
564 */ 498 */
@@ -680,7 +614,6 @@ xfs_iomap_write_allocate(
680 if ((offset_fsb >= imap->br_startoff) && 614 if ((offset_fsb >= imap->br_startoff) &&
681 (offset_fsb < (imap->br_startoff + 615 (offset_fsb < (imap->br_startoff +
682 imap->br_blockcount))) { 616 imap->br_blockcount))) {
683 *retmap = 1;
684 XFS_STATS_INC(xs_xstrat_quick); 617 XFS_STATS_INC(xs_xstrat_quick);
685 return 0; 618 return 0;
686 } 619 }
diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h
index 7748a430f50d..80615760959a 100644
--- a/fs/xfs/xfs_iomap.h
+++ b/fs/xfs/xfs_iomap.h
@@ -18,30 +18,15 @@
 #ifndef __XFS_IOMAP_H__
 #define __XFS_IOMAP_H__
 
-/* base extent manipulation calls */
-#define BMAPI_READ	(1 << 0)	/* read extents */
-#define BMAPI_WRITE	(1 << 1)	/* create extents */
-#define BMAPI_ALLOCATE	(1 << 2)	/* delayed allocate to real extents */
-
-/* modifiers */
-#define BMAPI_IGNSTATE	(1 << 4)	/* ignore unwritten state on read */
-#define BMAPI_DIRECT	(1 << 5)	/* direct instead of buffered write */
-#define BMAPI_MMA	(1 << 6)	/* allocate for mmap write */
-#define BMAPI_TRYLOCK	(1 << 7)	/* non-blocking request */
-
-#define BMAPI_FLAGS \
-	{ BMAPI_READ,		"READ" }, \
-	{ BMAPI_WRITE,		"WRITE" }, \
-	{ BMAPI_ALLOCATE,	"ALLOCATE" }, \
-	{ BMAPI_IGNSTATE,	"IGNSTATE" }, \
-	{ BMAPI_DIRECT,		"DIRECT" }, \
-	{ BMAPI_TRYLOCK,	"TRYLOCK" }
-
 struct xfs_inode;
 struct xfs_bmbt_irec;
 
-extern int xfs_iomap(struct xfs_inode *, xfs_off_t, ssize_t, int,
-		     struct xfs_bmbt_irec *, int *, int *);
+extern int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t,
+			struct xfs_bmbt_irec *, int);
+extern int xfs_iomap_write_delay(struct xfs_inode *, xfs_off_t, size_t,
+			struct xfs_bmbt_irec *);
+extern int xfs_iomap_write_allocate(struct xfs_inode *, xfs_off_t, size_t,
+			struct xfs_bmbt_irec *);
 extern int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, size_t);
 
 #endif /* __XFS_IOMAP_H__*/
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index cee4ab9f8a9e..0bf24b11d0c4 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -47,7 +47,7 @@ STATIC xlog_t * xlog_alloc_log(xfs_mount_t *mp,
 				       xfs_buftarg_t	*log_target,
 				       xfs_daddr_t	blk_offset,
 				       int		num_bblks);
-STATIC int	 xlog_space_left(xlog_t *log, int cycle, int bytes);
+STATIC int	 xlog_space_left(struct log *log, atomic64_t *head);
 STATIC int	 xlog_sync(xlog_t *log, xlog_in_core_t *iclog);
 STATIC void	 xlog_dealloc_log(xlog_t *log);
 
@@ -70,7 +70,7 @@ STATIC void xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog);
 /* local functions to manipulate grant head */
 STATIC int  xlog_grant_log_space(xlog_t		*log,
 				 xlog_ticket_t	*xtic);
-STATIC void xlog_grant_push_ail(xfs_mount_t	*mp,
+STATIC void xlog_grant_push_ail(struct log	*log,
 				int		need_bytes);
 STATIC void xlog_regrant_reserve_log_space(xlog_t	 *log,
 					   xlog_ticket_t *ticket);
@@ -81,98 +81,73 @@ STATIC void xlog_ungrant_log_space(xlog_t *log,
 
 #if defined(DEBUG)
 STATIC void	xlog_verify_dest_ptr(xlog_t *log, char *ptr);
-STATIC void	xlog_verify_grant_head(xlog_t *log, int equals);
+STATIC void	xlog_verify_grant_tail(struct log *log);
 STATIC void	xlog_verify_iclog(xlog_t *log, xlog_in_core_t *iclog,
 				  int count, boolean_t syncing);
 STATIC void	xlog_verify_tail_lsn(xlog_t *log, xlog_in_core_t *iclog,
 				     xfs_lsn_t tail_lsn);
 #else
 #define xlog_verify_dest_ptr(a,b)
-#define xlog_verify_grant_head(a,b)
+#define xlog_verify_grant_tail(a)
 #define xlog_verify_iclog(a,b,c,d)
 #define xlog_verify_tail_lsn(a,b,c)
 #endif
 
 STATIC int	xlog_iclogs_empty(xlog_t *log);
 
-
 static void
-xlog_ins_ticketq(struct xlog_ticket **qp, struct xlog_ticket *tic)
+xlog_grant_sub_space(
+	struct log	*log,
+	atomic64_t	*head,
+	int		bytes)
 {
-	if (*qp) {
-		tic->t_next	    = (*qp);
-		tic->t_prev	    = (*qp)->t_prev;
-		(*qp)->t_prev->t_next = tic;
-		(*qp)->t_prev	    = tic;
-	} else {
-		tic->t_prev = tic->t_next = tic;
-		*qp = tic;
-	}
+	int64_t	head_val = atomic64_read(head);
+	int64_t	new, old;
 
-	tic->t_flags |= XLOG_TIC_IN_Q;
-}
+	do {
+		int	cycle, space;
 
-static void
-xlog_del_ticketq(struct xlog_ticket **qp, struct xlog_ticket *tic)
-{
-	if (tic == tic->t_next) {
-		*qp = NULL;
-	} else {
-		*qp = tic->t_next;
-		tic->t_next->t_prev = tic->t_prev;
-		tic->t_prev->t_next = tic->t_next;
-	}
+		xlog_crack_grant_head_val(head_val, &cycle, &space);
 
-	tic->t_next = tic->t_prev = NULL;
-	tic->t_flags &= ~XLOG_TIC_IN_Q;
+		space -= bytes;
+		if (space < 0) {
+			space += log->l_logsize;
+			cycle--;
+		}
+
+		old = head_val;
+		new = xlog_assign_grant_head_val(cycle, space);
+		head_val = atomic64_cmpxchg(head, old, new);
+	} while (head_val != old);
 }
 
 static void
-xlog_grant_sub_space(struct log *log, int bytes)
+xlog_grant_add_space(
+	struct log	*log,
+	atomic64_t	*head,
+	int		bytes)
 {
-	log->l_grant_write_bytes -= bytes;
-	if (log->l_grant_write_bytes < 0) {
-		log->l_grant_write_bytes += log->l_logsize;
-		log->l_grant_write_cycle--;
-	}
-
-	log->l_grant_reserve_bytes -= bytes;
-	if ((log)->l_grant_reserve_bytes < 0) {
-		log->l_grant_reserve_bytes += log->l_logsize;
-		log->l_grant_reserve_cycle--;
-	}
-
-}
+	int64_t	head_val = atomic64_read(head);
+	int64_t	new, old;
 
-static void
-xlog_grant_add_space_write(struct log *log, int bytes)
-{
-	int tmp = log->l_logsize - log->l_grant_write_bytes;
-	if (tmp > bytes)
-		log->l_grant_write_bytes += bytes;
-	else {
-		log->l_grant_write_cycle++;
-		log->l_grant_write_bytes = bytes - tmp;
-	}
-}
+	do {
+		int	tmp;
+		int	cycle, space;
 
-static void
-xlog_grant_add_space_reserve(struct log *log, int bytes)
-{
-	int tmp = log->l_logsize - log->l_grant_reserve_bytes;
-	if (tmp > bytes)
-		log->l_grant_reserve_bytes += bytes;
-	else {
-		log->l_grant_reserve_cycle++;
-		log->l_grant_reserve_bytes = bytes - tmp;
-	}
-}
+		xlog_crack_grant_head_val(head_val, &cycle, &space);
 
-static inline void
-xlog_grant_add_space(struct log *log, int bytes)
-{
-	xlog_grant_add_space_write(log, bytes);
-	xlog_grant_add_space_reserve(log, bytes);
+		tmp = log->l_logsize - space;
+		if (tmp > bytes)
+			space += bytes;
+		else {
+			space = bytes - tmp;
+			cycle++;
+		}
+
+		old = head_val;
+		new = xlog_assign_grant_head_val(cycle, space);
+		head_val = atomic64_cmpxchg(head, old, new);
+	} while (head_val != old);
 }
 
 static void
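The two helpers above update a (cycle, byte offset) pair packed into one 64-bit value with a compare-and-swap retry loop, so no lock is needed. A minimal userspace sketch of the same technique, using C11 atomics in place of the kernel's atomic64_t API; pack_head(), crack_head() and LOGSIZE are invented stand-ins for xlog_assign_grant_head_val(), xlog_crack_grant_head_val() and log->l_logsize:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define LOGSIZE	(16 * 1024 * 1024)	/* stand-in for log->l_logsize */

static int64_t pack_head(int cycle, int space)
{
	return ((int64_t)cycle << 32) | (uint32_t)space;
}

static void crack_head(int64_t val, int *cycle, int *space)
{
	*cycle = (int)(val >> 32);
	*space = (int)(val & 0xffffffff);
}

static void grant_add_space(_Atomic int64_t *head, int bytes)
{
	int64_t old = atomic_load(head);

	for (;;) {
		int cycle, space, tmp;

		crack_head(old, &cycle, &space);
		tmp = LOGSIZE - space;
		if (tmp > bytes) {
			space += bytes;		/* fits in this cycle */
		} else {
			space = bytes - tmp;	/* wrap into the next cycle */
			cycle++;
		}
		/* on failure, "old" is refreshed and the update is retried */
		if (atomic_compare_exchange_weak(head, &old,
						 pack_head(cycle, space)))
			break;
	}
}

int main(void)
{
	_Atomic int64_t head = pack_head(1, 0);
	int cycle, space;

	grant_add_space(&head, 4096);
	crack_head(atomic_load(&head), &cycle, &space);
	printf("cycle %d, space %d\n", cycle, space);	/* cycle 1, space 4096 */
	return 0;
}

Packing both fields into one word is what makes the lockless update possible: cycle and offset always change together, so a single compare-exchange can never observe a torn pair.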
@@ -355,7 +330,7 @@ xfs_log_reserve(
 
 		trace_xfs_log_reserve(log, internal_ticket);
 
-		xlog_grant_push_ail(mp, internal_ticket->t_unit_res);
+		xlog_grant_push_ail(log, internal_ticket->t_unit_res);
 		retval = xlog_regrant_write_log_space(log, internal_ticket);
 	} else {
 		/* may sleep if need to allocate more tickets */
@@ -369,7 +344,7 @@ xfs_log_reserve(
 
 		trace_xfs_log_reserve(log, internal_ticket);
 
-		xlog_grant_push_ail(mp,
+		xlog_grant_push_ail(log,
 				    (internal_ticket->t_unit_res *
 				     internal_ticket->t_cnt));
 		retval = xlog_grant_log_space(log, internal_ticket);
@@ -584,8 +559,8 @@ xfs_log_unmount_write(xfs_mount_t *mp)
 		if (!(iclog->ic_state == XLOG_STATE_ACTIVE ||
 		      iclog->ic_state == XLOG_STATE_DIRTY)) {
 			if (!XLOG_FORCED_SHUTDOWN(log)) {
-				sv_wait(&iclog->ic_force_wait, PMEM,
-					&log->l_icloglock, s);
+				xlog_wait(&iclog->ic_force_wait,
+						&log->l_icloglock);
 			} else {
 				spin_unlock(&log->l_icloglock);
 			}
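Every sv_wait() call in this patch becomes xlog_wait(). The helper itself lives in xfs_log_priv.h and is not part of this excerpt; a sketch of what such a helper looks like, assuming callers hold the spinlock on entry and wake-ups arrive via wake_up()/wake_up_all():

#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

/* Sleep on wq, atomically dropping "lock", which the caller holds. */
static inline void xlog_wait(wait_queue_head_t *wq, spinlock_t *lock)
	__releases(lock)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(wq, &wait);	/* queue before unlocking */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	spin_unlock(lock);
	schedule();
	remove_wait_queue(wq, &wait);
}

Queueing the task and setting its state before the unlock is what closes the lost-wakeup window between dropping the lock and calling schedule().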
@@ -625,8 +600,8 @@ xfs_log_unmount_write(xfs_mount_t *mp)
 			   || iclog->ic_state == XLOG_STATE_DIRTY
 			   || iclog->ic_state == XLOG_STATE_IOERROR) ) {
 
-				sv_wait(&iclog->ic_force_wait, PMEM,
-					&log->l_icloglock, s);
+				xlog_wait(&iclog->ic_force_wait,
+						&log->l_icloglock);
 		} else {
 			spin_unlock(&log->l_icloglock);
 		}
@@ -703,55 +678,46 @@ xfs_log_move_tail(xfs_mount_t *mp,
 {
 	xlog_ticket_t	*tic;
 	xlog_t		*log = mp->m_log;
-	int		need_bytes, free_bytes, cycle, bytes;
+	int		need_bytes, free_bytes;
 
 	if (XLOG_FORCED_SHUTDOWN(log))
 		return;
 
-	if (tail_lsn == 0) {
-		/* needed since sync_lsn is 64 bits */
-		spin_lock(&log->l_icloglock);
-		tail_lsn = log->l_last_sync_lsn;
-		spin_unlock(&log->l_icloglock);
-	}
-
-	spin_lock(&log->l_grant_lock);
+	if (tail_lsn == 0)
+		tail_lsn = atomic64_read(&log->l_last_sync_lsn);
 
-	/* Also an invalid lsn.  1 implies that we aren't passing in a valid
-	 * tail_lsn.
-	 */
-	if (tail_lsn != 1) {
-		log->l_tail_lsn = tail_lsn;
-	}
+	/* tail_lsn == 1 implies that we weren't passed a valid value. */
+	if (tail_lsn != 1)
+		atomic64_set(&log->l_tail_lsn, tail_lsn);
 
-	if ((tic = log->l_write_headq)) {
+	if (!list_empty_careful(&log->l_writeq)) {
 #ifdef DEBUG
 		if (log->l_flags & XLOG_ACTIVE_RECOVERY)
 			panic("Recovery problem");
 #endif
-		cycle = log->l_grant_write_cycle;
-		bytes = log->l_grant_write_bytes;
-		free_bytes = xlog_space_left(log, cycle, bytes);
-		do {
+		spin_lock(&log->l_grant_write_lock);
+		free_bytes = xlog_space_left(log, &log->l_grant_write_head);
+		list_for_each_entry(tic, &log->l_writeq, t_queue) {
 			ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
 
 			if (free_bytes < tic->t_unit_res && tail_lsn != 1)
 				break;
 			tail_lsn = 0;
 			free_bytes -= tic->t_unit_res;
-			sv_signal(&tic->t_wait);
-			tic = tic->t_next;
-		} while (tic != log->l_write_headq);
+			trace_xfs_log_regrant_write_wake_up(log, tic);
+			wake_up(&tic->t_wait);
+		}
+		spin_unlock(&log->l_grant_write_lock);
 	}
-	if ((tic = log->l_reserve_headq)) {
+
+	if (!list_empty_careful(&log->l_reserveq)) {
 #ifdef DEBUG
 		if (log->l_flags & XLOG_ACTIVE_RECOVERY)
 			panic("Recovery problem");
 #endif
-		cycle = log->l_grant_reserve_cycle;
-		bytes = log->l_grant_reserve_bytes;
-		free_bytes = xlog_space_left(log, cycle, bytes);
-		do {
+		spin_lock(&log->l_grant_reserve_lock);
+		free_bytes = xlog_space_left(log, &log->l_grant_reserve_head);
+		list_for_each_entry(tic, &log->l_reserveq, t_queue) {
 			if (tic->t_flags & XLOG_TIC_PERM_RESERV)
 				need_bytes = tic->t_unit_res*tic->t_cnt;
 			else
@@ -760,12 +726,12 @@ xfs_log_move_tail(xfs_mount_t *mp,
 				break;
 			tail_lsn = 0;
 			free_bytes -= need_bytes;
-			sv_signal(&tic->t_wait);
-			tic = tic->t_next;
-		} while (tic != log->l_reserve_headq);
+			trace_xfs_log_grant_wake_up(log, tic);
+			wake_up(&tic->t_wait);
+		}
+		spin_unlock(&log->l_grant_reserve_lock);
 	}
-	spin_unlock(&log->l_grant_lock);
-} /* xfs_log_move_tail */
+}
 
 /*
  * Determine if we have a transaction that has gone to disk
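xfs_log_move_tail() above wakes queued tickets strictly in order, stopping at the first waiter whose reservation no longer fits in the remaining free space. A standalone C sketch of that policy; the ticket struct and the woken flag are invented for the example:

#include <stdio.h>

struct ticket {
	int	unit_res;	/* bytes this waiter needs */
	int	woken;
};

/* Wake waiters in queue order while their reservations still fit. */
static void wake_waiters(struct ticket *q, int nr, int free_bytes)
{
	int i;

	for (i = 0; i < nr; i++) {
		if (free_bytes < q[i].unit_res)
			break;		/* FIFO: later waiters must wait too */
		free_bytes -= q[i].unit_res;
		q[i].woken = 1;		/* stands in for wake_up() */
	}
}

int main(void)
{
	struct ticket q[] = { { 100, 0 }, { 300, 0 }, { 50, 0 } };
	int i;

	wake_waiters(q, 3, 350);
	for (i = 0; i < 3; i++)
		printf("waiter %d: %s\n", i, q[i].woken ? "woken" : "asleep");
	return 0;
}

With 350 bytes free and waiters needing 100, 300 and 50 bytes, only the first is woken: the third would fit, but waking it ahead of the second would break the queue's FIFO fairness.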
@@ -831,23 +797,19 @@ xfs_log_need_covered(xfs_mount_t *mp)
  * We may be holding the log iclog lock upon entering this routine.
  */
 xfs_lsn_t
-xlog_assign_tail_lsn(xfs_mount_t *mp)
+xlog_assign_tail_lsn(
+	struct xfs_mount	*mp)
 {
 	xfs_lsn_t		tail_lsn;
-	xlog_t			*log = mp->m_log;
+	struct log		*log = mp->m_log;
 
 	tail_lsn = xfs_trans_ail_tail(mp->m_ail);
-	spin_lock(&log->l_grant_lock);
-	if (tail_lsn != 0) {
-		log->l_tail_lsn = tail_lsn;
-	} else {
-		tail_lsn = log->l_tail_lsn = log->l_last_sync_lsn;
-	}
-	spin_unlock(&log->l_grant_lock);
+	if (!tail_lsn)
+		tail_lsn = atomic64_read(&log->l_last_sync_lsn);
 
+	atomic64_set(&log->l_tail_lsn, tail_lsn);
 	return tail_lsn;
-} /* xlog_assign_tail_lsn */
-
+}
 
 /*
  * Return the space in the log between the tail and the head.  The head
@@ -864,21 +826,26 @@ xlog_assign_tail_lsn(xfs_mount_t *mp)
  * result is that we return the size of the log as the amount of space left.
  */
 STATIC int
-xlog_space_left(xlog_t *log, int cycle, int bytes)
-{
-	int free_bytes;
-	int tail_bytes;
-	int tail_cycle;
-
-	tail_bytes = BBTOB(BLOCK_LSN(log->l_tail_lsn));
-	tail_cycle = CYCLE_LSN(log->l_tail_lsn);
-	if ((tail_cycle == cycle) && (bytes >= tail_bytes)) {
-		free_bytes = log->l_logsize - (bytes - tail_bytes);
-	} else if ((tail_cycle + 1) < cycle) {
+xlog_space_left(
+	struct log	*log,
+	atomic64_t	*head)
+{
+	int		free_bytes;
+	int		tail_bytes;
+	int		tail_cycle;
+	int		head_cycle;
+	int		head_bytes;
+
+	xlog_crack_grant_head(head, &head_cycle, &head_bytes);
+	xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_bytes);
+	tail_bytes = BBTOB(tail_bytes);
+	if (tail_cycle == head_cycle && head_bytes >= tail_bytes)
+		free_bytes = log->l_logsize - (head_bytes - tail_bytes);
+	else if (tail_cycle + 1 < head_cycle)
 		return 0;
-	} else if (tail_cycle < cycle) {
-		ASSERT(tail_cycle == (cycle - 1));
-		free_bytes = tail_bytes - bytes;
+	else if (tail_cycle < head_cycle) {
+		ASSERT(tail_cycle == (head_cycle - 1));
+		free_bytes = tail_bytes - head_bytes;
 	} else {
 		/*
 		 * The reservation head is behind the tail.
@@ -889,12 +856,12 @@ xlog_space_left(xlog_t *log, int cycle, int bytes)
889 "xlog_space_left: head behind tail\n" 856 "xlog_space_left: head behind tail\n"
890 " tail_cycle = %d, tail_bytes = %d\n" 857 " tail_cycle = %d, tail_bytes = %d\n"
891 " GH cycle = %d, GH bytes = %d", 858 " GH cycle = %d, GH bytes = %d",
892 tail_cycle, tail_bytes, cycle, bytes); 859 tail_cycle, tail_bytes, head_cycle, head_bytes);
893 ASSERT(0); 860 ASSERT(0);
894 free_bytes = log->l_logsize; 861 free_bytes = log->l_logsize;
895 } 862 }
896 return free_bytes; 863 return free_bytes;
897} /* xlog_space_left */ 864}
898 865
899 866
900/* 867/*
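xlog_space_left() above distinguishes three legitimate head/tail arrangements plus one corruption case. A standalone C sketch of the same arithmetic, with head and tail passed as plain (cycle, bytes) integers rather than cracked out of atomic variables:

#include <assert.h>
#include <stdio.h>

static int space_left(int logsize, int tail_cycle, int tail_bytes,
		      int head_cycle, int head_bytes)
{
	if (tail_cycle == head_cycle && head_bytes >= tail_bytes)
		return logsize - (head_bytes - tail_bytes);
	else if (tail_cycle + 1 < head_cycle)
		return 0;	/* head lapped the tail: no space at all */
	else if (tail_cycle < head_cycle) {
		/* head wrapped once; free space runs up to the tail */
		assert(tail_cycle == head_cycle - 1);
		return tail_bytes - head_bytes;
	}
	/* head behind tail: accounting bug; fail safe as "log is empty" */
	return logsize;
}

int main(void)
{
	/* same cycle: head 4096 bytes ahead of tail in a 1 MiB log */
	printf("%d\n", space_left(1 << 20, 5, 0, 5, 4096));	/* 1044480 */
	/* head wrapped into cycle 6; tail at byte 8192 of cycle 5 */
	printf("%d\n", space_left(1 << 20, 5, 8192, 6, 4096));	/* 4096 */
	return 0;
}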
@@ -1047,12 +1014,16 @@ xlog_alloc_log(xfs_mount_t *mp,
 	log->l_flags	   |= XLOG_ACTIVE_RECOVERY;
 
 	log->l_prev_block  = -1;
-	log->l_tail_lsn	   = xlog_assign_lsn(1, 0);
 	/* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
-	log->l_last_sync_lsn = log->l_tail_lsn;
+	xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0);
+	xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0);
 	log->l_curr_cycle  = 1;	    /* 0 is bad since this is initial value */
-	log->l_grant_reserve_cycle = 1;
-	log->l_grant_write_cycle = 1;
+	xlog_assign_grant_head(&log->l_grant_reserve_head, 1, 0);
+	xlog_assign_grant_head(&log->l_grant_write_head, 1, 0);
+	INIT_LIST_HEAD(&log->l_reserveq);
+	INIT_LIST_HEAD(&log->l_writeq);
+	spin_lock_init(&log->l_grant_reserve_lock);
+	spin_lock_init(&log->l_grant_write_lock);
 
 	error = EFSCORRUPTED;
 	if (xfs_sb_version_hassector(&mp->m_sb)) {
@@ -1094,8 +1065,7 @@ xlog_alloc_log(xfs_mount_t *mp,
 	log->l_xbuf = bp;
 
 	spin_lock_init(&log->l_icloglock);
-	spin_lock_init(&log->l_grant_lock);
-	sv_init(&log->l_flush_wait, 0, "flush_wait");
+	init_waitqueue_head(&log->l_flush_wait);
 
 	/* log record size must be multiple of BBSIZE; see xlog_rec_header_t */
 	ASSERT((XFS_BUF_SIZE(bp) & BBMASK) == 0);
@@ -1151,8 +1121,8 @@ xlog_alloc_log(xfs_mount_t *mp,
 
 		ASSERT(XFS_BUF_ISBUSY(iclog->ic_bp));
 		ASSERT(XFS_BUF_VALUSEMA(iclog->ic_bp) <= 0);
-		sv_init(&iclog->ic_force_wait, SV_DEFAULT, "iclog-force");
-		sv_init(&iclog->ic_write_wait, SV_DEFAULT, "iclog-write");
+		init_waitqueue_head(&iclog->ic_force_wait);
+		init_waitqueue_head(&iclog->ic_write_wait);
 
 		iclogp = &iclog->ic_next;
 	}
@@ -1167,15 +1137,11 @@ xlog_alloc_log(xfs_mount_t *mp,
 out_free_iclog:
 	for (iclog = log->l_iclog; iclog; iclog = prev_iclog) {
 		prev_iclog = iclog->ic_next;
-		if (iclog->ic_bp) {
-			sv_destroy(&iclog->ic_force_wait);
-			sv_destroy(&iclog->ic_write_wait);
+		if (iclog->ic_bp)
 			xfs_buf_free(iclog->ic_bp);
-		}
 		kmem_free(iclog);
 	}
 	spinlock_destroy(&log->l_icloglock);
-	spinlock_destroy(&log->l_grant_lock);
 	xfs_buf_free(log->l_xbuf);
 out_free_log:
 	kmem_free(log);
@@ -1223,61 +1189,60 @@ xlog_commit_record(
  * water mark.  In this manner, we would be creating a low water mark.
  */
 STATIC void
-xlog_grant_push_ail(xfs_mount_t	*mp,
-		    int		need_bytes)
+xlog_grant_push_ail(
+	struct log	*log,
+	int		need_bytes)
 {
-	xlog_t		*log = mp->m_log;	/* pointer to the log */
-	xfs_lsn_t	tail_lsn;		/* lsn of the log tail */
-	xfs_lsn_t	threshold_lsn = 0;	/* lsn we'd like to be at */
-	int		free_blocks;		/* free blocks left to write to */
-	int		free_bytes;		/* free bytes left to write to */
-	int		threshold_block;	/* block in lsn we'd like to be at */
-	int		threshold_cycle;	/* lsn cycle we'd like to be at */
-	int		free_threshold;
-
-	ASSERT(BTOBB(need_bytes) < log->l_logBBsize);
-
-	spin_lock(&log->l_grant_lock);
-	free_bytes = xlog_space_left(log,
-				log->l_grant_reserve_cycle,
-				log->l_grant_reserve_bytes);
-	tail_lsn = log->l_tail_lsn;
-	free_blocks = BTOBBT(free_bytes);
-
-	/*
-	 * Set the threshold for the minimum number of free blocks in the
-	 * log to the maximum of what the caller needs, one quarter of the
-	 * log, and 256 blocks.
-	 */
-	free_threshold = BTOBB(need_bytes);
-	free_threshold = MAX(free_threshold, (log->l_logBBsize >> 2));
-	free_threshold = MAX(free_threshold, 256);
-	if (free_blocks < free_threshold) {
-		threshold_block = BLOCK_LSN(tail_lsn) + free_threshold;
-		threshold_cycle = CYCLE_LSN(tail_lsn);
+	xfs_lsn_t	threshold_lsn = 0;
+	xfs_lsn_t	last_sync_lsn;
+	int		free_blocks;
+	int		free_bytes;
+	int		threshold_block;
+	int		threshold_cycle;
+	int		free_threshold;
+
+	ASSERT(BTOBB(need_bytes) < log->l_logBBsize);
+
+	free_bytes = xlog_space_left(log, &log->l_grant_reserve_head);
+	free_blocks = BTOBBT(free_bytes);
+
+	/*
+	 * Set the threshold for the minimum number of free blocks in the
+	 * log to the maximum of what the caller needs, one quarter of the
+	 * log, and 256 blocks.
+	 */
+	free_threshold = BTOBB(need_bytes);
+	free_threshold = MAX(free_threshold, (log->l_logBBsize >> 2));
+	free_threshold = MAX(free_threshold, 256);
+	if (free_blocks >= free_threshold)
+		return;
+
+	xlog_crack_atomic_lsn(&log->l_tail_lsn, &threshold_cycle,
+						&threshold_block);
+	threshold_block += free_threshold;
 	if (threshold_block >= log->l_logBBsize) {
 		threshold_block -= log->l_logBBsize;
 		threshold_cycle += 1;
 	}
-		threshold_lsn = xlog_assign_lsn(threshold_cycle, threshold_block);
-
-		/* Don't pass in an lsn greater than the lsn of the last
-		 * log record known to be on disk.
-		 */
-		if (XFS_LSN_CMP(threshold_lsn, log->l_last_sync_lsn) > 0)
-			threshold_lsn = log->l_last_sync_lsn;
-	}
-	spin_unlock(&log->l_grant_lock);
-
-	/*
-	 * Get the transaction layer to kick the dirty buffers out to
-	 * disk asynchronously. No point in trying to do this if
-	 * the filesystem is shutting down.
-	 */
-	if (threshold_lsn &&
-	    !XLOG_FORCED_SHUTDOWN(log))
-		xfs_trans_ail_push(log->l_ailp, threshold_lsn);
-} /* xlog_grant_push_ail */
+	threshold_lsn = xlog_assign_lsn(threshold_cycle,
+					threshold_block);
+	/*
+	 * Don't pass in an lsn greater than the lsn of the last
+	 * log record known to be on disk. Use a snapshot of the last sync lsn
+	 * so that it doesn't change between the compare and the set.
+	 */
+	last_sync_lsn = atomic64_read(&log->l_last_sync_lsn);
+	if (XFS_LSN_CMP(threshold_lsn, last_sync_lsn) > 0)
+		threshold_lsn = last_sync_lsn;
+
+	/*
+	 * Get the transaction layer to kick the dirty buffers out to
+	 * disk asynchronously. No point in trying to do this if
+	 * the filesystem is shutting down.
+	 */
+	if (!XLOG_FORCED_SHUTDOWN(log))
+		xfs_trans_ail_push(log->l_ailp, threshold_lsn);
+}
 
 /*
  * The bdstrat callback function for log bufs. This gives us a central
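The push threshold above is the maximum of three terms: what the caller needs, one quarter of the log, and 256 blocks. In isolation that computation is just this (values in 512-byte basic blocks; the MAX macro and the log sizes are local stand-ins for the kernel's):

#include <stdio.h>

#define MAX(a, b)	((a) > (b) ? (a) : (b))

static int free_threshold(int need_blocks, int log_blocks)
{
	int threshold = need_blocks;

	threshold = MAX(threshold, log_blocks >> 2);	/* 1/4 of the log */
	threshold = MAX(threshold, 256);		/* absolute floor */
	return threshold;
}

int main(void)
{
	/* small request in a 32768-block log: the quarter-log term wins */
	printf("%d\n", free_threshold(64, 32768));	/* 8192 */
	/* tiny log: the 256-block floor wins */
	printf("%d\n", free_threshold(64, 512));	/* 256 */
	return 0;
}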
@@ -1372,9 +1337,8 @@ xlog_sync(xlog_t *log,
 		 roundoff < BBTOB(1)));
 
 	/* move grant heads by roundoff in sync */
-	spin_lock(&log->l_grant_lock);
-	xlog_grant_add_space(log, roundoff);
-	spin_unlock(&log->l_grant_lock);
+	xlog_grant_add_space(log, &log->l_grant_reserve_head, roundoff);
+	xlog_grant_add_space(log, &log->l_grant_write_head, roundoff);
 
 	/* put cycle number in every block */
 	xlog_pack_data(log, iclog, roundoff);
@@ -1489,15 +1453,12 @@ xlog_dealloc_log(xlog_t *log)
 
 	iclog = log->l_iclog;
 	for (i=0; i<log->l_iclog_bufs; i++) {
-		sv_destroy(&iclog->ic_force_wait);
-		sv_destroy(&iclog->ic_write_wait);
 		xfs_buf_free(iclog->ic_bp);
 		next_iclog = iclog->ic_next;
 		kmem_free(iclog);
 		iclog = next_iclog;
 	}
 	spinlock_destroy(&log->l_icloglock);
-	spinlock_destroy(&log->l_grant_lock);
 
 	xfs_buf_free(log->l_xbuf);
 	log->l_mp->m_log = NULL;
@@ -2232,7 +2193,7 @@ xlog_state_do_callback(
 				lowest_lsn = xlog_get_lowest_lsn(log);
 				if (lowest_lsn &&
 				    XFS_LSN_CMP(lowest_lsn,
-						be64_to_cpu(iclog->ic_header.h_lsn)) < 0) {
+					be64_to_cpu(iclog->ic_header.h_lsn)) < 0) {
 					iclog = iclog->ic_next;
 					continue; /* Leave this iclog for
 						   * another thread */
@@ -2240,23 +2201,21 @@ xlog_state_do_callback(
 
 			iclog->ic_state = XLOG_STATE_CALLBACK;
 
-			spin_unlock(&log->l_icloglock);
 
-			/* l_last_sync_lsn field protected by
-			 * l_grant_lock. Don't worry about iclog's lsn.
-			 * No one else can be here except us.
+			/*
+			 * update the last_sync_lsn before we drop the
+			 * icloglock to ensure we are the only one that
+			 * can update it.
 			 */
-			spin_lock(&log->l_grant_lock);
-			ASSERT(XFS_LSN_CMP(log->l_last_sync_lsn,
-			       be64_to_cpu(iclog->ic_header.h_lsn)) <= 0);
-			log->l_last_sync_lsn =
-				be64_to_cpu(iclog->ic_header.h_lsn);
-			spin_unlock(&log->l_grant_lock);
+			ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn),
+				be64_to_cpu(iclog->ic_header.h_lsn)) <= 0);
+			atomic64_set(&log->l_last_sync_lsn,
+				be64_to_cpu(iclog->ic_header.h_lsn));
 
-		} else {
-			spin_unlock(&log->l_icloglock);
+		} else
 			ioerrors++;
-		}
+
+		spin_unlock(&log->l_icloglock);
 
 		/*
 		 * Keep processing entries in the callback list until
@@ -2297,7 +2256,7 @@ xlog_state_do_callback(
 			xlog_state_clean_log(log);
 
 			/* wake up threads waiting in xfs_log_force() */
-			sv_broadcast(&iclog->ic_force_wait);
+			wake_up_all(&iclog->ic_force_wait);
 
 			iclog = iclog->ic_next;
 		} while (first_iclog != iclog);
@@ -2344,7 +2303,7 @@ xlog_state_do_callback(
 	spin_unlock(&log->l_icloglock);
 
 	if (wake)
-		sv_broadcast(&log->l_flush_wait);
+		wake_up_all(&log->l_flush_wait);
 }
 
 
@@ -2395,7 +2354,7 @@ xlog_state_done_syncing(
 	 * iclog buffer, we wake them all, one will get to do the
 	 * I/O, the others get to wait for the result.
 	 */
-	sv_broadcast(&iclog->ic_write_wait);
+	wake_up_all(&iclog->ic_write_wait);
 	spin_unlock(&log->l_icloglock);
 	xlog_state_do_callback(log, aborted, iclog);	/* also cleans log */
 }	/* xlog_state_done_syncing */
@@ -2444,7 +2403,7 @@ restart:
 		XFS_STATS_INC(xs_log_noiclogs);
 
 		/* Wait for log writes to have flushed */
-		sv_wait(&log->l_flush_wait, 0, &log->l_icloglock, 0);
+		xlog_wait(&log->l_flush_wait, &log->l_icloglock);
 		goto restart;
 	}
 
@@ -2527,6 +2486,18 @@ restart:
  *
  * Once a ticket gets put onto the reserveq, it will only return after
  * the needed reservation is satisfied.
+ *
+ * This function is structured so that it has a lock free fast path. This is
+ * necessary because every new transaction reservation will come through this
+ * path. Hence any lock will be globally hot if we take it unconditionally on
+ * every pass.
+ *
+ * As tickets are only ever moved on and off the reserveq under the
+ * l_grant_reserve_lock, we only need to take that lock if we are going to add
+ * the ticket to the queue and sleep. We can avoid taking the lock if the
+ * ticket was never added to the reserveq because the t_queue list head will
+ * be empty and we hold the only reference to it so it can safely be checked
+ * unlocked.
  */
 STATIC int
 xlog_grant_log_space(xlog_t	   *log,
@@ -2534,24 +2505,27 @@ xlog_grant_log_space(xlog_t *log,
 {
 	int		 free_bytes;
 	int		 need_bytes;
-#ifdef DEBUG
-	xfs_lsn_t	 tail_lsn;
-#endif
-
 
 #ifdef DEBUG
 	if (log->l_flags & XLOG_ACTIVE_RECOVERY)
 		panic("grant Recovery problem");
 #endif
 
-	/* Is there space or do we need to sleep? */
-	spin_lock(&log->l_grant_lock);
-
 	trace_xfs_log_grant_enter(log, tic);
 
+	need_bytes = tic->t_unit_res;
+	if (tic->t_flags & XFS_LOG_PERM_RESERV)
+		need_bytes *= tic->t_ocnt;
+
 	/* something is already sleeping; insert new transaction at end */
-	if (log->l_reserve_headq) {
-		xlog_ins_ticketq(&log->l_reserve_headq, tic);
+	if (!list_empty_careful(&log->l_reserveq)) {
+		spin_lock(&log->l_grant_reserve_lock);
+		/* recheck the queue now we are locked */
+		if (list_empty(&log->l_reserveq)) {
+			spin_unlock(&log->l_grant_reserve_lock);
+			goto redo;
+		}
+		list_add_tail(&tic->t_queue, &log->l_reserveq);
 
 		trace_xfs_log_grant_sleep1(log, tic);
 
@@ -2563,72 +2537,57 @@ xlog_grant_log_space(xlog_t *log,
 			goto error_return;
 
 		XFS_STATS_INC(xs_sleep_logspace);
-		sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s);
+		xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock);
+
 		/*
 		 * If we got an error, and the filesystem is shutting down,
 		 * we'll catch it down below. So just continue...
 		 */
 		trace_xfs_log_grant_wake1(log, tic);
-		spin_lock(&log->l_grant_lock);
 	}
-	if (tic->t_flags & XFS_LOG_PERM_RESERV)
-		need_bytes = tic->t_unit_res*tic->t_ocnt;
-	else
-		need_bytes = tic->t_unit_res;
 
 redo:
 	if (XLOG_FORCED_SHUTDOWN(log))
-		goto error_return;
+		goto error_return_unlocked;
 
-	free_bytes = xlog_space_left(log, log->l_grant_reserve_cycle,
-				     log->l_grant_reserve_bytes);
+	free_bytes = xlog_space_left(log, &log->l_grant_reserve_head);
 	if (free_bytes < need_bytes) {
-		if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
-			xlog_ins_ticketq(&log->l_reserve_headq, tic);
+		spin_lock(&log->l_grant_reserve_lock);
+		if (list_empty(&tic->t_queue))
+			list_add_tail(&tic->t_queue, &log->l_reserveq);
 
 		trace_xfs_log_grant_sleep2(log, tic);
 
-		spin_unlock(&log->l_grant_lock);
-		xlog_grant_push_ail(log->l_mp, need_bytes);
-		spin_lock(&log->l_grant_lock);
-
-		XFS_STATS_INC(xs_sleep_logspace);
-		sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s);
-
-		spin_lock(&log->l_grant_lock);
 		if (XLOG_FORCED_SHUTDOWN(log))
 			goto error_return;
 
-		trace_xfs_log_grant_wake2(log, tic);
+		xlog_grant_push_ail(log, need_bytes);
+
+		XFS_STATS_INC(xs_sleep_logspace);
+		xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock);
 
+		trace_xfs_log_grant_wake2(log, tic);
 		goto redo;
-	} else if (tic->t_flags & XLOG_TIC_IN_Q)
-		xlog_del_ticketq(&log->l_reserve_headq, tic);
+	}
 
-	/* we've got enough space */
-	xlog_grant_add_space(log, need_bytes);
-#ifdef DEBUG
-	tail_lsn = log->l_tail_lsn;
-	/*
-	 * Check to make sure the grant write head didn't just over lap the
-	 * tail.  If the cycles are the same, we can't be overlapping.
-	 * Otherwise, make sure that the cycles differ by exactly one and
-	 * check the byte count.
-	 */
-	if (CYCLE_LSN(tail_lsn) != log->l_grant_write_cycle) {
-		ASSERT(log->l_grant_write_cycle-1 == CYCLE_LSN(tail_lsn));
-		ASSERT(log->l_grant_write_bytes <= BBTOB(BLOCK_LSN(tail_lsn)));
+	if (!list_empty(&tic->t_queue)) {
+		spin_lock(&log->l_grant_reserve_lock);
+		list_del_init(&tic->t_queue);
+		spin_unlock(&log->l_grant_reserve_lock);
 	}
-#endif
+
+	/* we've got enough space */
+	xlog_grant_add_space(log, &log->l_grant_reserve_head, need_bytes);
+	xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes);
 	trace_xfs_log_grant_exit(log, tic);
-	xlog_verify_grant_head(log, 1);
-	spin_unlock(&log->l_grant_lock);
+	xlog_verify_grant_tail(log);
 	return 0;
 
- error_return:
-	if (tic->t_flags & XLOG_TIC_IN_Q)
-		xlog_del_ticketq(&log->l_reserve_headq, tic);
-
+error_return_unlocked:
+	spin_lock(&log->l_grant_reserve_lock);
+error_return:
+	list_del_init(&tic->t_queue);
+	spin_unlock(&log->l_grant_reserve_lock);
 	trace_xfs_log_grant_error(log, tic);
 
 	/*
@@ -2638,7 +2597,6 @@ redo:
 	 */
 	tic->t_curr_res = 0;
 	tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
-	spin_unlock(&log->l_grant_lock);
 	return XFS_ERROR(EIO);
 } /* xlog_grant_log_space */
 
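The lock free fast path described before xlog_grant_log_space() boils down to an unlocked-check/lock/recheck idiom. A kernel-style sketch of that idiom on its own; struct waiter and the lock and queue names here are illustrative, not the XFS ones:

#include <linux/list.h>
#include <linux/spinlock.h>

struct waiter {
	struct list_head	entry;	/* empty iff not queued */
};

static void maybe_queue(struct list_head *queue, spinlock_t *lock,
			struct waiter *w)
{
	/* fast path: nobody queued, skip the lock entirely */
	if (list_empty_careful(queue))
		return;

	spin_lock(lock);
	/* slow path: revalidate the answer now that we hold the lock */
	if (!list_empty(queue))
		list_add_tail(&w->entry, queue);
	spin_unlock(lock);
}

list_empty_careful() tolerates a racing list_del_init(), so the unlocked test can only return a stale answer, never crash; the pattern is safe as long as every decision taken on that answer is confirmed again under the lock, exactly as the recheck in the hunk above does.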
@@ -2646,17 +2604,14 @@ redo:
 /*
  * Replenish the byte reservation required by moving the grant write head.
  *
- *
+ * Similar to xlog_grant_log_space, the function is structured to have a lock
+ * free fast path.
  */
 STATIC int
 xlog_regrant_write_log_space(xlog_t	   *log,
 			     xlog_ticket_t *tic)
 {
 	int		free_bytes, need_bytes;
-	xlog_ticket_t	*ntic;
-#ifdef DEBUG
-	xfs_lsn_t	tail_lsn;
-#endif
 
 	tic->t_curr_res = tic->t_unit_res;
 	xlog_tic_reset_res(tic);
@@ -2669,12 +2624,9 @@ xlog_regrant_write_log_space(xlog_t *log,
 		panic("regrant Recovery problem");
 #endif
 
-	spin_lock(&log->l_grant_lock);
-
 	trace_xfs_log_regrant_write_enter(log, tic);
-
 	if (XLOG_FORCED_SHUTDOWN(log))
-		goto error_return;
+		goto error_return_unlocked;
 
 	/* If there are other waiters on the queue then give them a
 	 * chance at logspace before us. Wake up the first waiters,
@@ -2683,92 +2635,76 @@ xlog_regrant_write_log_space(xlog_t *log,
 	 * this transaction.
 	 */
 	need_bytes = tic->t_unit_res;
-	if ((ntic = log->l_write_headq)) {
-		free_bytes = xlog_space_left(log, log->l_grant_write_cycle,
-					     log->l_grant_write_bytes);
-		do {
+	if (!list_empty_careful(&log->l_writeq)) {
+		struct xlog_ticket *ntic;
+
+		spin_lock(&log->l_grant_write_lock);
+		free_bytes = xlog_space_left(log, &log->l_grant_write_head);
+		list_for_each_entry(ntic, &log->l_writeq, t_queue) {
 			ASSERT(ntic->t_flags & XLOG_TIC_PERM_RESERV);
 
 			if (free_bytes < ntic->t_unit_res)
 				break;
 			free_bytes -= ntic->t_unit_res;
-			sv_signal(&ntic->t_wait);
-			ntic = ntic->t_next;
-		} while (ntic != log->l_write_headq);
-
-		if (ntic != log->l_write_headq) {
-			if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
-				xlog_ins_ticketq(&log->l_write_headq, tic);
+			wake_up(&ntic->t_wait);
+		}
 
+		if (ntic != list_first_entry(&log->l_writeq,
+						struct xlog_ticket, t_queue)) {
+			if (list_empty(&tic->t_queue))
+				list_add_tail(&tic->t_queue, &log->l_writeq);
 			trace_xfs_log_regrant_write_sleep1(log, tic);
 
-			spin_unlock(&log->l_grant_lock);
-			xlog_grant_push_ail(log->l_mp, need_bytes);
-			spin_lock(&log->l_grant_lock);
+			xlog_grant_push_ail(log, need_bytes);
 
 			XFS_STATS_INC(xs_sleep_logspace);
-			sv_wait(&tic->t_wait, PINOD|PLTWAIT,
-				&log->l_grant_lock, s);
-
-			/* If we're shutting down, this tic is already
-			 * off the queue */
-			spin_lock(&log->l_grant_lock);
-			if (XLOG_FORCED_SHUTDOWN(log))
-				goto error_return;
-
+			xlog_wait(&tic->t_wait, &log->l_grant_write_lock);
 			trace_xfs_log_regrant_write_wake1(log, tic);
-		}
+		} else
+			spin_unlock(&log->l_grant_write_lock);
 	}
 
 redo:
 	if (XLOG_FORCED_SHUTDOWN(log))
-		goto error_return;
+		goto error_return_unlocked;
 
-	free_bytes = xlog_space_left(log, log->l_grant_write_cycle,
-				     log->l_grant_write_bytes);
+	free_bytes = xlog_space_left(log, &log->l_grant_write_head);
 	if (free_bytes < need_bytes) {
-		if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
-			xlog_ins_ticketq(&log->l_write_headq, tic);
-		spin_unlock(&log->l_grant_lock);
-		xlog_grant_push_ail(log->l_mp, need_bytes);
-		spin_lock(&log->l_grant_lock);
-
-		XFS_STATS_INC(xs_sleep_logspace);
-		trace_xfs_log_regrant_write_sleep2(log, tic);
-
-		sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s);
+		spin_lock(&log->l_grant_write_lock);
+		if (list_empty(&tic->t_queue))
+			list_add_tail(&tic->t_queue, &log->l_writeq);
 
-		/* If we're shutting down, this tic is already off the queue */
-		spin_lock(&log->l_grant_lock);
 		if (XLOG_FORCED_SHUTDOWN(log))
 			goto error_return;
 
+		xlog_grant_push_ail(log, need_bytes);
+
+		XFS_STATS_INC(xs_sleep_logspace);
+		trace_xfs_log_regrant_write_sleep2(log, tic);
+		xlog_wait(&tic->t_wait, &log->l_grant_write_lock);
+
 		trace_xfs_log_regrant_write_wake2(log, tic);
 		goto redo;
-	} else if (tic->t_flags & XLOG_TIC_IN_Q)
-		xlog_del_ticketq(&log->l_write_headq, tic);
+	}
 
-	/* we've got enough space */
-	xlog_grant_add_space_write(log, need_bytes);
-#ifdef DEBUG
-	tail_lsn = log->l_tail_lsn;
-	if (CYCLE_LSN(tail_lsn) != log->l_grant_write_cycle) {
-		ASSERT(log->l_grant_write_cycle-1 == CYCLE_LSN(tail_lsn));
-		ASSERT(log->l_grant_write_bytes <= BBTOB(BLOCK_LSN(tail_lsn)));
+	if (!list_empty(&tic->t_queue)) {
+		spin_lock(&log->l_grant_write_lock);
+		list_del_init(&tic->t_queue);
+		spin_unlock(&log->l_grant_write_lock);
 	}
-#endif
 
+	/* we've got enough space */
+	xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes);
 	trace_xfs_log_regrant_write_exit(log, tic);
-
-	xlog_verify_grant_head(log, 1);
-	spin_unlock(&log->l_grant_lock);
+	xlog_verify_grant_tail(log);
 	return 0;
 
 
+ error_return_unlocked:
+	spin_lock(&log->l_grant_write_lock);
  error_return:
-	if (tic->t_flags & XLOG_TIC_IN_Q)
-		xlog_del_ticketq(&log->l_reserve_headq, tic);
-
+	list_del_init(&tic->t_queue);
+	spin_unlock(&log->l_grant_write_lock);
 	trace_xfs_log_regrant_write_error(log, tic);
 
 	/*
@@ -2778,7 +2714,6 @@ redo:
 	 */
 	tic->t_curr_res = 0;
 	tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
-	spin_unlock(&log->l_grant_lock);
 	return XFS_ERROR(EIO);
 } /* xlog_regrant_write_log_space */
 
@@ -2799,27 +2734,24 @@ xlog_regrant_reserve_log_space(xlog_t *log,
 	if (ticket->t_cnt > 0)
 		ticket->t_cnt--;
 
-	spin_lock(&log->l_grant_lock);
-	xlog_grant_sub_space(log, ticket->t_curr_res);
+	xlog_grant_sub_space(log, &log->l_grant_reserve_head,
+					ticket->t_curr_res);
+	xlog_grant_sub_space(log, &log->l_grant_write_head,
+					ticket->t_curr_res);
 	ticket->t_curr_res = ticket->t_unit_res;
 	xlog_tic_reset_res(ticket);
 
 	trace_xfs_log_regrant_reserve_sub(log, ticket);
 
-	xlog_verify_grant_head(log, 1);
-
 	/* just return if we still have some of the pre-reserved space */
-	if (ticket->t_cnt > 0) {
-		spin_unlock(&log->l_grant_lock);
+	if (ticket->t_cnt > 0)
 		return;
-	}
 
-	xlog_grant_add_space_reserve(log, ticket->t_unit_res);
+	xlog_grant_add_space(log, &log->l_grant_reserve_head,
+					ticket->t_unit_res);
 
 	trace_xfs_log_regrant_reserve_exit(log, ticket);
 
-	xlog_verify_grant_head(log, 0);
-	spin_unlock(&log->l_grant_lock);
 	ticket->t_curr_res = ticket->t_unit_res;
 	xlog_tic_reset_res(ticket);
 } /* xlog_regrant_reserve_log_space */
@@ -2843,28 +2775,29 @@ STATIC void
 xlog_ungrant_log_space(xlog_t	     *log,
 		       xlog_ticket_t *ticket)
 {
+	int	bytes;
+
 	if (ticket->t_cnt > 0)
 		ticket->t_cnt--;
 
-	spin_lock(&log->l_grant_lock);
 	trace_xfs_log_ungrant_enter(log, ticket);
-
-	xlog_grant_sub_space(log, ticket->t_curr_res);
-
 	trace_xfs_log_ungrant_sub(log, ticket);
 
-	/* If this is a permanent reservation ticket, we may be able to free
+	/*
+	 * If this is a permanent reservation ticket, we may be able to free
 	 * up more space based on the remaining count.
 	 */
+	bytes = ticket->t_curr_res;
 	if (ticket->t_cnt > 0) {
 		ASSERT(ticket->t_flags & XLOG_TIC_PERM_RESERV);
-		xlog_grant_sub_space(log, ticket->t_unit_res*ticket->t_cnt);
+		bytes += ticket->t_unit_res*ticket->t_cnt;
 	}
 
+	xlog_grant_sub_space(log, &log->l_grant_reserve_head, bytes);
+	xlog_grant_sub_space(log, &log->l_grant_write_head, bytes);
+
 	trace_xfs_log_ungrant_exit(log, ticket);
 
-	xlog_verify_grant_head(log, 1);
-	spin_unlock(&log->l_grant_lock);
 	xfs_log_move_tail(log->l_mp, 1);
 } /* xlog_ungrant_log_space */
 
@@ -2901,11 +2834,11 @@ xlog_state_release_iclog(
 
 	if (iclog->ic_state == XLOG_STATE_WANT_SYNC) {
 		/* update tail before writing to iclog */
-		xlog_assign_tail_lsn(log->l_mp);
+		xfs_lsn_t tail_lsn = xlog_assign_tail_lsn(log->l_mp);
 		sync++;
 		iclog->ic_state = XLOG_STATE_SYNCING;
-		iclog->ic_header.h_tail_lsn = cpu_to_be64(log->l_tail_lsn);
-		xlog_verify_tail_lsn(log, iclog, log->l_tail_lsn);
+		iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
+		xlog_verify_tail_lsn(log, iclog, tail_lsn);
 		/* cycle incremented when incrementing curr_block */
 	}
 	spin_unlock(&log->l_icloglock);
@@ -3088,7 +3021,7 @@ maybe_sleep:
 			return XFS_ERROR(EIO);
 		}
 		XFS_STATS_INC(xs_log_force_sleep);
-		sv_wait(&iclog->ic_force_wait, PINOD, &log->l_icloglock, s);
+		xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
 		/*
 		 * No need to grab the log lock here since we're
 		 * only deciding whether or not to return EIO
@@ -3206,8 +3139,8 @@ try_again:
 
 			XFS_STATS_INC(xs_log_force_sleep);
 
-			sv_wait(&iclog->ic_prev->ic_write_wait,
-				PSWP, &log->l_icloglock, s);
+			xlog_wait(&iclog->ic_prev->ic_write_wait,
+							&log->l_icloglock);
 			if (log_flushed)
 				*log_flushed = 1;
 			already_slept = 1;
@@ -3235,7 +3168,7 @@ try_again:
 			return XFS_ERROR(EIO);
 		}
 		XFS_STATS_INC(xs_log_force_sleep);
-		sv_wait(&iclog->ic_force_wait, PSWP, &log->l_icloglock, s);
+		xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
 		/*
 		 * No need to grab the log lock here since we're
 		 * only deciding whether or not to return EIO
@@ -3310,10 +3243,8 @@ xfs_log_ticket_put(
 	xlog_ticket_t	*ticket)
 {
 	ASSERT(atomic_read(&ticket->t_ref) > 0);
-	if (atomic_dec_and_test(&ticket->t_ref)) {
-		sv_destroy(&ticket->t_wait);
+	if (atomic_dec_and_test(&ticket->t_ref))
 		kmem_zone_free(xfs_log_ticket_zone, ticket);
-	}
 }
 
 xlog_ticket_t *
@@ -3435,6 +3366,7 @@ xlog_ticket_alloc(
 	}
 
 	atomic_set(&tic->t_ref, 1);
+	INIT_LIST_HEAD(&tic->t_queue);
 	tic->t_unit_res		= unit_bytes;
 	tic->t_curr_res		= unit_bytes;
 	tic->t_cnt		= cnt;
@@ -3445,7 +3377,7 @@ xlog_ticket_alloc(
 	tic->t_trans_type	= 0;
 	if (xflags & XFS_LOG_PERM_RESERV)
 		tic->t_flags |= XLOG_TIC_PERM_RESERV;
-	sv_init(&tic->t_wait, SV_DEFAULT, "logtick");
+	init_waitqueue_head(&tic->t_wait);
 
 	xlog_tic_reset_res(tic);
 
@@ -3484,18 +3416,25 @@ xlog_verify_dest_ptr(
 }
 
 STATIC void
-xlog_verify_grant_head(xlog_t *log, int equals)
+xlog_verify_grant_tail(
+	struct log	*log)
 {
-	if (log->l_grant_reserve_cycle == log->l_grant_write_cycle) {
-		if (equals)
-			ASSERT(log->l_grant_reserve_bytes >= log->l_grant_write_bytes);
-		else
-			ASSERT(log->l_grant_reserve_bytes > log->l_grant_write_bytes);
-	} else {
-		ASSERT(log->l_grant_reserve_cycle-1 == log->l_grant_write_cycle);
-		ASSERT(log->l_grant_write_bytes >= log->l_grant_reserve_bytes);
-	}
-} /* xlog_verify_grant_head */
+	int		tail_cycle, tail_blocks;
+	int		cycle, space;
+
+	/*
+	 * Check to make sure the grant write head didn't just overlap the
+	 * tail.  If the cycles are the same, we can't be overlapping.
+	 * Otherwise, make sure that the cycles differ by exactly one and
+	 * check the byte count.
+	 */
+	xlog_crack_grant_head(&log->l_grant_write_head, &cycle, &space);
+	xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks);
+	if (tail_cycle != cycle) {
+		ASSERT(cycle - 1 == tail_cycle);
+		ASSERT(space <= BBTOB(tail_blocks));
+	}
+}
 
 /* check if it will fit */
 STATIC void
@@ -3716,12 +3655,10 @@ xfs_log_force_umount(
 	xlog_cil_force(log);
 
 	/*
-	 * We must hold both the GRANT lock and the LOG lock,
-	 * before we mark the filesystem SHUTDOWN and wake
-	 * everybody up to tell the bad news.
+	 * Mark the filesystem and the log as in a shutdown state and wake
+	 * everybody up to tell them the bad news.
 	 */
 	spin_lock(&log->l_icloglock);
-	spin_lock(&log->l_grant_lock);
 	mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN;
 	if (mp->m_sb_bp)
 		XFS_BUF_DONE(mp->m_sb_bp);
@@ -3742,27 +3679,21 @@ xfs_log_force_umount(
 	spin_unlock(&log->l_icloglock);
 
 	/*
-	 * We don't want anybody waiting for log reservations
-	 * after this. That means we have to wake up everybody
-	 * queued up on reserve_headq as well as write_headq.
-	 * In addition, we make sure in xlog_{re}grant_log_space
-	 * that we don't enqueue anything once the SHUTDOWN flag
-	 * is set, and this action is protected by the GRANTLOCK.
+	 * We don't want anybody waiting for log reservations after this. That
+	 * means we have to wake up everybody queued up on reserveq as well as
+	 * writeq. In addition, we make sure in xlog_{re}grant_log_space that
+	 * we don't enqueue anything once the SHUTDOWN flag is set, and this
+	 * action is protected by the grant locks.
 	 */
-	if ((tic = log->l_reserve_headq)) {
-		do {
-			sv_signal(&tic->t_wait);
-			tic = tic->t_next;
-		} while (tic != log->l_reserve_headq);
-	}
-
-	if ((tic = log->l_write_headq)) {
-		do {
-			sv_signal(&tic->t_wait);
-			tic = tic->t_next;
-		} while (tic != log->l_write_headq);
-	}
-	spin_unlock(&log->l_grant_lock);
+	spin_lock(&log->l_grant_reserve_lock);
+	list_for_each_entry(tic, &log->l_reserveq, t_queue)
+		wake_up(&tic->t_wait);
+	spin_unlock(&log->l_grant_reserve_lock);
+
+	spin_lock(&log->l_grant_write_lock);
+	list_for_each_entry(tic, &log->l_writeq, t_queue)
+		wake_up(&tic->t_wait);
+	spin_unlock(&log->l_grant_write_lock);
 
 	if (!(log->l_iclog->ic_state & XLOG_STATE_IOERROR)) {
 		ASSERT(!logerror);
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index 23d6ceb5e97b..9dc8125d04e5 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -61,7 +61,7 @@ xlog_cil_init(
 	INIT_LIST_HEAD(&cil->xc_committing);
 	spin_lock_init(&cil->xc_cil_lock);
 	init_rwsem(&cil->xc_ctx_lock);
-	sv_init(&cil->xc_commit_wait, SV_DEFAULT, "cilwait");
+	init_waitqueue_head(&cil->xc_commit_wait);
 
 	INIT_LIST_HEAD(&ctx->committing);
 	INIT_LIST_HEAD(&ctx->busy_extents);
@@ -361,15 +361,10 @@ xlog_cil_committed(
 	int	abort)
 {
 	struct xfs_cil_ctx	*ctx = args;
-	struct xfs_log_vec	*lv;
-	int			abortflag = abort ? XFS_LI_ABORTED : 0;
 	struct xfs_busy_extent	*busyp, *n;
 
-	/* unpin all the log items */
-	for (lv = ctx->lv_chain; lv; lv = lv->lv_next ) {
-		xfs_trans_item_committed(lv->lv_item, ctx->start_lsn,
-							abortflag);
-	}
+	xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain,
+					ctx->start_lsn, abort);
 
 	list_for_each_entry_safe(busyp, n, &ctx->busy_extents, list)
 		xfs_alloc_busy_clear(ctx->cil->xc_log->l_mp, busyp);
@@ -568,7 +563,7 @@ restart:
 			 * It is still being pushed! Wait for the push to
 			 * complete, then start again from the beginning.
 			 */
-			sv_wait(&cil->xc_commit_wait, 0, &cil->xc_cil_lock, 0);
+			xlog_wait(&cil->xc_commit_wait, &cil->xc_cil_lock);
 			goto restart;
 		}
 	}
@@ -592,7 +587,7 @@ restart:
 	 */
 	spin_lock(&cil->xc_cil_lock);
 	ctx->commit_lsn = commit_lsn;
-	sv_broadcast(&cil->xc_commit_wait);
+	wake_up_all(&cil->xc_commit_wait);
 	spin_unlock(&cil->xc_cil_lock);
 
 	/* release the hounds! */
@@ -757,7 +752,7 @@ restart:
 			 * It is still being pushed! Wait for the push to
 			 * complete, then start again from the beginning.
 			 */
-			sv_wait(&cil->xc_commit_wait, 0, &cil->xc_cil_lock, 0);
+			xlog_wait(&cil->xc_commit_wait, &cil->xc_cil_lock);
 			goto restart;
 		}
 		if (ctx->sequence != sequence)
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index edcdfe01617f..d5f8be8f4bf6 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -21,7 +21,6 @@
 struct xfs_buf;
 struct log;
 struct xlog_ticket;
-struct xfs_buf_cancel;
 struct xfs_mount;
 
 /*
@@ -54,7 +53,6 @@ struct xfs_mount;
 	 BTOBB(XLOG_MAX_ICLOGS << (xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? \
 	 XLOG_MAX_RECORD_BSHIFT : XLOG_BIG_RECORD_BSHIFT))
 
-
 static inline xfs_lsn_t xlog_assign_lsn(uint cycle, uint block)
 {
 	return ((xfs_lsn_t)cycle << 32) | block;
@@ -133,12 +131,10 @@ static inline uint xlog_get_client_id(__be32 i)
  */
 #define XLOG_TIC_INITED		0x1	/* has been initialized */
 #define XLOG_TIC_PERM_RESERV	0x2	/* permanent reservation */
-#define XLOG_TIC_IN_Q		0x4
 
 #define XLOG_TIC_FLAGS \
 	{ XLOG_TIC_INITED,	"XLOG_TIC_INITED" }, \
-	{ XLOG_TIC_PERM_RESERV,	"XLOG_TIC_PERM_RESERV" }, \
-	{ XLOG_TIC_IN_Q,	"XLOG_TIC_IN_Q" }
+	{ XLOG_TIC_PERM_RESERV,	"XLOG_TIC_PERM_RESERV" }
 
 #endif	/* __KERNEL__ */
 
@@ -244,9 +240,8 @@ typedef struct xlog_res {
 } xlog_res_t;
 
 typedef struct xlog_ticket {
-	sv_t		   t_wait;	 /* ticket wait queue		 : 20 */
-	struct xlog_ticket *t_next;	 /*				 :4|8 */
-	struct xlog_ticket *t_prev;	 /*				 :4|8 */
+	wait_queue_head_t  t_wait;	 /* ticket wait queue */
+	struct list_head   t_queue;	 /* reserve/write queue */
 	xlog_tid_t	   t_tid;	 /* transaction identifier	 : 4  */
 	atomic_t	   t_ref;	 /* ticket reference count	 : 4  */
 	int		   t_curr_res;	 /* current reservation in bytes : 4  */
@@ -353,8 +348,8 @@ typedef union xlog_in_core2 {
353 * and move everything else out to subsequent cachelines. 348 * and move everything else out to subsequent cachelines.
354 */ 349 */
355typedef struct xlog_in_core { 350typedef struct xlog_in_core {
356 sv_t ic_force_wait; 351 wait_queue_head_t ic_force_wait;
357 sv_t ic_write_wait; 352 wait_queue_head_t ic_write_wait;
358 struct xlog_in_core *ic_next; 353 struct xlog_in_core *ic_next;
359 struct xlog_in_core *ic_prev; 354 struct xlog_in_core *ic_prev;
360 struct xfs_buf *ic_bp; 355 struct xfs_buf *ic_bp;
@@ -421,7 +416,7 @@ struct xfs_cil {
421 struct xfs_cil_ctx *xc_ctx; 416 struct xfs_cil_ctx *xc_ctx;
422 struct rw_semaphore xc_ctx_lock; 417 struct rw_semaphore xc_ctx_lock;
423 struct list_head xc_committing; 418 struct list_head xc_committing;
424 sv_t xc_commit_wait; 419 wait_queue_head_t xc_commit_wait;
425 xfs_lsn_t xc_current_sequence; 420 xfs_lsn_t xc_current_sequence;
426}; 421};
427 422
@@ -491,7 +486,7 @@ typedef struct log {
491 struct xfs_buftarg *l_targ; /* buftarg of log */ 486 struct xfs_buftarg *l_targ; /* buftarg of log */
492 uint l_flags; 487 uint l_flags;
493 uint l_quotaoffs_flag; /* XFS_DQ_*, for QUOTAOFFs */ 488 uint l_quotaoffs_flag; /* XFS_DQ_*, for QUOTAOFFs */
494 struct xfs_buf_cancel **l_buf_cancel_table; 489 struct list_head *l_buf_cancel_table;
495 int l_iclog_hsize; /* size of iclog header */ 490 int l_iclog_hsize; /* size of iclog header */
496 int l_iclog_heads; /* # of iclog header sectors */ 491 int l_iclog_heads; /* # of iclog header sectors */
497 uint l_sectBBsize; /* sector size in BBs (2^n) */ 492 uint l_sectBBsize; /* sector size in BBs (2^n) */
@@ -503,29 +498,40 @@ typedef struct log {
503 int l_logBBsize; /* size of log in BB chunks */ 498 int l_logBBsize; /* size of log in BB chunks */
504 499
505 /* The following block of fields are changed while holding icloglock */ 500 /* The following block of fields are changed while holding icloglock */
506 sv_t l_flush_wait ____cacheline_aligned_in_smp; 501 wait_queue_head_t l_flush_wait ____cacheline_aligned_in_smp;
507 /* waiting for iclog flush */ 502 /* waiting for iclog flush */
508 int l_covered_state;/* state of "covering disk 503 int l_covered_state;/* state of "covering disk
509 * log entries" */ 504 * log entries" */
510 xlog_in_core_t *l_iclog; /* head log queue */ 505 xlog_in_core_t *l_iclog; /* head log queue */
511 spinlock_t l_icloglock; /* grab to change iclog state */ 506 spinlock_t l_icloglock; /* grab to change iclog state */
512 xfs_lsn_t l_tail_lsn; /* lsn of 1st LR with unflushed
513 * buffers */
514 xfs_lsn_t l_last_sync_lsn;/* lsn of last LR on disk */
515 int l_curr_cycle; /* Cycle number of log writes */ 507 int l_curr_cycle; /* Cycle number of log writes */
516 int l_prev_cycle; /* Cycle number before last 508 int l_prev_cycle; /* Cycle number before last
517 * block increment */ 509 * block increment */
518 int l_curr_block; /* current logical log block */ 510 int l_curr_block; /* current logical log block */
519 int l_prev_block; /* previous logical log block */ 511 int l_prev_block; /* previous logical log block */
520 512
521 /* The following block of fields are changed while holding grant_lock */ 513 /*
522 spinlock_t l_grant_lock ____cacheline_aligned_in_smp; 514 * l_last_sync_lsn and l_tail_lsn are atomics so they can be set and
523 xlog_ticket_t *l_reserve_headq; 515 * read without needing to hold specific locks. To avoid operations
524 xlog_ticket_t *l_write_headq; 516 * contending with other hot objects, place each of them on a separate
525 int l_grant_reserve_cycle; 517 * cacheline.
526 int l_grant_reserve_bytes; 518 */
527 int l_grant_write_cycle; 519 /* lsn of last LR on disk */
528 int l_grant_write_bytes; 520 atomic64_t l_last_sync_lsn ____cacheline_aligned_in_smp;
521 /* lsn of 1st LR with unflushed * buffers */
522 atomic64_t l_tail_lsn ____cacheline_aligned_in_smp;
523
524 /*
525 * ticket grant locks, queues and accounting have their own cachlines
526 * as these are quite hot and can be operated on concurrently.
527 */
528 spinlock_t l_grant_reserve_lock ____cacheline_aligned_in_smp;
529 struct list_head l_reserveq;
530 atomic64_t l_grant_reserve_head;
531
532 spinlock_t l_grant_write_lock ____cacheline_aligned_in_smp;
533 struct list_head l_writeq;
534 atomic64_t l_grant_write_head;
529 535
530 /* The following field are used for debugging; need to hold icloglock */ 536 /* The following field are used for debugging; need to hold icloglock */
531#ifdef DEBUG 537#ifdef DEBUG
@@ -534,6 +540,9 @@ typedef struct log {
534 540
535} xlog_t; 541} xlog_t;
536 542
543#define XLOG_BUF_CANCEL_BUCKET(log, blkno) \
544 ((log)->l_buf_cancel_table + ((__uint64_t)blkno % XLOG_BC_TABLE_SIZE))
545
537#define XLOG_FORCED_SHUTDOWN(log) ((log)->l_flags & XLOG_IO_ERROR) 546#define XLOG_FORCED_SHUTDOWN(log) ((log)->l_flags & XLOG_IO_ERROR)
538 547
539/* common routines */ 548/* common routines */
@@ -562,6 +571,61 @@ int xlog_write(struct log *log, struct xfs_log_vec *log_vector,
562 xlog_in_core_t **commit_iclog, uint flags); 571 xlog_in_core_t **commit_iclog, uint flags);
563 572
564/* 573/*
574 * When we crack an atomic LSN, we sample it first so that the value will not
575 * change while we are cracking it into the component values. This means we
576 * will always get consistent component values to work from. This should always
 577 * be used to sample and crack LSNs that are stored and updated in atomic
578 * variables.
579 */
580static inline void
581xlog_crack_atomic_lsn(atomic64_t *lsn, uint *cycle, uint *block)
582{
583 xfs_lsn_t val = atomic64_read(lsn);
584
585 *cycle = CYCLE_LSN(val);
586 *block = BLOCK_LSN(val);
587}
588
589/*
590 * Calculate and assign a value to an atomic LSN variable from component pieces.
591 */
592static inline void
593xlog_assign_atomic_lsn(atomic64_t *lsn, uint cycle, uint block)
594{
595 atomic64_set(lsn, xlog_assign_lsn(cycle, block));
596}
597
598/*
599 * When we crack the grant head, we sample it first so that the value will not
600 * change while we are cracking it into the component values. This means we
601 * will always get consistent component values to work from.
602 */
603static inline void
604xlog_crack_grant_head_val(int64_t val, int *cycle, int *space)
605{
606 *cycle = val >> 32;
607 *space = val & 0xffffffff;
608}
609
610static inline void
611xlog_crack_grant_head(atomic64_t *head, int *cycle, int *space)
612{
613 xlog_crack_grant_head_val(atomic64_read(head), cycle, space);
614}
615
616static inline int64_t
617xlog_assign_grant_head_val(int cycle, int space)
618{
619 return ((int64_t)cycle << 32) | space;
620}
621
622static inline void
623xlog_assign_grant_head(atomic64_t *head, int cycle, int space)
624{
625 atomic64_set(head, xlog_assign_grant_head_val(cycle, space));
626}
627
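
The four helpers above share one pattern: two 32-bit components packed into a single 64-bit word so the whole value can be read and written atomically. A minimal standalone C sketch of that arithmetic, using made-up values rather than anything from the patch:

    /* Standalone illustration of the pack/unpack arithmetic used by
     * xlog_assign_grant_head_val() and xlog_crack_grant_head_val() above.
     * The cycle/space values are hypothetical. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int cycle = 42, space = 123456;
            int64_t head = ((int64_t)cycle << 32) | space;  /* assign */

            int out_cycle = head >> 32;             /* crack: high 32 bits */
            int out_space = head & 0xffffffff;      /* crack: low 32 bits */

            /* prints cycle=42 space=123456 */
            printf("cycle=%d space=%d\n", out_cycle, out_space);
            return 0;
    }
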
628/*
565 * Committed Item List interfaces 629 * Committed Item List interfaces
566 */ 630 */
567int xlog_cil_init(struct log *log); 631int xlog_cil_init(struct log *log);
@@ -585,6 +649,21 @@ xlog_cil_force(struct log *log)
585 */ 649 */
586#define XLOG_UNMOUNT_REC_TYPE (-1U) 650#define XLOG_UNMOUNT_REC_TYPE (-1U)
587 651
652/*
653 * Wrapper function for waiting on a wait queue serialised against wakeups
654 * by a spinlock. This matches the semantics of all the wait queues used in the
655 * log code.
656 */
657static inline void xlog_wait(wait_queue_head_t *wq, spinlock_t *lock)
658{
659 DECLARE_WAITQUEUE(wait, current);
660
661 add_wait_queue_exclusive(wq, &wait);
662 __set_current_state(TASK_UNINTERRUPTIBLE);
663 spin_unlock(lock);
664 schedule();
665 remove_wait_queue(wq, &wait);
666}
588#endif /* __KERNEL__ */ 667#endif /* __KERNEL__ */
589 668
590#endif /* __XFS_LOG_PRIV_H__ */ 669#endif /* __XFS_LOG_PRIV_H__ */
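
The xlog_wait() helper just added only works because every waker modifies the wake condition and calls wake_up_all() while holding the same spinlock the sleeper hands to xlog_wait(). A hedged sketch of the waker side, modelled on the xc_commit_wait hunk earlier in this patch (field names are taken from that hunk; this block is illustrative, not part of the diff):

    static void example_publish_commit_lsn(struct xfs_cil *cil, xfs_lsn_t commit_lsn)
    {
            spin_lock(&cil->xc_cil_lock);
            cil->xc_ctx->commit_lsn = commit_lsn;   /* publish under the lock... */
            wake_up_all(&cil->xc_commit_wait);      /* ...so no xlog_wait()er can miss it */
            spin_unlock(&cil->xc_cil_lock);
    }
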
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 966d3f97458c..204d8e5fa7fa 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -53,6 +53,17 @@ STATIC void xlog_recover_check_summary(xlog_t *);
53#endif 53#endif
54 54
55/* 55/*
56 * This structure is used during recovery to record the buf log items which
57 * have been canceled and should not be replayed.
58 */
59struct xfs_buf_cancel {
60 xfs_daddr_t bc_blkno;
61 uint bc_len;
62 int bc_refcount;
63 struct list_head bc_list;
64};
65
66/*
56 * Sector aligned buffer routines for buffer create/read/write/access 67 * Sector aligned buffer routines for buffer create/read/write/access
57 */ 68 */
58 69
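
The struct above pairs with the XLOG_BUF_CANCEL_BUCKET() macro added to xfs_log_priv.h earlier in this patch: the table is an array of list_heads indexed by blkno modulo XLOG_BC_TABLE_SIZE. A hedged sketch of a bucket lookup in the style the pass 1/pass 2 code below uses (illustrative; not part of the diff):

    static struct xfs_buf_cancel *
    example_find_cancel(struct log *log, xfs_daddr_t blkno, uint len)
    {
            struct list_head        *bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
            struct xfs_buf_cancel   *bcp;

            list_for_each_entry(bcp, bucket, bc_list) {
                    if (bcp->bc_blkno == blkno && bcp->bc_len == len)
                            return bcp;     /* replay of this buffer was cancelled */
            }
            return NULL;                    /* no cancel record: replay normally */
    }
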
@@ -925,12 +936,12 @@ xlog_find_tail(
925 log->l_curr_cycle = be32_to_cpu(rhead->h_cycle); 936 log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
926 if (found == 2) 937 if (found == 2)
927 log->l_curr_cycle++; 938 log->l_curr_cycle++;
928 log->l_tail_lsn = be64_to_cpu(rhead->h_tail_lsn); 939 atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
929 log->l_last_sync_lsn = be64_to_cpu(rhead->h_lsn); 940 atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
930 log->l_grant_reserve_cycle = log->l_curr_cycle; 941 xlog_assign_grant_head(&log->l_grant_reserve_head, log->l_curr_cycle,
931 log->l_grant_reserve_bytes = BBTOB(log->l_curr_block); 942 BBTOB(log->l_curr_block));
932 log->l_grant_write_cycle = log->l_curr_cycle; 943 xlog_assign_grant_head(&log->l_grant_write_head, log->l_curr_cycle,
933 log->l_grant_write_bytes = BBTOB(log->l_curr_block); 944 BBTOB(log->l_curr_block));
934 945
935 /* 946 /*
936 * Look for unmount record. If we find it, then we know there 947 * Look for unmount record. If we find it, then we know there
@@ -960,7 +971,7 @@ xlog_find_tail(
960 } 971 }
961 after_umount_blk = (i + hblks + (int) 972 after_umount_blk = (i + hblks + (int)
962 BTOBB(be32_to_cpu(rhead->h_len))) % log->l_logBBsize; 973 BTOBB(be32_to_cpu(rhead->h_len))) % log->l_logBBsize;
963 tail_lsn = log->l_tail_lsn; 974 tail_lsn = atomic64_read(&log->l_tail_lsn);
964 if (*head_blk == after_umount_blk && 975 if (*head_blk == after_umount_blk &&
965 be32_to_cpu(rhead->h_num_logops) == 1) { 976 be32_to_cpu(rhead->h_num_logops) == 1) {
966 umount_data_blk = (i + hblks) % log->l_logBBsize; 977 umount_data_blk = (i + hblks) % log->l_logBBsize;
@@ -975,12 +986,10 @@ xlog_find_tail(
975 * log records will point recovery to after the 986 * log records will point recovery to after the
976 * current unmount record. 987 * current unmount record.
977 */ 988 */
978 log->l_tail_lsn = 989 xlog_assign_atomic_lsn(&log->l_tail_lsn,
979 xlog_assign_lsn(log->l_curr_cycle, 990 log->l_curr_cycle, after_umount_blk);
980 after_umount_blk); 991 xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
981 log->l_last_sync_lsn = 992 log->l_curr_cycle, after_umount_blk);
982 xlog_assign_lsn(log->l_curr_cycle,
983 after_umount_blk);
984 *tail_blk = after_umount_blk; 993 *tail_blk = after_umount_blk;
985 994
986 /* 995 /*
@@ -1605,82 +1614,45 @@ xlog_recover_reorder_trans(
1605 * record in the table to tell us how many times we expect to see this 1614 * record in the table to tell us how many times we expect to see this
1606 * record during the second pass. 1615 * record during the second pass.
1607 */ 1616 */
1608STATIC void 1617STATIC int
1609xlog_recover_do_buffer_pass1( 1618xlog_recover_buffer_pass1(
1610 xlog_t *log, 1619 struct log *log,
1611 xfs_buf_log_format_t *buf_f) 1620 xlog_recover_item_t *item)
1612{ 1621{
1613 xfs_buf_cancel_t *bcp; 1622 xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
1614 xfs_buf_cancel_t *nextp; 1623 struct list_head *bucket;
1615 xfs_buf_cancel_t *prevp; 1624 struct xfs_buf_cancel *bcp;
1616 xfs_buf_cancel_t **bucket;
1617 xfs_daddr_t blkno = 0;
1618 uint len = 0;
1619 ushort flags = 0;
1620
1621 switch (buf_f->blf_type) {
1622 case XFS_LI_BUF:
1623 blkno = buf_f->blf_blkno;
1624 len = buf_f->blf_len;
1625 flags = buf_f->blf_flags;
1626 break;
1627 }
1628 1625
1629 /* 1626 /*
1630 * If this isn't a cancel buffer item, then just return. 1627 * If this isn't a cancel buffer item, then just return.
1631 */ 1628 */
1632 if (!(flags & XFS_BLF_CANCEL)) { 1629 if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
1633 trace_xfs_log_recover_buf_not_cancel(log, buf_f); 1630 trace_xfs_log_recover_buf_not_cancel(log, buf_f);
1634 return; 1631 return 0;
1635 }
1636
1637 /*
1638 * Insert an xfs_buf_cancel record into the hash table of
1639 * them. If there is already an identical record, bump
1640 * its reference count.
1641 */
1642 bucket = &log->l_buf_cancel_table[(__uint64_t)blkno %
1643 XLOG_BC_TABLE_SIZE];
1644 /*
1645 * If the hash bucket is empty then just insert a new record into
1646 * the bucket.
1647 */
1648 if (*bucket == NULL) {
1649 bcp = (xfs_buf_cancel_t *)kmem_alloc(sizeof(xfs_buf_cancel_t),
1650 KM_SLEEP);
1651 bcp->bc_blkno = blkno;
1652 bcp->bc_len = len;
1653 bcp->bc_refcount = 1;
1654 bcp->bc_next = NULL;
1655 *bucket = bcp;
1656 return;
1657 } 1632 }
1658 1633
1659 /* 1634 /*
1660	 * The hash bucket is not empty, so search for duplicates of our	1635	 * Insert an xfs_buf_cancel record into the hash table.
1661	 * record. If we find one then just bump its refcount. If not	1636	 * If there is already an identical record, bump its reference count.
1662 * then add us at the end of the list.
1663 */ 1637 */
1664 prevp = NULL; 1638 bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->blf_blkno);
1665 nextp = *bucket; 1639 list_for_each_entry(bcp, bucket, bc_list) {
1666 while (nextp != NULL) { 1640 if (bcp->bc_blkno == buf_f->blf_blkno &&
1667 if (nextp->bc_blkno == blkno && nextp->bc_len == len) { 1641 bcp->bc_len == buf_f->blf_len) {
1668 nextp->bc_refcount++; 1642 bcp->bc_refcount++;
1669 trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f); 1643 trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f);
1670 return; 1644 return 0;
1671 } 1645 }
1672 prevp = nextp; 1646 }
1673 nextp = nextp->bc_next; 1647
1674 } 1648 bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), KM_SLEEP);
1675 ASSERT(prevp != NULL); 1649 bcp->bc_blkno = buf_f->blf_blkno;
1676 bcp = (xfs_buf_cancel_t *)kmem_alloc(sizeof(xfs_buf_cancel_t), 1650 bcp->bc_len = buf_f->blf_len;
1677 KM_SLEEP);
1678 bcp->bc_blkno = blkno;
1679 bcp->bc_len = len;
1680 bcp->bc_refcount = 1; 1651 bcp->bc_refcount = 1;
1681 bcp->bc_next = NULL; 1652 list_add_tail(&bcp->bc_list, bucket);
1682 prevp->bc_next = bcp; 1653
1683 trace_xfs_log_recover_buf_cancel_add(log, buf_f); 1654 trace_xfs_log_recover_buf_cancel_add(log, buf_f);
1655 return 0;
1684} 1656}
1685 1657
1686/* 1658/*
@@ -1698,14 +1670,13 @@ xlog_recover_do_buffer_pass1(
1698 */ 1670 */
1699STATIC int 1671STATIC int
1700xlog_check_buffer_cancelled( 1672xlog_check_buffer_cancelled(
1701 xlog_t *log, 1673 struct log *log,
1702 xfs_daddr_t blkno, 1674 xfs_daddr_t blkno,
1703 uint len, 1675 uint len,
1704 ushort flags) 1676 ushort flags)
1705{ 1677{
1706 xfs_buf_cancel_t *bcp; 1678 struct list_head *bucket;
1707 xfs_buf_cancel_t *prevp; 1679 struct xfs_buf_cancel *bcp;
1708 xfs_buf_cancel_t **bucket;
1709 1680
1710 if (log->l_buf_cancel_table == NULL) { 1681 if (log->l_buf_cancel_table == NULL) {
1711 /* 1682 /*
@@ -1716,128 +1687,70 @@ xlog_check_buffer_cancelled(
1716 return 0; 1687 return 0;
1717 } 1688 }
1718 1689
1719 bucket = &log->l_buf_cancel_table[(__uint64_t)blkno %
1720 XLOG_BC_TABLE_SIZE];
1721 bcp = *bucket;
1722 if (bcp == NULL) {
1723 /*
1724 * There is no corresponding entry in the table built
1725 * in pass one, so this buffer has not been cancelled.
1726 */
1727 ASSERT(!(flags & XFS_BLF_CANCEL));
1728 return 0;
1729 }
1730
1731 /* 1690 /*
1732 * Search for an entry in the buffer cancel table that 1691 * Search for an entry in the cancel table that matches our buffer.
1733 * matches our buffer.
1734 */ 1692 */
1735 prevp = NULL; 1693 bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
1736 while (bcp != NULL) { 1694 list_for_each_entry(bcp, bucket, bc_list) {
1737 if (bcp->bc_blkno == blkno && bcp->bc_len == len) { 1695 if (bcp->bc_blkno == blkno && bcp->bc_len == len)
1738 /* 1696 goto found;
1739	 * We've got a match, so return 1 so that the
1740 * recovery of this buffer is cancelled.
1741 * If this buffer is actually a buffer cancel
1742 * log item, then decrement the refcount on the
1743 * one in the table and remove it if this is the
1744 * last reference.
1745 */
1746 if (flags & XFS_BLF_CANCEL) {
1747 bcp->bc_refcount--;
1748 if (bcp->bc_refcount == 0) {
1749 if (prevp == NULL) {
1750 *bucket = bcp->bc_next;
1751 } else {
1752 prevp->bc_next = bcp->bc_next;
1753 }
1754 kmem_free(bcp);
1755 }
1756 }
1757 return 1;
1758 }
1759 prevp = bcp;
1760 bcp = bcp->bc_next;
1761 } 1697 }
1698
1762 /* 1699 /*
1763 * We didn't find a corresponding entry in the table, so 1700 * We didn't find a corresponding entry in the table, so return 0 so
1764 * return 0 so that the buffer is NOT cancelled. 1701 * that the buffer is NOT cancelled.
1765 */ 1702 */
1766 ASSERT(!(flags & XFS_BLF_CANCEL)); 1703 ASSERT(!(flags & XFS_BLF_CANCEL));
1767 return 0; 1704 return 0;
1768}
1769 1705
1770STATIC int 1706found:
1771xlog_recover_do_buffer_pass2( 1707 /*
1772	xlog_t			*log,	1708	 * We've got a match, so return 1 so that the recovery of this buffer
1773 xfs_buf_log_format_t *buf_f) 1709 * is cancelled. If this buffer is actually a buffer cancel log
1774{ 1710 * item, then decrement the refcount on the one in the table and
1775 xfs_daddr_t blkno = 0; 1711 * remove it if this is the last reference.
1776 ushort flags = 0; 1712 */
1777 uint len = 0; 1713 if (flags & XFS_BLF_CANCEL) {
1778 1714 if (--bcp->bc_refcount == 0) {
1779 switch (buf_f->blf_type) { 1715 list_del(&bcp->bc_list);
1780 case XFS_LI_BUF: 1716 kmem_free(bcp);
1781 blkno = buf_f->blf_blkno; 1717 }
1782 flags = buf_f->blf_flags;
1783 len = buf_f->blf_len;
1784 break;
1785 } 1718 }
1786 1719 return 1;
1787 return xlog_check_buffer_cancelled(log, blkno, len, flags);
1788} 1720}
1789 1721
1790/* 1722/*
1791 * Perform recovery for a buffer full of inodes. In these buffers, 1723 * Perform recovery for a buffer full of inodes. In these buffers, the only
1792 * the only data which should be recovered is that which corresponds 1724 * data which should be recovered is that which corresponds to the
1793 * to the di_next_unlinked pointers in the on disk inode structures. 1725 * di_next_unlinked pointers in the on disk inode structures. The rest of the
1794 * The rest of the data for the inodes is always logged through the 1726 * data for the inodes is always logged through the inodes themselves rather
1795 * inodes themselves rather than the inode buffer and is recovered 1727 * than the inode buffer and is recovered in xlog_recover_inode_pass2().
1796 * in xlog_recover_do_inode_trans().
1797 * 1728 *
1798 * The only time when buffers full of inodes are fully recovered is 1729 * The only time when buffers full of inodes are fully recovered is when the
1799 * when the buffer is full of newly allocated inodes. In this case 1730 * buffer is full of newly allocated inodes. In this case the buffer will
1800 * the buffer will not be marked as an inode buffer and so will be 1731 * not be marked as an inode buffer and so will be sent to
1801 * sent to xlog_recover_do_reg_buffer() below during recovery. 1732 * xlog_recover_do_reg_buffer() below during recovery.
1802 */ 1733 */
1803STATIC int 1734STATIC int
1804xlog_recover_do_inode_buffer( 1735xlog_recover_do_inode_buffer(
1805 xfs_mount_t *mp, 1736 struct xfs_mount *mp,
1806 xlog_recover_item_t *item, 1737 xlog_recover_item_t *item,
1807 xfs_buf_t *bp, 1738 struct xfs_buf *bp,
1808 xfs_buf_log_format_t *buf_f) 1739 xfs_buf_log_format_t *buf_f)
1809{ 1740{
1810 int i; 1741 int i;
1811 int item_index; 1742 int item_index = 0;
1812 int bit; 1743 int bit = 0;
1813 int nbits; 1744 int nbits = 0;
1814 int reg_buf_offset; 1745 int reg_buf_offset = 0;
1815 int reg_buf_bytes; 1746 int reg_buf_bytes = 0;
1816 int next_unlinked_offset; 1747 int next_unlinked_offset;
1817 int inodes_per_buf; 1748 int inodes_per_buf;
1818 xfs_agino_t *logged_nextp; 1749 xfs_agino_t *logged_nextp;
1819 xfs_agino_t *buffer_nextp; 1750 xfs_agino_t *buffer_nextp;
1820 unsigned int *data_map = NULL;
1821 unsigned int map_size = 0;
1822 1751
1823 trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f); 1752 trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);
1824 1753
1825 switch (buf_f->blf_type) {
1826 case XFS_LI_BUF:
1827 data_map = buf_f->blf_data_map;
1828 map_size = buf_f->blf_map_size;
1829 break;
1830 }
1831 /*
1832 * Set the variables corresponding to the current region to
1833 * 0 so that we'll initialize them on the first pass through
1834 * the loop.
1835 */
1836 reg_buf_offset = 0;
1837 reg_buf_bytes = 0;
1838 bit = 0;
1839 nbits = 0;
1840 item_index = 0;
1841 inodes_per_buf = XFS_BUF_COUNT(bp) >> mp->m_sb.sb_inodelog; 1754 inodes_per_buf = XFS_BUF_COUNT(bp) >> mp->m_sb.sb_inodelog;
1842 for (i = 0; i < inodes_per_buf; i++) { 1755 for (i = 0; i < inodes_per_buf; i++) {
1843 next_unlinked_offset = (i * mp->m_sb.sb_inodesize) + 1756 next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
@@ -1852,18 +1765,18 @@ xlog_recover_do_inode_buffer(
1852 * the current di_next_unlinked field. 1765 * the current di_next_unlinked field.
1853 */ 1766 */
1854 bit += nbits; 1767 bit += nbits;
1855 bit = xfs_next_bit(data_map, map_size, bit); 1768 bit = xfs_next_bit(buf_f->blf_data_map,
1769 buf_f->blf_map_size, bit);
1856 1770
1857 /* 1771 /*
1858 * If there are no more logged regions in the 1772 * If there are no more logged regions in the
1859 * buffer, then we're done. 1773 * buffer, then we're done.
1860 */ 1774 */
1861 if (bit == -1) { 1775 if (bit == -1)
1862 return 0; 1776 return 0;
1863 }
1864 1777
1865 nbits = xfs_contig_bits(data_map, map_size, 1778 nbits = xfs_contig_bits(buf_f->blf_data_map,
1866 bit); 1779 buf_f->blf_map_size, bit);
1867 ASSERT(nbits > 0); 1780 ASSERT(nbits > 0);
1868 reg_buf_offset = bit << XFS_BLF_SHIFT; 1781 reg_buf_offset = bit << XFS_BLF_SHIFT;
1869 reg_buf_bytes = nbits << XFS_BLF_SHIFT; 1782 reg_buf_bytes = nbits << XFS_BLF_SHIFT;
@@ -1875,9 +1788,8 @@ xlog_recover_do_inode_buffer(
1875 * di_next_unlinked field, then move on to the next 1788 * di_next_unlinked field, then move on to the next
1876 * di_next_unlinked field. 1789 * di_next_unlinked field.
1877 */ 1790 */
1878 if (next_unlinked_offset < reg_buf_offset) { 1791 if (next_unlinked_offset < reg_buf_offset)
1879 continue; 1792 continue;
1880 }
1881 1793
1882 ASSERT(item->ri_buf[item_index].i_addr != NULL); 1794 ASSERT(item->ri_buf[item_index].i_addr != NULL);
1883 ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0); 1795 ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
@@ -1913,36 +1825,29 @@ xlog_recover_do_inode_buffer(
1913 * given buffer. The bitmap in the buf log format structure indicates 1825 * given buffer. The bitmap in the buf log format structure indicates
1914 * where to place the logged data. 1826 * where to place the logged data.
1915 */ 1827 */
1916/*ARGSUSED*/
1917STATIC void 1828STATIC void
1918xlog_recover_do_reg_buffer( 1829xlog_recover_do_reg_buffer(
1919 struct xfs_mount *mp, 1830 struct xfs_mount *mp,
1920 xlog_recover_item_t *item, 1831 xlog_recover_item_t *item,
1921 xfs_buf_t *bp, 1832 struct xfs_buf *bp,
1922 xfs_buf_log_format_t *buf_f) 1833 xfs_buf_log_format_t *buf_f)
1923{ 1834{
1924 int i; 1835 int i;
1925 int bit; 1836 int bit;
1926 int nbits; 1837 int nbits;
1927 unsigned int *data_map = NULL;
1928 unsigned int map_size = 0;
1929 int error; 1838 int error;
1930 1839
1931 trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f); 1840 trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);
1932 1841
1933 switch (buf_f->blf_type) {
1934 case XFS_LI_BUF:
1935 data_map = buf_f->blf_data_map;
1936 map_size = buf_f->blf_map_size;
1937 break;
1938 }
1939 bit = 0; 1842 bit = 0;
1940 i = 1; /* 0 is the buf format structure */ 1843 i = 1; /* 0 is the buf format structure */
1941 while (1) { 1844 while (1) {
1942 bit = xfs_next_bit(data_map, map_size, bit); 1845 bit = xfs_next_bit(buf_f->blf_data_map,
1846 buf_f->blf_map_size, bit);
1943 if (bit == -1) 1847 if (bit == -1)
1944 break; 1848 break;
1945 nbits = xfs_contig_bits(data_map, map_size, bit); 1849 nbits = xfs_contig_bits(buf_f->blf_data_map,
1850 buf_f->blf_map_size, bit);
1946 ASSERT(nbits > 0); 1851 ASSERT(nbits > 0);
1947 ASSERT(item->ri_buf[i].i_addr != NULL); 1852 ASSERT(item->ri_buf[i].i_addr != NULL);
1948 ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0); 1853 ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
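
Both buffer-replay routines above now walk the dirty-region bitmap straight out of the log format structure: xfs_next_bit() locates the next logged chunk, xfs_contig_bits() measures the run, and each bit covers XFS_BLF_CHUNK bytes, so bit << XFS_BLF_SHIFT gives the byte offset. A condensed sketch of just that iteration (illustrative only):

    static void example_walk_logged_regions(xfs_buf_log_format_t *buf_f)
    {
            int bit = 0, nbits;

            while ((bit = xfs_next_bit(buf_f->blf_data_map,
                                       buf_f->blf_map_size, bit)) != -1) {
                    nbits = xfs_contig_bits(buf_f->blf_data_map,
                                            buf_f->blf_map_size, bit);
                    /* this run covers buffer bytes [bit << XFS_BLF_SHIFT,
                       (bit + nbits) << XFS_BLF_SHIFT) */
                    bit += nbits;           /* resume the scan after the run */
            }
    }
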
@@ -2176,77 +2081,46 @@ xlog_recover_do_dquot_buffer(
2176 * for more details on the implementation of the table of cancel records. 2081 * for more details on the implementation of the table of cancel records.
2177 */ 2082 */
2178STATIC int 2083STATIC int
2179xlog_recover_do_buffer_trans( 2084xlog_recover_buffer_pass2(
2180 xlog_t *log, 2085 xlog_t *log,
2181 xlog_recover_item_t *item, 2086 xlog_recover_item_t *item)
2182 int pass)
2183{ 2087{
2184 xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr; 2088 xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
2185 xfs_mount_t *mp; 2089 xfs_mount_t *mp = log->l_mp;
2186 xfs_buf_t *bp; 2090 xfs_buf_t *bp;
2187 int error; 2091 int error;
2188 int cancel;
2189 xfs_daddr_t blkno;
2190 int len;
2191 ushort flags;
2192 uint buf_flags; 2092 uint buf_flags;
2193 2093
2194 if (pass == XLOG_RECOVER_PASS1) { 2094 /*
2195 /* 2095 * In this pass we only want to recover all the buffers which have
2196 * In this pass we're only looking for buf items 2096 * not been cancelled and are not cancellation buffers themselves.
2197 * with the XFS_BLF_CANCEL bit set. 2097 */
2198 */ 2098 if (xlog_check_buffer_cancelled(log, buf_f->blf_blkno,
2199 xlog_recover_do_buffer_pass1(log, buf_f); 2099 buf_f->blf_len, buf_f->blf_flags)) {
2100 trace_xfs_log_recover_buf_cancel(log, buf_f);
2200 return 0; 2101 return 0;
2201 } else {
2202 /*
2203 * In this pass we want to recover all the buffers
2204 * which have not been cancelled and are not
2205 * cancellation buffers themselves. The routine
2206 * we call here will tell us whether or not to
2207 * continue with the replay of this buffer.
2208 */
2209 cancel = xlog_recover_do_buffer_pass2(log, buf_f);
2210 if (cancel) {
2211 trace_xfs_log_recover_buf_cancel(log, buf_f);
2212 return 0;
2213 }
2214 } 2102 }
2103
2215 trace_xfs_log_recover_buf_recover(log, buf_f); 2104 trace_xfs_log_recover_buf_recover(log, buf_f);
2216 switch (buf_f->blf_type) {
2217 case XFS_LI_BUF:
2218 blkno = buf_f->blf_blkno;
2219 len = buf_f->blf_len;
2220 flags = buf_f->blf_flags;
2221 break;
2222 default:
2223 xfs_fs_cmn_err(CE_ALERT, log->l_mp,
2224 "xfs_log_recover: unknown buffer type 0x%x, logdev %s",
2225 buf_f->blf_type, log->l_mp->m_logname ?
2226 log->l_mp->m_logname : "internal");
2227 XFS_ERROR_REPORT("xlog_recover_do_buffer_trans",
2228 XFS_ERRLEVEL_LOW, log->l_mp);
2229 return XFS_ERROR(EFSCORRUPTED);
2230 }
2231 2105
2232 mp = log->l_mp;
2233 buf_flags = XBF_LOCK; 2106 buf_flags = XBF_LOCK;
2234 if (!(flags & XFS_BLF_INODE_BUF)) 2107 if (!(buf_f->blf_flags & XFS_BLF_INODE_BUF))
2235 buf_flags |= XBF_MAPPED; 2108 buf_flags |= XBF_MAPPED;
2236 2109
2237 bp = xfs_buf_read(mp->m_ddev_targp, blkno, len, buf_flags); 2110 bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
2111 buf_flags);
2238 if (XFS_BUF_ISERROR(bp)) { 2112 if (XFS_BUF_ISERROR(bp)) {
2239 xfs_ioerror_alert("xlog_recover_do..(read#1)", log->l_mp, 2113 xfs_ioerror_alert("xlog_recover_do..(read#1)", mp,
2240 bp, blkno); 2114 bp, buf_f->blf_blkno);
2241 error = XFS_BUF_GETERROR(bp); 2115 error = XFS_BUF_GETERROR(bp);
2242 xfs_buf_relse(bp); 2116 xfs_buf_relse(bp);
2243 return error; 2117 return error;
2244 } 2118 }
2245 2119
2246 error = 0; 2120 error = 0;
2247 if (flags & XFS_BLF_INODE_BUF) { 2121 if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
2248 error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f); 2122 error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
2249 } else if (flags & 2123 } else if (buf_f->blf_flags &
2250 (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) { 2124 (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2251 xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f); 2125 xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
2252 } else { 2126 } else {
@@ -2286,16 +2160,14 @@ xlog_recover_do_buffer_trans(
2286} 2160}
2287 2161
2288STATIC int 2162STATIC int
2289xlog_recover_do_inode_trans( 2163xlog_recover_inode_pass2(
2290 xlog_t *log, 2164 xlog_t *log,
2291 xlog_recover_item_t *item, 2165 xlog_recover_item_t *item)
2292 int pass)
2293{ 2166{
2294 xfs_inode_log_format_t *in_f; 2167 xfs_inode_log_format_t *in_f;
2295 xfs_mount_t *mp; 2168 xfs_mount_t *mp = log->l_mp;
2296 xfs_buf_t *bp; 2169 xfs_buf_t *bp;
2297 xfs_dinode_t *dip; 2170 xfs_dinode_t *dip;
2298 xfs_ino_t ino;
2299 int len; 2171 int len;
2300 xfs_caddr_t src; 2172 xfs_caddr_t src;
2301 xfs_caddr_t dest; 2173 xfs_caddr_t dest;
@@ -2305,10 +2177,6 @@ xlog_recover_do_inode_trans(
2305 xfs_icdinode_t *dicp; 2177 xfs_icdinode_t *dicp;
2306 int need_free = 0; 2178 int need_free = 0;
2307 2179
2308 if (pass == XLOG_RECOVER_PASS1) {
2309 return 0;
2310 }
2311
2312 if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) { 2180 if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) {
2313 in_f = item->ri_buf[0].i_addr; 2181 in_f = item->ri_buf[0].i_addr;
2314 } else { 2182 } else {
@@ -2318,8 +2186,6 @@ xlog_recover_do_inode_trans(
2318 if (error) 2186 if (error)
2319 goto error; 2187 goto error;
2320 } 2188 }
2321 ino = in_f->ilf_ino;
2322 mp = log->l_mp;
2323 2189
2324 /* 2190 /*
2325 * Inode buffers can be freed, look out for it, 2191 * Inode buffers can be freed, look out for it,
@@ -2354,8 +2220,8 @@ xlog_recover_do_inode_trans(
2354 xfs_buf_relse(bp); 2220 xfs_buf_relse(bp);
2355 xfs_fs_cmn_err(CE_ALERT, mp, 2221 xfs_fs_cmn_err(CE_ALERT, mp,
2356 "xfs_inode_recover: Bad inode magic number, dino ptr = 0x%p, dino bp = 0x%p, ino = %Ld", 2222 "xfs_inode_recover: Bad inode magic number, dino ptr = 0x%p, dino bp = 0x%p, ino = %Ld",
2357 dip, bp, ino); 2223 dip, bp, in_f->ilf_ino);
2358 XFS_ERROR_REPORT("xlog_recover_do_inode_trans(1)", 2224 XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)",
2359 XFS_ERRLEVEL_LOW, mp); 2225 XFS_ERRLEVEL_LOW, mp);
2360 error = EFSCORRUPTED; 2226 error = EFSCORRUPTED;
2361 goto error; 2227 goto error;
@@ -2365,8 +2231,8 @@ xlog_recover_do_inode_trans(
2365 xfs_buf_relse(bp); 2231 xfs_buf_relse(bp);
2366 xfs_fs_cmn_err(CE_ALERT, mp, 2232 xfs_fs_cmn_err(CE_ALERT, mp,
2367 "xfs_inode_recover: Bad inode log record, rec ptr 0x%p, ino %Ld", 2233 "xfs_inode_recover: Bad inode log record, rec ptr 0x%p, ino %Ld",
2368 item, ino); 2234 item, in_f->ilf_ino);
2369 XFS_ERROR_REPORT("xlog_recover_do_inode_trans(2)", 2235 XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)",
2370 XFS_ERRLEVEL_LOW, mp); 2236 XFS_ERRLEVEL_LOW, mp);
2371 error = EFSCORRUPTED; 2237 error = EFSCORRUPTED;
2372 goto error; 2238 goto error;
@@ -2394,12 +2260,12 @@ xlog_recover_do_inode_trans(
2394 if (unlikely((dicp->di_mode & S_IFMT) == S_IFREG)) { 2260 if (unlikely((dicp->di_mode & S_IFMT) == S_IFREG)) {
2395 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) && 2261 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2396 (dicp->di_format != XFS_DINODE_FMT_BTREE)) { 2262 (dicp->di_format != XFS_DINODE_FMT_BTREE)) {
2397 XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(3)", 2263 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)",
2398 XFS_ERRLEVEL_LOW, mp, dicp); 2264 XFS_ERRLEVEL_LOW, mp, dicp);
2399 xfs_buf_relse(bp); 2265 xfs_buf_relse(bp);
2400 xfs_fs_cmn_err(CE_ALERT, mp, 2266 xfs_fs_cmn_err(CE_ALERT, mp,
2401 "xfs_inode_recover: Bad regular inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld", 2267 "xfs_inode_recover: Bad regular inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2402 item, dip, bp, ino); 2268 item, dip, bp, in_f->ilf_ino);
2403 error = EFSCORRUPTED; 2269 error = EFSCORRUPTED;
2404 goto error; 2270 goto error;
2405 } 2271 }
@@ -2407,40 +2273,40 @@ xlog_recover_do_inode_trans(
2407 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) && 2273 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2408 (dicp->di_format != XFS_DINODE_FMT_BTREE) && 2274 (dicp->di_format != XFS_DINODE_FMT_BTREE) &&
2409 (dicp->di_format != XFS_DINODE_FMT_LOCAL)) { 2275 (dicp->di_format != XFS_DINODE_FMT_LOCAL)) {
2410 XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(4)", 2276 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)",
2411 XFS_ERRLEVEL_LOW, mp, dicp); 2277 XFS_ERRLEVEL_LOW, mp, dicp);
2412 xfs_buf_relse(bp); 2278 xfs_buf_relse(bp);
2413 xfs_fs_cmn_err(CE_ALERT, mp, 2279 xfs_fs_cmn_err(CE_ALERT, mp,
2414 "xfs_inode_recover: Bad dir inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld", 2280 "xfs_inode_recover: Bad dir inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2415 item, dip, bp, ino); 2281 item, dip, bp, in_f->ilf_ino);
2416 error = EFSCORRUPTED; 2282 error = EFSCORRUPTED;
2417 goto error; 2283 goto error;
2418 } 2284 }
2419 } 2285 }
2420 if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)){ 2286 if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)){
2421 XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(5)", 2287 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)",
2422 XFS_ERRLEVEL_LOW, mp, dicp); 2288 XFS_ERRLEVEL_LOW, mp, dicp);
2423 xfs_buf_relse(bp); 2289 xfs_buf_relse(bp);
2424 xfs_fs_cmn_err(CE_ALERT, mp, 2290 xfs_fs_cmn_err(CE_ALERT, mp,
2425 "xfs_inode_recover: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld", 2291 "xfs_inode_recover: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
2426 item, dip, bp, ino, 2292 item, dip, bp, in_f->ilf_ino,
2427 dicp->di_nextents + dicp->di_anextents, 2293 dicp->di_nextents + dicp->di_anextents,
2428 dicp->di_nblocks); 2294 dicp->di_nblocks);
2429 error = EFSCORRUPTED; 2295 error = EFSCORRUPTED;
2430 goto error; 2296 goto error;
2431 } 2297 }
2432 if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) { 2298 if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) {
2433 XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(6)", 2299 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)",
2434 XFS_ERRLEVEL_LOW, mp, dicp); 2300 XFS_ERRLEVEL_LOW, mp, dicp);
2435 xfs_buf_relse(bp); 2301 xfs_buf_relse(bp);
2436 xfs_fs_cmn_err(CE_ALERT, mp, 2302 xfs_fs_cmn_err(CE_ALERT, mp,
2437 "xfs_inode_recover: Bad inode log rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, forkoff 0x%x", 2303 "xfs_inode_recover: Bad inode log rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, forkoff 0x%x",
2438 item, dip, bp, ino, dicp->di_forkoff); 2304 item, dip, bp, in_f->ilf_ino, dicp->di_forkoff);
2439 error = EFSCORRUPTED; 2305 error = EFSCORRUPTED;
2440 goto error; 2306 goto error;
2441 } 2307 }
2442 if (unlikely(item->ri_buf[1].i_len > sizeof(struct xfs_icdinode))) { 2308 if (unlikely(item->ri_buf[1].i_len > sizeof(struct xfs_icdinode))) {
2443 XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(7)", 2309 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)",
2444 XFS_ERRLEVEL_LOW, mp, dicp); 2310 XFS_ERRLEVEL_LOW, mp, dicp);
2445 xfs_buf_relse(bp); 2311 xfs_buf_relse(bp);
2446 xfs_fs_cmn_err(CE_ALERT, mp, 2312 xfs_fs_cmn_err(CE_ALERT, mp,
@@ -2532,7 +2398,7 @@ xlog_recover_do_inode_trans(
2532 break; 2398 break;
2533 2399
2534 default: 2400 default:
2535 xlog_warn("XFS: xlog_recover_do_inode_trans: Invalid flag"); 2401 xlog_warn("XFS: xlog_recover_inode_pass2: Invalid flag");
2536 ASSERT(0); 2402 ASSERT(0);
2537 xfs_buf_relse(bp); 2403 xfs_buf_relse(bp);
2538 error = EIO; 2404 error = EIO;
@@ -2556,18 +2422,11 @@ error:
2556 * of that type. 2422 * of that type.
2557 */ 2423 */
2558STATIC int 2424STATIC int
2559xlog_recover_do_quotaoff_trans( 2425xlog_recover_quotaoff_pass1(
2560 xlog_t *log, 2426 xlog_t *log,
2561 xlog_recover_item_t *item, 2427 xlog_recover_item_t *item)
2562 int pass)
2563{ 2428{
2564 xfs_qoff_logformat_t *qoff_f; 2429 xfs_qoff_logformat_t *qoff_f = item->ri_buf[0].i_addr;
2565
2566 if (pass == XLOG_RECOVER_PASS2) {
2567 return (0);
2568 }
2569
2570 qoff_f = item->ri_buf[0].i_addr;
2571 ASSERT(qoff_f); 2430 ASSERT(qoff_f);
2572 2431
2573 /* 2432 /*
@@ -2588,22 +2447,17 @@ xlog_recover_do_quotaoff_trans(
2588 * Recover a dquot record 2447 * Recover a dquot record
2589 */ 2448 */
2590STATIC int 2449STATIC int
2591xlog_recover_do_dquot_trans( 2450xlog_recover_dquot_pass2(
2592 xlog_t *log, 2451 xlog_t *log,
2593 xlog_recover_item_t *item, 2452 xlog_recover_item_t *item)
2594 int pass)
2595{ 2453{
2596 xfs_mount_t *mp; 2454 xfs_mount_t *mp = log->l_mp;
2597 xfs_buf_t *bp; 2455 xfs_buf_t *bp;
2598 struct xfs_disk_dquot *ddq, *recddq; 2456 struct xfs_disk_dquot *ddq, *recddq;
2599 int error; 2457 int error;
2600 xfs_dq_logformat_t *dq_f; 2458 xfs_dq_logformat_t *dq_f;
2601 uint type; 2459 uint type;
2602 2460
2603 if (pass == XLOG_RECOVER_PASS1) {
2604 return 0;
2605 }
2606 mp = log->l_mp;
2607 2461
2608 /* 2462 /*
2609 * Filesystems are required to send in quota flags at mount time. 2463 * Filesystems are required to send in quota flags at mount time.
@@ -2647,7 +2501,7 @@ xlog_recover_do_dquot_trans(
2647 if ((error = xfs_qm_dqcheck(recddq, 2501 if ((error = xfs_qm_dqcheck(recddq,
2648 dq_f->qlf_id, 2502 dq_f->qlf_id,
2649 0, XFS_QMOPT_DOWARN, 2503 0, XFS_QMOPT_DOWARN,
2650 "xlog_recover_do_dquot_trans (log copy)"))) { 2504 "xlog_recover_dquot_pass2 (log copy)"))) {
2651 return XFS_ERROR(EIO); 2505 return XFS_ERROR(EIO);
2652 } 2506 }
2653 ASSERT(dq_f->qlf_len == 1); 2507 ASSERT(dq_f->qlf_len == 1);
@@ -2670,7 +2524,7 @@ xlog_recover_do_dquot_trans(
2670 * minimal initialization then. 2524 * minimal initialization then.
2671 */ 2525 */
2672 if (xfs_qm_dqcheck(ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN, 2526 if (xfs_qm_dqcheck(ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
2673 "xlog_recover_do_dquot_trans")) { 2527 "xlog_recover_dquot_pass2")) {
2674 xfs_buf_relse(bp); 2528 xfs_buf_relse(bp);
2675 return XFS_ERROR(EIO); 2529 return XFS_ERROR(EIO);
2676 } 2530 }
@@ -2693,38 +2547,31 @@ xlog_recover_do_dquot_trans(
2693 * LSN. 2547 * LSN.
2694 */ 2548 */
2695STATIC int 2549STATIC int
2696xlog_recover_do_efi_trans( 2550xlog_recover_efi_pass2(
2697 xlog_t *log, 2551 xlog_t *log,
2698 xlog_recover_item_t *item, 2552 xlog_recover_item_t *item,
2699 xfs_lsn_t lsn, 2553 xfs_lsn_t lsn)
2700 int pass)
2701{ 2554{
2702 int error; 2555 int error;
2703 xfs_mount_t *mp; 2556 xfs_mount_t *mp = log->l_mp;
2704 xfs_efi_log_item_t *efip; 2557 xfs_efi_log_item_t *efip;
2705 xfs_efi_log_format_t *efi_formatp; 2558 xfs_efi_log_format_t *efi_formatp;
2706 2559
2707 if (pass == XLOG_RECOVER_PASS1) {
2708 return 0;
2709 }
2710
2711 efi_formatp = item->ri_buf[0].i_addr; 2560 efi_formatp = item->ri_buf[0].i_addr;
2712 2561
2713 mp = log->l_mp;
2714 efip = xfs_efi_init(mp, efi_formatp->efi_nextents); 2562 efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
2715 if ((error = xfs_efi_copy_format(&(item->ri_buf[0]), 2563 if ((error = xfs_efi_copy_format(&(item->ri_buf[0]),
2716 &(efip->efi_format)))) { 2564 &(efip->efi_format)))) {
2717 xfs_efi_item_free(efip); 2565 xfs_efi_item_free(efip);
2718 return error; 2566 return error;
2719 } 2567 }
2720 efip->efi_next_extent = efi_formatp->efi_nextents; 2568 atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents);
2721 efip->efi_flags |= XFS_EFI_COMMITTED;
2722 2569
2723 spin_lock(&log->l_ailp->xa_lock); 2570 spin_lock(&log->l_ailp->xa_lock);
2724 /* 2571 /*
2725 * xfs_trans_ail_update() drops the AIL lock. 2572 * xfs_trans_ail_update() drops the AIL lock.
2726 */ 2573 */
2727 xfs_trans_ail_update(log->l_ailp, (xfs_log_item_t *)efip, lsn); 2574 xfs_trans_ail_update(log->l_ailp, &efip->efi_item, lsn);
2728 return 0; 2575 return 0;
2729} 2576}
2730 2577
@@ -2737,11 +2584,10 @@ xlog_recover_do_efi_trans(
2737 * efd format structure. If we find it, we remove the efi from the 2584 * efd format structure. If we find it, we remove the efi from the
2738 * AIL and free it. 2585 * AIL and free it.
2739 */ 2586 */
2740STATIC void 2587STATIC int
2741xlog_recover_do_efd_trans( 2588xlog_recover_efd_pass2(
2742 xlog_t *log, 2589 xlog_t *log,
2743 xlog_recover_item_t *item, 2590 xlog_recover_item_t *item)
2744 int pass)
2745{ 2591{
2746 xfs_efd_log_format_t *efd_formatp; 2592 xfs_efd_log_format_t *efd_formatp;
2747 xfs_efi_log_item_t *efip = NULL; 2593 xfs_efi_log_item_t *efip = NULL;
@@ -2750,10 +2596,6 @@ xlog_recover_do_efd_trans(
2750 struct xfs_ail_cursor cur; 2596 struct xfs_ail_cursor cur;
2751 struct xfs_ail *ailp = log->l_ailp; 2597 struct xfs_ail *ailp = log->l_ailp;
2752 2598
2753 if (pass == XLOG_RECOVER_PASS1) {
2754 return;
2755 }
2756
2757 efd_formatp = item->ri_buf[0].i_addr; 2599 efd_formatp = item->ri_buf[0].i_addr;
2758 ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) + 2600 ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
2759 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) || 2601 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
@@ -2785,62 +2627,6 @@ xlog_recover_do_efd_trans(
2785 } 2627 }
2786 xfs_trans_ail_cursor_done(ailp, &cur); 2628 xfs_trans_ail_cursor_done(ailp, &cur);
2787 spin_unlock(&ailp->xa_lock); 2629 spin_unlock(&ailp->xa_lock);
2788}
2789
2790/*
2791 * Perform the transaction
2792 *
2793 * If the transaction modifies a buffer or inode, do it now. Otherwise,
2794 * EFIs and EFDs get queued up by adding entries into the AIL for them.
2795 */
2796STATIC int
2797xlog_recover_do_trans(
2798 xlog_t *log,
2799 xlog_recover_t *trans,
2800 int pass)
2801{
2802 int error = 0;
2803 xlog_recover_item_t *item;
2804
2805 error = xlog_recover_reorder_trans(log, trans, pass);
2806 if (error)
2807 return error;
2808
2809 list_for_each_entry(item, &trans->r_itemq, ri_list) {
2810 trace_xfs_log_recover_item_recover(log, trans, item, pass);
2811 switch (ITEM_TYPE(item)) {
2812 case XFS_LI_BUF:
2813 error = xlog_recover_do_buffer_trans(log, item, pass);
2814 break;
2815 case XFS_LI_INODE:
2816 error = xlog_recover_do_inode_trans(log, item, pass);
2817 break;
2818 case XFS_LI_EFI:
2819 error = xlog_recover_do_efi_trans(log, item,
2820 trans->r_lsn, pass);
2821 break;
2822 case XFS_LI_EFD:
2823 xlog_recover_do_efd_trans(log, item, pass);
2824 error = 0;
2825 break;
2826 case XFS_LI_DQUOT:
2827 error = xlog_recover_do_dquot_trans(log, item, pass);
2828 break;
2829 case XFS_LI_QUOTAOFF:
2830 error = xlog_recover_do_quotaoff_trans(log, item,
2831 pass);
2832 break;
2833 default:
2834 xlog_warn(
2835 "XFS: invalid item type (%d) xlog_recover_do_trans", ITEM_TYPE(item));
2836 ASSERT(0);
2837 error = XFS_ERROR(EIO);
2838 break;
2839 }
2840
2841 if (error)
2842 return error;
2843 }
2844 2630
2845 return 0; 2631 return 0;
2846} 2632}
@@ -2852,7 +2638,7 @@ xlog_recover_do_trans(
2852 */ 2638 */
2853STATIC void 2639STATIC void
2854xlog_recover_free_trans( 2640xlog_recover_free_trans(
2855 xlog_recover_t *trans) 2641 struct xlog_recover *trans)
2856{ 2642{
2857 xlog_recover_item_t *item, *n; 2643 xlog_recover_item_t *item, *n;
2858 int i; 2644 int i;
@@ -2871,17 +2657,95 @@ xlog_recover_free_trans(
2871} 2657}
2872 2658
2873STATIC int 2659STATIC int
2660xlog_recover_commit_pass1(
2661 struct log *log,
2662 struct xlog_recover *trans,
2663 xlog_recover_item_t *item)
2664{
2665 trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1);
2666
2667 switch (ITEM_TYPE(item)) {
2668 case XFS_LI_BUF:
2669 return xlog_recover_buffer_pass1(log, item);
2670 case XFS_LI_QUOTAOFF:
2671 return xlog_recover_quotaoff_pass1(log, item);
2672 case XFS_LI_INODE:
2673 case XFS_LI_EFI:
2674 case XFS_LI_EFD:
2675 case XFS_LI_DQUOT:
2676 /* nothing to do in pass 1 */
2677 return 0;
2678 default:
2679 xlog_warn(
2680 "XFS: invalid item type (%d) xlog_recover_commit_pass1",
2681 ITEM_TYPE(item));
2682 ASSERT(0);
2683 return XFS_ERROR(EIO);
2684 }
2685}
2686
2687STATIC int
2688xlog_recover_commit_pass2(
2689 struct log *log,
2690 struct xlog_recover *trans,
2691 xlog_recover_item_t *item)
2692{
2693 trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2);
2694
2695 switch (ITEM_TYPE(item)) {
2696 case XFS_LI_BUF:
2697 return xlog_recover_buffer_pass2(log, item);
2698 case XFS_LI_INODE:
2699 return xlog_recover_inode_pass2(log, item);
2700 case XFS_LI_EFI:
2701 return xlog_recover_efi_pass2(log, item, trans->r_lsn);
2702 case XFS_LI_EFD:
2703 return xlog_recover_efd_pass2(log, item);
2704 case XFS_LI_DQUOT:
2705 return xlog_recover_dquot_pass2(log, item);
2706 case XFS_LI_QUOTAOFF:
2707 /* nothing to do in pass2 */
2708 return 0;
2709 default:
2710 xlog_warn(
2711 "XFS: invalid item type (%d) xlog_recover_commit_pass2",
2712 ITEM_TYPE(item));
2713 ASSERT(0);
2714 return XFS_ERROR(EIO);
2715 }
2716}
2717
2718/*
2719 * Perform the transaction.
2720 *
2721 * If the transaction modifies a buffer or inode, do it now. Otherwise,
2722 * EFIs and EFDs get queued up by adding entries into the AIL for them.
2723 */
2724STATIC int
2874xlog_recover_commit_trans( 2725xlog_recover_commit_trans(
2875 xlog_t *log, 2726 struct log *log,
2876 xlog_recover_t *trans, 2727 struct xlog_recover *trans,
2877 int pass) 2728 int pass)
2878{ 2729{
2879 int error; 2730 int error = 0;
2731 xlog_recover_item_t *item;
2880 2732
2881 hlist_del(&trans->r_list); 2733 hlist_del(&trans->r_list);
2882 if ((error = xlog_recover_do_trans(log, trans, pass))) 2734
2735 error = xlog_recover_reorder_trans(log, trans, pass);
2736 if (error)
2883 return error; 2737 return error;
2884 xlog_recover_free_trans(trans); /* no error */ 2738
2739 list_for_each_entry(item, &trans->r_itemq, ri_list) {
2740 if (pass == XLOG_RECOVER_PASS1)
2741 error = xlog_recover_commit_pass1(log, trans, item);
2742 else
2743 error = xlog_recover_commit_pass2(log, trans, item);
2744 if (error)
2745 return error;
2746 }
2747
2748 xlog_recover_free_trans(trans);
2885 return 0; 2749 return 0;
2886} 2750}
2887 2751
@@ -3011,7 +2875,7 @@ xlog_recover_process_efi(
3011 xfs_extent_t *extp; 2875 xfs_extent_t *extp;
3012 xfs_fsblock_t startblock_fsb; 2876 xfs_fsblock_t startblock_fsb;
3013 2877
3014 ASSERT(!(efip->efi_flags & XFS_EFI_RECOVERED)); 2878 ASSERT(!test_bit(XFS_EFI_RECOVERED, &efip->efi_flags));
3015 2879
3016 /* 2880 /*
3017 * First check the validity of the extents described by the 2881 * First check the validity of the extents described by the
@@ -3050,7 +2914,7 @@ xlog_recover_process_efi(
3050 extp->ext_len); 2914 extp->ext_len);
3051 } 2915 }
3052 2916
3053 efip->efi_flags |= XFS_EFI_RECOVERED; 2917 set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
3054 error = xfs_trans_commit(tp, 0); 2918 error = xfs_trans_commit(tp, 0);
3055 return error; 2919 return error;
3056 2920
@@ -3107,7 +2971,7 @@ xlog_recover_process_efis(
3107 * Skip EFIs that we've already processed. 2971 * Skip EFIs that we've already processed.
3108 */ 2972 */
3109 efip = (xfs_efi_log_item_t *)lip; 2973 efip = (xfs_efi_log_item_t *)lip;
3110 if (efip->efi_flags & XFS_EFI_RECOVERED) { 2974 if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags)) {
3111 lip = xfs_trans_ail_cursor_next(ailp, &cur); 2975 lip = xfs_trans_ail_cursor_next(ailp, &cur);
3112 continue; 2976 continue;
3113 } 2977 }
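
The two hunks above reflect XFS_EFI_RECOVERED becoming a bit number operated on with set_bit()/test_bit() instead of a mask OR'd into a plain flags word, so recovery can mark an EFI without extra locking. A sketch of the idiom, assuming efi_flags is the unsigned long that these bitops imply (illustrative; test_and_set_bit() is not something this patch itself uses):

    static int example_claim_efi(xfs_efi_log_item_t *efip)
    {
            /* atomically test and set in one step: returns nonzero if the
               bit was already set, i.e. someone else processed this EFI */
            if (test_and_set_bit(XFS_EFI_RECOVERED, &efip->efi_flags))
                    return 0;
            return 1;       /* we are the one that gets to process it */
    }
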
@@ -3724,7 +3588,7 @@ xlog_do_log_recovery(
3724 xfs_daddr_t head_blk, 3588 xfs_daddr_t head_blk,
3725 xfs_daddr_t tail_blk) 3589 xfs_daddr_t tail_blk)
3726{ 3590{
3727 int error; 3591 int error, i;
3728 3592
3729 ASSERT(head_blk != tail_blk); 3593 ASSERT(head_blk != tail_blk);
3730 3594
@@ -3732,10 +3596,12 @@ xlog_do_log_recovery(
3732 * First do a pass to find all of the cancelled buf log items. 3596 * First do a pass to find all of the cancelled buf log items.
3733 * Store them in the buf_cancel_table for use in the second pass. 3597 * Store them in the buf_cancel_table for use in the second pass.
3734 */ 3598 */
3735 log->l_buf_cancel_table = 3599 log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
3736 (xfs_buf_cancel_t **)kmem_zalloc(XLOG_BC_TABLE_SIZE * 3600 sizeof(struct list_head),
3737 sizeof(xfs_buf_cancel_t*),
3738 KM_SLEEP); 3601 KM_SLEEP);
3602 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
3603 INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
3604
3739 error = xlog_do_recovery_pass(log, head_blk, tail_blk, 3605 error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3740 XLOG_RECOVER_PASS1); 3606 XLOG_RECOVER_PASS1);
3741 if (error != 0) { 3607 if (error != 0) {
@@ -3754,7 +3620,7 @@ xlog_do_log_recovery(
3754 int i; 3620 int i;
3755 3621
3756 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++) 3622 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
3757 ASSERT(log->l_buf_cancel_table[i] == NULL); 3623 ASSERT(list_empty(&log->l_buf_cancel_table[i]));
3758 } 3624 }
3759#endif /* DEBUG */ 3625#endif /* DEBUG */
3760 3626
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 19e9dfa1c254..d447aef84bc3 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -472,7 +472,7 @@ xfs_initialize_perag(
472 goto out_unwind; 472 goto out_unwind;
473 pag->pag_agno = index; 473 pag->pag_agno = index;
474 pag->pag_mount = mp; 474 pag->pag_mount = mp;
475 rwlock_init(&pag->pag_ici_lock); 475 spin_lock_init(&pag->pag_ici_lock);
476 mutex_init(&pag->pag_ici_reclaim_lock); 476 mutex_init(&pag->pag_ici_reclaim_lock);
477 INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC); 477 INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
478 spin_lock_init(&pag->pag_buf_lock); 478 spin_lock_init(&pag->pag_buf_lock);
@@ -975,6 +975,24 @@ xfs_set_rw_sizes(xfs_mount_t *mp)
975} 975}
976 976
977/* 977/*
978 * precalculate the low space thresholds for dynamic speculative preallocation.
979 */
980void
981xfs_set_low_space_thresholds(
982 struct xfs_mount *mp)
983{
984 int i;
985
986 for (i = 0; i < XFS_LOWSP_MAX; i++) {
987 __uint64_t space = mp->m_sb.sb_dblocks;
988
989 do_div(space, 100);
990 mp->m_low_space[i] = space * (i + 1);
991 }
992}
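
The arithmetic above divides first (do_div() leaves sb_dblocks/100 in space) and then scales, so m_low_space[i] ends up at (i+1)% of the data blocks. A standalone worked example with a hypothetical 1,000,000-block filesystem:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t dblocks = 1000000;     /* hypothetical sb_dblocks */
            int64_t low_space[5];

            for (int i = 0; i < 5; i++)
                    low_space[i] = (dblocks / 100) * (i + 1);

            /* prints 10000 20000 30000 40000 50000: the 1%..5% thresholds */
            for (int i = 0; i < 5; i++)
                    printf("%lld\n", (long long)low_space[i]);
            return 0;
    }
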
993
994
995/*
978 * Set whether we're using inode alignment. 996 * Set whether we're using inode alignment.
979 */ 997 */
980STATIC void 998STATIC void
@@ -1196,6 +1214,9 @@ xfs_mountfs(
1196 */ 1214 */
1197 xfs_set_rw_sizes(mp); 1215 xfs_set_rw_sizes(mp);
1198 1216
1217 /* set the low space thresholds for dynamic preallocation */
1218 xfs_set_low_space_thresholds(mp);
1219
1199 /* 1220 /*
1200 * Set the inode cluster size. 1221 * Set the inode cluster size.
1201 * This may still be overridden by the file system 1222 * This may still be overridden by the file system
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 5861b4980740..a62e8971539d 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -103,6 +103,16 @@ extern int xfs_icsb_modify_counters(struct xfs_mount *, xfs_sb_field_t,
103 xfs_mod_incore_sb(mp, field, delta, rsvd) 103 xfs_mod_incore_sb(mp, field, delta, rsvd)
104#endif 104#endif
105 105
106/* dynamic preallocation free space thresholds, 5% down to 1% */
107enum {
108 XFS_LOWSP_1_PCNT = 0,
109 XFS_LOWSP_2_PCNT,
110 XFS_LOWSP_3_PCNT,
111 XFS_LOWSP_4_PCNT,
112 XFS_LOWSP_5_PCNT,
113 XFS_LOWSP_MAX,
114};
115
106typedef struct xfs_mount { 116typedef struct xfs_mount {
107 struct super_block *m_super; 117 struct super_block *m_super;
108 xfs_tid_t m_tid; /* next unused tid for fs */ 118 xfs_tid_t m_tid; /* next unused tid for fs */
@@ -202,6 +212,8 @@ typedef struct xfs_mount {
202 __int64_t m_update_flags; /* sb flags we need to update 212 __int64_t m_update_flags; /* sb flags we need to update
203 on the next remount,rw */ 213 on the next remount,rw */
204 struct shrinker m_inode_shrink; /* inode reclaim shrinker */ 214 struct shrinker m_inode_shrink; /* inode reclaim shrinker */
215 int64_t m_low_space[XFS_LOWSP_MAX];
216 /* low free space thresholds */
205} xfs_mount_t; 217} xfs_mount_t;
206 218
207/* 219/*
@@ -379,6 +391,8 @@ extern int xfs_sb_validate_fsb_count(struct xfs_sb *, __uint64_t);
379 391
380extern int xfs_dev_is_read_only(struct xfs_mount *, char *); 392extern int xfs_dev_is_read_only(struct xfs_mount *, char *);
381 393
394extern void xfs_set_low_space_thresholds(struct xfs_mount *);
395
382#endif /* __KERNEL__ */ 396#endif /* __KERNEL__ */
383 397
384extern void xfs_mod_sb(struct xfs_trans *, __int64_t); 398extern void xfs_mod_sb(struct xfs_trans *, __int64_t);
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index f6d956b7711e..f80a067a4658 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -1350,7 +1350,7 @@ xfs_trans_fill_vecs(
1350 * they could be immediately flushed and we'd have to race with the flusher 1350 * they could be immediately flushed and we'd have to race with the flusher
1351 * trying to pull the item from the AIL as we add it. 1351 * trying to pull the item from the AIL as we add it.
1352 */ 1352 */
1353void 1353static void
1354xfs_trans_item_committed( 1354xfs_trans_item_committed(
1355 struct xfs_log_item *lip, 1355 struct xfs_log_item *lip,
1356 xfs_lsn_t commit_lsn, 1356 xfs_lsn_t commit_lsn,
@@ -1425,6 +1425,83 @@ xfs_trans_committed(
1425 xfs_trans_free(tp); 1425 xfs_trans_free(tp);
1426} 1426}
1427 1427
1428static inline void
1429xfs_log_item_batch_insert(
1430 struct xfs_ail *ailp,
1431 struct xfs_log_item **log_items,
1432 int nr_items,
1433 xfs_lsn_t commit_lsn)
1434{
1435 int i;
1436
1437 spin_lock(&ailp->xa_lock);
1438 /* xfs_trans_ail_update_bulk drops ailp->xa_lock */
1439 xfs_trans_ail_update_bulk(ailp, log_items, nr_items, commit_lsn);
1440
1441 for (i = 0; i < nr_items; i++)
1442 IOP_UNPIN(log_items[i], 0);
1443}
1444
1445/*
1446 * Bulk operation version of xfs_trans_committed that takes a log vector of
1447 * items to insert into the AIL. This uses bulk AIL insertion techniques to
1448 * minimise lock traffic.
1449 */
1450void
1451xfs_trans_committed_bulk(
1452 struct xfs_ail *ailp,
1453 struct xfs_log_vec *log_vector,
1454 xfs_lsn_t commit_lsn,
1455 int aborted)
1456{
1457#define LOG_ITEM_BATCH_SIZE 32
1458 struct xfs_log_item *log_items[LOG_ITEM_BATCH_SIZE];
1459 struct xfs_log_vec *lv;
1460 int i = 0;
1461
1462 /* unpin all the log items */
1463	/* unpin all the log items */	1463	for (lv = log_vector; lv; lv = lv->lv_next) {
1464 struct xfs_log_item *lip = lv->lv_item;
1465 xfs_lsn_t item_lsn;
1466
1467 if (aborted)
1468 lip->li_flags |= XFS_LI_ABORTED;
1469 item_lsn = IOP_COMMITTED(lip, commit_lsn);
1470
1471 /* item_lsn of -1 means the item was freed */
1472 if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
1473 continue;
1474
1475 if (item_lsn != commit_lsn) {
1476
1477 /*
1478 * Not a bulk update option due to unusual item_lsn.
1479 * Push into AIL immediately, rechecking the lsn once
1480 * we have the ail lock. Then unpin the item.
1481 */
1482 spin_lock(&ailp->xa_lock);
1483 if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
1484 xfs_trans_ail_update(ailp, lip, item_lsn);
1485 else
1486 spin_unlock(&ailp->xa_lock);
1487 IOP_UNPIN(lip, 0);
1488 continue;
1489 }
1490
1491 /* Item is a candidate for bulk AIL insert. */
1492 log_items[i++] = lv->lv_item;
1493 if (i >= LOG_ITEM_BATCH_SIZE) {
1494 xfs_log_item_batch_insert(ailp, log_items,
1495 LOG_ITEM_BATCH_SIZE, commit_lsn);
1496 i = 0;
1497 }
1498 }
1499
1500 /* make sure we insert the remainder! */
1501 if (i)
1502 xfs_log_item_batch_insert(ailp, log_items, i, commit_lsn);
1503}
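
xfs_trans_committed_bulk() above amortises the AIL lock by staging up to LOG_ITEM_BATCH_SIZE items per xfs_log_item_batch_insert() call. The general accumulate-and-flush shape, as a standalone C sketch (the flush() stand-in below is hypothetical):

    #define BATCH_SIZE 32

    static void flush(void **items, int n)
    {
            /* stand-in for xfs_log_item_batch_insert(): one lock round
               trip handles all n items */
            for (int k = 0; k < n; k++)
                    (void)items[k];
    }

    static void example_batch(void **src, int count)
    {
            void    *batch[BATCH_SIZE];
            int     i = 0;

            for (int j = 0; j < count; j++) {
                    batch[i++] = src[j];
                    if (i >= BATCH_SIZE) {
                            flush(batch, BATCH_SIZE);
                            i = 0;
                    }
            }
            if (i)                  /* make sure we insert the remainder */
                    flush(batch, i);
    }
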
1504
1428/* 1505/*
1429 * Called from the trans_commit code when we notice that 1506 * Called from the trans_commit code when we notice that
1430 * the filesystem is in the middle of a forced shutdown. 1507 * the filesystem is in the middle of a forced shutdown.
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 246286b77a86..c2042b736b81 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -294,8 +294,8 @@ struct xfs_log_item_desc {
294#define XFS_ALLOC_BTREE_REF 2 294#define XFS_ALLOC_BTREE_REF 2
295#define XFS_BMAP_BTREE_REF 2 295#define XFS_BMAP_BTREE_REF 2
296#define XFS_DIR_BTREE_REF 2 296#define XFS_DIR_BTREE_REF 2
297#define XFS_INO_REF 2
297#define XFS_ATTR_BTREE_REF 1 298#define XFS_ATTR_BTREE_REF 1
298#define XFS_INO_REF 1
299#define XFS_DQUOT_REF 1 299#define XFS_DQUOT_REF 1
300 300
301#ifdef __KERNEL__ 301#ifdef __KERNEL__
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index dc9069568ff7..c5bbbc45db91 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -28,8 +28,8 @@
28#include "xfs_trans_priv.h" 28#include "xfs_trans_priv.h"
29#include "xfs_error.h" 29#include "xfs_error.h"
30 30
31STATIC void xfs_ail_insert(struct xfs_ail *, xfs_log_item_t *); 31STATIC void xfs_ail_splice(struct xfs_ail *, struct list_head *, xfs_lsn_t);
32STATIC xfs_log_item_t * xfs_ail_delete(struct xfs_ail *, xfs_log_item_t *); 32STATIC void xfs_ail_delete(struct xfs_ail *, xfs_log_item_t *);
33STATIC xfs_log_item_t * xfs_ail_min(struct xfs_ail *); 33STATIC xfs_log_item_t * xfs_ail_min(struct xfs_ail *);
34STATIC xfs_log_item_t * xfs_ail_next(struct xfs_ail *, xfs_log_item_t *); 34STATIC xfs_log_item_t * xfs_ail_next(struct xfs_ail *, xfs_log_item_t *);
35 35
@@ -449,129 +449,152 @@ xfs_trans_unlocked_item(
449 xfs_log_move_tail(ailp->xa_mount, 1); 449 xfs_log_move_tail(ailp->xa_mount, 1);
450} /* xfs_trans_unlocked_item */ 450} /* xfs_trans_unlocked_item */
451 451
452
453/* 452/*
454 * Update the position of the item in the AIL with the new 453 * xfs_trans_ail_update_bulk - bulk AIL insertion operation.
455 * lsn. If it is not yet in the AIL, add it. Otherwise, move 454 *
456 * it to its new position by removing it and re-adding it. 455 * @xfs_trans_ail_update_bulk takes an array of log items that all need to be
456 * positioned at the same LSN in the AIL. If an item is not in the AIL, it will
457 * be added. Otherwise, it will be repositioned by removing it and re-adding
458 * it to the AIL. If we move the first item in the AIL, update the log tail to
459 * match the new minimum LSN in the AIL.
457 * 460 *
458 * Wakeup anyone with an lsn less than the item's lsn. If the item 461 * This function takes the AIL lock once to execute the update operations on
459 * we move in the AIL is the minimum one, update the tail lsn in the 462 * all the items in the array, and as such should not be called with the AIL
460 * log manager. 463 * lock held. As a result, once we have the AIL lock, we need to check each log
464 * item LSN to confirm it needs to be moved forward in the AIL.
461 * 465 *
462 * This function must be called with the AIL lock held. The lock 466 * To optimise the insert operation, we delete all the items from the AIL in
463 * is dropped before returning. 467 * the first pass, moving them into a temporary list, then splice the temporary
468 * list into the correct position in the AIL. This avoids needing to do an
469 * insert operation on every item.
470 *
471 * This function must be called with the AIL lock held. The lock is dropped
472 * before returning.
464 */ 473 */
465void 474void
466xfs_trans_ail_update( 475xfs_trans_ail_update_bulk(
467 struct xfs_ail *ailp, 476 struct xfs_ail *ailp,
468 xfs_log_item_t *lip, 477 struct xfs_log_item **log_items,
469 xfs_lsn_t lsn) __releases(ailp->xa_lock) 478 int nr_items,
479 xfs_lsn_t lsn) __releases(ailp->xa_lock)
470{ 480{
471 xfs_log_item_t *dlip = NULL; 481 xfs_log_item_t *mlip;
472 xfs_log_item_t *mlip; /* ptr to minimum lip */
473 xfs_lsn_t tail_lsn; 482 xfs_lsn_t tail_lsn;
483 int mlip_changed = 0;
484 int i;
485 LIST_HEAD(tmp);
474 486
475 mlip = xfs_ail_min(ailp); 487 mlip = xfs_ail_min(ailp);
476 488
477 if (lip->li_flags & XFS_LI_IN_AIL) { 489 for (i = 0; i < nr_items; i++) {
478 dlip = xfs_ail_delete(ailp, lip); 490 struct xfs_log_item *lip = log_items[i];
479 ASSERT(dlip == lip); 491 if (lip->li_flags & XFS_LI_IN_AIL) {
480 xfs_trans_ail_cursor_clear(ailp, dlip); 492 /* check if we really need to move the item */
481 } else { 493 if (XFS_LSN_CMP(lsn, lip->li_lsn) <= 0)
482 lip->li_flags |= XFS_LI_IN_AIL; 494 continue;
495
496 xfs_ail_delete(ailp, lip);
497 if (mlip == lip)
498 mlip_changed = 1;
499 } else {
500 lip->li_flags |= XFS_LI_IN_AIL;
501 }
502 lip->li_lsn = lsn;
503 list_add(&lip->li_ail, &tmp);
483 } 504 }
484 505
485 lip->li_lsn = lsn; 506 xfs_ail_splice(ailp, &tmp, lsn);
486 xfs_ail_insert(ailp, lip);
487 507
488 if (mlip == dlip) { 508 if (!mlip_changed) {
489 mlip = xfs_ail_min(ailp);
490 /*
491 * It is not safe to access mlip after the AIL lock is
492 * dropped, so we must get a copy of li_lsn before we do
493 * so. This is especially important on 32-bit platforms
494 * where accessing and updating 64-bit values like li_lsn
495 * is not atomic.
496 */
497 tail_lsn = mlip->li_lsn;
498 spin_unlock(&ailp->xa_lock);
499 xfs_log_move_tail(ailp->xa_mount, tail_lsn);
500 } else {
501 spin_unlock(&ailp->xa_lock); 509 spin_unlock(&ailp->xa_lock);
510 return;
502 } 511 }
503 512
504 513 /*
505} /* xfs_trans_update_ail */ 514 * It is not safe to access mlip after the AIL lock is dropped, so we
515 * must get a copy of li_lsn before we do so. This is especially
516 * important on 32-bit platforms where accessing and updating 64-bit
517 * values like li_lsn is not atomic.
518 */
519 mlip = xfs_ail_min(ailp);
520 tail_lsn = mlip->li_lsn;
521 spin_unlock(&ailp->xa_lock);
522 xfs_log_move_tail(ailp->xa_mount, tail_lsn);
523}
506 524
507/* 525/*
508 * Delete the given item from the AIL. It must already be in 526 * xfs_trans_ail_delete_bulk - remove multiple log items from the AIL
509 * the AIL.
510 * 527 *
511 * Wakeup anyone with an lsn less than item's lsn. If the item 528 * @xfs_trans_ail_delete_bulk takes an array of log items that all need to
512 * we delete in the AIL is the minimum one, update the tail lsn in the 529 * be removed from the AIL. The caller is already holding the AIL lock, and has
513 * log manager. 530 * done all the checks necessary to ensure the items passed in via @log_items are
531 * ready for deletion. This includes checking that the items are in the AIL.
514 * 532 *
515 * Clear the IN_AIL flag from the item, reset its lsn to 0, and 533 * For each log item to be removed, unlink it from the AIL, clear the IN_AIL
516 * bump the AIL's generation count to indicate that the tree 534 * flag from the item and reset the item's lsn to 0. If we remove the first
517 * has changed. 535 * item in the AIL, update the log tail to match the new minimum LSN in the
536 * AIL.
518 * 537 *
519 * This function must be called with the AIL lock held. The lock 538 * This function will not drop the AIL lock until all items are removed from
520 * is dropped before returning. 539 * the AIL to minimise the amount of lock traffic on the AIL. This does not
540 * greatly increase the AIL hold time, but does significantly reduce the amount
541 * of traffic on the lock, especially during IO completion.
542 *
543 * This function must be called with the AIL lock held. The lock is dropped
544 * before returning.
521 */ 545 */
522void 546void
523xfs_trans_ail_delete( 547xfs_trans_ail_delete_bulk(
524 struct xfs_ail *ailp, 548 struct xfs_ail *ailp,
525 xfs_log_item_t *lip) __releases(ailp->xa_lock) 549 struct xfs_log_item **log_items,
550 int nr_items) __releases(ailp->xa_lock)
526{ 551{
527 xfs_log_item_t *dlip;
528 xfs_log_item_t *mlip; 552 xfs_log_item_t *mlip;
529 xfs_lsn_t tail_lsn; 553 xfs_lsn_t tail_lsn;
554 int mlip_changed = 0;
555 int i;
530 556
531 if (lip->li_flags & XFS_LI_IN_AIL) { 557 mlip = xfs_ail_min(ailp);
532 mlip = xfs_ail_min(ailp);
533 dlip = xfs_ail_delete(ailp, lip);
534 ASSERT(dlip == lip);
535 xfs_trans_ail_cursor_clear(ailp, dlip);
536
537 558
538 lip->li_flags &= ~XFS_LI_IN_AIL; 559 for (i = 0; i < nr_items; i++) {
539 lip->li_lsn = 0; 560 struct xfs_log_item *lip = log_items[i];
561 if (!(lip->li_flags & XFS_LI_IN_AIL)) {
562 struct xfs_mount *mp = ailp->xa_mount;
540 563
541 if (mlip == dlip) {
542 mlip = xfs_ail_min(ailp);
543 /*
544 * It is not safe to access mlip after the AIL lock
545 * is dropped, so we must get a copy of li_lsn
546 * before we do so. This is especially important
547 * on 32-bit platforms where accessing and updating
548 * 64-bit values like li_lsn is not atomic.
549 */
550 tail_lsn = mlip ? mlip->li_lsn : 0;
551 spin_unlock(&ailp->xa_lock);
552 xfs_log_move_tail(ailp->xa_mount, tail_lsn);
553 } else {
554 spin_unlock(&ailp->xa_lock); 564 spin_unlock(&ailp->xa_lock);
565 if (!XFS_FORCED_SHUTDOWN(mp)) {
566 xfs_cmn_err(XFS_PTAG_AILDELETE, CE_ALERT, mp,
567 "%s: attempting to delete a log item that is not in the AIL",
568 __func__);
569 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
570 }
571 return;
555 } 572 }
573
574 xfs_ail_delete(ailp, lip);
575 lip->li_flags &= ~XFS_LI_IN_AIL;
576 lip->li_lsn = 0;
577 if (mlip == lip)
578 mlip_changed = 1;
556 } 579 }
557 else {
558 /*
559 * If the file system is not being shutdown, we are in
560 * serious trouble if we get to this stage.
561 */
562 struct xfs_mount *mp = ailp->xa_mount;
563 580
581 if (!mlip_changed) {
564 spin_unlock(&ailp->xa_lock); 582 spin_unlock(&ailp->xa_lock);
565 if (!XFS_FORCED_SHUTDOWN(mp)) { 583 return;
566 xfs_cmn_err(XFS_PTAG_AILDELETE, CE_ALERT, mp,
567 "%s: attempting to delete a log item that is not in the AIL",
568 __func__);
569 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
570 }
571 } 584 }
572}
573
574 585
586 /*
587 * It is not safe to access mlip after the AIL lock is dropped, so we
588 * must get a copy of li_lsn before we do so. This is especially
589 * important on 32-bit platforms where accessing and updating 64-bit
590 * values like li_lsn is not atomic. It is possible we've emptied the
591 * AIL here, so if that is the case, pass an LSN of 0 to the tail move.
592 */
593 mlip = xfs_ail_min(ailp);
594 tail_lsn = mlip ? mlip->li_lsn : 0;
595 spin_unlock(&ailp->xa_lock);
596 xfs_log_move_tail(ailp->xa_mount, tail_lsn);
597}
575 598
576/* 599/*
577 * The active item list (AIL) is a doubly linked list of log 600 * The active item list (AIL) is a doubly linked list of log
@@ -623,16 +646,13 @@ xfs_trans_ail_destroy(
623} 646}
624 647
625/* 648/*
626 * Insert the given log item into the AIL. 649 * Splice the log item list into the AIL at the given LSN.
627 * We almost always insert at the end of the list, so on inserts
628 * we search from the end of the list to find where the
629 * new item belongs.
630 */ 650 */
631STATIC void 651STATIC void
632xfs_ail_insert( 652xfs_ail_splice(
633 struct xfs_ail *ailp, 653 struct xfs_ail *ailp,
634 xfs_log_item_t *lip) 654 struct list_head *list,
635/* ARGSUSED */ 655 xfs_lsn_t lsn)
636{ 656{
637 xfs_log_item_t *next_lip; 657 xfs_log_item_t *next_lip;
638 658
@@ -640,39 +660,33 @@ xfs_ail_insert(
640 * If the list is empty, just insert the item. 660 * If the list is empty, just insert the item.
641 */ 661 */
642 if (list_empty(&ailp->xa_ail)) { 662 if (list_empty(&ailp->xa_ail)) {
643 list_add(&lip->li_ail, &ailp->xa_ail); 663 list_splice(list, &ailp->xa_ail);
644 return; 664 return;
645 } 665 }
646 666
647 list_for_each_entry_reverse(next_lip, &ailp->xa_ail, li_ail) { 667 list_for_each_entry_reverse(next_lip, &ailp->xa_ail, li_ail) {
648 if (XFS_LSN_CMP(next_lip->li_lsn, lip->li_lsn) <= 0) 668 if (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0)
649 break; 669 break;
650 } 670 }
651 671
652 ASSERT((&next_lip->li_ail == &ailp->xa_ail) || 672 ASSERT((&next_lip->li_ail == &ailp->xa_ail) ||
653 (XFS_LSN_CMP(next_lip->li_lsn, lip->li_lsn) <= 0)); 673 (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0));
654
655 list_add(&lip->li_ail, &next_lip->li_ail);
656 674
657 xfs_ail_check(ailp, lip); 675 list_splice_init(list, &next_lip->li_ail);
658 return; 676 return;
659} 677}
660 678
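xfs_ail_splice() above keeps the AIL sorted by LSN: it searches backwards from the tail (new insertions almost always belong at the end) and links the whole pre-sorted temporary list in after the last entry whose LSN is less than or equal to the target. A small standalone model of that search-and-splice follows, using a circular doubly linked list shaped like the kernel's struct list_head; integer values stand in for LSNs.

#include <stdio.h>

struct node { int lsn; struct node *prev, *next; };

/* link the sub-list first..last in right after pos */
static void splice_after(struct node *pos, struct node *first, struct node *last)
{
    last->next = pos->next;
    pos->next->prev = last;
    pos->next = first;
    first->prev = pos;
}

int main(void)
{
    struct node head = { -1, &head, &head };    /* empty circular list */
    struct node a = { 10 }, b = { 20 }, c = { 40 };
    struct node *items[] = { &a, &b, &c };
    struct node n1 = { 30 }, n2 = { 30 };       /* pre-sorted batch */
    struct node *pos;

    for (int i = 0; i < 3; i++)                 /* build the AIL: 10 20 40 */
        splice_after(head.prev, items[i], items[i]);

    /* search from the tail, as list_for_each_entry_reverse() does */
    for (pos = head.prev; pos != &head; pos = pos->prev)
        if (pos->lsn <= 30)
            break;

    n1.next = &n2;                              /* link the batch itself */
    n2.prev = &n1;
    splice_after(pos, &n1, &n2);                /* one splice, not N inserts */

    for (struct node *p = head.next; p != &head; p = p->next)
        printf("%d ", p->lsn);                  /* prints: 10 20 30 30 40 */
    printf("\n");
    return 0;
}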
661/* 679/*
662 * Delete the given item from the AIL. Return a pointer to the item. 680 * Delete the given item from the AIL.
663 */ 681 */
664/*ARGSUSED*/ 682STATIC void
665STATIC xfs_log_item_t *
666xfs_ail_delete( 683xfs_ail_delete(
667 struct xfs_ail *ailp, 684 struct xfs_ail *ailp,
668 xfs_log_item_t *lip) 685 xfs_log_item_t *lip)
669/* ARGSUSED */
670{ 686{
671 xfs_ail_check(ailp, lip); 687 xfs_ail_check(ailp, lip);
672
673 list_del(&lip->li_ail); 688 list_del(&lip->li_ail);
674 689 xfs_trans_ail_cursor_clear(ailp, lip);
675 return lip;
676} 690}
677 691
678/* 692/*
@@ -682,7 +696,6 @@ xfs_ail_delete(
682STATIC xfs_log_item_t * 696STATIC xfs_log_item_t *
683xfs_ail_min( 697xfs_ail_min(
684 struct xfs_ail *ailp) 698 struct xfs_ail *ailp)
685/* ARGSUSED */
686{ 699{
687 if (list_empty(&ailp->xa_ail)) 700 if (list_empty(&ailp->xa_ail))
688 return NULL; 701 return NULL;
@@ -699,7 +712,6 @@ STATIC xfs_log_item_t *
699xfs_ail_next( 712xfs_ail_next(
700 struct xfs_ail *ailp, 713 struct xfs_ail *ailp,
701 xfs_log_item_t *lip) 714 xfs_log_item_t *lip)
702/* ARGSUSED */
703{ 715{
704 if (lip->li_ail.next == &ailp->xa_ail) 716 if (lip->li_ail.next == &ailp->xa_ail)
705 return NULL; 717 return NULL;
diff --git a/fs/xfs/xfs_trans_extfree.c b/fs/xfs/xfs_trans_extfree.c
index f783d5e9fa70..f7590f5badea 100644
--- a/fs/xfs/xfs_trans_extfree.c
+++ b/fs/xfs/xfs_trans_extfree.c
@@ -69,12 +69,16 @@ xfs_trans_log_efi_extent(xfs_trans_t *tp,
69 tp->t_flags |= XFS_TRANS_DIRTY; 69 tp->t_flags |= XFS_TRANS_DIRTY;
70 efip->efi_item.li_desc->lid_flags |= XFS_LID_DIRTY; 70 efip->efi_item.li_desc->lid_flags |= XFS_LID_DIRTY;
71 71
72 next_extent = efip->efi_next_extent; 72 /*
73 * atomic_inc_return gives us the value after the increment;
74 * we want to use it as an array index so we need to subtract 1 from
75 * it.
76 */
77 next_extent = atomic_inc_return(&efip->efi_next_extent) - 1;
73 ASSERT(next_extent < efip->efi_format.efi_nextents); 78 ASSERT(next_extent < efip->efi_format.efi_nextents);
74 extp = &(efip->efi_format.efi_extents[next_extent]); 79 extp = &(efip->efi_format.efi_extents[next_extent]);
75 extp->ext_start = start_block; 80 extp->ext_start = start_block;
76 extp->ext_len = ext_len; 81 extp->ext_len = ext_len;
77 efip->efi_next_extent++;
78} 82}
79 83
80 84
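The xfs_trans_log_efi_extent() change above replaces a plain increment of efi_next_extent with atomic_inc_return() so that concurrent callers each claim a unique slot in the extent array. A hedged C11 sketch of the same reserve-by-increment idiom follows; note that C11's atomic_fetch_add() returns the pre-increment value, while the kernel's atomic_inc_return() returns the post-increment value, which is why the hunk subtracts 1 and this sketch does not.

/*
 * Sketch of the slot-reservation idiom: an atomic fetch-and-increment
 * hands every caller a unique array index. The extent array is
 * hypothetical.
 */
#include <stdatomic.h>
#include <stdio.h>

#define NEXTENTS 16

static atomic_int next_extent;
static int extents[NEXTENTS];

static void log_extent(int start_block)
{
    /* pre-increment value == the index we just claimed */
    int idx = atomic_fetch_add(&next_extent, 1);
    extents[idx] = start_block;     /* bounds ASSERT omitted */
}

int main(void)
{
    log_extent(100);
    log_extent(200);
    printf("%d %d\n", extents[0], extents[1]);  /* 100 200 */
    return 0;
}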
diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h
index 62da86c90de5..35162c238fa3 100644
--- a/fs/xfs/xfs_trans_priv.h
+++ b/fs/xfs/xfs_trans_priv.h
@@ -22,15 +22,17 @@ struct xfs_log_item;
22struct xfs_log_item_desc; 22struct xfs_log_item_desc;
23struct xfs_mount; 23struct xfs_mount;
24struct xfs_trans; 24struct xfs_trans;
25struct xfs_ail;
26struct xfs_log_vec;
25 27
26void xfs_trans_add_item(struct xfs_trans *, struct xfs_log_item *); 28void xfs_trans_add_item(struct xfs_trans *, struct xfs_log_item *);
27void xfs_trans_del_item(struct xfs_log_item *); 29void xfs_trans_del_item(struct xfs_log_item *);
28void xfs_trans_free_items(struct xfs_trans *tp, xfs_lsn_t commit_lsn, 30void xfs_trans_free_items(struct xfs_trans *tp, xfs_lsn_t commit_lsn,
29 int flags); 31 int flags);
30void xfs_trans_item_committed(struct xfs_log_item *lip,
31 xfs_lsn_t commit_lsn, int aborted);
32void xfs_trans_unreserve_and_mod_sb(struct xfs_trans *tp); 32void xfs_trans_unreserve_and_mod_sb(struct xfs_trans *tp);
33 33
34void xfs_trans_committed_bulk(struct xfs_ail *ailp, struct xfs_log_vec *lv,
35 xfs_lsn_t commit_lsn, int aborted);
34/* 36/*
35 * AIL traversal cursor. 37 * AIL traversal cursor.
36 * 38 *
@@ -73,12 +75,29 @@ struct xfs_ail {
73/* 75/*
74 * From xfs_trans_ail.c 76 * From xfs_trans_ail.c
75 */ 77 */
76void xfs_trans_ail_update(struct xfs_ail *ailp, 78void xfs_trans_ail_update_bulk(struct xfs_ail *ailp,
77 struct xfs_log_item *lip, xfs_lsn_t lsn) 79 struct xfs_log_item **log_items, int nr_items,
78 __releases(ailp->xa_lock); 80 xfs_lsn_t lsn) __releases(ailp->xa_lock);
79void xfs_trans_ail_delete(struct xfs_ail *ailp, 81static inline void
80 struct xfs_log_item *lip) 82xfs_trans_ail_update(
81 __releases(ailp->xa_lock); 83 struct xfs_ail *ailp,
84 struct xfs_log_item *lip,
85 xfs_lsn_t lsn) __releases(ailp->xa_lock)
86{
87 xfs_trans_ail_update_bulk(ailp, &lip, 1, lsn);
88}
89
90void xfs_trans_ail_delete_bulk(struct xfs_ail *ailp,
91 struct xfs_log_item **log_items, int nr_items)
92 __releases(ailp->xa_lock);
93static inline void
94xfs_trans_ail_delete(
95 struct xfs_ail *ailp,
96 xfs_log_item_t *lip) __releases(ailp->xa_lock)
97{
98 xfs_trans_ail_delete_bulk(ailp, &lip, 1);
99}
100
82void xfs_trans_ail_push(struct xfs_ail *, xfs_lsn_t); 101void xfs_trans_ail_push(struct xfs_ail *, xfs_lsn_t);
83void xfs_trans_unlocked_item(struct xfs_ail *, 102void xfs_trans_unlocked_item(struct xfs_ail *,
84 xfs_log_item_t *); 103 xfs_log_item_t *);
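The header above keeps the old single-item entry points, xfs_trans_ail_update() and xfs_trans_ail_delete(), as static inline wrappers that pass the address of a single pointer and a count of 1 into the bulk variants, so every call site funnels through one implementation. A trivial sketch of that shape, with hypothetical types:

#include <stdio.h>

static void update_bulk(int **items, int nr_items, long lsn)
{
    for (int i = 0; i < nr_items; i++)
        printf("item %d -> lsn %ld\n", *items[i], lsn);
}

static inline void update_one(int *item, long lsn)
{
    update_bulk(&item, 1, lsn);     /* pass the address of the pointer */
}

int main(void)
{
    int item = 7;
    update_one(&item, 42);
    return 0;
}

The design keeps existing callers source-compatible while letting new code opt into the amortized bulk path.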
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 8e4a63c4151a..d8e6f8cd6f0c 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -964,29 +964,48 @@ xfs_release(
964 xfs_flush_pages(ip, 0, -1, XBF_ASYNC, FI_NONE); 964 xfs_flush_pages(ip, 0, -1, XBF_ASYNC, FI_NONE);
965 } 965 }
966 966
967 if (ip->i_d.di_nlink != 0) { 967 if (ip->i_d.di_nlink == 0)
968 if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) && 968 return 0;
969 ((ip->i_size > 0) || (VN_CACHED(VFS_I(ip)) > 0 ||
970 ip->i_delayed_blks > 0)) &&
971 (ip->i_df.if_flags & XFS_IFEXTENTS)) &&
972 (!(ip->i_d.di_flags &
973 (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))) {
974 969
975 /* 970 if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) &&
976 * If we can't get the iolock just skip truncating 971 ((ip->i_size > 0) || (VN_CACHED(VFS_I(ip)) > 0 ||
977 * the blocks past EOF because we could deadlock 972 ip->i_delayed_blks > 0)) &&
978 * with the mmap_sem otherwise. We'll get another 973 (ip->i_df.if_flags & XFS_IFEXTENTS)) &&
979 * chance to drop them once the last reference to 974 (!(ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))) {
980 * the inode is dropped, so we'll never leak blocks
981 * permanently.
982 */
983 error = xfs_free_eofblocks(mp, ip,
984 XFS_FREE_EOF_TRYLOCK);
985 if (error)
986 return error;
987 }
988 }
989 975
976 /*
977 * If we can't get the iolock just skip truncating the blocks
978 * past EOF because we could deadlock with the mmap_sem
979 * otherwise. We'll get another chance to drop them once the
980 * last reference to the inode is dropped, so we'll never leak
981 * blocks permanently.
982 *
983 * Further, check if the inode is being opened, written and
984 * closed frequently and we have delayed allocation blocks
985 * outstanding (e.g. streaming writes from the NFS server),
986 * truncating the blocks past EOF will cause fragmentation to
987 * occur.
988 *
989 * In this case don't do the truncation, either, but we have to
990 * be careful how we detect this case. Blocks beyond EOF show
991 * up as i_delayed_blks even when the inode is clean, so we
992 * need to truncate them away first before checking for a dirty
993 * release. Hence on the first dirty close we will still remove
994 * the speculative allocation, but after that we will leave it
995 * in place.
996 */
997 if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
998 return 0;
999
1000 error = xfs_free_eofblocks(mp, ip,
1001 XFS_FREE_EOF_TRYLOCK);
1002 if (error)
1003 return error;
1004
1005 /* delalloc blocks after truncation means it really is dirty */
1006 if (ip->i_delayed_blks)
1007 xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
1008 }
990 return 0; 1009 return 0;
991} 1010}
992 1011
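The rewritten xfs_release() above trims speculative blocks past EOF only on the first dirty close: if delayed-allocation blocks remain after the truncation, it latches XFS_IDIRTY_RELEASE so that later closes leave the preallocation alone and frequently reopened files (e.g. NFS streaming writes) do not fragment. A toy userspace model of that once-only heuristic, with a two-field stand-in for the inode:

#include <stdbool.h>
#include <stdio.h>

struct fake_inode {
    bool dirty_release;     /* models XFS_IDIRTY_RELEASE */
    int delayed_blks;       /* models ip->i_delayed_blks */
};

static void release(struct fake_inode *ip)
{
    if (ip->dirty_release) {
        printf("leave preallocation in place\n");
        return;
    }
    printf("trim blocks past EOF\n");
    /* delalloc blocks after truncation mean it really is dirty */
    if (ip->delayed_blks)
        ip->dirty_release = true;
}

int main(void)
{
    struct fake_inode ip = { false, 8 };
    release(&ip);   /* first close: trims and latches the flag */
    release(&ip);   /* later closes: leave it alone */
    return 0;
}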
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 3577ca11a0be..4644c9a7f724 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -211,6 +211,36 @@ static inline void outsl(unsigned long addr, const void *buffer, int count)
211} 211}
212#endif 212#endif
213 213
214static inline void readsl(const void __iomem *addr, void *buf, int len)
215{
216 insl((unsigned long)addr, buf, len);
217}
218
219static inline void readsw(const void __iomem *addr, void *buf, int len)
220{
221 insw((unsigned long)addr, buf, len);
222}
223
224static inline void readsb(const void __iomem *addr, void *buf, int len)
225{
226 insb((unsigned long)addr, buf, len);
227}
228
229static inline void writesl(const void __iomem *addr, const void *buf, int len)
230{
231 outsl((unsigned long)addr, buf, len);
232}
233
234static inline void writesw(const void __iomem *addr, const void *buf, int len)
235{
236 outsw((unsigned long)addr, buf, len);
237}
238
239static inline void writesb(const void __iomem *addr, const void *buf, int len)
240{
241 outsb((unsigned long)addr, buf, len);
242}
243
214#ifndef CONFIG_GENERIC_IOMAP 244#ifndef CONFIG_GENERIC_IOMAP
215#define ioread8(addr) readb(addr) 245#define ioread8(addr) readb(addr)
216#define ioread16(addr) readw(addr) 246#define ioread16(addr) readw(addr)
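The readsb/readsw/readsl and writesb/writesw/writesl helpers added above are string-IO accessors: they move a whole buffer to or from a single device address, as when draining or filling a FIFO register, rather than advancing the address the way a memcpy would. A userspace model of the readsw() semantics, with a stub insw() faking the FIFO and the __iomem annotation replaced by volatile for illustration:

#include <stdint.h>
#include <stdio.h>

static uint16_t fake_fifo(void)
{
    static uint16_t next = 0x100;
    return next++;              /* each read pops a new value */
}

static void insw(unsigned long addr, void *buffer, int count)
{
    uint16_t *buf = buffer;
    (void)addr;                 /* same address on every iteration */
    while (count--)
        *buf++ = fake_fifo();
}

static void readsw(const volatile void *addr, void *buf, int len)
{
    insw((unsigned long)addr, buf, len);
}

int main(void)
{
    uint16_t rx[4];

    /* 0x3f8 is a fake register address, never dereferenced here */
    readsw((const volatile void *)0x3f8UL, rx, 4);
    for (int i = 0; i < 4; i++)
        printf("0x%x ", rx[i]); /* 0x100 0x101 0x102 0x103 */
    printf("\n");
    return 0;
}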
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 274eaaa15c36..a4694c610330 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -683,6 +683,21 @@ struct drm_master {
683 void *driver_priv; /**< Private structure for driver to use */ 683 void *driver_priv; /**< Private structure for driver to use */
684}; 684};
685 685
686/* Size of ringbuffer for vblank timestamps. Just double-buffer
687 * in initial implementation.
688 */
689#define DRM_VBLANKTIME_RBSIZE 2
690
691/* Flags and return codes for get_vblank_timestamp() driver function. */
692#define DRM_CALLED_FROM_VBLIRQ 1
693#define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0)
694#define DRM_VBLANKTIME_INVBL (1 << 1)
695
696/* get_scanout_position() return flags */
697#define DRM_SCANOUTPOS_VALID (1 << 0)
698#define DRM_SCANOUTPOS_INVBL (1 << 1)
699#define DRM_SCANOUTPOS_ACCURATE (1 << 2)
700
686/** 701/**
687 * DRM driver structure. This structure represents the common code for 702 * DRM driver structure. This structure represents the common code for
688 * a family of cards. There will be one drm_device for each card present 703 * a family of cards. There will be one drm_device for each card present
@@ -760,6 +775,68 @@ struct drm_driver {
760 */ 775 */
761 int (*device_is_agp) (struct drm_device *dev); 776 int (*device_is_agp) (struct drm_device *dev);
762 777
778 /**
779 * Called by vblank timestamping code.
780 *
781 * Return the current display scanout position from a crtc.
782 *
783 * \param dev DRM device.
784 * \param crtc Id of the crtc to query.
785 * \param *vpos Target location for current vertical scanout position.
786 * \param *hpos Target location for current horizontal scanout position.
787 *
788 * Returns vpos as a positive number while in active scanout area.
789 * Returns vpos as a negative number inside vblank, counting the number
790 * of scanlines to go until end of vblank, e.g., -1 means "one scanline
791 * until start of active scanout / end of vblank."
792 *
793 * \return Flags, or'ed together as follows:
794 *
795 * DRM_SCANOUTPOS_VALID = Query successful.
796 * DRM_SCANOUTPOS_INVBL = Inside vblank.
797 * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
798 * this flag means that the returned position may be offset by a constant
799 * but unknown small number of scanlines wrt. real scanout position.
800 *
801 */
802 int (*get_scanout_position) (struct drm_device *dev, int crtc,
803 int *vpos, int *hpos);
804
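The vpos convention documented above packs two facts into one integer: a positive value is the scanline currently being scanned out, and a negative value counts the scanlines remaining until vblank ends. A small decoder for that convention; the flag values mirror the DRM_SCANOUTPOS_* defines introduced earlier in this patch.

#include <stdio.h>

#define SCANOUTPOS_VALID (1 << 0)   /* mirrors DRM_SCANOUTPOS_VALID */
#define SCANOUTPOS_INVBL (1 << 1)   /* mirrors DRM_SCANOUTPOS_INVBL */

static void report(int flags, int vpos)
{
    if (!(flags & SCANOUTPOS_VALID))
        printf("query failed\n");
    else if (flags & SCANOUTPOS_INVBL)
        printf("in vblank, %d line(s) until active scanout\n", -vpos);
    else
        printf("scanning out line %d\n", vpos);
}

int main(void)
{
    report(SCANOUTPOS_VALID, 540);                      /* mid-frame */
    report(SCANOUTPOS_VALID | SCANOUTPOS_INVBL, -1);    /* one line to go */
    return 0;
}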
805 /**
806 * Called by \c drm_get_last_vbltimestamp. Should return a precise
807 * timestamp when the most recent VBLANK interval ended or will end.
808 *
809 * Specifically, the timestamp in @vblank_time should correspond as
810 * closely as possible to the time when the first video scanline of
811 * the video frame after the end of VBLANK will start scanning out,
812 * the time immediately after the end of the VBLANK interval. If the
813 * @crtc is currently inside VBLANK, this will be a time in the future.
814 * If the @crtc is currently scanning out a frame, this will be the
815 * past start time of the current scanout. This is meant to adhere
816 * to the OpenML OML_sync_control extension specification.
817 *
818 * \param dev DRM device handle.
819 * \param crtc crtc for which timestamp should be returned.
820 * \param *max_error Maximum allowable timestamp error in nanoseconds.
821 * Implementation should strive to provide timestamp
822 * with an error of at most *max_error nanoseconds.
823 * Returns a true upper bound on the error of the timestamp.
824 * \param *vblank_time Target location for returned vblank timestamp.
825 * \param flags 0 = Defaults, no special treatment needed.
826 * \param DRM_CALLED_FROM_VBLIRQ = Function is called from vblank
827 * irq handler. Some drivers need to apply some workarounds
828 * for gpu-specific vblank irq quirks if flag is set.
829 *
830 * \returns
831 * Zero if timestamping isn't supported in current display mode or a
832 * negative number on failure. A positive status code on success,
833 * which describes how the vblank_time timestamp was computed.
834 */
835 int (*get_vblank_timestamp) (struct drm_device *dev, int crtc,
836 int *max_error,
837 struct timeval *vblank_time,
838 unsigned flags);
839
763 /* these have to be filled in */ 840 /* these have to be filled in */
764 841
765 irqreturn_t(*irq_handler) (DRM_IRQ_ARGS); 842 irqreturn_t(*irq_handler) (DRM_IRQ_ARGS);
@@ -983,6 +1060,8 @@ struct drm_device {
983 1060
984 wait_queue_head_t *vbl_queue; /**< VBLANK wait queue */ 1061 wait_queue_head_t *vbl_queue; /**< VBLANK wait queue */
985 atomic_t *_vblank_count; /**< number of VBLANK interrupts (driver must alloc the right number of counters) */ 1062 atomic_t *_vblank_count; /**< number of VBLANK interrupts (driver must alloc the right number of counters) */
1063 struct timeval *_vblank_time; /**< timestamp of current vblank_count (drivers must alloc right number of fields) */
1064 spinlock_t vblank_time_lock; /**< Protects vblank count and time updates during vblank enable/disable */
986 spinlock_t vbl_lock; 1065 spinlock_t vbl_lock;
987 atomic_t *vblank_refcount; /* number of users of vblank interrupts per crtc */ 1066 atomic_t *vblank_refcount; /* number of users of vblank interrupts per crtc */
988 u32 *last_vblank; /* protected by dev->vbl_lock, used */ 1067 u32 *last_vblank; /* protected by dev->vbl_lock, used */
@@ -1041,12 +1120,14 @@ struct drm_device {
1041 /*@{ */ 1120 /*@{ */
1042 spinlock_t object_name_lock; 1121 spinlock_t object_name_lock;
1043 struct idr object_name_idr; 1122 struct idr object_name_idr;
1044 uint32_t invalidate_domains; /* domains pending invalidation */
1045 uint32_t flush_domains; /* domains pending flush */
1046 /*@} */ 1123 /*@} */
1047 1124 int switch_power_state;
1048}; 1125};
1049 1126
1127#define DRM_SWITCH_POWER_ON 0
1128#define DRM_SWITCH_POWER_OFF 1
1129#define DRM_SWITCH_POWER_CHANGING 2
1130
1050static __inline__ int drm_core_check_feature(struct drm_device *dev, 1131static __inline__ int drm_core_check_feature(struct drm_device *dev,
1051 int feature) 1132 int feature)
1052{ 1133{
@@ -1284,11 +1365,22 @@ extern int drm_wait_vblank(struct drm_device *dev, void *data,
1284 struct drm_file *filp); 1365 struct drm_file *filp);
1285extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq); 1366extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
1286extern u32 drm_vblank_count(struct drm_device *dev, int crtc); 1367extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
1368extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
1369 struct timeval *vblanktime);
1287extern void drm_handle_vblank(struct drm_device *dev, int crtc); 1370extern void drm_handle_vblank(struct drm_device *dev, int crtc);
1288extern int drm_vblank_get(struct drm_device *dev, int crtc); 1371extern int drm_vblank_get(struct drm_device *dev, int crtc);
1289extern void drm_vblank_put(struct drm_device *dev, int crtc); 1372extern void drm_vblank_put(struct drm_device *dev, int crtc);
1290extern void drm_vblank_off(struct drm_device *dev, int crtc); 1373extern void drm_vblank_off(struct drm_device *dev, int crtc);
1291extern void drm_vblank_cleanup(struct drm_device *dev); 1374extern void drm_vblank_cleanup(struct drm_device *dev);
1375extern u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
1376 struct timeval *tvblank, unsigned flags);
1377extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
1378 int crtc, int *max_error,
1379 struct timeval *vblank_time,
1380 unsigned flags,
1381 struct drm_crtc *refcrtc);
1382extern void drm_calc_timestamping_constants(struct drm_crtc *crtc);
1383
1292/* Modesetting support */ 1384/* Modesetting support */
1293extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc); 1385extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc);
1294extern void drm_vblank_post_modeset(struct drm_device *dev, int crtc); 1386extern void drm_vblank_post_modeset(struct drm_device *dev, int crtc);
@@ -1321,7 +1413,6 @@ extern int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
1321extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request); 1413extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
1322extern int drm_agp_bind_ioctl(struct drm_device *dev, void *data, 1414extern int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
1323 struct drm_file *file_priv); 1415 struct drm_file *file_priv);
1324extern void drm_agp_chipset_flush(struct drm_device *dev);
1325 1416
1326 /* Stub support (drm_stub.h) */ 1417 /* Stub support (drm_stub.h) */
1327extern int drm_setmaster_ioctl(struct drm_device *dev, void *data, 1418extern int drm_setmaster_ioctl(struct drm_device *dev, void *data,
@@ -1340,6 +1431,9 @@ extern void drm_put_dev(struct drm_device *dev);
1340extern int drm_put_minor(struct drm_minor **minor); 1431extern int drm_put_minor(struct drm_minor **minor);
1341extern unsigned int drm_debug; 1432extern unsigned int drm_debug;
1342 1433
1434extern unsigned int drm_vblank_offdelay;
1435extern unsigned int drm_timestamp_precision;
1436
1343extern struct class *drm_class; 1437extern struct class *drm_class;
1344extern struct proc_dir_entry *drm_proc_root; 1438extern struct proc_dir_entry *drm_proc_root;
1345extern struct dentry *drm_debugfs_root; 1439extern struct dentry *drm_debugfs_root;
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 029aa688e787..acd7fade160d 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -351,8 +351,14 @@ struct drm_crtc {
351 351
352 bool enabled; 352 bool enabled;
353 353
354 /* Requested mode from modesetting. */
354 struct drm_display_mode mode; 355 struct drm_display_mode mode;
355 356
357 /* Programmed mode in hw, after adjustments for encoders,
358 * crtc, panel scaling etc. Needed for timestamping etc.
359 */
360 struct drm_display_mode hwmode;
361
356 int x, y; 362 int x, y;
357 const struct drm_crtc_funcs *funcs; 363 const struct drm_crtc_funcs *funcs;
358 364
@@ -360,6 +366,9 @@ struct drm_crtc {
360 uint32_t gamma_size; 366 uint32_t gamma_size;
361 uint16_t *gamma_store; 367 uint16_t *gamma_store;
362 368
369 /* Constants needed for precise vblank and swap timestamping. */
370 s64 framedur_ns, linedur_ns, pixeldur_ns;
371
363 /* if you are using the helper */ 372 /* if you are using the helper */
364 void *helper_private; 373 void *helper_private;
365}; 374};
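The framedur_ns, linedur_ns and pixeldur_ns constants added above are precomputed by drm_calc_timestamping_constants() so a scanout position can be converted into a time delta (roughly, a point vpos scanlines away lies at now + vpos * linedur_ns). Below is a sketch of plausible derivations from a mode's dot clock and totals; the exact rounding in the kernel helper may differ, so treat the formulas as assumptions.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int64_t clock_khz = 148500;     /* 1080p60 dot clock, in kHz */
    int64_t htotal = 2200, vtotal = 1125;

    /* assumed formulas: duration of one pixel, line and frame in ns */
    int64_t pixeldur_ns = 1000000 / clock_khz;
    int64_t linedur_ns = (htotal * 1000000) / clock_khz;
    int64_t framedur_ns = linedur_ns * vtotal;

    printf("pixel %lld ns, line %lld ns, frame %lld ns\n",
           (long long)pixeldur_ns, (long long)linedur_ns,
           (long long)framedur_ns);    /* ~6 ns, ~14814 ns, ~16.67 ms */
    return 0;
}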
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index f22e7fe4b6db..aac27bd56e89 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -121,9 +121,6 @@ int drm_fb_helper_setcolreg(unsigned regno,
121void drm_fb_helper_restore(void); 121void drm_fb_helper_restore(void);
122void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper, 122void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
123 uint32_t fb_width, uint32_t fb_height); 123 uint32_t fb_width, uint32_t fb_height);
124void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
125 uint32_t depth);
126
127int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info); 124int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info);
128 125
129bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper); 126bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index bf01531193d5..e39177778601 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -62,11 +62,14 @@ struct drm_mm {
62 struct list_head unused_nodes; 62 struct list_head unused_nodes;
63 int num_unused; 63 int num_unused;
64 spinlock_t unused_lock; 64 spinlock_t unused_lock;
65 unsigned int scan_check_range : 1;
65 unsigned scan_alignment; 66 unsigned scan_alignment;
66 unsigned long scan_size; 67 unsigned long scan_size;
67 unsigned long scan_hit_start; 68 unsigned long scan_hit_start;
68 unsigned scan_hit_size; 69 unsigned scan_hit_size;
69 unsigned scanned_blocks; 70 unsigned scanned_blocks;
71 unsigned long scan_start;
72 unsigned long scan_end;
70}; 73};
71 74
72/* 75/*
@@ -145,6 +148,10 @@ static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
145 148
146void drm_mm_init_scan(struct drm_mm *mm, unsigned long size, 149void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
147 unsigned alignment); 150 unsigned alignment);
151void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
152 unsigned alignment,
153 unsigned long start,
154 unsigned long end);
148int drm_mm_scan_add_block(struct drm_mm_node *node); 155int drm_mm_scan_add_block(struct drm_mm_node *node);
149int drm_mm_scan_remove_block(struct drm_mm_node *node); 156int drm_mm_scan_remove_block(struct drm_mm_node *node);
150 157
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 883c1d439899..fe29ae328bd9 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -142,6 +142,42 @@
142 {0x1002, 0x5e4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ 142 {0x1002, 0x5e4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
143 {0x1002, 0x5e4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ 143 {0x1002, 0x5e4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
144 {0x1002, 0x5e4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ 144 {0x1002, 0x5e4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
145 {0x1002, 0x6720, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
146 {0x1002, 0x6721, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
147 {0x1002, 0x6722, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
148 {0x1002, 0x6723, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
149 {0x1002, 0x6724, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
150 {0x1002, 0x6725, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
151 {0x1002, 0x6726, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
152 {0x1002, 0x6727, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
153 {0x1002, 0x6728, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
154 {0x1002, 0x6729, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
155 {0x1002, 0x6738, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
156 {0x1002, 0x6739, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
157 {0x1002, 0x6740, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
158 {0x1002, 0x6741, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
159 {0x1002, 0x6742, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
160 {0x1002, 0x6743, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
161 {0x1002, 0x6744, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
162 {0x1002, 0x6745, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
163 {0x1002, 0x6746, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
164 {0x1002, 0x6747, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
165 {0x1002, 0x6748, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
166 {0x1002, 0x6749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
167 {0x1002, 0x6750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
168 {0x1002, 0x6758, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
169 {0x1002, 0x6759, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
170 {0x1002, 0x6760, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
171 {0x1002, 0x6761, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
172 {0x1002, 0x6762, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
173 {0x1002, 0x6763, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
174 {0x1002, 0x6764, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
175 {0x1002, 0x6765, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
176 {0x1002, 0x6766, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
177 {0x1002, 0x6767, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
178 {0x1002, 0x6768, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
179 {0x1002, 0x6770, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
180 {0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
145 {0x1002, 0x6880, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 181 {0x1002, 0x6880, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
146 {0x1002, 0x6888, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ 182 {0x1002, 0x6888, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
147 {0x1002, 0x6889, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ 183 {0x1002, 0x6889, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
@@ -419,6 +455,10 @@
419 {0x1002, 0x9713, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 455 {0x1002, 0x9713, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
420 {0x1002, 0x9714, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 456 {0x1002, 0x9714, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
421 {0x1002, 0x9715, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 457 {0x1002, 0x9715, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
458 {0x1002, 0x9802, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
459 {0x1002, 0x9803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
460 {0x1002, 0x9804, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
461 {0x1002, 0x9805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
422 {0, 0, 0} 462 {0, 0, 0}
423 463
424#define r128_PCI_IDS \ 464#define r128_PCI_IDS \
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index a2776e2807a4..0039f1f97ad8 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -289,6 +289,7 @@ typedef struct drm_i915_irq_wait {
289#define I915_PARAM_HAS_BLT 11 289#define I915_PARAM_HAS_BLT 11
290#define I915_PARAM_HAS_RELAXED_FENCING 12 290#define I915_PARAM_HAS_RELAXED_FENCING 12
291#define I915_PARAM_HAS_COHERENT_RINGS 13 291#define I915_PARAM_HAS_COHERENT_RINGS 13
292#define I915_PARAM_HAS_EXEC_CONSTANTS 14
292 293
293typedef struct drm_i915_getparam { 294typedef struct drm_i915_getparam {
294 int param; 295 int param;
@@ -635,6 +636,17 @@ struct drm_i915_gem_execbuffer2 {
635#define I915_EXEC_RENDER (1<<0) 636#define I915_EXEC_RENDER (1<<0)
636#define I915_EXEC_BSD (2<<0) 637#define I915_EXEC_BSD (2<<0)
637#define I915_EXEC_BLT (3<<0) 638#define I915_EXEC_BLT (3<<0)
639
640/* Used for switching the constants addressing mode on gen4+ RENDER ring.
641 * Gen6+ only supports relative addressing to dynamic state (default) and
642 * absolute addressing.
643 *
644 * These flags are ignored for the BSD and BLT rings.
645 */
646#define I915_EXEC_CONSTANTS_MASK (3<<6)
647#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
648#define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6)
649#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
638 __u64 flags; 650 __u64 flags;
639 __u64 rsvd1; 651 __u64 rsvd1;
640 __u64 rsvd2; 652 __u64 rsvd2;
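The new I915_EXEC_CONSTANTS_* values above occupy bits 6-7 of the execbuffer2 flags word, next to the ring select in the low bits, so callers mask the field before setting a mode. A trivial packing sketch using the defines from this hunk, with the struct reduced to the flags word:

#include <stdint.h>
#include <stdio.h>

#define I915_EXEC_RENDER                (1 << 0)
#define I915_EXEC_CONSTANTS_MASK        (3 << 6)
#define I915_EXEC_CONSTANTS_ABSOLUTE    (1 << 6)

int main(void)
{
    uint64_t flags = 0;

    flags |= I915_EXEC_RENDER;                  /* ring select, low bits */
    flags &= ~(uint64_t)I915_EXEC_CONSTANTS_MASK;
    flags |= I915_EXEC_CONSTANTS_ABSOLUTE;      /* addressing mode, bits 6-7 */

    printf("mode field = %llu\n",
           (unsigned long long)((flags & I915_EXEC_CONSTANTS_MASK) >> 6));
    return 0;
}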
diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h
index d3c81946f613..9e343c0998b4 100644
--- a/include/drm/intel-gtt.h
+++ b/include/drm/intel-gtt.h
@@ -2,17 +2,40 @@
2 2
3#ifndef _DRM_INTEL_GTT_H 3#ifndef _DRM_INTEL_GTT_H
4#define _DRM_INTEL_GTT_H 4#define _DRM_INTEL_GTT_H
5struct intel_gtt { 5
6 /* Number of stolen gtt entries at the beginning. */ 6const struct intel_gtt {
7 unsigned int gtt_stolen_entries; 7 /* Size of memory reserved for graphics by the BIOS */
8 unsigned int stolen_size;
8 /* Total number of gtt entries. */ 9 /* Total number of gtt entries. */
9 unsigned int gtt_total_entries; 10 unsigned int gtt_total_entries;
10 /* Part of the gtt that is mappable by the cpu, for those chips where 11 /* Part of the gtt that is mappable by the cpu, for those chips where
11 * this is not the full gtt. */ 12 * this is not the full gtt. */
12 unsigned int gtt_mappable_entries; 13 unsigned int gtt_mappable_entries;
13}; 14 /* Whether i915 needs to use the dmar apis or not. */
15 unsigned int needs_dmar : 1;
16} *intel_gtt_get(void);
14 17
15struct intel_gtt *intel_gtt_get(void); 18void intel_gtt_chipset_flush(void);
19void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg);
20void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries);
21int intel_gtt_map_memory(struct page **pages, unsigned int num_entries,
22 struct scatterlist **sg_list, int *num_sg);
23void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
24 unsigned int sg_len,
25 unsigned int pg_start,
26 unsigned int flags);
27void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries,
28 struct page **pages, unsigned int flags);
16 29
17#endif 30/* Special gtt memory types */
31#define AGP_DCACHE_MEMORY 1
32#define AGP_PHYS_MEMORY 2
33
34/* New caching attributes for gen6/sandybridge */
35#define AGP_USER_CACHED_MEMORY_LLC_MLC (AGP_USER_TYPES + 2)
36#define AGP_USER_UNCACHED_MEMORY (AGP_USER_TYPES + 4)
18 37
38/* flag for GFDT type */
39#define AGP_USER_CACHED_MEMORY_GFDT (1 << 3)
40
41#endif
diff --git a/include/drm/nouveau_drm.h b/include/drm/nouveau_drm.h
index bc5590b1a1ac..e2cfe80f6fca 100644
--- a/include/drm/nouveau_drm.h
+++ b/include/drm/nouveau_drm.h
@@ -71,16 +71,14 @@ struct drm_nouveau_gpuobj_free {
71#define NOUVEAU_GETPARAM_PCI_VENDOR 3 71#define NOUVEAU_GETPARAM_PCI_VENDOR 3
72#define NOUVEAU_GETPARAM_PCI_DEVICE 4 72#define NOUVEAU_GETPARAM_PCI_DEVICE 4
73#define NOUVEAU_GETPARAM_BUS_TYPE 5 73#define NOUVEAU_GETPARAM_BUS_TYPE 5
74#define NOUVEAU_GETPARAM_FB_PHYSICAL 6
75#define NOUVEAU_GETPARAM_AGP_PHYSICAL 7
76#define NOUVEAU_GETPARAM_FB_SIZE 8 74#define NOUVEAU_GETPARAM_FB_SIZE 8
77#define NOUVEAU_GETPARAM_AGP_SIZE 9 75#define NOUVEAU_GETPARAM_AGP_SIZE 9
78#define NOUVEAU_GETPARAM_PCI_PHYSICAL 10
79#define NOUVEAU_GETPARAM_CHIPSET_ID 11 76#define NOUVEAU_GETPARAM_CHIPSET_ID 11
80#define NOUVEAU_GETPARAM_VM_VRAM_BASE 12 77#define NOUVEAU_GETPARAM_VM_VRAM_BASE 12
81#define NOUVEAU_GETPARAM_GRAPH_UNITS 13 78#define NOUVEAU_GETPARAM_GRAPH_UNITS 13
82#define NOUVEAU_GETPARAM_PTIMER_TIME 14 79#define NOUVEAU_GETPARAM_PTIMER_TIME 14
83#define NOUVEAU_GETPARAM_HAS_BO_USAGE 15 80#define NOUVEAU_GETPARAM_HAS_BO_USAGE 15
81#define NOUVEAU_GETPARAM_HAS_PAGEFLIP 16
84struct drm_nouveau_getparam { 82struct drm_nouveau_getparam {
85 uint64_t param; 83 uint64_t param;
86 uint64_t value; 84 uint64_t value;
@@ -171,7 +169,6 @@ struct drm_nouveau_gem_pushbuf {
171}; 169};
172 170
173#define NOUVEAU_GEM_CPU_PREP_NOWAIT 0x00000001 171#define NOUVEAU_GEM_CPU_PREP_NOWAIT 0x00000001
174#define NOUVEAU_GEM_CPU_PREP_NOBLOCK 0x00000002
175#define NOUVEAU_GEM_CPU_PREP_WRITE 0x00000004 172#define NOUVEAU_GEM_CPU_PREP_WRITE 0x00000004
176struct drm_nouveau_gem_cpu_prep { 173struct drm_nouveau_gem_cpu_prep {
177 uint32_t handle; 174 uint32_t handle;
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h
index 10f8b53bdd40..e95a86b8b689 100644
--- a/include/drm/radeon_drm.h
+++ b/include/drm/radeon_drm.h
@@ -906,6 +906,7 @@ struct drm_radeon_cs {
906#define RADEON_INFO_ACCEL_WORKING2 0x05 906#define RADEON_INFO_ACCEL_WORKING2 0x05
907#define RADEON_INFO_TILING_CONFIG 0x06 907#define RADEON_INFO_TILING_CONFIG 0x06
908#define RADEON_INFO_WANT_HYPERZ 0x07 908#define RADEON_INFO_WANT_HYPERZ 0x07
909#define RADEON_INFO_WANT_CMASK 0x08 /* get access to CMASK on r300 */
909 910
910struct drm_radeon_info { 911struct drm_radeon_info {
911 uint32_t request; 912 uint32_t request;
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index beafc156a535..50852aad260a 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -74,6 +74,8 @@ struct ttm_placement {
74 * @is_iomem: is this io memory ? 74 * @is_iomem: is this io memory ?
75 * @size: size in byte 75 * @size: size in byte
76 * @offset: offset from the base address 76 * @offset: offset from the base address
77 * @io_reserved_vm: The VM system has a refcount in @io_reserved_count
78 * @io_reserved_count: Refcounting the number of callers to ttm_mem_io_reserve
77 * 79 *
78 * Structure indicating the bus placement of an object. 80 * Structure indicating the bus placement of an object.
79 */ 81 */
@@ -83,7 +85,8 @@ struct ttm_bus_placement {
83 unsigned long size; 85 unsigned long size;
84 unsigned long offset; 86 unsigned long offset;
85 bool is_iomem; 87 bool is_iomem;
86 bool io_reserved; 88 bool io_reserved_vm;
89 uint64_t io_reserved_count;
87}; 90};
88 91
89 92
@@ -154,7 +157,6 @@ struct ttm_tt;
154 * keeps one refcount. When this refcount reaches zero, 157 * keeps one refcount. When this refcount reaches zero,
155 * the object is destroyed. 158 * the object is destroyed.
156 * @event_queue: Queue for processes waiting on buffer object status change. 159 * @event_queue: Queue for processes waiting on buffer object status change.
157 * @lock: spinlock protecting mostly synchronization members.
158 * @mem: structure describing current placement. 160 * @mem: structure describing current placement.
159 * @persistant_swap_storage: Usually the swap storage is deleted for buffers 161 * @persistant_swap_storage: Usually the swap storage is deleted for buffers
160 * pinned in physical memory. If this behaviour is not desired, this member 162 * pinned in physical memory. If this behaviour is not desired, this member
@@ -213,7 +215,6 @@ struct ttm_buffer_object {
213 struct kref kref; 215 struct kref kref;
214 struct kref list_kref; 216 struct kref list_kref;
215 wait_queue_head_t event_queue; 217 wait_queue_head_t event_queue;
216 spinlock_t lock;
217 218
218 /** 219 /**
219 * Members protected by the bo::reserved lock. 220 * Members protected by the bo::reserved lock.
@@ -237,6 +238,7 @@ struct ttm_buffer_object {
237 struct list_head lru; 238 struct list_head lru;
238 struct list_head ddestroy; 239 struct list_head ddestroy;
239 struct list_head swap; 240 struct list_head swap;
241 struct list_head io_reserve_lru;
240 uint32_t val_seq; 242 uint32_t val_seq;
241 bool seq_valid; 243 bool seq_valid;
242 244
@@ -248,10 +250,10 @@ struct ttm_buffer_object {
248 atomic_t reserved; 250 atomic_t reserved;
249 251
250 /** 252 /**
251 * Members protected by the bo::lock 253 * Members protected by struct buffer_object_device::fence_lock
252 * In addition, setting sync_obj to anything else 254 * In addition, setting sync_obj to anything else
253 * than NULL requires bo::reserved to be held. This allows for 255 * than NULL requires bo::reserved to be held. This allows for
254 * checking NULL while reserved but not holding bo::lock. 256 * checking NULL while reserved but not holding the mentioned lock.
255 */ 257 */
256 258
257 void *sync_obj_arg; 259 void *sync_obj_arg;
@@ -364,6 +366,44 @@ extern int ttm_bo_validate(struct ttm_buffer_object *bo,
364 */ 366 */
365extern void ttm_bo_unref(struct ttm_buffer_object **bo); 367extern void ttm_bo_unref(struct ttm_buffer_object **bo);
366 368
369
370/**
371 * ttm_bo_list_ref_sub
372 *
373 * @bo: The buffer object.
374 * @count: The number of references with which to decrease @bo::list_kref;
375 * @never_free: The refcount should not reach zero with this operation.
376 *
377 * Release @count lru list references to this buffer object.
378 */
379extern void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
380 bool never_free);
381
382/**
383 * ttm_bo_add_to_lru
384 *
385 * @bo: The buffer object.
386 *
387 * Add this bo to the relevant mem type lru and, if it's backed by
388 * system pages (ttms), to the swap list.
389 * This function must be called with struct ttm_bo_global::lru_lock held, and
390 * is typically called immediately prior to unreserving a bo.
391 */
392extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
393
394/**
395 * ttm_bo_del_from_lru
396 *
397 * @bo: The buffer object.
398 *
399 * Remove this bo from all lru lists used to lookup and reserve an object.
400 * This function must be called with struct ttm_bo_global::lru_lock held,
401 * and is usually called immediately after the bo has been reserved to
402 * avoid recursive reservation from lru lists.
403 */
404extern int ttm_bo_del_from_lru(struct ttm_buffer_object *bo);
405
406
367/** 407/**
368 * ttm_bo_lock_delayed_workqueue 408 * ttm_bo_lock_delayed_workqueue
369 * 409 *
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 8e0c848326b6..1da8af6ac884 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -179,30 +179,6 @@ struct ttm_tt {
179#define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1) /* Memory mappable */ 179#define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1) /* Memory mappable */
180#define TTM_MEMTYPE_FLAG_CMA (1 << 3) /* Can't map aperture */ 180#define TTM_MEMTYPE_FLAG_CMA (1 << 3) /* Can't map aperture */
181 181
182/**
183 * struct ttm_mem_type_manager
184 *
185 * @has_type: The memory type has been initialized.
186 * @use_type: The memory type is enabled.
187 * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
188 * managed by this memory type.
189 * @gpu_offset: If used, the GPU offset of the first managed page of
190 * fixed memory or the first managed location in an aperture.
191 * @size: Size of the managed region.
192 * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
193 * as defined in ttm_placement_common.h
194 * @default_caching: The default caching policy used for a buffer object
195 * placed in this memory type if the user doesn't provide one.
196 * @manager: The range manager used for this memory type. FIXME: If the aperture
197 * has a page size different from the underlying system, the granularity
198 * of this manager should take care of this. But the range allocating code
199 * in ttm_bo.c needs to be modified for this.
200 * @lru: The lru list for this memory type.
201 *
202 * This structure is used to identify and manage memory types for a device.
203 * It's set up by the ttm_bo_driver::init_mem_type method.
204 */
205
206struct ttm_mem_type_manager; 182struct ttm_mem_type_manager;
207 183
208struct ttm_mem_type_manager_func { 184struct ttm_mem_type_manager_func {
@@ -287,6 +263,36 @@ struct ttm_mem_type_manager_func {
287 void (*debug)(struct ttm_mem_type_manager *man, const char *prefix); 263 void (*debug)(struct ttm_mem_type_manager *man, const char *prefix);
288}; 264};
289 265
266/**
267 * struct ttm_mem_type_manager
268 *
269 * @has_type: The memory type has been initialized.
270 * @use_type: The memory type is enabled.
271 * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
272 * managed by this memory type.
273 * @gpu_offset: If used, the GPU offset of the first managed page of
274 * fixed memory or the first managed location in an aperture.
275 * @size: Size of the managed region.
276 * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
277 * as defined in ttm_placement_common.h
278 * @default_caching: The default caching policy used for a buffer object
279 * placed in this memory type if the user doesn't provide one.
280 * @func: structure pointer implementing the range manager. See above
281 * @priv: Driver private closure for @func.
282 * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures
283 * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions
284 * reserved by the TTM vm system.
285 * @io_reserve_lru: Optional lru list for unreserving io mem regions.
286 * @io_reserve_fastpath: Only use bdev::driver::io_mem_reserve to obtain
287 * static information. bdev::driver::io_mem_free is never used.
288 * @lru: The lru list for this memory type.
289 *
290 * This structure is used to identify and manage memory types for a device.
291 * It's set up by the ttm_bo_driver::init_mem_type method.
292 */
293
294
295
290struct ttm_mem_type_manager { 296struct ttm_mem_type_manager {
291 struct ttm_bo_device *bdev; 297 struct ttm_bo_device *bdev;
292 298
@@ -303,6 +309,15 @@ struct ttm_mem_type_manager {
303 uint32_t default_caching; 309 uint32_t default_caching;
304 const struct ttm_mem_type_manager_func *func; 310 const struct ttm_mem_type_manager_func *func;
305 void *priv; 311 void *priv;
312 struct mutex io_reserve_mutex;
313 bool use_io_reserve_lru;
314 bool io_reserve_fastpath;
315
316 /*
317 * Protected by @io_reserve_mutex:
318 */
319
320 struct list_head io_reserve_lru;
306 321
307 /* 322 /*
308 * Protected by the global->lru_lock. 323 * Protected by the global->lru_lock.
@@ -510,9 +525,12 @@ struct ttm_bo_global {
510 * 525 *
511 * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver. 526 * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
512 * @man: An array of mem_type_managers. 527 * @man: An array of mem_type_managers.
528 * @fence_lock: Protects the synchronizing members on *all* bos belonging
529 * to this device.
513 * @addr_space_mm: Range manager for the device address space. 530 * @addr_space_mm: Range manager for the device address space.
514 * lru_lock: Spinlock that protects the buffer+device lru lists and 531 * lru_lock: Spinlock that protects the buffer+device lru lists and
515 * ddestroy lists. 532 * ddestroy lists.
533 * @val_seq: Current validation sequence.
516 * @nice_mode: Try nicely to wait for buffer idle when cleaning a manager. 534 * @nice_mode: Try nicely to wait for buffer idle when cleaning a manager.
517 * If a GPU lockup has been detected, this is forced to 0. 535 * If a GPU lockup has been detected, this is forced to 0.
518 * @dev_mapping: A pointer to the struct address_space representing the 536 * @dev_mapping: A pointer to the struct address_space representing the
@@ -531,6 +549,7 @@ struct ttm_bo_device {
531 struct ttm_bo_driver *driver; 549 struct ttm_bo_driver *driver;
532 rwlock_t vm_lock; 550 rwlock_t vm_lock;
533 struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES]; 551 struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
552 spinlock_t fence_lock;
534 /* 553 /*
535 * Protected by the vm lock. 554 * Protected by the vm lock.
536 */ 555 */
@@ -541,6 +560,7 @@ struct ttm_bo_device {
541 * Protected by the global:lru lock. 560 * Protected by the global:lru lock.
542 */ 561 */
543 struct list_head ddestroy; 562 struct list_head ddestroy;
563 uint32_t val_seq;
544 564
545 /* 565 /*
546 * Protected by load / firstopen / lastclose /unload sync. 566 * Protected by load / firstopen / lastclose /unload sync.
@@ -753,31 +773,6 @@ extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
753 773
754extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait); 774extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait);
755 775
756/**
757 * ttm_bo_pci_offset - Get the PCI offset for the buffer object memory.
758 *
759 * @bo Pointer to a struct ttm_buffer_object.
760 * @bus_base On return the base of the PCI region
761 * @bus_offset On return the byte offset into the PCI region
762 * @bus_size On return the byte size of the buffer object or zero if
763 * the buffer object memory is not accessible through a PCI region.
764 *
765 * Returns:
766 * -EINVAL if the buffer object is currently not mappable.
767 * 0 otherwise.
768 */
769
770extern int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
771 struct ttm_mem_reg *mem,
772 unsigned long *bus_base,
773 unsigned long *bus_offset,
774 unsigned long *bus_size);
775
776extern int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
777 struct ttm_mem_reg *mem);
778extern void ttm_mem_io_free(struct ttm_bo_device *bdev,
779 struct ttm_mem_reg *mem);
780
781extern void ttm_bo_global_release(struct drm_global_reference *ref); 776extern void ttm_bo_global_release(struct drm_global_reference *ref);
782extern int ttm_bo_global_init(struct drm_global_reference *ref); 777extern int ttm_bo_global_init(struct drm_global_reference *ref);
783 778
@@ -810,6 +805,22 @@ extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
810extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo); 805extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
811 806
812/** 807/**
808 * ttm_bo_unmap_virtual_locked
809 *
810 * @bo: tear down the virtual mappings for this BO
811 *
812 * The caller must take ttm_mem_io_lock before calling this function.
813 */
814extern void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);
815
816extern int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo);
817extern void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
818extern int ttm_mem_io_lock(struct ttm_mem_type_manager *man,
819 bool interruptible);
820extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
821
822
823/**
813 * ttm_bo_reserve: 824 * ttm_bo_reserve:
814 * 825 *
815 * @bo: A pointer to a struct ttm_buffer_object. 826 * @bo: A pointer to a struct ttm_buffer_object.
@@ -859,11 +870,44 @@ extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
859 * try again. (only if use_sequence == 1). 870 * try again. (only if use_sequence == 1).
860 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by 871 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
861 * a signal. Release all buffer reservations and return to user-space. 872 * a signal. Release all buffer reservations and return to user-space.
873 * -EBUSY: The function needed to sleep, but @no_wait was true
874 * -EDEADLK: Bo already reserved using @sequence. This error code will only
875 * be returned if @use_sequence is set to true.
862 */ 876 */
863extern int ttm_bo_reserve(struct ttm_buffer_object *bo, 877extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
864 bool interruptible, 878 bool interruptible,
865 bool no_wait, bool use_sequence, uint32_t sequence); 879 bool no_wait, bool use_sequence, uint32_t sequence);
866 880
881
882/**
883 * ttm_bo_reserve_locked:
884 *
885 * @bo: A pointer to a struct ttm_buffer_object.
886 * @interruptible: Sleep interruptible if waiting.
887 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
888 * @use_sequence: If @bo is already reserved, only sleep waiting for
889 * it to become unreserved if @sequence < (@bo)->sequence.
890 *
891 * Must be called with struct ttm_bo_global::lru_lock held,
892 * and will not remove reserved buffers from the lru lists.
893 * The function may release the LRU spinlock if it needs to sleep.
894 * Otherwise identical to ttm_bo_reserve.
895 *
896 * Returns:
897 * -EAGAIN: The reservation may cause a deadlock.
898 * Release all buffer reservations, wait for @bo to become unreserved and
899 * try again. (only if use_sequence == 1).
900 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
901 * a signal. Release all buffer reservations and return to user-space.
902 * -EBUSY: The function needed to sleep, but @no_wait was true
903 * -EDEADLK: Bo already reserved using @sequence. This error code will only
904 * be returned if @use_sequence is set to true.
905 */
906extern int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
907 bool interruptible,
908 bool no_wait, bool use_sequence,
909 uint32_t sequence);
910
867/** 911/**
868 * ttm_bo_unreserve 912 * ttm_bo_unreserve
869 * 913 *
@@ -874,6 +918,16 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
874extern void ttm_bo_unreserve(struct ttm_buffer_object *bo); 918extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);
875 919
876/** 920/**
921 * ttm_bo_unreserve_locked
922 *
923 * @bo: A pointer to a struct ttm_buffer_object.
924 *
925 * Unreserve a previous reservation of @bo.
926 * Needs to be called with struct ttm_bo_global::lru_lock held.
927 */
928extern void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo);
929
930/**
877 * ttm_bo_wait_unreserved 931 * ttm_bo_wait_unreserved
878 * 932 *
879 * @bo: A pointer to a struct ttm_buffer_object. 933 * @bo: A pointer to a struct ttm_buffer_object.
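The reserve/unreserve pairs above form TTM's per-buffer locking protocol. As a minimal sketch of the call pattern implied by the ttm_bo_reserve() kerneldoc (the function name and error handling are illustrative, not part of the header):

#include <drm/ttm/ttm_bo_driver.h>

/* Sketch: plain reservation, no deadlock-avoidance sequence number
 * (use_sequence == false, sequence ignored). */
static int example_touch_bo(struct ttm_buffer_object *bo)
{
	int ret;

	ret = ttm_bo_reserve(bo, true, false, false, 0);
	if (ret)
		return ret;	/* e.g. -ERESTARTSYS if a signal arrived */

	/* ... inspect or validate the reserved buffer here ... */

	ttm_bo_unreserve(bo);
	return 0;
}

The new _locked variants follow the same shape but are entered with struct ttm_bo_global::lru_lock already held.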
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index cd2c475da9ea..26cc7f9ffa41 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -41,7 +41,10 @@
41 * @bo: refcounted buffer object pointer. 41 * @bo: refcounted buffer object pointer.
42 * @new_sync_obj_arg: New sync_obj_arg for @bo, to be used once 42 * @new_sync_obj_arg: New sync_obj_arg for @bo, to be used once
43 * adding a new sync object. 43 * adding a new sync object.
44 * @reservied: Indicates whether @bo has been reserved for validation. 44 * @reserved: Indicates whether @bo has been reserved for validation.
45 * @removed: Indicates whether @bo has been removed from lru lists.
46 * @put_count: Number of outstanding references on bo::list_kref.
47 * @old_sync_obj: Pointer to a sync object about to be unreferenced
45 */ 48 */
46 49
47struct ttm_validate_buffer { 50struct ttm_validate_buffer {
@@ -49,6 +52,9 @@ struct ttm_validate_buffer {
49 struct ttm_buffer_object *bo; 52 struct ttm_buffer_object *bo;
50 void *new_sync_obj_arg; 53 void *new_sync_obj_arg;
51 bool reserved; 54 bool reserved;
55 bool removed;
56 int put_count;
57 void *old_sync_obj;
52}; 58};
53 59
54/** 60/**
@@ -66,7 +72,6 @@ extern void ttm_eu_backoff_reservation(struct list_head *list);
66 * function ttm_eu_reserve_buffers 72 * function ttm_eu_reserve_buffers
67 * 73 *
68 * @list: thread private list of ttm_validate_buffer structs. 74 * @list: thread private list of ttm_validate_buffer structs.
69 * @val_seq: A unique sequence number.
70 * 75 *
71 * Tries to reserve bos pointed to by the list entries for validation. 76 * Tries to reserve bos pointed to by the list entries for validation.
72 * If the function returns 0, all buffers are marked as "unfenced", 77 * If the function returns 0, all buffers are marked as "unfenced",
@@ -88,7 +93,7 @@ extern void ttm_eu_backoff_reservation(struct list_head *list);
88 * has failed. 93 * has failed.
89 */ 94 */
90 95
91extern int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq); 96extern int ttm_eu_reserve_buffers(struct list_head *list);
92 97
93/** 98/**
94 * function ttm_eu_fence_buffer_objects. 99 * function ttm_eu_fence_buffer_objects.
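With @val_seq dropped from ttm_eu_reserve_buffers(), callers simply pass their private validation list; the sequence number now comes from the device's new @val_seq counter internally. A hedged sketch of the expected execbuf pattern (fence creation elided):

#include <drm/ttm/ttm_execbuf_util.h>

static int example_submit(struct list_head *validate_list, void *fence)
{
	int ret;

	ret = ttm_eu_reserve_buffers(validate_list);
	if (ret)
		return ret;	/* nothing remains reserved on failure */

	/* ... validate buffers and submit the command stream; on a
	 * mid-stream error call ttm_eu_backoff_reservation(validate_list) */

	ttm_eu_fence_buffer_objects(validate_list, fence);
	return 0;
}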
diff --git a/include/linux/agp_backend.h b/include/linux/agp_backend.h
index 09ea4a1e9505..eaf6cd75a1b1 100644
--- a/include/linux/agp_backend.h
+++ b/include/linux/agp_backend.h
@@ -102,10 +102,8 @@ extern struct agp_memory *agp_allocate_memory(struct agp_bridge_data *, size_t,
102extern int agp_copy_info(struct agp_bridge_data *, struct agp_kern_info *); 102extern int agp_copy_info(struct agp_bridge_data *, struct agp_kern_info *);
103extern int agp_bind_memory(struct agp_memory *, off_t); 103extern int agp_bind_memory(struct agp_memory *, off_t);
104extern int agp_unbind_memory(struct agp_memory *); 104extern int agp_unbind_memory(struct agp_memory *);
105extern int agp_rebind_memory(void);
106extern void agp_enable(struct agp_bridge_data *, u32); 105extern void agp_enable(struct agp_bridge_data *, u32);
107extern struct agp_bridge_data *agp_backend_acquire(struct pci_dev *); 106extern struct agp_bridge_data *agp_backend_acquire(struct pci_dev *);
108extern void agp_backend_release(struct agp_bridge_data *); 107extern void agp_backend_release(struct agp_bridge_data *);
109extern void agp_flush_chipset(struct agp_bridge_data *);
110 108
111#endif /* _AGP_BACKEND_H */ 109#endif /* _AGP_BACKEND_H */
diff --git a/include/linux/bfin_mac.h b/include/linux/bfin_mac.h
index 904dec7d03a1..a69554ef8476 100644
--- a/include/linux/bfin_mac.h
+++ b/include/linux/bfin_mac.h
@@ -24,6 +24,7 @@ struct bfin_mii_bus_platform_data {
24 const unsigned short *mac_peripherals; 24 const unsigned short *mac_peripherals;
25 int phy_mode; 25 int phy_mode;
26 unsigned int phy_mask; 26 unsigned int phy_mask;
27 unsigned short vlan1_mask, vlan2_mask;
27}; 28};
28 29
29#endif 30#endif
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index a90b3892074a..1c70028f81f9 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -44,34 +44,24 @@ int ddebug_add_module(struct _ddebug *tab, unsigned int n,
44extern int ddebug_remove_module(const char *mod_name); 44extern int ddebug_remove_module(const char *mod_name);
45 45
46#define dynamic_pr_debug(fmt, ...) do { \ 46#define dynamic_pr_debug(fmt, ...) do { \
47 __label__ do_printk; \
48 __label__ out; \
49 static struct _ddebug descriptor \ 47 static struct _ddebug descriptor \
50 __used \ 48 __used \
51 __attribute__((section("__verbose"), aligned(8))) = \ 49 __attribute__((section("__verbose"), aligned(8))) = \
52 { KBUILD_MODNAME, __func__, __FILE__, fmt, __LINE__, \ 50 { KBUILD_MODNAME, __func__, __FILE__, fmt, __LINE__, \
53 _DPRINTK_FLAGS_DEFAULT }; \ 51 _DPRINTK_FLAGS_DEFAULT }; \
54 JUMP_LABEL(&descriptor.enabled, do_printk); \ 52 if (unlikely(descriptor.enabled)) \
55 goto out; \ 53 printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
56do_printk: \
57 printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
58out: ; \
59 } while (0) 54 } while (0)
60 55
61 56
62#define dynamic_dev_dbg(dev, fmt, ...) do { \ 57#define dynamic_dev_dbg(dev, fmt, ...) do { \
63 __label__ do_printk; \
64 __label__ out; \
65 static struct _ddebug descriptor \ 58 static struct _ddebug descriptor \
66 __used \ 59 __used \
67 __attribute__((section("__verbose"), aligned(8))) = \ 60 __attribute__((section("__verbose"), aligned(8))) = \
68 { KBUILD_MODNAME, __func__, __FILE__, fmt, __LINE__, \ 61 { KBUILD_MODNAME, __func__, __FILE__, fmt, __LINE__, \
69 _DPRINTK_FLAGS_DEFAULT }; \ 62 _DPRINTK_FLAGS_DEFAULT }; \
70 JUMP_LABEL(&descriptor.enabled, do_printk); \ 63 if (unlikely(descriptor.enabled)) \
71 goto out; \ 64 dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \
72do_printk: \
73 dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \
74out: ; \
75 } while (0) 65 } while (0)
76 66
77#else 67#else
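After this simplification a dynamic debug call site is just a static descriptor plus an unlikely() test on descriptor.enabled, toggled at run time through the dynamic_debug control file. A sketch of a call site (module name and message are illustrative):

#include <linux/module.h>
#include <linux/kernel.h>

static int __init example_init(void)
{
	/* Off by default; enable at run time with something like
	 *   echo 'module example +p' > <debugfs>/dynamic_debug/control
	 */
	pr_debug("example module loaded\n");
	return 0;
}
module_init(example_init);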
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index f16a01081e15..bec8b82889bf 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -48,8 +48,10 @@ extern int eth_validate_addr(struct net_device *dev);
48 48
49 49
50 50
51extern struct net_device *alloc_etherdev_mq(int sizeof_priv, unsigned int queue_count); 51extern struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
52 unsigned int rxqs);
52#define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1) 53#define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1)
54#define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count)
53 55
54/** 56/**
55 * is_zero_ether_addr - Determine if given Ethernet address is all zeros. 57 * is_zero_ether_addr - Determine if given Ethernet address is all zeros.
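alloc_etherdev_mqs() lets a driver size the transmit and receive queue arrays independently, with the old alloc_etherdev_mq() kept as a symmetric wrapper. A sketch (the private struct and queue counts are made up for illustration):

#include <linux/etherdevice.h>

struct example_priv { int dummy; };

static struct net_device *example_create(void)
{
	/* 8 TX queues, 4 RX queues; returns NULL on allocation failure */
	return alloc_etherdev_mqs(sizeof(struct example_priv), 8, 4);
}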
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
index 6ce1bca01724..65990ef612f5 100644
--- a/include/linux/ext3_fs.h
+++ b/include/linux/ext3_fs.h
@@ -724,21 +724,30 @@ struct ext3_dir_entry_2 {
724 ~EXT3_DIR_ROUND) 724 ~EXT3_DIR_ROUND)
725#define EXT3_MAX_REC_LEN ((1<<16)-1) 725#define EXT3_MAX_REC_LEN ((1<<16)-1)
726 726
727/*
728 * Tests against MAX_REC_LEN etc were put in place for 64k block
729 * sizes; if that is not possible on this arch, we can skip
730 * those tests and speed things up.
731 */
727static inline unsigned ext3_rec_len_from_disk(__le16 dlen) 732static inline unsigned ext3_rec_len_from_disk(__le16 dlen)
728{ 733{
729 unsigned len = le16_to_cpu(dlen); 734 unsigned len = le16_to_cpu(dlen);
730 735
736#if (PAGE_CACHE_SIZE >= 65536)
731 if (len == EXT3_MAX_REC_LEN) 737 if (len == EXT3_MAX_REC_LEN)
732 return 1 << 16; 738 return 1 << 16;
739#endif
733 return len; 740 return len;
734} 741}
735 742
736static inline __le16 ext3_rec_len_to_disk(unsigned len) 743static inline __le16 ext3_rec_len_to_disk(unsigned len)
737{ 744{
745#if (PAGE_CACHE_SIZE >= 65536)
738 if (len == (1 << 16)) 746 if (len == (1 << 16))
739 return cpu_to_le16(EXT3_MAX_REC_LEN); 747 return cpu_to_le16(EXT3_MAX_REC_LEN);
740 else if (len > (1 << 16)) 748 else if (len > (1 << 16))
741 BUG(); 749 BUG();
750#endif
742 return cpu_to_le16(len); 751 return cpu_to_le16(len);
743} 752}
744 753
@@ -856,6 +865,7 @@ extern struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb,
856extern int ext3_should_retry_alloc(struct super_block *sb, int *retries); 865extern int ext3_should_retry_alloc(struct super_block *sb, int *retries);
857extern void ext3_init_block_alloc_info(struct inode *); 866extern void ext3_init_block_alloc_info(struct inode *);
858extern void ext3_rsv_window_add(struct super_block *sb, struct ext3_reserve_window_node *rsv); 867extern void ext3_rsv_window_add(struct super_block *sb, struct ext3_reserve_window_node *rsv);
868extern int ext3_trim_fs(struct super_block *sb, struct fstrim_range *range);
859 869
860/* dir.c */ 870/* dir.c */
861extern int ext3_check_dir_entry(const char *, struct inode *, 871extern int ext3_check_dir_entry(const char *, struct inode *,
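The new #if guards matter only where a directory block can really reach 64 KiB: a record length of exactly 1 << 16 does not fit in the on-disk __le16, so it is stored as EXT3_MAX_REC_LEN (0xffff) and expanded again on read. A hedged worked example of the round-trip:

#include <linux/ext3_fs.h>

/* Assuming an architecture with PAGE_CACHE_SIZE >= 65536: */
static void example_rec_len_roundtrip(void)
{
	__le16 on_disk = ext3_rec_len_to_disk(1 << 16);
	/* on_disk == cpu_to_le16(EXT3_MAX_REC_LEN), i.e. 0xffff */

	unsigned len = ext3_rec_len_from_disk(on_disk);
	/* len == 65536 again; all smaller lengths round-trip unchanged,
	 * and on smaller page-cache sizes both tests compile away */
	(void)len;
}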
diff --git a/include/linux/fec.h b/include/linux/fec.h
index 5d3523d8dd0c..bcff455d1d53 100644
--- a/include/linux/fec.h
+++ b/include/linux/fec.h
@@ -3,6 +3,8 @@
3 * Copyright (c) 2009 Orex Computed Radiography 3 * Copyright (c) 2009 Orex Computed Radiography
4 * Baruch Siach <baruch@tkos.co.il> 4 * Baruch Siach <baruch@tkos.co.il>
5 * 5 *
6 * Copyright (C) 2010 Freescale Semiconductor, Inc.
7 *
6 * Header file for the FEC platform data 8 * Header file for the FEC platform data
7 * 9 *
8 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
@@ -16,6 +18,7 @@
16 18
17struct fec_platform_data { 19struct fec_platform_data {
18 phy_interface_t phy; 20 phy_interface_t phy;
21 unsigned char mac[ETH_ALEN];
19}; 22};
20 23
21#endif 24#endif
diff --git a/include/linux/gpio-i2cmux.h b/include/linux/gpio-i2cmux.h
new file mode 100644
index 000000000000..4a333bb0bd0d
--- /dev/null
+++ b/include/linux/gpio-i2cmux.h
@@ -0,0 +1,38 @@
1/*
2 * gpio-i2cmux interface to platform code
3 *
4 * Peter Korsgaard <peter.korsgaard@barco.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#ifndef _LINUX_GPIO_I2CMUX_H
12#define _LINUX_GPIO_I2CMUX_H
13
14/* MUX has no specific idle mode */
15#define GPIO_I2CMUX_NO_IDLE ((unsigned)-1)
16
17/**
18 * struct gpio_i2cmux_platform_data - Platform-dependent data for gpio-i2cmux
19 * @parent: Parent I2C bus adapter number
20 * @base_nr: Base I2C bus number to number adapters from or zero for dynamic
21 * @values: Array of bitmasks of GPIO settings (low/high) for each
22 * position
23 * @n_values: Number of multiplexer positions (busses to instantiate)
24 * @gpios: Array of GPIO numbers used to control MUX
25 * @n_gpios: Number of GPIOs used to control MUX
26 * @idle: Bitmask to write to MUX when idle or GPIO_I2CMUX_NO_IDLE if not used
27 */
28struct gpio_i2cmux_platform_data {
29 int parent;
30 int base_nr;
31 const unsigned *values;
32 int n_values;
33 const unsigned *gpios;
34 int n_gpios;
35 unsigned idle;
36};
37
38#endif /* _LINUX_GPIO_I2CMUX_H */
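Board code fills this structure and hands it to the gpio-i2cmux platform driver. A hedged sketch of a two-GPIO, four-way mux hanging off adapter 1 (all GPIO and bus numbers are invented board specifics):

#include <linux/gpio-i2cmux.h>

static const unsigned example_mux_values[] = { 0, 1, 2, 3 };
static const unsigned example_mux_gpios[]  = { 10, 11 };

static struct gpio_i2cmux_platform_data example_mux_pdata = {
	.parent   = 1,			/* upstream i2c adapter number */
	.base_nr  = 0,			/* 0 = dynamic child bus numbering */
	.values   = example_mux_values,
	.n_values = 4,			/* four downstream busses */
	.gpios    = example_mux_gpios,
	.n_gpios  = 2,
	.idle     = GPIO_I2CMUX_NO_IDLE,
};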
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 56cfe23ffb39..903576df88dc 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -57,9 +57,10 @@ struct i2c_board_info;
57 * transmit an arbitrary number of messages without interruption. 57 * transmit an arbitrary number of messages without interruption.
58 * @count must be less than 64k since msg.len is u16. 58 * @count must be less than 64k since msg.len is u16.
59 */ 59 */
60extern int i2c_master_send(struct i2c_client *client, const char *buf, 60extern int i2c_master_send(const struct i2c_client *client, const char *buf,
61 int count);
62extern int i2c_master_recv(const struct i2c_client *client, char *buf,
61 int count); 63 int count);
62extern int i2c_master_recv(struct i2c_client *client, char *buf, int count);
63 64
64/* Transfer num messages. 65/* Transfer num messages.
65 */ 66 */
@@ -78,23 +79,25 @@ extern s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
78/* Now follow the 'nice' access routines. These also document the calling 79/* Now follow the 'nice' access routines. These also document the calling
79 conventions of i2c_smbus_xfer. */ 80 conventions of i2c_smbus_xfer. */
80 81
81extern s32 i2c_smbus_read_byte(struct i2c_client *client); 82extern s32 i2c_smbus_read_byte(const struct i2c_client *client);
82extern s32 i2c_smbus_write_byte(struct i2c_client *client, u8 value); 83extern s32 i2c_smbus_write_byte(const struct i2c_client *client, u8 value);
83extern s32 i2c_smbus_read_byte_data(struct i2c_client *client, u8 command); 84extern s32 i2c_smbus_read_byte_data(const struct i2c_client *client,
84extern s32 i2c_smbus_write_byte_data(struct i2c_client *client, 85 u8 command);
86extern s32 i2c_smbus_write_byte_data(const struct i2c_client *client,
85 u8 command, u8 value); 87 u8 command, u8 value);
86extern s32 i2c_smbus_read_word_data(struct i2c_client *client, u8 command); 88extern s32 i2c_smbus_read_word_data(const struct i2c_client *client,
87extern s32 i2c_smbus_write_word_data(struct i2c_client *client, 89 u8 command);
90extern s32 i2c_smbus_write_word_data(const struct i2c_client *client,
88 u8 command, u16 value); 91 u8 command, u16 value);
89/* Returns the number of read bytes */ 92/* Returns the number of read bytes */
90extern s32 i2c_smbus_read_block_data(struct i2c_client *client, 93extern s32 i2c_smbus_read_block_data(const struct i2c_client *client,
91 u8 command, u8 *values); 94 u8 command, u8 *values);
92extern s32 i2c_smbus_write_block_data(struct i2c_client *client, 95extern s32 i2c_smbus_write_block_data(const struct i2c_client *client,
93 u8 command, u8 length, const u8 *values); 96 u8 command, u8 length, const u8 *values);
94/* Returns the number of read bytes */ 97/* Returns the number of read bytes */
95extern s32 i2c_smbus_read_i2c_block_data(struct i2c_client *client, 98extern s32 i2c_smbus_read_i2c_block_data(const struct i2c_client *client,
96 u8 command, u8 length, u8 *values); 99 u8 command, u8 length, u8 *values);
97extern s32 i2c_smbus_write_i2c_block_data(struct i2c_client *client, 100extern s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client,
98 u8 command, u8 length, 101 u8 command, u8 length,
99 const u8 *values); 102 const u8 *values);
100#endif /* I2C */ 103#endif /* I2C */
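Constifying the client argument means read-only helpers can now take a const pointer without casts. A sketch (the register offset is hypothetical):

#include <linux/i2c.h>

static int example_read_chip_id(const struct i2c_client *client)
{
	/* returns the byte read, or a negative errno */
	return i2c_smbus_read_byte_data(client, 0x00 /* ID register */);
}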
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index f7e73c338c40..dd3f20139640 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -103,7 +103,7 @@ struct __fdb_entry {
103 103
104extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *)); 104extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
105 105
106typedef int (*br_should_route_hook_t)(struct sk_buff *skb); 106typedef int br_should_route_hook_t(struct sk_buff *skb);
107extern br_should_route_hook_t __rcu *br_should_route_hook; 107extern br_should_route_hook_t __rcu *br_should_route_hook;
108 108
109#endif 109#endif
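Declaring br_should_route_hook_t as a function type rather than a pointer type lets the __rcu annotation attach to the pointer variable itself. Callers then dereference it under RCU, roughly like this sketch:

#include <linux/if_bridge.h>
#include <linux/rcupdate.h>

static int example_call_hook(struct sk_buff *skb)
{
	br_should_route_hook_t *hook;
	int ret = 0;

	rcu_read_lock();
	hook = rcu_dereference(br_should_route_hook);
	if (hook)
		ret = hook(skb);
	rcu_read_unlock();
	return ret;
}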
diff --git a/include/linux/intel-gtt.h b/include/linux/intel-gtt.h
deleted file mode 100644
index 1d19ab2afa39..000000000000
--- a/include/linux/intel-gtt.h
+++ /dev/null
@@ -1,20 +0,0 @@
1/*
2 * Common Intel AGPGART and GTT definitions.
3 */
4#ifndef _INTEL_GTT_H
5#define _INTEL_GTT_H
6
7#include <linux/agp_backend.h>
8
9/* This is for Intel only GTT controls.
10 *
11 * Sandybridge: AGP_USER_CACHED_MEMORY default to LLC only
12 */
13
14#define AGP_USER_CACHED_MEMORY_LLC_MLC (AGP_USER_TYPES + 2)
15#define AGP_USER_UNCACHED_MEMORY (AGP_USER_TYPES + 4)
16
17/* flag for GFDT type */
18#define AGP_USER_CACHED_MEMORY_GFDT (1 << 3)
19
20#endif
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 2ae86aa21fce..27e79c27ba08 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -94,7 +94,7 @@ extern void jbd2_free(void *ptr, size_t size);
94 * 94 *
95 * This is an opaque datatype. 95 * This is an opaque datatype.
96 **/ 96 **/
97typedef struct handle_s handle_t; /* Atomic operation type */ 97typedef struct jbd2_journal_handle handle_t; /* Atomic operation type */
98 98
99 99
100/** 100/**
@@ -416,7 +416,7 @@ struct jbd2_revoke_table_s;
416 * in so it can be fixed later. 416 * in so it can be fixed later.
417 */ 417 */
418 418
419struct handle_s 419struct jbd2_journal_handle
420{ 420{
421 /* Which compound transaction is this update a part of? */ 421 /* Which compound transaction is this update a part of? */
422 transaction_t *h_transaction; 422 transaction_t *h_transaction;
@@ -1158,6 +1158,22 @@ static inline void jbd2_free_handle(handle_t *handle)
1158 kmem_cache_free(jbd2_handle_cache, handle); 1158 kmem_cache_free(jbd2_handle_cache, handle);
1159} 1159}
1160 1160
1161/*
1162 * jbd2_inode management (optional, for those file systems that want to use
1163 * dynamically allocated jbd2_inode structures)
1164 */
1165extern struct kmem_cache *jbd2_inode_cache;
1166
1167static inline struct jbd2_inode *jbd2_alloc_inode(gfp_t gfp_flags)
1168{
1169 return kmem_cache_alloc(jbd2_inode_cache, gfp_flags);
1170}
1171
1172static inline void jbd2_free_inode(struct jbd2_inode *jinode)
1173{
1174 kmem_cache_free(jbd2_inode_cache, jinode);
1175}
1176
1161/* Primary revoke support */ 1177/* Primary revoke support */
1162#define JOURNAL_REVOKE_DEFAULT_HASH 256 1178#define JOURNAL_REVOKE_DEFAULT_HASH 256
1163extern int jbd2_journal_init_revoke(journal_t *, int); 1179extern int jbd2_journal_init_revoke(journal_t *, int);
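Filesystems that attach struct jbd2_inode dynamically, rather than embedding it in their own inode, can now draw from the shared slab cache. A hedged sketch:

#include <linux/jbd2.h>

static struct jbd2_inode *example_attach_jinode(struct inode *inode)
{
	struct jbd2_inode *jinode = jbd2_alloc_inode(GFP_NOFS);

	if (!jinode)
		return NULL;
	jbd2_journal_init_jbd_inode(jinode, inode);
	return jinode;		/* freed later with jbd2_free_inode() */
}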
diff --git a/include/linux/kref.h b/include/linux/kref.h
index 6cc38fc07ab7..d4a62ab2ee5e 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -24,5 +24,7 @@ struct kref {
24void kref_init(struct kref *kref); 24void kref_init(struct kref *kref);
25void kref_get(struct kref *kref); 25void kref_get(struct kref *kref);
26int kref_put(struct kref *kref, void (*release) (struct kref *kref)); 26int kref_put(struct kref *kref, void (*release) (struct kref *kref));
27int kref_sub(struct kref *kref, unsigned int count,
28 void (*release) (struct kref *kref));
27 29
28#endif /* _KREF_H_ */ 30#endif /* _KREF_H_ */
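kref_sub() drops @count references in one atomic step, which batch users (TTM's put_count accounting above is one) rely on; the release function runs if the count reaches zero. A sketch:

#include <linux/kref.h>

static void example_release(struct kref *kref)
{
	/* container_of(kref, ...) back to the object and free it */
}

static void example_put_many(struct kref *kref, unsigned int count)
{
	/* equivalent to calling kref_put() 'count' times */
	kref_sub(kref, count, example_release);
}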
diff --git a/include/linux/lockd/debug.h b/include/linux/lockd/debug.h
index 34b2b7f33c3b..257d3779f2ab 100644
--- a/include/linux/lockd/debug.h
+++ b/include/linux/lockd/debug.h
@@ -44,14 +44,4 @@
44#define NLMDBG_XDR 0x0100 44#define NLMDBG_XDR 0x0100
45#define NLMDBG_ALL 0x7fff 45#define NLMDBG_ALL 0x7fff
46 46
47
48/*
49 * Support for printing NLM cookies in dprintk()
50 */
51#ifdef RPC_DEBUG
52struct nlm_cookie;
53/* Call this function with the BKL held (it uses a static buffer) */
54extern const char *nlmdbg_cookie2a(const struct nlm_cookie *);
55#endif
56
57#endif /* LINUX_LOCKD_DEBUG_H */ 47#endif /* LINUX_LOCKD_DEBUG_H */
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index 2dee05e5119a..ff9abff55aa0 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -202,9 +202,9 @@ extern u32 nsm_local_state;
202 * Lockd client functions 202 * Lockd client functions
203 */ 203 */
204struct nlm_rqst * nlm_alloc_call(struct nlm_host *host); 204struct nlm_rqst * nlm_alloc_call(struct nlm_host *host);
205void nlm_release_call(struct nlm_rqst *);
206int nlm_async_call(struct nlm_rqst *, u32, const struct rpc_call_ops *); 205int nlm_async_call(struct nlm_rqst *, u32, const struct rpc_call_ops *);
207int nlm_async_reply(struct nlm_rqst *, u32, const struct rpc_call_ops *); 206int nlm_async_reply(struct nlm_rqst *, u32, const struct rpc_call_ops *);
207void nlmclnt_release_call(struct nlm_rqst *);
208struct nlm_wait * nlmclnt_prepare_block(struct nlm_host *host, struct file_lock *fl); 208struct nlm_wait * nlmclnt_prepare_block(struct nlm_host *host, struct file_lock *fl);
209void nlmclnt_finish_block(struct nlm_wait *block); 209void nlmclnt_finish_block(struct nlm_wait *block);
210int nlmclnt_block(struct nlm_wait *block, struct nlm_rqst *req, long timeout); 210int nlmclnt_block(struct nlm_wait *block, struct nlm_rqst *req, long timeout);
@@ -223,13 +223,14 @@ struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap,
223 const u32 version, 223 const u32 version,
224 const char *hostname, 224 const char *hostname,
225 int noresvport); 225 int noresvport);
226void nlmclnt_release_host(struct nlm_host *);
226struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp, 227struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp,
227 const char *hostname, 228 const char *hostname,
228 const size_t hostname_len); 229 const size_t hostname_len);
230void nlmsvc_release_host(struct nlm_host *);
229struct rpc_clnt * nlm_bind_host(struct nlm_host *); 231struct rpc_clnt * nlm_bind_host(struct nlm_host *);
230void nlm_rebind_host(struct nlm_host *); 232void nlm_rebind_host(struct nlm_host *);
231struct nlm_host * nlm_get_host(struct nlm_host *); 233struct nlm_host * nlm_get_host(struct nlm_host *);
232void nlm_release_host(struct nlm_host *);
233void nlm_shutdown_hosts(void); 234void nlm_shutdown_hosts(void);
234void nlm_host_rebooted(const struct nlm_reboot *); 235void nlm_host_rebooted(const struct nlm_reboot *);
235 236
@@ -267,6 +268,7 @@ unsigned long nlmsvc_retry_blocked(void);
267void nlmsvc_traverse_blocks(struct nlm_host *, struct nlm_file *, 268void nlmsvc_traverse_blocks(struct nlm_host *, struct nlm_file *,
268 nlm_host_match_fn_t match); 269 nlm_host_match_fn_t match);
269void nlmsvc_grant_reply(struct nlm_cookie *, __be32); 270void nlmsvc_grant_reply(struct nlm_cookie *, __be32);
271void nlmsvc_release_call(struct nlm_rqst *);
270 272
271/* 273/*
272 * File handling for the server personality 274 * File handling for the server personality
diff --git a/include/linux/mbcache.h b/include/linux/mbcache.h
index 54cbbac1e71d..5525d370701d 100644
--- a/include/linux/mbcache.h
+++ b/include/linux/mbcache.h
@@ -18,6 +18,17 @@ struct mb_cache_entry {
18 } e_index; 18 } e_index;
19}; 19};
20 20
21struct mb_cache {
22 struct list_head c_cache_list;
23 const char *c_name;
24 atomic_t c_entry_count;
25 int c_max_entries;
26 int c_bucket_bits;
27 struct kmem_cache *c_entry_cache;
28 struct list_head *c_block_hash;
29 struct list_head *c_index_hash;
30};
31
21/* Functions on caches */ 32/* Functions on caches */
22 33
23struct mb_cache *mb_cache_create(const char *, int); 34struct mb_cache *mb_cache_create(const char *, int);
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index 085f041197dc..8e70310ee945 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -57,6 +57,10 @@
57 * is configured in 4-bit mode. 57 * is configured in 4-bit mode.
58 */ 58 */
59#define TMIO_MMC_BLKSZ_2BYTES (1 << 1) 59#define TMIO_MMC_BLKSZ_2BYTES (1 << 1)
60/*
61 * Some controllers can support SDIO IRQ signalling.
62 */
63#define TMIO_MMC_SDIO_IRQ (1 << 2)
60 64
61int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base); 65int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base);
62int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base); 66int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base);
@@ -66,6 +70,7 @@ void tmio_core_mmc_clk_div(void __iomem *cnf, int shift, int state);
66struct tmio_mmc_dma { 70struct tmio_mmc_dma {
67 void *chan_priv_tx; 71 void *chan_priv_tx;
68 void *chan_priv_rx; 72 void *chan_priv_rx;
73 int alignment_shift;
69}; 74};
70 75
71/* 76/*
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
new file mode 100644
index 000000000000..16b0261763ed
--- /dev/null
+++ b/include/linux/mmc/dw_mmc.h
@@ -0,0 +1,217 @@
1/*
2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
4 *
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#ifndef _LINUX_MMC_DW_MMC_H_
15#define _LINUX_MMC_DW_MMC_H_
16
17#define MAX_MCI_SLOTS 2
18
19enum dw_mci_state {
20 STATE_IDLE = 0,
21 STATE_SENDING_CMD,
22 STATE_SENDING_DATA,
23 STATE_DATA_BUSY,
24 STATE_SENDING_STOP,
25 STATE_DATA_ERROR,
26};
27
28enum {
29 EVENT_CMD_COMPLETE = 0,
30 EVENT_XFER_COMPLETE,
31 EVENT_DATA_COMPLETE,
32 EVENT_DATA_ERROR,
33 EVENT_XFER_ERROR
34};
35
36struct mmc_data;
37
38/**
39 * struct dw_mci - MMC controller state shared between all slots
40 * @lock: Spinlock protecting the queue and associated data.
41 * @regs: Pointer to MMIO registers.
42 * @sg: Scatterlist entry currently being processed by PIO code, if any.
43 * @pio_offset: Offset into the current scatterlist entry.
44 * @cur_slot: The slot which is currently using the controller.
45 * @mrq: The request currently being processed on @cur_slot,
46 * or NULL if the controller is idle.
47 * @cmd: The command currently being sent to the card, or NULL.
48 * @data: The data currently being transferred, or NULL if no data
49 * transfer is in progress.
50 * @use_dma: Whether DMA channel is initialized or not.
51 * @sg_dma: Bus address of DMA buffer.
52 * @sg_cpu: Virtual address of DMA buffer.
53 * @dma_ops: Pointer to platform-specific DMA callbacks.
54 * @cmd_status: Snapshot of SR taken upon completion of the current
55 * command. Only valid when EVENT_CMD_COMPLETE is pending.
56 * @data_status: Snapshot of SR taken upon completion of the current
57 * data transfer. Only valid when EVENT_DATA_COMPLETE or
58 * EVENT_DATA_ERROR is pending.
59 * @stop_cmdr: Value to be loaded into CMDR when the stop command is
60 * to be sent.
61 * @dir_status: Direction of current transfer.
62 * @tasklet: Tasklet running the request state machine.
63 * @card_tasklet: Tasklet handling card detect.
64 * @pending_events: Bitmask of events flagged by the interrupt handler
65 * to be processed by the tasklet.
66 * @completed_events: Bitmask of events which the state machine has
67 * processed.
68 * @state: Tasklet state.
69 * @queue: List of slots waiting for access to the controller.
70 * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus
71 * rate and timeout calculations.
72 * @current_speed: Configured rate of the controller.
73 * @num_slots: Number of slots available.
74 * @pdev: Platform device associated with the MMC controller.
75 * @pdata: Platform data associated with the MMC controller.
76 * @slot: Slots sharing this MMC controller.
77 * @data_shift: log2 of FIFO item size.
78 * @push_data: Pointer to FIFO push function.
79 * @pull_data: Pointer to FIFO pull function.
80 * @quirks: Set of quirks that apply to specific versions of the IP.
81 *
82 * Locking
83 * =======
84 *
85 * @lock is a softirq-safe spinlock protecting @queue as well as
86 * @cur_slot, @mrq and @state. These must always be updated
87 * at the same time while holding @lock.
88 *
89 * The @mrq field of struct dw_mci_slot is also protected by @lock,
90 * and must always be written at the same time as the slot is added to
91 * @queue.
92 *
93 * @pending_events and @completed_events are accessed using atomic bit
94 * operations, so they don't need any locking.
95 *
96 * None of the fields touched by the interrupt handler need any
97 * locking. However, ordering is important: Before EVENT_DATA_ERROR or
98 * EVENT_DATA_COMPLETE is set in @pending_events, all data-related
99 * interrupts must be disabled and @data_status updated with a
100 * snapshot of SR. Similarly, before EVENT_CMD_COMPLETE is set, the
101 * CMDRDY interrupt must be disabled and @cmd_status updated with a
102 * snapshot of SR, and before EVENT_XFER_COMPLETE can be set, the
103 * bytes_xfered field of @data must be written. This is ensured by
104 * using barriers.
105 */
106struct dw_mci {
107 spinlock_t lock;
108 void __iomem *regs;
109
110 struct scatterlist *sg;
111 unsigned int pio_offset;
112
113 struct dw_mci_slot *cur_slot;
114 struct mmc_request *mrq;
115 struct mmc_command *cmd;
116 struct mmc_data *data;
117
118 /* DMA interface members */
119 int use_dma;
120
121 dma_addr_t sg_dma;
122 void *sg_cpu;
123 struct dw_mci_dma_ops *dma_ops;
124#ifdef CONFIG_MMC_DW_IDMAC
125 unsigned int ring_size;
126#else
127 struct dw_mci_dma_data *dma_data;
128#endif
129 u32 cmd_status;
130 u32 data_status;
131 u32 stop_cmdr;
132 u32 dir_status;
133 struct tasklet_struct tasklet;
134 struct tasklet_struct card_tasklet;
135 unsigned long pending_events;
136 unsigned long completed_events;
137 enum dw_mci_state state;
138 struct list_head queue;
139
140 u32 bus_hz;
141 u32 current_speed;
142 u32 num_slots;
143 struct platform_device *pdev;
144 struct dw_mci_board *pdata;
145 struct dw_mci_slot *slot[MAX_MCI_SLOTS];
146
147 /* FIFO push and pull */
148 int data_shift;
149 void (*push_data)(struct dw_mci *host, void *buf, int cnt);
150 void (*pull_data)(struct dw_mci *host, void *buf, int cnt);
151
152 /* Workaround flags */
153 u32 quirks;
154};
155
156/* DMA ops for Internal/External DMAC interface */
157struct dw_mci_dma_ops {
158 /* DMA Ops */
159 int (*init)(struct dw_mci *host);
160 void (*start)(struct dw_mci *host, unsigned int sg_len);
161 void (*complete)(struct dw_mci *host);
162 void (*stop)(struct dw_mci *host);
163 void (*cleanup)(struct dw_mci *host);
164 void (*exit)(struct dw_mci *host);
165};
166
167/* IP Quirks/flags. */
168/* No special quirks or flags to cater for */
169#define DW_MCI_QUIRK_NONE 0
170/* DTO fix for command transmission with IDMAC configured */
171#define DW_MCI_QUIRK_IDMAC_DTO 1
172/* delay needed between retries on some 2.11a implementations */
173#define DW_MCI_QUIRK_RETRY_DELAY 2
174/* High Speed Capable - Supports HS cards (up to 50 MHz) */
175#define DW_MCI_QUIRK_HIGHSPEED 4
176
177
178struct dma_pdata;
179
180struct block_settings {
181 unsigned short max_segs; /* see blk_queue_max_segments */
182 unsigned int max_blk_size; /* maximum size of one mmc block */
183 unsigned int max_blk_count; /* maximum number of blocks in one req*/
184 unsigned int max_req_size; /* maximum number of bytes in one req*/
185 unsigned int max_seg_size; /* see blk_queue_max_segment_size */
186};
187
188/* Board platform data */
189struct dw_mci_board {
190 u32 num_slots;
191
192 u32 quirks; /* Workaround / Quirk flags */
193 unsigned int bus_hz; /* Bus speed */
194
195 /* delay in mS before detecting cards after interrupt */
196 u32 detect_delay_ms;
197
198 int (*init)(u32 slot_id, irq_handler_t , void *);
199 int (*get_ro)(u32 slot_id);
200 int (*get_cd)(u32 slot_id);
201 int (*get_ocr)(u32 slot_id);
202 int (*get_bus_wd)(u32 slot_id);
203 /*
204 * Enable power to selected slot and set voltage to desired level.
205 * Voltage levels are specified using MMC_VDD_xxx defines defined
206 * in linux/mmc/host.h file.
207 */
208 void (*setpower)(u32 slot_id, u32 volt);
209 void (*exit)(u32 slot_id);
210 void (*select_slot)(u32 slot_id);
211
212 struct dw_mci_dma_ops *dma_ops;
213 struct dma_pdata *data;
214 struct block_settings *blk_settings;
215};
216
217#endif /* _LINUX_MMC_DW_MMC_H_ */
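Platform code describes each controller instance with struct dw_mci_board. A hedged sketch with invented board values; the callback hooks are left NULL here since they are board specific:

#include <linux/mmc/dw_mmc.h>

static struct dw_mci_board example_dw_mci_pdata = {
	.num_slots	 = 1,
	.quirks		 = DW_MCI_QUIRK_HIGHSPEED,
	.bus_hz		 = 50 * 1000 * 1000,	/* 50 MHz controller clock */
	.detect_delay_ms = 200,			/* debounce card detect */
};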
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 30f6fad99a58..bcb793ec7374 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -131,6 +131,9 @@ struct mmc_host {
131 unsigned int f_max; 131 unsigned int f_max;
132 unsigned int f_init; 132 unsigned int f_init;
133 u32 ocr_avail; 133 u32 ocr_avail;
134 u32 ocr_avail_sdio; /* SDIO-specific OCR */
135 u32 ocr_avail_sd; /* SD-specific OCR */
136 u32 ocr_avail_mmc; /* MMC-specific OCR */
134 struct notifier_block pm_notify; 137 struct notifier_block pm_notify;
135 138
136#define MMC_VDD_165_195 0x00000080 /* VDD voltage 1.65 - 1.95 */ 139#define MMC_VDD_165_195 0x00000080 /* VDD voltage 1.65 - 1.95 */
@@ -169,9 +172,20 @@ struct mmc_host {
169#define MMC_CAP_1_2V_DDR (1 << 12) /* can support */ 172#define MMC_CAP_1_2V_DDR (1 << 12) /* can support */
170 /* DDR mode at 1.2V */ 173 /* DDR mode at 1.2V */
171#define MMC_CAP_POWER_OFF_CARD (1 << 13) /* Can power off after boot */ 174#define MMC_CAP_POWER_OFF_CARD (1 << 13) /* Can power off after boot */
175#define MMC_CAP_BUS_WIDTH_TEST (1 << 14) /* CMD14/CMD19 bus width ok */
172 176
173 mmc_pm_flag_t pm_caps; /* supported pm features */ 177 mmc_pm_flag_t pm_caps; /* supported pm features */
174 178
179#ifdef CONFIG_MMC_CLKGATE
180 int clk_requests; /* internal reference counter */
181 unsigned int clk_delay; /* number of MCI clk hold cycles */
182 bool clk_gated; /* clock gated */
183 struct work_struct clk_gate_work; /* delayed clock gate */
184 unsigned int clk_old; /* old clock value cache */
185 spinlock_t clk_lock; /* lock for clk fields */
186 struct mutex clk_gate_mutex; /* mutex for clock gating */
187#endif
188
175 /* host specific block data */ 189 /* host specific block data */
176 unsigned int max_seg_size; /* see blk_queue_max_segment_size */ 190 unsigned int max_seg_size; /* see blk_queue_max_segment_size */
177 unsigned short max_segs; /* see blk_queue_max_segments */ 191 unsigned short max_segs; /* see blk_queue_max_segments */
@@ -307,5 +321,10 @@ static inline int mmc_card_is_removable(struct mmc_host *host)
307 return !(host->caps & MMC_CAP_NONREMOVABLE) && mmc_assume_removable; 321 return !(host->caps & MMC_CAP_NONREMOVABLE) && mmc_assume_removable;
308} 322}
309 323
324static inline int mmc_card_is_powered_resumed(struct mmc_host *host)
325{
326 return host->pm_flags & MMC_PM_KEEP_POWER;
327}
328
310#endif 329#endif
311 330
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 956fbd877692..612301f85d14 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -40,7 +40,9 @@
40#define MMC_READ_DAT_UNTIL_STOP 11 /* adtc [31:0] dadr R1 */ 40#define MMC_READ_DAT_UNTIL_STOP 11 /* adtc [31:0] dadr R1 */
41#define MMC_STOP_TRANSMISSION 12 /* ac R1b */ 41#define MMC_STOP_TRANSMISSION 12 /* ac R1b */
42#define MMC_SEND_STATUS 13 /* ac [31:16] RCA R1 */ 42#define MMC_SEND_STATUS 13 /* ac [31:16] RCA R1 */
43#define MMC_BUS_TEST_R 14 /* adtc R1 */
43#define MMC_GO_INACTIVE_STATE 15 /* ac [31:16] RCA */ 44#define MMC_GO_INACTIVE_STATE 15 /* ac [31:16] RCA */
45#define MMC_BUS_TEST_W 19 /* adtc R1 */
44#define MMC_SPI_READ_OCR 58 /* spi spi_R3 */ 46#define MMC_SPI_READ_OCR 58 /* spi spi_R3 */
45#define MMC_SPI_CRC_ON_OFF 59 /* spi [0:0] flag spi_R1 */ 47#define MMC_SPI_CRC_ON_OFF 59 /* spi [0:0] flag spi_R1 */
46 48
diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h
index 1fdc673f2396..83bd9f76709a 100644
--- a/include/linux/mmc/sdhci.h
+++ b/include/linux/mmc/sdhci.h
@@ -83,6 +83,8 @@ struct sdhci_host {
83#define SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 (1<<28) 83#define SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 (1<<28)
84/* Controller doesn't have HISPD bit field in HI-SPEED SD card */ 84/* Controller doesn't have HISPD bit field in HI-SPEED SD card */
85#define SDHCI_QUIRK_NO_HISPD_BIT (1<<29) 85#define SDHCI_QUIRK_NO_HISPD_BIT (1<<29)
86/* Controller treats ADMA descriptors with length 0000h incorrectly */
87#define SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC (1<<30)
86 88
87 int irq; /* Device IRQ */ 89 int irq; /* Device IRQ */
88 void __iomem *ioaddr; /* Mapped address */ 90 void __iomem *ioaddr; /* Mapped address */
@@ -139,6 +141,10 @@ struct sdhci_host {
139 141
140 unsigned int caps; /* Alternative capabilities */ 142 unsigned int caps; /* Alternative capabilities */
141 143
144 unsigned int ocr_avail_sdio; /* OCR bit masks */
145 unsigned int ocr_avail_sd;
146 unsigned int ocr_avail_mmc;
147
142 unsigned long private[0] ____cacheline_aligned; 148 unsigned long private[0] ____cacheline_aligned;
143}; 149};
144#endif /* __SDHCI_H */ 150#endif /* __SDHCI_H */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 0f6b1c965815..be4957cf6511 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2191,11 +2191,15 @@ static inline void netif_addr_unlock_bh(struct net_device *dev)
2191extern void ether_setup(struct net_device *dev); 2191extern void ether_setup(struct net_device *dev);
2192 2192
2193/* Support for loadable net-drivers */ 2193/* Support for loadable net-drivers */
2194extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, 2194extern struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
2195 void (*setup)(struct net_device *), 2195 void (*setup)(struct net_device *),
2196 unsigned int queue_count); 2196 unsigned int txqs, unsigned int rxqs);
2197#define alloc_netdev(sizeof_priv, name, setup) \ 2197#define alloc_netdev(sizeof_priv, name, setup) \
2198 alloc_netdev_mq(sizeof_priv, name, setup, 1) 2198 alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1)
2199
2200#define alloc_netdev_mq(sizeof_priv, name, setup, count) \
2201 alloc_netdev_mqs(sizeof_priv, name, setup, count, count)
2202
2199extern int register_netdev(struct net_device *dev); 2203extern int register_netdev(struct net_device *dev);
2200extern void unregister_netdev(struct net_device *dev); 2204extern void unregister_netdev(struct net_device *dev);
2201 2205
@@ -2303,7 +2307,7 @@ unsigned long netdev_fix_features(unsigned long features, const char *name);
2303void netif_stacked_transfer_operstate(const struct net_device *rootdev, 2307void netif_stacked_transfer_operstate(const struct net_device *rootdev,
2304 struct net_device *dev); 2308 struct net_device *dev);
2305 2309
2306int netif_get_vlan_features(struct sk_buff *skb, struct net_device *dev); 2310int netif_skb_features(struct sk_buff *skb);
2307 2311
2308static inline int net_gso_ok(int features, int gso_type) 2312static inline int net_gso_ok(int features, int gso_type)
2309{ 2313{
@@ -2317,16 +2321,10 @@ static inline int skb_gso_ok(struct sk_buff *skb, int features)
2317 (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST)); 2321 (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
2318} 2322}
2319 2323
2320static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb) 2324static inline int netif_needs_gso(struct sk_buff *skb, int features)
2321{ 2325{
2322 if (skb_is_gso(skb)) { 2326 return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
2323 int features = netif_get_vlan_features(skb, dev); 2327 unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
2324
2325 return (!skb_gso_ok(skb, features) ||
2326 unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
2327 }
2328
2329 return 0;
2330} 2328}
2331 2329
2332static inline void netif_set_gso_max_size(struct net_device *dev, 2330static inline void netif_set_gso_max_size(struct net_device *dev,
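netif_needs_gso() now takes the feature set directly, so the transmit path computes it once with netif_skb_features() and reuses it for related checks. A hedged sketch of the pattern:

#include <linux/netdevice.h>

static int example_xmit_check(struct sk_buff *skb)
{
	int features = netif_skb_features(skb);

	if (netif_needs_gso(skb, features)) {
		/* ... segment in software before handing to the device ... */
	}
	return 0;
}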
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 742bec051440..6712e713b299 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -472,7 +472,7 @@ extern void xt_free_table_info(struct xt_table_info *info);
472 * necessary for reading the counters. 472 * necessary for reading the counters.
473 */ 473 */
474struct xt_info_lock { 474struct xt_info_lock {
475 spinlock_t lock; 475 seqlock_t lock;
476 unsigned char readers; 476 unsigned char readers;
477}; 477};
478DECLARE_PER_CPU(struct xt_info_lock, xt_info_locks); 478DECLARE_PER_CPU(struct xt_info_lock, xt_info_locks);
@@ -497,7 +497,7 @@ static inline void xt_info_rdlock_bh(void)
497 local_bh_disable(); 497 local_bh_disable();
498 lock = &__get_cpu_var(xt_info_locks); 498 lock = &__get_cpu_var(xt_info_locks);
499 if (likely(!lock->readers++)) 499 if (likely(!lock->readers++))
500 spin_lock(&lock->lock); 500 write_seqlock(&lock->lock);
501} 501}
502 502
503static inline void xt_info_rdunlock_bh(void) 503static inline void xt_info_rdunlock_bh(void)
@@ -505,7 +505,7 @@ static inline void xt_info_rdunlock_bh(void)
505 struct xt_info_lock *lock = &__get_cpu_var(xt_info_locks); 505 struct xt_info_lock *lock = &__get_cpu_var(xt_info_locks);
506 506
507 if (likely(!--lock->readers)) 507 if (likely(!--lock->readers))
508 spin_unlock(&lock->lock); 508 write_sequnlock(&lock->lock);
509 local_bh_enable(); 509 local_bh_enable();
510} 510}
511 511
@@ -516,12 +516,12 @@ static inline void xt_info_rdunlock_bh(void)
516 */ 516 */
517static inline void xt_info_wrlock(unsigned int cpu) 517static inline void xt_info_wrlock(unsigned int cpu)
518{ 518{
519 spin_lock(&per_cpu(xt_info_locks, cpu).lock); 519 write_seqlock(&per_cpu(xt_info_locks, cpu).lock);
520} 520}
521 521
522static inline void xt_info_wrunlock(unsigned int cpu) 522static inline void xt_info_wrunlock(unsigned int cpu)
523{ 523{
524 spin_unlock(&per_cpu(xt_info_locks, cpu).lock); 524 write_sequnlock(&per_cpu(xt_info_locks, cpu).lock);
525} 525}
526 526
527/* 527/*
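Moving xt_info_lock to a seqlock lets counter readers detect concurrent writers and retry, instead of excluding them, while write_seqlock() keeps mutual exclusion between CPUs updating the same per-cpu tables. A hedged sketch of a retry-style read (not a helper from this header):

#include <linux/netfilter/x_tables.h>

static u64 example_read_counter(int cpu, const u64 *counter)
{
	seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
	unsigned int seq;
	u64 val;

	do {
		seq = read_seqbegin(lock);
		val = *counter;		/* re-read if a writer intervened */
	} while (read_seqretry(lock, seq));

	return val;
}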
diff --git a/include/linux/nfs3.h b/include/linux/nfs3.h
index ac33806ec7f9..6ccfe3b641e1 100644
--- a/include/linux/nfs3.h
+++ b/include/linux/nfs3.h
@@ -11,6 +11,9 @@
11#define NFS3_MAXGROUPS 16 11#define NFS3_MAXGROUPS 16
12#define NFS3_FHSIZE 64 12#define NFS3_FHSIZE 64
13#define NFS3_COOKIESIZE 4 13#define NFS3_COOKIESIZE 4
14#define NFS3_CREATEVERFSIZE 8
15#define NFS3_COOKIEVERFSIZE 8
16#define NFS3_WRITEVERFSIZE 8
14#define NFS3_FIFO_DEV (-1) 17#define NFS3_FIFO_DEV (-1)
15#define NFS3MODE_FMT 0170000 18#define NFS3MODE_FMT 0170000
16#define NFS3MODE_DIR 0040000 19#define NFS3MODE_DIR 0040000
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 4925b22219d2..9b46300b4305 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -111,9 +111,13 @@
111 111
112#define EXCHGID4_FLAG_SUPP_MOVED_REFER 0x00000001 112#define EXCHGID4_FLAG_SUPP_MOVED_REFER 0x00000001
113#define EXCHGID4_FLAG_SUPP_MOVED_MIGR 0x00000002 113#define EXCHGID4_FLAG_SUPP_MOVED_MIGR 0x00000002
114#define EXCHGID4_FLAG_BIND_PRINC_STATEID 0x00000100
115
114#define EXCHGID4_FLAG_USE_NON_PNFS 0x00010000 116#define EXCHGID4_FLAG_USE_NON_PNFS 0x00010000
115#define EXCHGID4_FLAG_USE_PNFS_MDS 0x00020000 117#define EXCHGID4_FLAG_USE_PNFS_MDS 0x00020000
116#define EXCHGID4_FLAG_USE_PNFS_DS 0x00040000 118#define EXCHGID4_FLAG_USE_PNFS_DS 0x00040000
119#define EXCHGID4_FLAG_MASK_PNFS 0x00070000
120
117#define EXCHGID4_FLAG_UPD_CONFIRMED_REC_A 0x40000000 121#define EXCHGID4_FLAG_UPD_CONFIRMED_REC_A 0x40000000
118#define EXCHGID4_FLAG_CONFIRMED_R 0x80000000 122#define EXCHGID4_FLAG_CONFIRMED_R 0x80000000
119/* 123/*
@@ -121,8 +125,8 @@
121 * they're set in the argument or response, have separate 125 * they're set in the argument or response, have separate
122 * invalid flag masks for arg (_A) and resp (_R). 126 * invalid flag masks for arg (_A) and resp (_R).
123 */ 127 */
124#define EXCHGID4_FLAG_MASK_A 0x40070003 128#define EXCHGID4_FLAG_MASK_A 0x40070103
125#define EXCHGID4_FLAG_MASK_R 0x80070003 129#define EXCHGID4_FLAG_MASK_R 0x80070103
126 130
127#define SEQ4_STATUS_CB_PATH_DOWN 0x00000001 131#define SEQ4_STATUS_CB_PATH_DOWN 0x00000001
128#define SEQ4_STATUS_CB_GSS_CONTEXTS_EXPIRING 0x00000002 132#define SEQ4_STATUS_CB_GSS_CONTEXTS_EXPIRING 0x00000002
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 452d96436d26..b197563913bf 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -47,11 +47,6 @@ struct nfs_client {
47 u64 cl_clientid; /* constant */ 47 u64 cl_clientid; /* constant */
48 unsigned long cl_state; 48 unsigned long cl_state;
49 49
50 struct rb_root cl_openowner_id;
51 struct rb_root cl_lockowner_id;
52
53 struct list_head cl_delegations;
54 struct rb_root cl_state_owners;
55 spinlock_t cl_lock; 50 spinlock_t cl_lock;
56 51
57 unsigned long cl_lease_time; 52 unsigned long cl_lease_time;
@@ -71,6 +66,7 @@ struct nfs_client {
71 */ 66 */
72 char cl_ipaddr[48]; 67 char cl_ipaddr[48];
73 unsigned char cl_id_uniquifier; 68 unsigned char cl_id_uniquifier;
69 u32 cl_cb_ident; /* v4.0 callback identifier */
74 const struct nfs4_minor_version_ops *cl_mvops; 70 const struct nfs4_minor_version_ops *cl_mvops;
75#endif /* CONFIG_NFS_V4 */ 71#endif /* CONFIG_NFS_V4 */
76 72
@@ -148,7 +144,14 @@ struct nfs_server {
148 that are supported on this 144 that are supported on this
149 filesystem */ 145 filesystem */
150 struct pnfs_layoutdriver_type *pnfs_curr_ld; /* Active layout driver */ 146 struct pnfs_layoutdriver_type *pnfs_curr_ld; /* Active layout driver */
147 struct rpc_wait_queue roc_rpcwaitq;
148
149 /* the following fields are protected by nfs_client->cl_lock */
150 struct rb_root state_owners;
151 struct rb_root openowner_id;
152 struct rb_root lockowner_id;
151#endif 153#endif
154 struct list_head delegations;
152 void (*destroy)(struct nfs_server *); 155 void (*destroy)(struct nfs_server *);
153 156
154 atomic_t active; /* Keep track of any activity to this server */ 157 atomic_t active; /* Keep track of any activity to this server */
@@ -196,6 +199,7 @@ struct nfs4_slot_table {
196 * op for dynamic resizing */ 199 * op for dynamic resizing */
197 int target_max_slots; /* Set by CB_RECALL_SLOT as 200 int target_max_slots; /* Set by CB_RECALL_SLOT as
198 * the new max_slots */ 201 * the new max_slots */
202 struct completion complete;
199}; 203};
200 204
201static inline int slot_idx(struct nfs4_slot_table *tbl, struct nfs4_slot *sp) 205static inline int slot_idx(struct nfs4_slot_table *tbl, struct nfs4_slot *sp)
@@ -212,7 +216,6 @@ struct nfs4_session {
212 unsigned long session_state; 216 unsigned long session_state;
213 u32 hash_alg; 217 u32 hash_alg;
214 u32 ssv_len; 218 u32 ssv_len;
215 struct completion complete;
216 219
217 /* The fore and back channel */ 220 /* The fore and back channel */
218 struct nfs4_channel_attrs fc_attrs; 221 struct nfs4_channel_attrs fc_attrs;
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 80f07198a31a..b0068579bec2 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -208,6 +208,7 @@ struct nfs4_layoutget_args {
208 struct inode *inode; 208 struct inode *inode;
209 struct nfs_open_context *ctx; 209 struct nfs_open_context *ctx;
210 struct nfs4_sequence_args seq_args; 210 struct nfs4_sequence_args seq_args;
211 nfs4_stateid stateid;
211}; 212};
212 213
213struct nfs4_layoutget_res { 214struct nfs4_layoutget_res {
@@ -223,7 +224,6 @@ struct nfs4_layoutget {
223 struct nfs4_layoutget_args args; 224 struct nfs4_layoutget_args args;
224 struct nfs4_layoutget_res res; 225 struct nfs4_layoutget_res res;
225 struct pnfs_layout_segment **lsegpp; 226 struct pnfs_layout_segment **lsegpp;
226 int status;
227}; 227};
228 228
229struct nfs4_getdeviceinfo_args { 229struct nfs4_getdeviceinfo_args {
@@ -317,6 +317,7 @@ struct nfs_closeres {
317struct nfs_lowner { 317struct nfs_lowner {
318 __u64 clientid; 318 __u64 clientid;
319 __u64 id; 319 __u64 id;
320 dev_t s_dev;
320}; 321};
321 322
322struct nfs_lock_args { 323struct nfs_lock_args {
@@ -484,6 +485,7 @@ struct nfs_entry {
484 struct nfs_fh * fh; 485 struct nfs_fh * fh;
485 struct nfs_fattr * fattr; 486 struct nfs_fattr * fattr;
486 unsigned char d_type; 487 unsigned char d_type;
488 struct nfs_server * server;
487}; 489};
488 490
489/* 491/*
@@ -1089,7 +1091,7 @@ struct nfs_rpc_ops {
1089 int (*pathconf) (struct nfs_server *, struct nfs_fh *, 1091 int (*pathconf) (struct nfs_server *, struct nfs_fh *,
1090 struct nfs_pathconf *); 1092 struct nfs_pathconf *);
1091 int (*set_capabilities)(struct nfs_server *, struct nfs_fh *); 1093 int (*set_capabilities)(struct nfs_server *, struct nfs_fh *);
1092 __be32 *(*decode_dirent)(struct xdr_stream *, struct nfs_entry *, struct nfs_server *, int plus); 1094 int (*decode_dirent)(struct xdr_stream *, struct nfs_entry *, int);
1093 void (*read_setup) (struct nfs_read_data *, struct rpc_message *); 1095 void (*read_setup) (struct nfs_read_data *, struct rpc_message *);
1094 int (*read_done) (struct rpc_task *, struct nfs_read_data *); 1096 int (*read_done) (struct rpc_task *, struct nfs_read_data *);
1095 void (*write_setup) (struct nfs_write_data *, struct rpc_message *); 1097 void (*write_setup) (struct nfs_write_data *, struct rpc_message *);
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index cb845c16ad7d..ab47732d81e0 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -518,6 +518,7 @@
518#define PCI_DEVICE_ID_AMD_11H_NB_MISC 0x1303 518#define PCI_DEVICE_ID_AMD_11H_NB_MISC 0x1303
519#define PCI_DEVICE_ID_AMD_11H_NB_LINK 0x1304 519#define PCI_DEVICE_ID_AMD_11H_NB_LINK 0x1304
520#define PCI_DEVICE_ID_AMD_15H_NB_MISC 0x1603 520#define PCI_DEVICE_ID_AMD_15H_NB_MISC 0x1603
521#define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703
521#define PCI_DEVICE_ID_AMD_LANCE 0x2000 522#define PCI_DEVICE_ID_AMD_LANCE 0x2000
522#define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001 523#define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001
523#define PCI_DEVICE_ID_AMD_SCSI 0x2020 524#define PCI_DEVICE_ID_AMD_SCSI 0x2020
@@ -1650,6 +1651,11 @@
1650#define PCI_DEVICE_ID_O2_6836 0x6836 1651#define PCI_DEVICE_ID_O2_6836 0x6836
1651#define PCI_DEVICE_ID_O2_6812 0x6872 1652#define PCI_DEVICE_ID_O2_6812 0x6872
1652#define PCI_DEVICE_ID_O2_6933 0x6933 1653#define PCI_DEVICE_ID_O2_6933 0x6933
1654#define PCI_DEVICE_ID_O2_8120 0x8120
1655#define PCI_DEVICE_ID_O2_8220 0x8220
1656#define PCI_DEVICE_ID_O2_8221 0x8221
1657#define PCI_DEVICE_ID_O2_8320 0x8320
1658#define PCI_DEVICE_ID_O2_8321 0x8321
1653 1659
1654#define PCI_VENDOR_ID_3DFX 0x121a 1660#define PCI_VENDOR_ID_3DFX 0x121a
1655#define PCI_DEVICE_ID_3DFX_VOODOO 0x0001 1661#define PCI_DEVICE_ID_3DFX_VOODOO 0x0001
@@ -2363,6 +2369,8 @@
2363#define PCI_DEVICE_ID_JMICRON_JMB38X_SD 0x2381 2369#define PCI_DEVICE_ID_JMICRON_JMB38X_SD 0x2381
2364#define PCI_DEVICE_ID_JMICRON_JMB38X_MMC 0x2382 2370#define PCI_DEVICE_ID_JMICRON_JMB38X_MMC 0x2382
2365#define PCI_DEVICE_ID_JMICRON_JMB38X_MS 0x2383 2371#define PCI_DEVICE_ID_JMICRON_JMB38X_MS 0x2383
2372#define PCI_DEVICE_ID_JMICRON_JMB388_SD 0x2391
2373#define PCI_DEVICE_ID_JMICRON_JMB388_ESD 0x2392
2366 2374
2367#define PCI_VENDOR_ID_KORENIX 0x1982 2375#define PCI_VENDOR_ID_KORENIX 0x1982
2368#define PCI_DEVICE_ID_KORENIX_JETCARDF0 0x1600 2376#define PCI_DEVICE_ID_KORENIX_JETCARDF0 0x1600
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index d1a9193960f1..223b14cd129c 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -31,8 +31,9 @@ static inline bool is_quota_modification(struct inode *inode, struct iattr *ia)
31#define quota_error(sb, fmt, args...) \ 31#define quota_error(sb, fmt, args...) \
32 __quota_error((sb), __func__, fmt , ## args) 32 __quota_error((sb), __func__, fmt , ## args)
33 33
34extern void __quota_error(struct super_block *sb, const char *func, 34extern __attribute__((format (printf, 3, 4)))
35 const char *fmt, ...); 35void __quota_error(struct super_block *sb, const char *func,
36 const char *fmt, ...);
36 37
37/* 38/*
38 * declaration of quota_function calls in kernel. 39 * declaration of quota_function calls in kernel.
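The added format attribute lets the compiler type-check quota_error() arguments the way it checks printk()'s. A hedged sketch:

#include <linux/quotaops.h>

static void example_report(struct super_block *sb, u64 blk)
{
	/* the format string is now verified at compile time; passing blk
	 * as a plain int against %llu would draw a -Wformat warning */
	quota_error(sb, "quota file corrupted at block %llu",
		    (unsigned long long)blk);
}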
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 14dbc83ded20..3c995b4d742c 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -107,12 +107,17 @@ extern int rtc_year_days(unsigned int day, unsigned int month, unsigned int year
107extern int rtc_valid_tm(struct rtc_time *tm); 107extern int rtc_valid_tm(struct rtc_time *tm);
108extern int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time); 108extern int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time);
109extern void rtc_time_to_tm(unsigned long time, struct rtc_time *tm); 109extern void rtc_time_to_tm(unsigned long time, struct rtc_time *tm);
110ktime_t rtc_tm_to_ktime(struct rtc_time tm);
111struct rtc_time rtc_ktime_to_tm(ktime_t kt);
112
110 113
111#include <linux/device.h> 114#include <linux/device.h>
112#include <linux/seq_file.h> 115#include <linux/seq_file.h>
113#include <linux/cdev.h> 116#include <linux/cdev.h>
114#include <linux/poll.h> 117#include <linux/poll.h>
115#include <linux/mutex.h> 118#include <linux/mutex.h>
119#include <linux/timerqueue.h>
120#include <linux/workqueue.h>
116 121
117extern struct class *rtc_class; 122extern struct class *rtc_class;
118 123
@@ -151,7 +156,19 @@ struct rtc_class_ops {
151}; 156};
152 157
153#define RTC_DEVICE_NAME_SIZE 20 158#define RTC_DEVICE_NAME_SIZE 20
154struct rtc_task; 159typedef struct rtc_task {
160 void (*func)(void *private_data);
161 void *private_data;
162} rtc_task_t;
163
164
165struct rtc_timer {
166 struct rtc_task task;
167 struct timerqueue_node node;
168 ktime_t period;
169 int enabled;
170};
171
155 172
156/* flags */ 173/* flags */
157#define RTC_DEV_BUSY 0 174#define RTC_DEV_BUSY 0
@@ -179,16 +196,13 @@ struct rtc_device
179 spinlock_t irq_task_lock; 196 spinlock_t irq_task_lock;
180 int irq_freq; 197 int irq_freq;
181 int max_user_freq; 198 int max_user_freq;
182#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL 199
183 struct work_struct uie_task; 200 struct timerqueue_head timerqueue;
184 struct timer_list uie_timer; 201 struct rtc_timer aie_timer;
185 /* Those fields are protected by rtc->irq_lock */ 202 struct rtc_timer uie_rtctimer;
186 unsigned int oldsecs; 203 struct hrtimer pie_timer; /* sub second exp, so needs hrtimer */
187 unsigned int uie_irq_active:1; 204 int pie_enabled;
188 unsigned int stop_uie_polling:1; 205 struct work_struct irqwork;
189 unsigned int uie_task_active:1;
190 unsigned int uie_timer_active:1;
191#endif
192}; 206};
193#define to_rtc_device(d) container_of(d, struct rtc_device, dev) 207#define to_rtc_device(d) container_of(d, struct rtc_device, dev)
194 208
@@ -224,15 +238,22 @@ extern int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled);
224extern int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc, 238extern int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc,
225 unsigned int enabled); 239 unsigned int enabled);
226 240
227typedef struct rtc_task { 241void rtc_aie_update_irq(void *private);
228 void (*func)(void *private_data); 242void rtc_uie_update_irq(void *private);
229 void *private_data; 243enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer);
230} rtc_task_t;
231 244
232int rtc_register(rtc_task_t *task); 245int rtc_register(rtc_task_t *task);
233int rtc_unregister(rtc_task_t *task); 246int rtc_unregister(rtc_task_t *task);
234int rtc_control(rtc_task_t *t, unsigned int cmd, unsigned long arg); 247int rtc_control(rtc_task_t *t, unsigned int cmd, unsigned long arg);
235 248
249void rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer);
250void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer);
251void rtc_timer_init(struct rtc_timer *timer, void (*f)(void* p), void* data);
252int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer* timer,
253 ktime_t expires, ktime_t period);
254int rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer* timer);
255void rtc_timer_do_work(struct work_struct *work);
256
236static inline bool is_leap_year(unsigned int year) 257static inline bool is_leap_year(unsigned int year)
237{ 258{
238 return (!(year % 4) && (year % 100)) || !(year % 400); 259 return (!(year % 4) && (year % 100)) || !(year % 400);
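The timerqueue-backed rtc_timer API above replaces the old UIE-emulation fields. A usage sketch for a periodic one-second timer, assuming a driver already holds a struct rtc_device *rtc; the callback name and the period are illustrative:

	static void my_rtc_cb(void *data)	/* illustrative callback */
	{
		pr_info("rtc timer fired\n");
	}

	static int my_start_periodic(struct rtc_device *rtc)
	{
		static struct rtc_timer t;
		struct rtc_time tm;
		ktime_t now;
		int err;

		err = rtc_read_time(rtc, &tm);	/* current time from the RTC */
		if (err)
			return err;
		now = rtc_tm_to_ktime(tm);

		rtc_timer_init(&t, my_rtc_cb, NULL);
		/* first expiry one second out, then every second */
		return rtc_timer_start(rtc, &t, ktime_add(now, ktime_set(1, 0)),
				       ktime_set(1, 0));
	}
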
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index b2024757edd5..8521067ed4f7 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -110,9 +110,9 @@ struct rpc_credops {
110 __be32 * (*crmarshal)(struct rpc_task *, __be32 *); 110 __be32 * (*crmarshal)(struct rpc_task *, __be32 *);
111 int (*crrefresh)(struct rpc_task *); 111 int (*crrefresh)(struct rpc_task *);
112 __be32 * (*crvalidate)(struct rpc_task *, __be32 *); 112 __be32 * (*crvalidate)(struct rpc_task *, __be32 *);
113 int (*crwrap_req)(struct rpc_task *, kxdrproc_t, 113 int (*crwrap_req)(struct rpc_task *, kxdreproc_t,
114 void *, __be32 *, void *); 114 void *, __be32 *, void *);
115 int (*crunwrap_resp)(struct rpc_task *, kxdrproc_t, 115 int (*crunwrap_resp)(struct rpc_task *, kxdrdproc_t,
116 void *, __be32 *, void *); 116 void *, __be32 *, void *);
117}; 117};
118 118
@@ -139,8 +139,8 @@ struct rpc_cred * rpcauth_generic_bind_cred(struct rpc_task *, struct rpc_cred *
139void put_rpccred(struct rpc_cred *); 139void put_rpccred(struct rpc_cred *);
140__be32 * rpcauth_marshcred(struct rpc_task *, __be32 *); 140__be32 * rpcauth_marshcred(struct rpc_task *, __be32 *);
141__be32 * rpcauth_checkverf(struct rpc_task *, __be32 *); 141__be32 * rpcauth_checkverf(struct rpc_task *, __be32 *);
142int rpcauth_wrap_req(struct rpc_task *task, kxdrproc_t encode, void *rqstp, __be32 *data, void *obj); 142int rpcauth_wrap_req(struct rpc_task *task, kxdreproc_t encode, void *rqstp, __be32 *data, void *obj);
143int rpcauth_unwrap_resp(struct rpc_task *task, kxdrproc_t decode, void *rqstp, __be32 *data, void *obj); 143int rpcauth_unwrap_resp(struct rpc_task *task, kxdrdproc_t decode, void *rqstp, __be32 *data, void *obj);
144int rpcauth_refreshcred(struct rpc_task *); 144int rpcauth_refreshcred(struct rpc_task *);
145void rpcauth_invalcred(struct rpc_task *); 145void rpcauth_invalcred(struct rpc_task *);
146int rpcauth_uptodatecred(struct rpc_task *); 146int rpcauth_uptodatecred(struct rpc_task *);
diff --git a/include/linux/sunrpc/bc_xprt.h b/include/linux/sunrpc/bc_xprt.h
index 7c91260c44a9..c50b458b8a3f 100644
--- a/include/linux/sunrpc/bc_xprt.h
+++ b/include/linux/sunrpc/bc_xprt.h
@@ -43,10 +43,18 @@ int bc_send(struct rpc_rqst *req);
43 */ 43 */
44static inline int svc_is_backchannel(const struct svc_rqst *rqstp) 44static inline int svc_is_backchannel(const struct svc_rqst *rqstp)
45{ 45{
46 if (rqstp->rq_server->bc_xprt) 46 if (rqstp->rq_server->sv_bc_xprt)
47 return 1; 47 return 1;
48 return 0; 48 return 0;
49} 49}
50static inline struct nfs4_sessionid *bc_xprt_sid(struct svc_rqst *rqstp)
51{
52 if (svc_is_backchannel(rqstp))
53 return (struct nfs4_sessionid *)
54 rqstp->rq_server->sv_bc_xprt->xpt_bc_sid;
55 return NULL;
56}
57
50#else /* CONFIG_NFS_V4_1 */ 58#else /* CONFIG_NFS_V4_1 */
51static inline int xprt_setup_backchannel(struct rpc_xprt *xprt, 59static inline int xprt_setup_backchannel(struct rpc_xprt *xprt,
52 unsigned int min_reqs) 60 unsigned int min_reqs)
@@ -59,6 +67,11 @@ static inline int svc_is_backchannel(const struct svc_rqst *rqstp)
59 return 0; 67 return 0;
60} 68}
61 69
70static inline struct nfs4_sessionid *bc_xprt_sid(struct svc_rqst *rqstp)
71{
72 return NULL;
73}
74
62static inline void xprt_free_bc_request(struct rpc_rqst *req) 75static inline void xprt_free_bc_request(struct rpc_rqst *req)
63{ 76{
64} 77}
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index a5a55f284b7d..ef9476a36ff7 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -89,8 +89,8 @@ struct rpc_version {
89 */ 89 */
90struct rpc_procinfo { 90struct rpc_procinfo {
91 u32 p_proc; /* RPC procedure number */ 91 u32 p_proc; /* RPC procedure number */
92 kxdrproc_t p_encode; /* XDR encode function */ 92 kxdreproc_t p_encode; /* XDR encode function */
93 kxdrproc_t p_decode; /* XDR decode function */ 93 kxdrdproc_t p_decode; /* XDR decode function */
94 unsigned int p_arglen; /* argument hdr length (u32) */ 94 unsigned int p_arglen; /* argument hdr length (u32) */
95 unsigned int p_replen; /* reply hdr length (u32) */ 95 unsigned int p_replen; /* reply hdr length (u32) */
96 unsigned int p_count; /* call count */ 96 unsigned int p_count; /* call count */
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 5a3085b9b394..c81d4d8be3a9 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -99,7 +99,7 @@ struct svc_serv {
99 spinlock_t sv_cb_lock; /* protects the svc_cb_list */ 99 spinlock_t sv_cb_lock; /* protects the svc_cb_list */
100 wait_queue_head_t sv_cb_waitq; /* sleep here if there are no 100 wait_queue_head_t sv_cb_waitq; /* sleep here if there are no
101 * entries in the svc_cb_list */ 101 * entries in the svc_cb_list */
102 struct svc_xprt *bc_xprt; 102 struct svc_xprt *sv_bc_xprt; /* callback on fore channel */
103#endif /* CONFIG_NFS_V4_1 */ 103#endif /* CONFIG_NFS_V4_1 */
104}; 104};
105 105
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index aea0d438e3c7..357da5e0daa3 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -78,6 +78,7 @@ struct svc_xprt {
78 size_t xpt_remotelen; /* length of address */ 78 size_t xpt_remotelen; /* length of address */
79 struct rpc_wait_queue xpt_bc_pending; /* backchannel wait queue */ 79 struct rpc_wait_queue xpt_bc_pending; /* backchannel wait queue */
80 struct list_head xpt_users; /* callbacks on free */ 80 struct list_head xpt_users; /* callbacks on free */
81 void *xpt_bc_sid; /* back channel session ID */
81 82
82 struct net *xpt_net; 83 struct net *xpt_net;
83}; 84};
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index 498ab93a81e4..fc84b7a19ca3 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -33,8 +33,8 @@ struct xdr_netobj {
33}; 33};
34 34
35/* 35/*
36 * This is the generic XDR function. rqstp is either a rpc_rqst (client 36 * This is the legacy generic XDR function. rqstp is either a rpc_rqst
37 * side) or svc_rqst pointer (server side). 37 * (client side) or svc_rqst pointer (server side).
38 * Encode functions always assume there's enough room in the buffer. 38 * Encode functions always assume there's enough room in the buffer.
39 */ 39 */
40typedef int (*kxdrproc_t)(void *rqstp, __be32 *data, void *obj); 40typedef int (*kxdrproc_t)(void *rqstp, __be32 *data, void *obj);
@@ -201,14 +201,22 @@ struct xdr_stream {
201 201
202 __be32 *end; /* end of available buffer space */ 202 __be32 *end; /* end of available buffer space */
203 struct kvec *iov; /* pointer to the current kvec */ 203 struct kvec *iov; /* pointer to the current kvec */
204 struct kvec scratch; /* Scratch buffer */
205 struct page **page_ptr; /* pointer to the current page */
204}; 206};
205 207
208/*
209 * These are the xdr_stream style generic XDR encode and decode functions.
210 */
211typedef void (*kxdreproc_t)(void *rqstp, struct xdr_stream *xdr, void *obj);
212typedef int (*kxdrdproc_t)(void *rqstp, struct xdr_stream *xdr, void *obj);
213
206extern void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p); 214extern void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p);
207extern __be32 *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes); 215extern __be32 *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes);
208extern void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, 216extern void xdr_write_pages(struct xdr_stream *xdr, struct page **pages,
209 unsigned int base, unsigned int len); 217 unsigned int base, unsigned int len);
210extern void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p); 218extern void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p);
211extern __be32 *xdr_inline_peek(struct xdr_stream *xdr, size_t nbytes); 219extern void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen);
212extern __be32 *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes); 220extern __be32 *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes);
213extern void xdr_read_pages(struct xdr_stream *xdr, unsigned int len); 221extern void xdr_read_pages(struct xdr_stream *xdr, unsigned int len);
214extern void xdr_enter_page(struct xdr_stream *xdr, unsigned int len); 222extern void xdr_enter_page(struct xdr_stream *xdr, unsigned int len);
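For contrast with the legacy kxdrproc_t, an encoder in the new xdr_stream style returns void and reserves buffer space as it writes. A minimal sketch; struct foo_args and its field are illustrative:

	struct foo_args {
		u32 id;				/* illustrative payload */
	};

	static void foo_xdr_enc(void *rqstp, struct xdr_stream *xdr, void *obj)
	{
		const struct foo_args *args = obj;
		__be32 *p;

		p = xdr_reserve_space(xdr, 4);	/* one 32-bit XDR word */
		if (p)
			*p = cpu_to_be32(args->id);
	}
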
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index d3e4f87e95c0..c6814616653b 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -32,7 +32,7 @@ struct tracepoint {
32 int state; /* State. */ 32 int state; /* State. */
33 void (*regfunc)(void); 33 void (*regfunc)(void);
34 void (*unregfunc)(void); 34 void (*unregfunc)(void);
35 struct tracepoint_func *funcs; 35 struct tracepoint_func __rcu *funcs;
36} __attribute__((aligned(32))); /* 36} __attribute__((aligned(32))); /*
37 * Aligned on 32 bytes because it is 37 * Aligned on 32 bytes because it is
38 * globally visible and gcc happily 38 * globally visible and gcc happily
@@ -326,7 +326,7 @@ do_trace: \
326 * memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN); 326 * memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
327 * __entry->next_pid = next->pid; 327 * __entry->next_pid = next->pid;
328 * __entry->next_prio = next->prio; 328 * __entry->next_prio = next->prio;
329 * ) 329 * ),
330 * 330 *
331 * * 331 * *
332 * * Formatted output of a trace record via TP_printk(). 332 * * Formatted output of a trace record via TP_printk().
diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
index ae9ab13b963d..4b9a7f596f92 100644
--- a/include/linux/vga_switcheroo.h
+++ b/include/linux/vga_switcheroo.h
@@ -33,6 +33,7 @@ struct vga_switcheroo_handler {
33void vga_switcheroo_unregister_client(struct pci_dev *dev); 33void vga_switcheroo_unregister_client(struct pci_dev *dev);
34int vga_switcheroo_register_client(struct pci_dev *dev, 34int vga_switcheroo_register_client(struct pci_dev *dev,
35 void (*set_gpu_state)(struct pci_dev *dev, enum vga_switcheroo_state), 35 void (*set_gpu_state)(struct pci_dev *dev, enum vga_switcheroo_state),
36 void (*reprobe)(struct pci_dev *dev),
36 bool (*can_switch)(struct pci_dev *dev)); 37 bool (*can_switch)(struct pci_dev *dev));
37 38
38void vga_switcheroo_client_fb_set(struct pci_dev *dev, 39void vga_switcheroo_client_fb_set(struct pci_dev *dev,
@@ -48,6 +49,7 @@ int vga_switcheroo_process_delayed_switch(void);
48static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {} 49static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
49static inline int vga_switcheroo_register_client(struct pci_dev *dev, 50static inline int vga_switcheroo_register_client(struct pci_dev *dev,
50 void (*set_gpu_state)(struct pci_dev *dev, enum vga_switcheroo_state), 51 void (*set_gpu_state)(struct pci_dev *dev, enum vga_switcheroo_state),
52 void (*reprobe)(struct pci_dev *dev),
51 bool (*can_switch)(struct pci_dev *dev)) { return 0; } 53 bool (*can_switch)(struct pci_dev *dev)) { return 0; }
52static inline void vga_switcheroo_client_fb_set(struct pci_dev *dev, struct fb_info *info) {} 54static inline void vga_switcheroo_client_fb_set(struct pci_dev *dev, struct fb_info *info) {}
53static inline int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler) { return 0; } 55static inline int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler) { return 0; }
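With the added reprobe hook, a client registration now passes three callbacks. A sketch of a hypothetical driver; only the vga_switcheroo_register_client() signature comes from this header, the bodies are placeholders:

	static void my_set_gpu_state(struct pci_dev *pdev,
				     enum vga_switcheroo_state state)
	{
		/* power the GPU up or down; driver-specific */
	}

	static void my_reprobe(struct pci_dev *pdev)
	{
		/* re-read display configuration after a switch */
	}

	static bool my_can_switch(struct pci_dev *pdev)
	{
		return true;	/* e.g. refuse while the device is busy */
	}

	/* in probe():
	 * err = vga_switcheroo_register_client(pdev, my_set_gpu_state,
	 *					my_reprobe, my_can_switch);
	 */
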
diff --git a/include/net/ah.h b/include/net/ah.h
index f0129f79a31a..be7798dea6f4 100644
--- a/include/net/ah.h
+++ b/include/net/ah.h
@@ -4,7 +4,7 @@
4#include <linux/skbuff.h> 4#include <linux/skbuff.h>
5 5
6/* This is the maximum truncated ICV length that we know of. */ 6/* This is the maximum truncated ICV length that we know of. */
7#define MAX_AH_AUTH_LEN 12 7#define MAX_AH_AUTH_LEN 16
8 8
9struct crypto_ahash; 9struct crypto_ahash;
10 10
diff --git a/include/net/arp.h b/include/net/arp.h
index f4cf6ce66586..91f0568a04ef 100644
--- a/include/net/arp.h
+++ b/include/net/arp.h
@@ -25,5 +25,6 @@ extern struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
25 const unsigned char *src_hw, 25 const unsigned char *src_hw,
26 const unsigned char *target_hw); 26 const unsigned char *target_hw);
27extern void arp_xmit(struct sk_buff *skb); 27extern void arp_xmit(struct sk_buff *skb);
28int arp_invalidate(struct net_device *dev, __be32 ip);
28 29
29#endif /* _ARP_H */ 30#endif /* _ARP_H */
diff --git a/include/net/phonet/phonet.h b/include/net/phonet/phonet.h
index d5df797f9540..5395e09187df 100644
--- a/include/net/phonet/phonet.h
+++ b/include/net/phonet/phonet.h
@@ -107,8 +107,8 @@ struct phonet_protocol {
107 int sock_type; 107 int sock_type;
108}; 108};
109 109
110int phonet_proto_register(int protocol, struct phonet_protocol *pp); 110int phonet_proto_register(unsigned int protocol, struct phonet_protocol *pp);
111void phonet_proto_unregister(int protocol, struct phonet_protocol *pp); 111void phonet_proto_unregister(unsigned int protocol, struct phonet_protocol *pp);
112 112
113int phonet_sysctl_init(void); 113int phonet_sysctl_init(void);
114void phonet_sysctl_exit(void); 114void phonet_sysctl_exit(void);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 0af57ebae762..e9eee99d8b1f 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -207,7 +207,7 @@ static inline int qdisc_qlen(struct Qdisc *q)
207 return q->q.qlen; 207 return q->q.qlen;
208} 208}
209 209
210static inline struct qdisc_skb_cb *qdisc_skb_cb(struct sk_buff *skb) 210static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
211{ 211{
212 return (struct qdisc_skb_cb *)skb->cb; 212 return (struct qdisc_skb_cb *)skb->cb;
213} 213}
@@ -394,7 +394,7 @@ static inline bool qdisc_tx_is_noop(const struct net_device *dev)
394 return true; 394 return true;
395} 395}
396 396
397static inline unsigned int qdisc_pkt_len(struct sk_buff *skb) 397static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
398{ 398{
399 return qdisc_skb_cb(skb)->pkt_len; 399 return qdisc_skb_cb(skb)->pkt_len;
400} 400}
@@ -426,10 +426,18 @@ static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
426 return qdisc_enqueue(skb, sch) & NET_XMIT_MASK; 426 return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
427} 427}
428 428
429static inline void __qdisc_update_bstats(struct Qdisc *sch, unsigned int len) 429
430static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
431 const struct sk_buff *skb)
432{
433 bstats->bytes += qdisc_pkt_len(skb);
434 bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
435}
436
437static inline void qdisc_bstats_update(struct Qdisc *sch,
438 const struct sk_buff *skb)
430{ 439{
431 sch->bstats.bytes += len; 440 bstats_update(&sch->bstats, skb);
432 sch->bstats.packets++;
433} 441}
434 442
435static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch, 443static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
@@ -437,7 +445,7 @@ static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
437{ 445{
438 __skb_queue_tail(list, skb); 446 __skb_queue_tail(list, skb);
439 sch->qstats.backlog += qdisc_pkt_len(skb); 447 sch->qstats.backlog += qdisc_pkt_len(skb);
440 __qdisc_update_bstats(sch, qdisc_pkt_len(skb)); 448 qdisc_bstats_update(sch, skb);
441 449
442 return NET_XMIT_SUCCESS; 450 return NET_XMIT_SUCCESS;
443} 451}
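Worked example, illustrative: a TSO skb whose qdisc_pkt_len() is 3000 bytes and whose skb_shinfo(skb)->gso_segs is 2 now accounts bytes += 3000 and packets += 2, so the gnet_stats_basic_packed counters reflect the segments the wire will actually carry instead of counting one oversized packet - which matters for rate estimators built on top of them.
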
diff --git a/include/net/sock.h b/include/net/sock.h
index 21a02f7e4f45..d884d268c704 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -152,14 +152,18 @@ struct sock_common {
152 * fields between dontcopy_begin/dontcopy_end 152 * fields between dontcopy_begin/dontcopy_end
153 * are not copied in sock_copy() 153 * are not copied in sock_copy()
154 */ 154 */
155 /* private: */
155 int skc_dontcopy_begin[0]; 156 int skc_dontcopy_begin[0];
157 /* public: */
156 union { 158 union {
157 struct hlist_node skc_node; 159 struct hlist_node skc_node;
158 struct hlist_nulls_node skc_nulls_node; 160 struct hlist_nulls_node skc_nulls_node;
159 }; 161 };
160 int skc_tx_queue_mapping; 162 int skc_tx_queue_mapping;
161 atomic_t skc_refcnt; 163 atomic_t skc_refcnt;
164 /* private: */
162 int skc_dontcopy_end[0]; 165 int skc_dontcopy_end[0];
166 /* public: */
163}; 167};
164 168
165/** 169/**
diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h
index b0b4eb24d592..da39b22636f7 100644
--- a/include/trace/define_trace.h
+++ b/include/trace/define_trace.h
@@ -21,6 +21,16 @@
21#undef CREATE_TRACE_POINTS 21#undef CREATE_TRACE_POINTS
22 22
23#include <linux/stringify.h> 23#include <linux/stringify.h>
24/*
25 * module.h includes tracepoints, and because ftrace.h
26 * pulls in module.h:
27 * trace/ftrace.h -> linux/ftrace_event.h -> linux/perf_event.h ->
28 * linux/ftrace.h -> linux/module.h
29 * we must include module.h here before we play with any of
30 * the TRACE_EVENT() macros, otherwise the tracepoints included
31 * by module.h may break the build.
32 */
33#include <linux/module.h>
24 34
25#undef TRACE_EVENT 35#undef TRACE_EVENT
26#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \ 36#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h
index 75ce9d500d8e..f10293c41b1e 100644
--- a/include/trace/events/skb.h
+++ b/include/trace/events/skb.h
@@ -25,9 +25,7 @@ TRACE_EVENT(kfree_skb,
25 25
26 TP_fast_assign( 26 TP_fast_assign(
27 __entry->skbaddr = skb; 27 __entry->skbaddr = skb;
28 if (skb) { 28 __entry->protocol = ntohs(skb->protocol);
29 __entry->protocol = ntohs(skb->protocol);
30 }
31 __entry->location = location; 29 __entry->location = location;
32 ), 30 ),
33 31
diff --git a/kernel/Makefile b/kernel/Makefile
index 33e0a39cf359..5669f71dfdd5 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -100,6 +100,7 @@ obj-$(CONFIG_FUNCTION_TRACER) += trace/
100obj-$(CONFIG_TRACING) += trace/ 100obj-$(CONFIG_TRACING) += trace/
101obj-$(CONFIG_X86_DS) += trace/ 101obj-$(CONFIG_X86_DS) += trace/
102obj-$(CONFIG_RING_BUFFER) += trace/ 102obj-$(CONFIG_RING_BUFFER) += trace/
103obj-$(CONFIG_TRACEPOINTS) += trace/
103obj-$(CONFIG_SMP) += sched_cpupri.o 104obj-$(CONFIG_SMP) += sched_cpupri.o
104obj-$(CONFIG_IRQ_WORK) += irq_work.o 105obj-$(CONFIG_IRQ_WORK) += irq_work.o
105obj-$(CONFIG_PERF_EVENTS) += perf_event.o 106obj-$(CONFIG_PERF_EVENTS) += perf_event.o
diff --git a/kernel/exit.c b/kernel/exit.c
index 89c74861a3da..f9a45ebcc7b1 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -994,6 +994,15 @@ NORET_TYPE void do_exit(long code)
994 exit_fs(tsk); 994 exit_fs(tsk);
995 check_stack_usage(); 995 check_stack_usage();
996 exit_thread(); 996 exit_thread();
997
998 /*
999 * Flush inherited counters to the parent - before the parent
1000 * gets woken up by child-exit notifications.
1001 *
 1002 * Because of cgroup mode, this must be called before cgroup_exit().

1003 */
1004 perf_event_exit_task(tsk);
1005
997 cgroup_exit(tsk, 1); 1006 cgroup_exit(tsk, 1);
998 1007
999 if (group_dead) 1008 if (group_dead)
@@ -1007,11 +1016,6 @@ NORET_TYPE void do_exit(long code)
1007 * FIXME: do that only when needed, using sched_exit tracepoint 1016 * FIXME: do that only when needed, using sched_exit tracepoint
1008 */ 1017 */
1009 flush_ptrace_hw_breakpoint(tsk); 1018 flush_ptrace_hw_breakpoint(tsk);
1010 /*
1011 * Flush inherited counters to the parent - before the parent
1012 * gets woken up by child-exit notifications.
1013 */
1014 perf_event_exit_task(tsk);
1015 1019
1016 exit_notify(tsk, group_dead); 1020 exit_notify(tsk, group_dead);
1017#ifdef CONFIG_NUMA 1021#ifdef CONFIG_NUMA
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 11847bf1e8cc..b782b7a79f00 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -38,6 +38,12 @@
38 38
39#include <asm/irq_regs.h> 39#include <asm/irq_regs.h>
40 40
41enum event_type_t {
42 EVENT_FLEXIBLE = 0x1,
43 EVENT_PINNED = 0x2,
44 EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
45};
46
41atomic_t perf_task_events __read_mostly; 47atomic_t perf_task_events __read_mostly;
42static atomic_t nr_mmap_events __read_mostly; 48static atomic_t nr_mmap_events __read_mostly;
43static atomic_t nr_comm_events __read_mostly; 49static atomic_t nr_comm_events __read_mostly;
@@ -65,6 +71,12 @@ int sysctl_perf_event_sample_rate __read_mostly = 100000;
65 71
66static atomic64_t perf_event_id; 72static atomic64_t perf_event_id;
67 73
74static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
75 enum event_type_t event_type);
76
77static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
78 enum event_type_t event_type);
79
68void __weak perf_event_print_debug(void) { } 80void __weak perf_event_print_debug(void) { }
69 81
70extern __weak const char *perf_pmu_name(void) 82extern __weak const char *perf_pmu_name(void)
@@ -72,6 +84,11 @@ extern __weak const char *perf_pmu_name(void)
72 return "pmu"; 84 return "pmu";
73} 85}
74 86
87static inline u64 perf_clock(void)
88{
89 return local_clock();
90}
91
75void perf_pmu_disable(struct pmu *pmu) 92void perf_pmu_disable(struct pmu *pmu)
76{ 93{
77 int *count = this_cpu_ptr(pmu->pmu_disable_count); 94 int *count = this_cpu_ptr(pmu->pmu_disable_count);
@@ -240,11 +257,6 @@ static void perf_unpin_context(struct perf_event_context *ctx)
240 put_ctx(ctx); 257 put_ctx(ctx);
241} 258}
242 259
243static inline u64 perf_clock(void)
244{
245 return local_clock();
246}
247
248/* 260/*
249 * Update the record of the current time in a context. 261 * Update the record of the current time in a context.
250 */ 262 */
@@ -256,6 +268,12 @@ static void update_context_time(struct perf_event_context *ctx)
256 ctx->timestamp = now; 268 ctx->timestamp = now;
257} 269}
258 270
271static u64 perf_event_time(struct perf_event *event)
272{
273 struct perf_event_context *ctx = event->ctx;
274 return ctx ? ctx->time : 0;
275}
276
259/* 277/*
 260 * Update the total_time_enabled and total_time_running fields for an event. 278
261 */ 279 */
@@ -269,7 +287,7 @@ static void update_event_times(struct perf_event *event)
269 return; 287 return;
270 288
271 if (ctx->is_active) 289 if (ctx->is_active)
272 run_end = ctx->time; 290 run_end = perf_event_time(event);
273 else 291 else
274 run_end = event->tstamp_stopped; 292 run_end = event->tstamp_stopped;
275 293
@@ -278,7 +296,7 @@ static void update_event_times(struct perf_event *event)
278 if (event->state == PERF_EVENT_STATE_INACTIVE) 296 if (event->state == PERF_EVENT_STATE_INACTIVE)
279 run_end = event->tstamp_stopped; 297 run_end = event->tstamp_stopped;
280 else 298 else
281 run_end = ctx->time; 299 run_end = perf_event_time(event);
282 300
283 event->total_time_running = run_end - event->tstamp_running; 301 event->total_time_running = run_end - event->tstamp_running;
284} 302}
@@ -534,6 +552,7 @@ event_sched_out(struct perf_event *event,
534 struct perf_cpu_context *cpuctx, 552 struct perf_cpu_context *cpuctx,
535 struct perf_event_context *ctx) 553 struct perf_event_context *ctx)
536{ 554{
555 u64 tstamp = perf_event_time(event);
537 u64 delta; 556 u64 delta;
538 /* 557 /*
539 * An event which could not be activated because of 558 * An event which could not be activated because of
@@ -545,7 +564,7 @@ event_sched_out(struct perf_event *event,
545 && !event_filter_match(event)) { 564 && !event_filter_match(event)) {
546 delta = ctx->time - event->tstamp_stopped; 565 delta = ctx->time - event->tstamp_stopped;
547 event->tstamp_running += delta; 566 event->tstamp_running += delta;
548 event->tstamp_stopped = ctx->time; 567 event->tstamp_stopped = tstamp;
549 } 568 }
550 569
551 if (event->state != PERF_EVENT_STATE_ACTIVE) 570 if (event->state != PERF_EVENT_STATE_ACTIVE)
@@ -556,7 +575,7 @@ event_sched_out(struct perf_event *event,
556 event->pending_disable = 0; 575 event->pending_disable = 0;
557 event->state = PERF_EVENT_STATE_OFF; 576 event->state = PERF_EVENT_STATE_OFF;
558 } 577 }
559 event->tstamp_stopped = ctx->time; 578 event->tstamp_stopped = tstamp;
560 event->pmu->del(event, 0); 579 event->pmu->del(event, 0);
561 event->oncpu = -1; 580 event->oncpu = -1;
562 581
@@ -768,6 +787,8 @@ event_sched_in(struct perf_event *event,
768 struct perf_cpu_context *cpuctx, 787 struct perf_cpu_context *cpuctx,
769 struct perf_event_context *ctx) 788 struct perf_event_context *ctx)
770{ 789{
790 u64 tstamp = perf_event_time(event);
791
771 if (event->state <= PERF_EVENT_STATE_OFF) 792 if (event->state <= PERF_EVENT_STATE_OFF)
772 return 0; 793 return 0;
773 794
@@ -784,9 +805,9 @@ event_sched_in(struct perf_event *event,
784 return -EAGAIN; 805 return -EAGAIN;
785 } 806 }
786 807
787 event->tstamp_running += ctx->time - event->tstamp_stopped; 808 event->tstamp_running += tstamp - event->tstamp_stopped;
788 809
789 event->shadow_ctx_time = ctx->time - ctx->timestamp; 810 event->shadow_ctx_time = tstamp - ctx->timestamp;
790 811
791 if (!is_software_event(event)) 812 if (!is_software_event(event))
792 cpuctx->active_oncpu++; 813 cpuctx->active_oncpu++;
@@ -898,11 +919,13 @@ static int group_can_go_on(struct perf_event *event,
898static void add_event_to_ctx(struct perf_event *event, 919static void add_event_to_ctx(struct perf_event *event,
899 struct perf_event_context *ctx) 920 struct perf_event_context *ctx)
900{ 921{
922 u64 tstamp = perf_event_time(event);
923
901 list_add_event(event, ctx); 924 list_add_event(event, ctx);
902 perf_group_attach(event); 925 perf_group_attach(event);
903 event->tstamp_enabled = ctx->time; 926 event->tstamp_enabled = tstamp;
904 event->tstamp_running = ctx->time; 927 event->tstamp_running = tstamp;
905 event->tstamp_stopped = ctx->time; 928 event->tstamp_stopped = tstamp;
906} 929}
907 930
908/* 931/*
@@ -937,7 +960,7 @@ static void __perf_install_in_context(void *info)
937 960
938 add_event_to_ctx(event, ctx); 961 add_event_to_ctx(event, ctx);
939 962
940 if (event->cpu != -1 && event->cpu != smp_processor_id()) 963 if (!event_filter_match(event))
941 goto unlock; 964 goto unlock;
942 965
943 /* 966 /*
@@ -1042,14 +1065,13 @@ static void __perf_event_mark_enabled(struct perf_event *event,
1042 struct perf_event_context *ctx) 1065 struct perf_event_context *ctx)
1043{ 1066{
1044 struct perf_event *sub; 1067 struct perf_event *sub;
1068 u64 tstamp = perf_event_time(event);
1045 1069
1046 event->state = PERF_EVENT_STATE_INACTIVE; 1070 event->state = PERF_EVENT_STATE_INACTIVE;
1047 event->tstamp_enabled = ctx->time - event->total_time_enabled; 1071 event->tstamp_enabled = tstamp - event->total_time_enabled;
1048 list_for_each_entry(sub, &event->sibling_list, group_entry) { 1072 list_for_each_entry(sub, &event->sibling_list, group_entry) {
1049 if (sub->state >= PERF_EVENT_STATE_INACTIVE) { 1073 if (sub->state >= PERF_EVENT_STATE_INACTIVE)
1050 sub->tstamp_enabled = 1074 sub->tstamp_enabled = tstamp - sub->total_time_enabled;
1051 ctx->time - sub->total_time_enabled;
1052 }
1053 } 1075 }
1054} 1076}
1055 1077
@@ -1082,7 +1104,7 @@ static void __perf_event_enable(void *info)
1082 goto unlock; 1104 goto unlock;
1083 __perf_event_mark_enabled(event, ctx); 1105 __perf_event_mark_enabled(event, ctx);
1084 1106
1085 if (event->cpu != -1 && event->cpu != smp_processor_id()) 1107 if (!event_filter_match(event))
1086 goto unlock; 1108 goto unlock;
1087 1109
1088 /* 1110 /*
@@ -1193,12 +1215,6 @@ static int perf_event_refresh(struct perf_event *event, int refresh)
1193 return 0; 1215 return 0;
1194} 1216}
1195 1217
1196enum event_type_t {
1197 EVENT_FLEXIBLE = 0x1,
1198 EVENT_PINNED = 0x2,
1199 EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
1200};
1201
1202static void ctx_sched_out(struct perf_event_context *ctx, 1218static void ctx_sched_out(struct perf_event_context *ctx,
1203 struct perf_cpu_context *cpuctx, 1219 struct perf_cpu_context *cpuctx,
1204 enum event_type_t event_type) 1220 enum event_type_t event_type)
@@ -1435,7 +1451,7 @@ ctx_pinned_sched_in(struct perf_event_context *ctx,
1435 list_for_each_entry(event, &ctx->pinned_groups, group_entry) { 1451 list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
1436 if (event->state <= PERF_EVENT_STATE_OFF) 1452 if (event->state <= PERF_EVENT_STATE_OFF)
1437 continue; 1453 continue;
1438 if (event->cpu != -1 && event->cpu != smp_processor_id()) 1454 if (!event_filter_match(event))
1439 continue; 1455 continue;
1440 1456
1441 if (group_can_go_on(event, cpuctx, 1)) 1457 if (group_can_go_on(event, cpuctx, 1))
@@ -1467,7 +1483,7 @@ ctx_flexible_sched_in(struct perf_event_context *ctx,
1467 * Listen to the 'cpu' scheduling filter constraint 1483 * Listen to the 'cpu' scheduling filter constraint
1468 * of events: 1484 * of events:
1469 */ 1485 */
1470 if (event->cpu != -1 && event->cpu != smp_processor_id()) 1486 if (!event_filter_match(event))
1471 continue; 1487 continue;
1472 1488
1473 if (group_can_go_on(event, cpuctx, can_add_hw)) { 1489 if (group_can_go_on(event, cpuctx, can_add_hw)) {
@@ -1694,7 +1710,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
1694 if (event->state != PERF_EVENT_STATE_ACTIVE) 1710 if (event->state != PERF_EVENT_STATE_ACTIVE)
1695 continue; 1711 continue;
1696 1712
1697 if (event->cpu != -1 && event->cpu != smp_processor_id()) 1713 if (!event_filter_match(event))
1698 continue; 1714 continue;
1699 1715
1700 hwc = &event->hw; 1716 hwc = &event->hw;
@@ -3893,7 +3909,7 @@ static int perf_event_task_match(struct perf_event *event)
3893 if (event->state < PERF_EVENT_STATE_INACTIVE) 3909 if (event->state < PERF_EVENT_STATE_INACTIVE)
3894 return 0; 3910 return 0;
3895 3911
3896 if (event->cpu != -1 && event->cpu != smp_processor_id()) 3912 if (!event_filter_match(event))
3897 return 0; 3913 return 0;
3898 3914
3899 if (event->attr.comm || event->attr.mmap || 3915 if (event->attr.comm || event->attr.mmap ||
@@ -4030,7 +4046,7 @@ static int perf_event_comm_match(struct perf_event *event)
4030 if (event->state < PERF_EVENT_STATE_INACTIVE) 4046 if (event->state < PERF_EVENT_STATE_INACTIVE)
4031 return 0; 4047 return 0;
4032 4048
4033 if (event->cpu != -1 && event->cpu != smp_processor_id()) 4049 if (!event_filter_match(event))
4034 return 0; 4050 return 0;
4035 4051
4036 if (event->attr.comm) 4052 if (event->attr.comm)
@@ -4178,7 +4194,7 @@ static int perf_event_mmap_match(struct perf_event *event,
4178 if (event->state < PERF_EVENT_STATE_INACTIVE) 4194 if (event->state < PERF_EVENT_STATE_INACTIVE)
4179 return 0; 4195 return 0;
4180 4196
4181 if (event->cpu != -1 && event->cpu != smp_processor_id()) 4197 if (!event_filter_match(event))
4182 return 0; 4198 return 0;
4183 4199
4184 if ((!executable && event->attr.mmap_data) || 4200 if ((!executable && event->attr.mmap_data) ||
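Every open-coded `event->cpu != -1 && event->cpu != smp_processor_id()` test above collapses into event_filter_match(). The helper itself is not part of this hunk; from the call sites it is presumably equivalent to:

	/* Presumed shape of the helper, inferred from the call sites. */
	static inline int event_filter_match(struct perf_event *event)
	{
		return event->cpu == -1 || event->cpu == smp_processor_id();
	}
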
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 53f338190b26..761c510a06c5 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -52,7 +52,7 @@ obj-$(CONFIG_EVENT_TRACING) += trace_event_perf.o
52endif 52endif
53obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o 53obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
54obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o 54obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o
55obj-$(CONFIG_EVENT_TRACING) += power-traces.o 55obj-$(CONFIG_TRACEPOINTS) += power-traces.o
56ifeq ($(CONFIG_TRACING),y) 56ifeq ($(CONFIG_TRACING),y)
57obj-$(CONFIG_KGDB_KDB) += trace_kdb.o 57obj-$(CONFIG_KGDB_KDB) += trace_kdb.o
58endif 58endif
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f8cf959bad45..dc53ecb80589 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1313,12 +1313,10 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
1313 1313
1314 __this_cpu_inc(user_stack_count); 1314 __this_cpu_inc(user_stack_count);
1315 1315
1316
1317
1318 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK, 1316 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
1319 sizeof(*entry), flags, pc); 1317 sizeof(*entry), flags, pc);
1320 if (!event) 1318 if (!event)
1321 return; 1319 goto out_drop_count;
1322 entry = ring_buffer_event_data(event); 1320 entry = ring_buffer_event_data(event);
1323 1321
1324 entry->tgid = current->tgid; 1322 entry->tgid = current->tgid;
@@ -1333,8 +1331,8 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
1333 if (!filter_check_discard(call, entry, buffer, event)) 1331 if (!filter_check_discard(call, entry, buffer, event))
1334 ring_buffer_unlock_commit(buffer, event); 1332 ring_buffer_unlock_commit(buffer, event);
1335 1333
1334 out_drop_count:
1336 __this_cpu_dec(user_stack_count); 1335 __this_cpu_dec(user_stack_count);
1337
1338 out: 1336 out:
1339 preempt_enable(); 1337 preempt_enable();
1340} 1338}
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 3094318bfea7..b335acb43be2 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -141,11 +141,10 @@ static void ddebug_change(const struct ddebug_query *query,
141 else if (!dp->flags) 141 else if (!dp->flags)
142 dt->num_enabled++; 142 dt->num_enabled++;
143 dp->flags = newflags; 143 dp->flags = newflags;
144 if (newflags) { 144 if (newflags)
145 jump_label_enable(&dp->enabled); 145 dp->enabled = 1;
146 } else { 146 else
147 jump_label_disable(&dp->enabled); 147 dp->enabled = 0;
148 }
149 if (verbose) 148 if (verbose)
150 printk(KERN_INFO 149 printk(KERN_INFO
151 "ddebug: changed %s:%d [%s]%s %s\n", 150 "ddebug: changed %s:%d [%s]%s %s\n",
diff --git a/lib/kref.c b/lib/kref.c
index d3d227a08a4b..3efb882b11db 100644
--- a/lib/kref.c
+++ b/lib/kref.c
@@ -62,6 +62,36 @@ int kref_put(struct kref *kref, void (*release)(struct kref *kref))
62 return 0; 62 return 0;
63} 63}
64 64
65
66/**
 67 * kref_sub - subtract a number of refcounts for an object.
 68 * @kref: object.
 69 * @count: Number of refcounts to subtract.
70 * @release: pointer to the function that will clean up the object when the
71 * last reference to the object is released.
72 * This pointer is required, and it is not acceptable to pass kfree
73 * in as this function.
74 *
75 * Subtract @count from the refcount, and if 0, call release().
 76 * Return 1 if the object was removed, otherwise return 0. Beware, if this
 77 * function returns 0, you still cannot count on the kref remaining in
 78 * memory; another holder may drop the last reference at any time. Only use
 79 * the return value to see whether the kref is now gone, not still present.
80 */
81int kref_sub(struct kref *kref, unsigned int count,
82 void (*release)(struct kref *kref))
83{
84 WARN_ON(release == NULL);
85 WARN_ON(release == (void (*)(struct kref *))kfree);
86
87 if (atomic_sub_and_test((int) count, &kref->refcount)) {
88 release(kref);
89 return 1;
90 }
91 return 0;
92}
93
65EXPORT_SYMBOL(kref_init); 94EXPORT_SYMBOL(kref_init);
66EXPORT_SYMBOL(kref_get); 95EXPORT_SYMBOL(kref_get);
67EXPORT_SYMBOL(kref_put); 96EXPORT_SYMBOL(kref_put);
97EXPORT_SYMBOL(kref_sub);
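A usage sketch for kref_sub(); the object type and release function are illustrative:

	struct widget {				/* illustrative object */
		struct kref kref;
	};

	static void widget_release(struct kref *kref)
	{
		kfree(container_of(kref, struct widget, kref));
	}

	static void widget_put_many(struct widget *w, unsigned int n)
	{
		/* widget_release() wraps kfree(); passing kfree itself
		 * as @release would trip the WARN_ON above. */
		if (kref_sub(&w->kref, n, widget_release))
			pr_debug("last reference gone, widget freed\n");
	}
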
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index 798beac7f100..1e308f210928 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -178,27 +178,24 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
178 break; 178 break;
179 case 's':{ 179 case 's':{
180 char **sptr = va_arg(ap, char **); 180 char **sptr = va_arg(ap, char **);
181 int16_t len; 181 uint16_t len;
182 int size;
183 182
184 errcode = p9pdu_readf(pdu, proto_version, 183 errcode = p9pdu_readf(pdu, proto_version,
185 "w", &len); 184 "w", &len);
186 if (errcode) 185 if (errcode)
187 break; 186 break;
188 187
189 size = max_t(int16_t, len, 0); 188 *sptr = kmalloc(len + 1, GFP_KERNEL);
190
191 *sptr = kmalloc(size + 1, GFP_KERNEL);
192 if (*sptr == NULL) { 189 if (*sptr == NULL) {
193 errcode = -EFAULT; 190 errcode = -EFAULT;
194 break; 191 break;
195 } 192 }
196 if (pdu_read(pdu, *sptr, size)) { 193 if (pdu_read(pdu, *sptr, len)) {
197 errcode = -EFAULT; 194 errcode = -EFAULT;
198 kfree(*sptr); 195 kfree(*sptr);
199 *sptr = NULL; 196 *sptr = NULL;
200 } else 197 } else
201 (*sptr)[size] = 0; 198 (*sptr)[len] = 0;
202 } 199 }
203 break; 200 break;
204 case 'Q':{ 201 case 'Q':{
@@ -234,14 +231,14 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
234 } 231 }
235 break; 232 break;
236 case 'D':{ 233 case 'D':{
237 int32_t *count = va_arg(ap, int32_t *); 234 uint32_t *count = va_arg(ap, uint32_t *);
238 void **data = va_arg(ap, void **); 235 void **data = va_arg(ap, void **);
239 236
240 errcode = 237 errcode =
241 p9pdu_readf(pdu, proto_version, "d", count); 238 p9pdu_readf(pdu, proto_version, "d", count);
242 if (!errcode) { 239 if (!errcode) {
243 *count = 240 *count =
244 min_t(int32_t, *count, 241 min_t(uint32_t, *count,
245 pdu->size - pdu->offset); 242 pdu->size - pdu->offset);
246 *data = &pdu->sdata[pdu->offset]; 243 *data = &pdu->sdata[pdu->offset];
247 } 244 }
@@ -404,9 +401,10 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
404 break; 401 break;
405 case 's':{ 402 case 's':{
406 const char *sptr = va_arg(ap, const char *); 403 const char *sptr = va_arg(ap, const char *);
407 int16_t len = 0; 404 uint16_t len = 0;
408 if (sptr) 405 if (sptr)
409 len = min_t(int16_t, strlen(sptr), USHRT_MAX); 406 len = min_t(uint16_t, strlen(sptr),
407 USHRT_MAX);
410 408
411 errcode = p9pdu_writef(pdu, proto_version, 409 errcode = p9pdu_writef(pdu, proto_version,
412 "w", len); 410 "w", len);
@@ -438,7 +436,7 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
438 stbuf->n_gid, stbuf->n_muid); 436 stbuf->n_gid, stbuf->n_muid);
439 } break; 437 } break;
440 case 'D':{ 438 case 'D':{
441 int32_t count = va_arg(ap, int32_t); 439 uint32_t count = va_arg(ap, uint32_t);
442 const void *data = va_arg(ap, const void *); 440 const void *data = va_arg(ap, const void *);
443 441
444 errcode = p9pdu_writef(pdu, proto_version, "d", 442 errcode = p9pdu_writef(pdu, proto_version, "d",
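The signedness change closes a parsing hazard: a 16-bit wire length with the top bit set became negative as int16_t, and the max_t() clamp then silently read zero bytes, desynchronizing the stream. Illustrative arithmetic:

	uint16_t wire = 0xffff;			/* length field from the PDU */
	int16_t old_len = (int16_t)wire;	/* old typing: -1 */
	int old_size = old_len > 0 ? old_len : 0; /* max_t() clamp: 0 */
	/* old path: kmalloc(1), pdu_read(.., 0); 65535 bytes unconsumed */

	uint16_t new_len = wire;		/* new typing: 65535 */
	/* new path: kmalloc(65536), bounds-checked pdu_read of 65535 */
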
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 1bf0cf503796..8184c031d028 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -740,12 +740,12 @@ static int setsockopt(struct socket *sock,
740 if (cf_sk->sk.sk_protocol != CAIFPROTO_UTIL) 740 if (cf_sk->sk.sk_protocol != CAIFPROTO_UTIL)
741 return -ENOPROTOOPT; 741 return -ENOPROTOOPT;
742 lock_sock(&(cf_sk->sk)); 742 lock_sock(&(cf_sk->sk));
743 cf_sk->conn_req.param.size = ol;
744 if (ol > sizeof(cf_sk->conn_req.param.data) || 743 if (ol > sizeof(cf_sk->conn_req.param.data) ||
745 copy_from_user(&cf_sk->conn_req.param.data, ov, ol)) { 744 copy_from_user(&cf_sk->conn_req.param.data, ov, ol)) {
746 release_sock(&cf_sk->sk); 745 release_sock(&cf_sk->sk);
747 return -EINVAL; 746 return -EINVAL;
748 } 747 }
748 cf_sk->conn_req.param.size = ol;
749 release_sock(&cf_sk->sk); 749 release_sock(&cf_sk->sk);
750 return 0; 750 return 0;
751 751
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 84a422c98941..fa9dab372b68 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -76,6 +76,8 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
76 struct chnl_net *priv = container_of(layr, struct chnl_net, chnl); 76 struct chnl_net *priv = container_of(layr, struct chnl_net, chnl);
77 int pktlen; 77 int pktlen;
78 int err = 0; 78 int err = 0;
79 const u8 *ip_version;
80 u8 buf;
79 81
80 priv = container_of(layr, struct chnl_net, chnl); 82 priv = container_of(layr, struct chnl_net, chnl);
81 83
@@ -90,7 +92,21 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
90 * send the packet to the net stack. 92 * send the packet to the net stack.
91 */ 93 */
92 skb->dev = priv->netdev; 94 skb->dev = priv->netdev;
93 skb->protocol = htons(ETH_P_IP); 95
96 /* check the version of IP */
97 ip_version = skb_header_pointer(skb, 0, 1, &buf);
98 if (!ip_version)
99 return -EINVAL;
100 switch (*ip_version >> 4) {
101 case 4:
102 skb->protocol = htons(ETH_P_IP);
103 break;
104 case 6:
105 skb->protocol = htons(ETH_P_IPV6);
106 break;
107 default:
108 return -EINVAL;
109 }
94 110
95 /* If we change the header in loop mode, the checksum is corrupted. */ 111 /* If we change the header in loop mode, the checksum is corrupted. */
96 if (priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP) 112 if (priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP)
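The version field is the top nibble of the first payload byte, so peeking a single byte is enough to pick the protocol. A condensed sketch of the same decision; the helper is illustrative:

	static __be16 pick_proto(uint8_t first_byte)
	{
		switch (first_byte >> 4) {
		case 4:			/* e.g. 0x45: IPv4, IHL 5 */
			return htons(ETH_P_IP);
		case 6:			/* e.g. 0x60: IPv6 */
			return htons(ETH_P_IPV6);
		default:
			return 0;	/* caller drops the frame */
		}
	}
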
diff --git a/net/core/dev.c b/net/core/dev.c
index a215269d2e35..a3ef808b5e36 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1732,33 +1732,6 @@ void netif_device_attach(struct net_device *dev)
1732} 1732}
1733EXPORT_SYMBOL(netif_device_attach); 1733EXPORT_SYMBOL(netif_device_attach);
1734 1734
1735static bool can_checksum_protocol(unsigned long features, __be16 protocol)
1736{
1737 return ((features & NETIF_F_NO_CSUM) ||
1738 ((features & NETIF_F_V4_CSUM) &&
1739 protocol == htons(ETH_P_IP)) ||
1740 ((features & NETIF_F_V6_CSUM) &&
1741 protocol == htons(ETH_P_IPV6)) ||
1742 ((features & NETIF_F_FCOE_CRC) &&
1743 protocol == htons(ETH_P_FCOE)));
1744}
1745
1746static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
1747{
1748 __be16 protocol = skb->protocol;
1749 int features = dev->features;
1750
1751 if (vlan_tx_tag_present(skb)) {
1752 features &= dev->vlan_features;
1753 } else if (protocol == htons(ETH_P_8021Q)) {
1754 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
1755 protocol = veh->h_vlan_encapsulated_proto;
1756 features &= dev->vlan_features;
1757 }
1758
1759 return can_checksum_protocol(features, protocol);
1760}
1761
1762/** 1735/**
1763 * skb_dev_set -- assign a new device to a buffer 1736 * skb_dev_set -- assign a new device to a buffer
1764 * @skb: buffer for the new device 1737 * @skb: buffer for the new device
@@ -1971,16 +1944,14 @@ static void dev_gso_skb_destructor(struct sk_buff *skb)
1971/** 1944/**
1972 * dev_gso_segment - Perform emulated hardware segmentation on skb. 1945 * dev_gso_segment - Perform emulated hardware segmentation on skb.
1973 * @skb: buffer to segment 1946 * @skb: buffer to segment
1947 * @features: device features as applicable to this skb
1974 * 1948 *
1975 * This function segments the given skb and stores the list of segments 1949 * This function segments the given skb and stores the list of segments
1976 * in skb->next. 1950 * in skb->next.
1977 */ 1951 */
1978static int dev_gso_segment(struct sk_buff *skb) 1952static int dev_gso_segment(struct sk_buff *skb, int features)
1979{ 1953{
1980 struct net_device *dev = skb->dev;
1981 struct sk_buff *segs; 1954 struct sk_buff *segs;
1982 int features = dev->features & ~(illegal_highdma(dev, skb) ?
1983 NETIF_F_SG : 0);
1984 1955
1985 segs = skb_gso_segment(skb, features); 1956 segs = skb_gso_segment(skb, features);
1986 1957
@@ -2017,22 +1988,52 @@ static inline void skb_orphan_try(struct sk_buff *skb)
2017 } 1988 }
2018} 1989}
2019 1990
2020int netif_get_vlan_features(struct sk_buff *skb, struct net_device *dev) 1991static bool can_checksum_protocol(unsigned long features, __be16 protocol)
1992{
1993 return ((features & NETIF_F_GEN_CSUM) ||
1994 ((features & NETIF_F_V4_CSUM) &&
1995 protocol == htons(ETH_P_IP)) ||
1996 ((features & NETIF_F_V6_CSUM) &&
1997 protocol == htons(ETH_P_IPV6)) ||
1998 ((features & NETIF_F_FCOE_CRC) &&
1999 protocol == htons(ETH_P_FCOE)));
2000}
2001
2002static int harmonize_features(struct sk_buff *skb, __be16 protocol, int features)
2003{
 2004 if (!can_checksum_protocol(features, protocol)) {
2005 features &= ~NETIF_F_ALL_CSUM;
2006 features &= ~NETIF_F_SG;
2007 } else if (illegal_highdma(skb->dev, skb)) {
2008 features &= ~NETIF_F_SG;
2009 }
2010
2011 return features;
2012}
2013
2014int netif_skb_features(struct sk_buff *skb)
2021{ 2015{
2022 __be16 protocol = skb->protocol; 2016 __be16 protocol = skb->protocol;
2017 int features = skb->dev->features;
2023 2018
2024 if (protocol == htons(ETH_P_8021Q)) { 2019 if (protocol == htons(ETH_P_8021Q)) {
2025 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; 2020 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
2026 protocol = veh->h_vlan_encapsulated_proto; 2021 protocol = veh->h_vlan_encapsulated_proto;
2027 } else if (!skb->vlan_tci) 2022 } else if (!vlan_tx_tag_present(skb)) {
2028 return dev->features; 2023 return harmonize_features(skb, protocol, features);
2024 }
2029 2025
2030 if (protocol != htons(ETH_P_8021Q)) 2026 features &= skb->dev->vlan_features;
2031 return dev->features & dev->vlan_features; 2027
2032 else 2028 if (protocol != htons(ETH_P_8021Q)) {
2033 return 0; 2029 return harmonize_features(skb, protocol, features);
2030 } else {
2031 features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
2032 NETIF_F_GEN_CSUM;
2033 return harmonize_features(skb, protocol, features);
2034 }
2034} 2035}
2035EXPORT_SYMBOL(netif_get_vlan_features); 2036EXPORT_SYMBOL(netif_skb_features);
2036 2037
2037/* 2038/*
2038 * Returns true if either: 2039 * Returns true if either:
@@ -2042,22 +2043,13 @@ EXPORT_SYMBOL(netif_get_vlan_features);
2042 * support DMA from it. 2043 * support DMA from it.
2043 */ 2044 */
2044static inline int skb_needs_linearize(struct sk_buff *skb, 2045static inline int skb_needs_linearize(struct sk_buff *skb,
2045 struct net_device *dev) 2046 int features)
2046{ 2047{
2047 if (skb_is_nonlinear(skb)) { 2048 return skb_is_nonlinear(skb) &&
2048 int features = dev->features; 2049 ((skb_has_frag_list(skb) &&
2049 2050 !(features & NETIF_F_FRAGLIST)) ||
2050 if (vlan_tx_tag_present(skb))
2051 features &= dev->vlan_features;
2052
2053 return (skb_has_frag_list(skb) &&
2054 !(features & NETIF_F_FRAGLIST)) ||
2055 (skb_shinfo(skb)->nr_frags && 2051 (skb_shinfo(skb)->nr_frags &&
2056 (!(features & NETIF_F_SG) || 2052 !(features & NETIF_F_SG)));
2057 illegal_highdma(dev, skb)));
2058 }
2059
2060 return 0;
2061} 2053}
2062 2054
2063int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, 2055int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
@@ -2067,6 +2059,8 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2067 int rc = NETDEV_TX_OK; 2059 int rc = NETDEV_TX_OK;
2068 2060
2069 if (likely(!skb->next)) { 2061 if (likely(!skb->next)) {
2062 int features;
2063
2070 /* 2064 /*
 2071 * If device doesn't need skb->dst, release it right now while 2065 * If device doesn't need skb->dst, release it right now while
 2072 * it's hot in this cpu cache 2066 * it's hot in this cpu cache
@@ -2079,8 +2073,10 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2079 2073
2080 skb_orphan_try(skb); 2074 skb_orphan_try(skb);
2081 2075
2076 features = netif_skb_features(skb);
2077
2082 if (vlan_tx_tag_present(skb) && 2078 if (vlan_tx_tag_present(skb) &&
2083 !(dev->features & NETIF_F_HW_VLAN_TX)) { 2079 !(features & NETIF_F_HW_VLAN_TX)) {
2084 skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb)); 2080 skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
2085 if (unlikely(!skb)) 2081 if (unlikely(!skb))
2086 goto out; 2082 goto out;
@@ -2088,13 +2084,13 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2088 skb->vlan_tci = 0; 2084 skb->vlan_tci = 0;
2089 } 2085 }
2090 2086
2091 if (netif_needs_gso(dev, skb)) { 2087 if (netif_needs_gso(skb, features)) {
2092 if (unlikely(dev_gso_segment(skb))) 2088 if (unlikely(dev_gso_segment(skb, features)))
2093 goto out_kfree_skb; 2089 goto out_kfree_skb;
2094 if (skb->next) 2090 if (skb->next)
2095 goto gso; 2091 goto gso;
2096 } else { 2092 } else {
2097 if (skb_needs_linearize(skb, dev) && 2093 if (skb_needs_linearize(skb, features) &&
2098 __skb_linearize(skb)) 2094 __skb_linearize(skb))
2099 goto out_kfree_skb; 2095 goto out_kfree_skb;
2100 2096
@@ -2105,7 +2101,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2105 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2101 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2106 skb_set_transport_header(skb, 2102 skb_set_transport_header(skb,
2107 skb_checksum_start_offset(skb)); 2103 skb_checksum_start_offset(skb));
2108 if (!dev_can_checksum(dev, skb) && 2104 if (!(features & NETIF_F_ALL_CSUM) &&
2109 skb_checksum_help(skb)) 2105 skb_checksum_help(skb))
2110 goto out_kfree_skb; 2106 goto out_kfree_skb;
2111 } 2107 }
@@ -2301,7 +2297,10 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2301 */ 2297 */
2302 if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE)) 2298 if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
2303 skb_dst_force(skb); 2299 skb_dst_force(skb);
2304 __qdisc_update_bstats(q, skb->len); 2300
2301 qdisc_skb_cb(skb)->pkt_len = skb->len;
2302 qdisc_bstats_update(q, skb);
2303
2305 if (sch_direct_xmit(skb, q, dev, txq, root_lock)) { 2304 if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
2306 if (unlikely(contended)) { 2305 if (unlikely(contended)) {
2307 spin_unlock(&q->busylock); 2306 spin_unlock(&q->busylock);
@@ -5621,18 +5620,20 @@ struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
5621} 5620}
5622 5621
5623/** 5622/**
5624 * alloc_netdev_mq - allocate network device 5623 * alloc_netdev_mqs - allocate network device
5625 * @sizeof_priv: size of private data to allocate space for 5624 * @sizeof_priv: size of private data to allocate space for
5626 * @name: device name format string 5625 * @name: device name format string
5627 * @setup: callback to initialize device 5626 * @setup: callback to initialize device
5628 * @queue_count: the number of subqueues to allocate 5627 * @txqs: the number of TX subqueues to allocate
5628 * @rxqs: the number of RX subqueues to allocate
5629 * 5629 *
5630 * Allocates a struct net_device with private data area for driver use 5630 * Allocates a struct net_device with private data area for driver use
 5631 * and performs basic initialization. Also allocates subqueue structs 5631 * and performs basic initialization. Also allocates subqueue structs
5632 * for each queue on the device at the end of the netdevice. 5632 * for each queue on the device.
5633 */ 5633 */
5634struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, 5634struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
5635 void (*setup)(struct net_device *), unsigned int queue_count) 5635 void (*setup)(struct net_device *),
5636 unsigned int txqs, unsigned int rxqs)
5636{ 5637{
5637 struct net_device *dev; 5638 struct net_device *dev;
5638 size_t alloc_size; 5639 size_t alloc_size;
@@ -5640,12 +5641,20 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5640 5641
5641 BUG_ON(strlen(name) >= sizeof(dev->name)); 5642 BUG_ON(strlen(name) >= sizeof(dev->name));
5642 5643
5643 if (queue_count < 1) { 5644 if (txqs < 1) {
5644 pr_err("alloc_netdev: Unable to allocate device " 5645 pr_err("alloc_netdev: Unable to allocate device "
5645 "with zero queues.\n"); 5646 "with zero queues.\n");
5646 return NULL; 5647 return NULL;
5647 } 5648 }
5648 5649
5650#ifdef CONFIG_RPS
5651 if (rxqs < 1) {
5652 pr_err("alloc_netdev: Unable to allocate device "
5653 "with zero RX queues.\n");
5654 return NULL;
5655 }
5656#endif
5657
5649 alloc_size = sizeof(struct net_device); 5658 alloc_size = sizeof(struct net_device);
5650 if (sizeof_priv) { 5659 if (sizeof_priv) {
5651 /* ensure 32-byte alignment of private area */ 5660 /* ensure 32-byte alignment of private area */
@@ -5676,14 +5685,14 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5676 5685
5677 dev_net_set(dev, &init_net); 5686 dev_net_set(dev, &init_net);
5678 5687
5679 dev->num_tx_queues = queue_count; 5688 dev->num_tx_queues = txqs;
5680 dev->real_num_tx_queues = queue_count; 5689 dev->real_num_tx_queues = txqs;
5681 if (netif_alloc_netdev_queues(dev)) 5690 if (netif_alloc_netdev_queues(dev))
5682 goto free_pcpu; 5691 goto free_pcpu;
5683 5692
5684#ifdef CONFIG_RPS 5693#ifdef CONFIG_RPS
5685 dev->num_rx_queues = queue_count; 5694 dev->num_rx_queues = rxqs;
5686 dev->real_num_rx_queues = queue_count; 5695 dev->real_num_rx_queues = rxqs;
5687 if (netif_alloc_rx_queues(dev)) 5696 if (netif_alloc_rx_queues(dev))
5688 goto free_pcpu; 5697 goto free_pcpu;
5689#endif 5698#endif
@@ -5711,7 +5720,7 @@ free_p:
5711 kfree(p); 5720 kfree(p);
5712 return NULL; 5721 return NULL;
5713} 5722}
5714EXPORT_SYMBOL(alloc_netdev_mq); 5723EXPORT_SYMBOL(alloc_netdev_mqs);
5715 5724
5716/** 5725/**
5717 * free_netdev - free network device 5726 * free_netdev - free network device
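Worked example, illustrative: consider a hardware-accelerated vlan-tagged skb (vlan_tx_tag_present()) on a device whose dev->features advertise NETIF_F_SG but whose dev->vlan_features do not. netif_skb_features() masks the feature word with vlan_features, harmonize_features() leaves it unchanged, and skb_needs_linearize() then sees !(features & NETIF_F_SG), so a paged skb is linearized before transmit. The per-device logic that used to be spread across dev_can_checksum(), dev_gso_segment() and skb_needs_linearize() now consumes one features word computed once per skb.
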
diff --git a/net/core/filter.c b/net/core/filter.c
index 2b27d4efdd48..afc58374ca96 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -158,7 +158,7 @@ EXPORT_SYMBOL(sk_filter);
158/** 158/**
159 * sk_run_filter - run a filter on a socket 159 * sk_run_filter - run a filter on a socket
160 * @skb: buffer to run the filter on 160 * @skb: buffer to run the filter on
161 * @filter: filter to apply 161 * @fentry: filter to apply
162 * 162 *
163 * Decode and apply filter instructions to the skb->data. 163 * Decode and apply filter instructions to the skb->data.
164 * Return length to keep, 0 for none. @skb is the data we are 164 * Return length to keep, 0 for none. @skb is the data we are
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 750db57f3bb3..a5f7535aab5b 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1820,7 +1820,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1820 if (kind != 2 && security_netlink_recv(skb, CAP_NET_ADMIN)) 1820 if (kind != 2 && security_netlink_recv(skb, CAP_NET_ADMIN))
1821 return -EPERM; 1821 return -EPERM;
1822 1822
1823 if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) { 1823 if (kind == 2 && (nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
1824 struct sock *rtnl; 1824 struct sock *rtnl;
1825 rtnl_dumpit_func dumpit; 1825 rtnl_dumpit_func dumpit;
1826 1826
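NLM_F_DUMP is a composite mask - NLM_F_ROOT | NLM_F_MATCH, i.e. 0x100 | 0x200 in include/linux/netlink.h - so the old `flags & NLM_F_DUMP` test also fired when only one of the two bits was set. Illustrative comparison:

	unsigned int flags = 0x100;	/* NLM_F_ROOT alone, not a dump */

	int old_test = (flags & NLM_F_DUMP) != 0;	   /* 1: false hit */
	int new_test = (flags & NLM_F_DUMP) == NLM_F_DUMP; /* 0: needs both */
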
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 45087052d894..5fdb07229017 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -426,7 +426,8 @@ static inline void dccp_update_gsr(struct sock *sk, u64 seq)
426{ 426{
427 struct dccp_sock *dp = dccp_sk(sk); 427 struct dccp_sock *dp = dccp_sk(sk);
428 428
429 dp->dccps_gsr = seq; 429 if (after48(seq, dp->dccps_gsr))
430 dp->dccps_gsr = seq;
430 /* Sequence validity window depends on remote Sequence Window (7.5.1) */ 431 /* Sequence validity window depends on remote Sequence Window (7.5.1) */
431 dp->dccps_swl = SUB48(ADD48(dp->dccps_gsr, 1), dp->dccps_r_seq_win / 4); 432 dp->dccps_swl = SUB48(ADD48(dp->dccps_gsr, 1), dp->dccps_r_seq_win / 4);
432 /* 433 /*
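Guarding the assignment with after48() keeps GSR monotonic in the 48-bit circular sequence space, so a late, reordered packet can no longer drag the validity window backwards. The macro itself is not part of this hunk; one common way to express the comparison, as a sketch rather than the kernel's exact definition:

	/* Shift the 48-bit values into the top of a 64-bit word so that
	 * two's-complement wraparound performs the circular comparison. */
	static inline int after48_sketch(unsigned long long a,
					 unsigned long long b)
	{
		return (long long)((a << 16) - (b << 16)) > 0;
	}

Under this scheme a seqno of 5 arriving when GSR is 2^48 - 1 still counts as newer (a small forward step across the wrap), while the reverse comparison does not.
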
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 15af247ea007..8cde009e8b85 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -260,7 +260,7 @@ static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb)
260 */ 260 */
261 if (time_before(now, (dp->dccps_rate_last + 261 if (time_before(now, (dp->dccps_rate_last +
262 sysctl_dccp_sync_ratelimit))) 262 sysctl_dccp_sync_ratelimit)))
263 return 0; 263 return -1;
264 264
265 DCCP_WARN("Step 6 failed for %s packet, " 265 DCCP_WARN("Step 6 failed for %s packet, "
266 "(LSWL(%llu) <= P.seqno(%llu) <= S.SWH(%llu)) and " 266 "(LSWL(%llu) <= P.seqno(%llu) <= S.SWH(%llu)) and "
diff --git a/net/dccp/sysctl.c b/net/dccp/sysctl.c
index 563943822e58..42348824ee31 100644
--- a/net/dccp/sysctl.c
+++ b/net/dccp/sysctl.c
@@ -21,7 +21,8 @@
21/* Boundary values */ 21/* Boundary values */
22static int zero = 0, 22static int zero = 0,
23 u8_max = 0xFF; 23 u8_max = 0xFF;
24static unsigned long seqw_min = 32; 24static unsigned long seqw_min = DCCPF_SEQ_WMIN,
25 seqw_max = 0xFFFFFFFF; /* maximum on 32 bit */
25 26
26static struct ctl_table dccp_default_table[] = { 27static struct ctl_table dccp_default_table[] = {
27 { 28 {
@@ -31,6 +32,7 @@ static struct ctl_table dccp_default_table[] = {
31 .mode = 0644, 32 .mode = 0644,
32 .proc_handler = proc_doulongvec_minmax, 33 .proc_handler = proc_doulongvec_minmax,
33 .extra1 = &seqw_min, /* RFC 4340, 7.5.2 */ 34 .extra1 = &seqw_min, /* RFC 4340, 7.5.2 */
35 .extra2 = &seqw_max,
34 }, 36 },
35 { 37 {
36 .procname = "rx_ccid", 38 .procname = "rx_ccid",
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index f00ef2f1d814..f9d7ac924f15 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -347,10 +347,11 @@ void ether_setup(struct net_device *dev)
347EXPORT_SYMBOL(ether_setup); 347EXPORT_SYMBOL(ether_setup);
348 348
349/** 349/**
350 * alloc_etherdev_mq - Allocates and sets up an Ethernet device 350 * alloc_etherdev_mqs - Allocates and sets up an Ethernet device
351 * @sizeof_priv: Size of additional driver-private structure to be allocated 351 * @sizeof_priv: Size of additional driver-private structure to be allocated
352 * for this Ethernet device 352 * for this Ethernet device
353 * @queue_count: The number of queues this device has. 353 * @txqs: The number of TX queues this device has.
 354 * @rxqs: The number of RX queues this device has.
354 * 355 *
355 * Fill in the fields of the device structure with Ethernet-generic 356 * Fill in the fields of the device structure with Ethernet-generic
356 * values. Basically does everything except registering the device. 357 * values. Basically does everything except registering the device.
@@ -360,11 +361,12 @@ EXPORT_SYMBOL(ether_setup);
360 * this private data area. 361 * this private data area.
361 */ 362 */
362 363
363struct net_device *alloc_etherdev_mq(int sizeof_priv, unsigned int queue_count) 364struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
365 unsigned int rxqs)
364{ 366{
365 return alloc_netdev_mq(sizeof_priv, "eth%d", ether_setup, queue_count); 367 return alloc_netdev_mqs(sizeof_priv, "eth%d", ether_setup, txqs, rxqs);
366} 368}
367EXPORT_SYMBOL(alloc_etherdev_mq); 369EXPORT_SYMBOL(alloc_etherdev_mqs);
368 370
369static size_t _format_mac_addr(char *buf, int buflen, 371static size_t _format_mac_addr(char *buf, int buflen,
370 const unsigned char *addr, int len) 372 const unsigned char *addr, int len)
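
With the allocator split into independent TX and RX queue counts, a driver with an asymmetric layout can size each side separately; the old alloc_etherdev_mq() presumably remains as the symmetric txqs == rxqs shorthand. A hypothetical call (struct my_priv is illustrative):

    /* four TX queues, a single RX queue */
    struct net_device *dev;

    dev = alloc_etherdev_mqs(sizeof(struct my_priv), 4, 1);
    if (!dev)
            return -ENOMEM;
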
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 880a5ec6dce0..86961bec70ab 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -314,14 +314,15 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
314 314
315 skb->ip_summed = CHECKSUM_NONE; 315 skb->ip_summed = CHECKSUM_NONE;
316 316
317 ah = (struct ip_auth_hdr *)skb->data;
318 iph = ip_hdr(skb);
319 ihl = ip_hdrlen(skb);
320 317
321 if ((err = skb_cow_data(skb, 0, &trailer)) < 0) 318 if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
322 goto out; 319 goto out;
323 nfrags = err; 320 nfrags = err;
324 321
322 ah = (struct ip_auth_hdr *)skb->data;
323 iph = ip_hdr(skb);
324 ihl = ip_hdrlen(skb);
325
325 work_iph = ah_alloc_tmp(ahash, nfrags, ihl + ahp->icv_trunc_len); 326 work_iph = ah_alloc_tmp(ahash, nfrags, ihl + ahp->icv_trunc_len);
326 if (!work_iph) 327 if (!work_iph)
327 goto out; 328 goto out;
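
The reordering matters because skb_cow_data() may copy or linearize the buffer, invalidating any pointer previously derived from skb->data. The resulting pattern, sketched:

    /* let skb_cow_data() reallocate first ...             */
    err = skb_cow_data(skb, 0, &trailer);
    if (err < 0)
            goto out;
    nfrags = err;

    /* ... and only then derive pointers into the packet   */
    ah  = (struct ip_auth_hdr *)skb->data;
    iph = ip_hdr(skb);
    ihl = ip_hdrlen(skb);
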
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index a2fc7b961dbc..04c8b69fd426 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1143,6 +1143,23 @@ static int arp_req_get(struct arpreq *r, struct net_device *dev)
1143 return err; 1143 return err;
1144} 1144}
1145 1145
1146int arp_invalidate(struct net_device *dev, __be32 ip)
1147{
1148 struct neighbour *neigh = neigh_lookup(&arp_tbl, &ip, dev);
1149 int err = -ENXIO;
1150
1151 if (neigh) {
1152 if (neigh->nud_state & ~NUD_NOARP)
1153 err = neigh_update(neigh, NULL, NUD_FAILED,
1154 NEIGH_UPDATE_F_OVERRIDE|
1155 NEIGH_UPDATE_F_ADMIN);
1156 neigh_release(neigh);
1157 }
1158
1159 return err;
1160}
1161EXPORT_SYMBOL(arp_invalidate);
1162
1146static int arp_req_delete_public(struct net *net, struct arpreq *r, 1163static int arp_req_delete_public(struct net *net, struct arpreq *r,
1147 struct net_device *dev) 1164 struct net_device *dev)
1148{ 1165{
@@ -1163,7 +1180,6 @@ static int arp_req_delete(struct net *net, struct arpreq *r,
1163{ 1180{
1164 int err; 1181 int err;
1165 __be32 ip; 1182 __be32 ip;
1166 struct neighbour *neigh;
1167 1183
1168 if (r->arp_flags & ATF_PUBL) 1184 if (r->arp_flags & ATF_PUBL)
1169 return arp_req_delete_public(net, r, dev); 1185 return arp_req_delete_public(net, r, dev);
@@ -1181,16 +1197,7 @@ static int arp_req_delete(struct net *net, struct arpreq *r,
1181 if (!dev) 1197 if (!dev)
1182 return -EINVAL; 1198 return -EINVAL;
1183 } 1199 }
1184 err = -ENXIO; 1200 return arp_invalidate(dev, ip);
1185 neigh = neigh_lookup(&arp_tbl, &ip, dev);
1186 if (neigh) {
1187 if (neigh->nud_state & ~NUD_NOARP)
1188 err = neigh_update(neigh, NULL, NUD_FAILED,
1189 NEIGH_UPDATE_F_OVERRIDE|
1190 NEIGH_UPDATE_F_ADMIN);
1191 neigh_release(neigh);
1192 }
1193 return err;
1194} 1201}
1195 1202
1196/* 1203/*
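
Factoring the body of arp_req_delete() out as arp_invalidate() lets other code force a neighbour entry into NUD_FAILED. A hypothetical caller (the address is illustrative):

    __be32 ip = htonl(0xc0a80101);          /* 192.168.1.1 */
    int err = arp_invalidate(dev, ip);

    if (err && err != -ENXIO)               /* -ENXIO: no such entry */
            pr_warn("arp_invalidate failed: %d\n", err);
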
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 25e318153f14..97e5fb765265 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -73,7 +73,7 @@ int inet_csk_bind_conflict(const struct sock *sk,
73 !sk2->sk_bound_dev_if || 73 !sk2->sk_bound_dev_if ||
74 sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) { 74 sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
75 if (!reuse || !sk2->sk_reuse || 75 if (!reuse || !sk2->sk_reuse ||
76 sk2->sk_state == TCP_LISTEN) { 76 ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) {
77 const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2); 77 const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
78 if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) || 78 if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
79 sk2_rcv_saddr == sk_rcv_saddr(sk)) 79 sk2_rcv_saddr == sk_rcv_saddr(sk))
@@ -122,7 +122,8 @@ again:
122 (tb->num_owners < smallest_size || smallest_size == -1)) { 122 (tb->num_owners < smallest_size || smallest_size == -1)) {
123 smallest_size = tb->num_owners; 123 smallest_size = tb->num_owners;
124 smallest_rover = rover; 124 smallest_rover = rover;
125 if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) { 125 if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 &&
126 !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
126 spin_unlock(&head->lock); 127 spin_unlock(&head->lock);
127 snum = smallest_rover; 128 snum = smallest_rover;
128 goto have_snum; 129 goto have_snum;
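
The TCPF_* constants are one-hot encodings of the TCP_* state numbers (TCPF_LISTEN == 1 << TCP_LISTEN), so shifting sk_state lets several states be tested with a single AND instead of chained comparisons. Sketch (the helper name is illustrative):

    static bool state_conflicts(const struct sock *sk2)
    {
            /* one AND instead of sk_state == TCP_LISTEN ||
             * sk_state == TCP_CLOSE                       */
            return (1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE);
    }
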
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 2ada17129fce..2746c1fa6417 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -858,7 +858,7 @@ static int inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
858 nlmsg_len(nlh) < hdrlen) 858 nlmsg_len(nlh) < hdrlen)
859 return -EINVAL; 859 return -EINVAL;
860 860
861 if (nlh->nlmsg_flags & NLM_F_DUMP) { 861 if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
862 if (nlmsg_attrlen(nlh, hdrlen)) { 862 if (nlmsg_attrlen(nlh, hdrlen)) {
863 struct nlattr *attr; 863 struct nlattr *attr;
864 864
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 3fac340a28d5..e855fffaed95 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -710,42 +710,25 @@ static void get_counters(const struct xt_table_info *t,
710 struct arpt_entry *iter; 710 struct arpt_entry *iter;
711 unsigned int cpu; 711 unsigned int cpu;
712 unsigned int i; 712 unsigned int i;
713 unsigned int curcpu = get_cpu();
714
715 /* Instead of clearing (by a previous call to memset())
716 * the counters and using adds, we set the counters
717 * with data used by 'current' CPU
718 *
719 * Bottom half has to be disabled to prevent deadlock
720 * if new softirq were to run and call ipt_do_table
721 */
722 local_bh_disable();
723 i = 0;
724 xt_entry_foreach(iter, t->entries[curcpu], t->size) {
725 SET_COUNTER(counters[i], iter->counters.bcnt,
726 iter->counters.pcnt);
727 ++i;
728 }
729 local_bh_enable();
730 /* Processing counters from other cpus, we can let bottom half enabled,
731 * (preemption is disabled)
732 */
733 713
734 for_each_possible_cpu(cpu) { 714 for_each_possible_cpu(cpu) {
735 if (cpu == curcpu) 715 seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
736 continue; 716
737 i = 0; 717 i = 0;
738 local_bh_disable();
739 xt_info_wrlock(cpu);
740 xt_entry_foreach(iter, t->entries[cpu], t->size) { 718 xt_entry_foreach(iter, t->entries[cpu], t->size) {
741 ADD_COUNTER(counters[i], iter->counters.bcnt, 719 u64 bcnt, pcnt;
742 iter->counters.pcnt); 720 unsigned int start;
721
722 do {
723 start = read_seqbegin(lock);
724 bcnt = iter->counters.bcnt;
725 pcnt = iter->counters.pcnt;
726 } while (read_seqretry(lock, start));
727
728 ADD_COUNTER(counters[i], bcnt, pcnt);
743 ++i; 729 ++i;
744 } 730 }
745 xt_info_wrunlock(cpu);
746 local_bh_enable();
747 } 731 }
748 put_cpu();
749} 732}
750 733
751static struct xt_counters *alloc_counters(const struct xt_table *table) 734static struct xt_counters *alloc_counters(const struct xt_table *table)
@@ -759,7 +742,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
759 * about). 742 * about).
760 */ 743 */
761 countersize = sizeof(struct xt_counters) * private->number; 744 countersize = sizeof(struct xt_counters) * private->number;
762 counters = vmalloc(countersize); 745 counters = vzalloc(countersize);
763 746
764 if (counters == NULL) 747 if (counters == NULL)
765 return ERR_PTR(-ENOMEM); 748 return ERR_PTR(-ENOMEM);
@@ -1007,7 +990,7 @@ static int __do_replace(struct net *net, const char *name,
1007 struct arpt_entry *iter; 990 struct arpt_entry *iter;
1008 991
1009 ret = 0; 992 ret = 0;
1010 counters = vmalloc(num_counters * sizeof(struct xt_counters)); 993 counters = vzalloc(num_counters * sizeof(struct xt_counters));
1011 if (!counters) { 994 if (!counters) {
1012 ret = -ENOMEM; 995 ret = -ENOMEM;
1013 goto out; 996 goto out;
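
Converting xt_info_locks to a seqlock_t lets the counter snapshot run without blocking writers: the reader samples the sequence number, reads the 64-bit pair, and retries if a writer intervened. The retry idiom used in all three get_counters() variants:

    unsigned int start;
    u64 bcnt, pcnt;

    do {
            start = read_seqbegin(lock);    /* sample sequence count   */
            bcnt  = iter->counters.bcnt;    /* possibly racy reads ... */
            pcnt  = iter->counters.pcnt;
    } while (read_seqretry(lock, start));   /* ... redone if torn      */
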
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index a846d633b3b6..652efea013dc 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -884,42 +884,25 @@ get_counters(const struct xt_table_info *t,
884 struct ipt_entry *iter; 884 struct ipt_entry *iter;
885 unsigned int cpu; 885 unsigned int cpu;
886 unsigned int i; 886 unsigned int i;
887 unsigned int curcpu = get_cpu();
888
889 /* Instead of clearing (by a previous call to memset())
890 * the counters and using adds, we set the counters
891 * with data used by 'current' CPU.
892 *
893 * Bottom half has to be disabled to prevent deadlock
894 * if new softirq were to run and call ipt_do_table
895 */
896 local_bh_disable();
897 i = 0;
898 xt_entry_foreach(iter, t->entries[curcpu], t->size) {
899 SET_COUNTER(counters[i], iter->counters.bcnt,
900 iter->counters.pcnt);
901 ++i;
902 }
903 local_bh_enable();
904 /* Processing counters from other cpus, we can let bottom half enabled,
905 * (preemption is disabled)
906 */
907 887
908 for_each_possible_cpu(cpu) { 888 for_each_possible_cpu(cpu) {
909 if (cpu == curcpu) 889 seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
910 continue; 890
911 i = 0; 891 i = 0;
912 local_bh_disable();
913 xt_info_wrlock(cpu);
914 xt_entry_foreach(iter, t->entries[cpu], t->size) { 892 xt_entry_foreach(iter, t->entries[cpu], t->size) {
915 ADD_COUNTER(counters[i], iter->counters.bcnt, 893 u64 bcnt, pcnt;
916 iter->counters.pcnt); 894 unsigned int start;
895
896 do {
897 start = read_seqbegin(lock);
898 bcnt = iter->counters.bcnt;
899 pcnt = iter->counters.pcnt;
900 } while (read_seqretry(lock, start));
901
902 ADD_COUNTER(counters[i], bcnt, pcnt);
917 ++i; /* macro does multi eval of i */ 903 ++i; /* macro does multi eval of i */
918 } 904 }
919 xt_info_wrunlock(cpu);
920 local_bh_enable();
921 } 905 }
922 put_cpu();
923} 906}
924 907
925static struct xt_counters *alloc_counters(const struct xt_table *table) 908static struct xt_counters *alloc_counters(const struct xt_table *table)
@@ -932,7 +915,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
932 (other than comefrom, which userspace doesn't care 915 (other than comefrom, which userspace doesn't care
933 about). */ 916 about). */
934 countersize = sizeof(struct xt_counters) * private->number; 917 countersize = sizeof(struct xt_counters) * private->number;
935 counters = vmalloc(countersize); 918 counters = vzalloc(countersize);
936 919
937 if (counters == NULL) 920 if (counters == NULL)
938 return ERR_PTR(-ENOMEM); 921 return ERR_PTR(-ENOMEM);
@@ -1203,7 +1186,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1203 struct ipt_entry *iter; 1186 struct ipt_entry *iter;
1204 1187
1205 ret = 0; 1188 ret = 0;
1206 counters = vmalloc(num_counters * sizeof(struct xt_counters)); 1189 counters = vzalloc(num_counters * sizeof(struct xt_counters));
1207 if (!counters) { 1190 if (!counters) {
1208 ret = -ENOMEM; 1191 ret = -ENOMEM;
1209 goto out; 1192 goto out;
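
vzalloc() is the zeroing form of vmalloc(), and the switch is required here: get_counters() no longer has a SET pass for the local CPU, so every entry of counters[] must start at zero before the ADD loops run. Roughly:

    counters = vzalloc(countersize);
    /* behaves like:
     *   counters = vmalloc(countersize);
     *   if (counters)
     *           memset(counters, 0, countersize);
     */
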
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index ee82d4ef26ce..1aba54ae53c4 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -538,14 +538,16 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
538 if (!pskb_may_pull(skb, ah_hlen)) 538 if (!pskb_may_pull(skb, ah_hlen))
539 goto out; 539 goto out;
540 540
541 ip6h = ipv6_hdr(skb);
542
543 skb_push(skb, hdr_len);
544 541
545 if ((err = skb_cow_data(skb, 0, &trailer)) < 0) 542 if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
546 goto out; 543 goto out;
547 nfrags = err; 544 nfrags = err;
548 545
546 ah = (struct ip_auth_hdr *)skb->data;
547 ip6h = ipv6_hdr(skb);
548
549 skb_push(skb, hdr_len);
550
549 work_iph = ah_alloc_tmp(ahash, nfrags, hdr_len + ahp->icv_trunc_len); 551 work_iph = ah_alloc_tmp(ahash, nfrags, hdr_len + ahp->icv_trunc_len);
550 if (!work_iph) 552 if (!work_iph)
551 goto out; 553 goto out;
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index e46305d1815a..d144e629d2b4 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -44,7 +44,7 @@ int inet6_csk_bind_conflict(const struct sock *sk,
44 !sk2->sk_bound_dev_if || 44 !sk2->sk_bound_dev_if ||
45 sk->sk_bound_dev_if == sk2->sk_bound_dev_if) && 45 sk->sk_bound_dev_if == sk2->sk_bound_dev_if) &&
46 (!sk->sk_reuse || !sk2->sk_reuse || 46 (!sk->sk_reuse || !sk2->sk_reuse ||
47 sk2->sk_state == TCP_LISTEN) && 47 ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) &&
48 ipv6_rcv_saddr_equal(sk, sk2)) 48 ipv6_rcv_saddr_equal(sk, sk2))
49 break; 49 break;
50 } 50 }
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 455582384ece..7d227c644f72 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -897,42 +897,25 @@ get_counters(const struct xt_table_info *t,
897 struct ip6t_entry *iter; 897 struct ip6t_entry *iter;
898 unsigned int cpu; 898 unsigned int cpu;
899 unsigned int i; 899 unsigned int i;
900 unsigned int curcpu = get_cpu();
901
902 /* Instead of clearing (by a previous call to memset())
903 * the counters and using adds, we set the counters
904 * with data used by 'current' CPU
905 *
906 * Bottom half has to be disabled to prevent deadlock
907 * if new softirq were to run and call ipt_do_table
908 */
909 local_bh_disable();
910 i = 0;
911 xt_entry_foreach(iter, t->entries[curcpu], t->size) {
912 SET_COUNTER(counters[i], iter->counters.bcnt,
913 iter->counters.pcnt);
914 ++i;
915 }
916 local_bh_enable();
917 /* Processing counters from other cpus, we can let bottom half enabled,
918 * (preemption is disabled)
919 */
920 900
921 for_each_possible_cpu(cpu) { 901 for_each_possible_cpu(cpu) {
922 if (cpu == curcpu) 902 seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
923 continue; 903
924 i = 0; 904 i = 0;
925 local_bh_disable();
926 xt_info_wrlock(cpu);
927 xt_entry_foreach(iter, t->entries[cpu], t->size) { 905 xt_entry_foreach(iter, t->entries[cpu], t->size) {
928 ADD_COUNTER(counters[i], iter->counters.bcnt, 906 u64 bcnt, pcnt;
929 iter->counters.pcnt); 907 unsigned int start;
908
909 do {
910 start = read_seqbegin(lock);
911 bcnt = iter->counters.bcnt;
912 pcnt = iter->counters.pcnt;
913 } while (read_seqretry(lock, start));
914
915 ADD_COUNTER(counters[i], bcnt, pcnt);
930 ++i; 916 ++i;
931 } 917 }
932 xt_info_wrunlock(cpu);
933 local_bh_enable();
934 } 918 }
935 put_cpu();
936} 919}
937 920
938static struct xt_counters *alloc_counters(const struct xt_table *table) 921static struct xt_counters *alloc_counters(const struct xt_table *table)
@@ -945,7 +928,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
945 (other than comefrom, which userspace doesn't care 928 (other than comefrom, which userspace doesn't care
946 about). */ 929 about). */
947 countersize = sizeof(struct xt_counters) * private->number; 930 countersize = sizeof(struct xt_counters) * private->number;
948 counters = vmalloc(countersize); 931 counters = vzalloc(countersize);
949 932
950 if (counters == NULL) 933 if (counters == NULL)
951 return ERR_PTR(-ENOMEM); 934 return ERR_PTR(-ENOMEM);
@@ -1216,7 +1199,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1216 struct ip6t_entry *iter; 1199 struct ip6t_entry *iter;
1217 1200
1218 ret = 0; 1201 ret = 0;
1219 counters = vmalloc(num_counters * sizeof(struct xt_counters)); 1202 counters = vzalloc(num_counters * sizeof(struct xt_counters));
1220 if (!counters) { 1203 if (!counters) {
1221 ret = -ENOMEM; 1204 ret = -ENOMEM;
1222 goto out; 1205 goto out;
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 0cdba50c0d69..5cb8d3027b18 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -645,25 +645,23 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
645 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh); 645 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
646 u_int8_t l3proto = nfmsg->nfgen_family; 646 u_int8_t l3proto = nfmsg->nfgen_family;
647 647
648 rcu_read_lock(); 648 spin_lock_bh(&nf_conntrack_lock);
649 last = (struct nf_conn *)cb->args[1]; 649 last = (struct nf_conn *)cb->args[1];
650 for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) { 650 for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) {
651restart: 651restart:
652 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[cb->args[0]], 652 hlist_nulls_for_each_entry(h, n, &net->ct.hash[cb->args[0]],
653 hnnode) { 653 hnnode) {
654 if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL) 654 if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
655 continue; 655 continue;
656 ct = nf_ct_tuplehash_to_ctrack(h); 656 ct = nf_ct_tuplehash_to_ctrack(h);
657 if (!atomic_inc_not_zero(&ct->ct_general.use))
658 continue;
659 /* Dump entries of a given L3 protocol number. 657 /* Dump entries of a given L3 protocol number.
660 * If it is not specified, ie. l3proto == 0, 658 * If it is not specified, ie. l3proto == 0,
661 * then dump everything. */ 659 * then dump everything. */
662 if (l3proto && nf_ct_l3num(ct) != l3proto) 660 if (l3proto && nf_ct_l3num(ct) != l3proto)
663 goto releasect; 661 continue;
664 if (cb->args[1]) { 662 if (cb->args[1]) {
665 if (ct != last) 663 if (ct != last)
666 goto releasect; 664 continue;
667 cb->args[1] = 0; 665 cb->args[1] = 0;
668 } 666 }
669 if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid, 667 if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
@@ -681,8 +679,6 @@ restart:
681 if (acct) 679 if (acct)
682 memset(acct, 0, sizeof(struct nf_conn_counter[IP_CT_DIR_MAX])); 680 memset(acct, 0, sizeof(struct nf_conn_counter[IP_CT_DIR_MAX]));
683 } 681 }
684releasect:
685 nf_ct_put(ct);
686 } 682 }
687 if (cb->args[1]) { 683 if (cb->args[1]) {
688 cb->args[1] = 0; 684 cb->args[1] = 0;
@@ -690,7 +686,7 @@ releasect:
690 } 686 }
691 } 687 }
692out: 688out:
693 rcu_read_unlock(); 689 spin_unlock_bh(&nf_conntrack_lock);
694 if (last) 690 if (last)
695 nf_ct_put(last); 691 nf_ct_put(last);
696 692
@@ -928,7 +924,7 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
928 u16 zone; 924 u16 zone;
929 int err; 925 int err;
930 926
931 if (nlh->nlmsg_flags & NLM_F_DUMP) 927 if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP)
932 return netlink_dump_start(ctnl, skb, nlh, ctnetlink_dump_table, 928 return netlink_dump_start(ctnl, skb, nlh, ctnetlink_dump_table,
933 ctnetlink_done); 929 ctnetlink_done);
934 930
@@ -1790,7 +1786,7 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
1790 u16 zone; 1786 u16 zone;
1791 int err; 1787 int err;
1792 1788
1793 if (nlh->nlmsg_flags & NLM_F_DUMP) { 1789 if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
1794 return netlink_dump_start(ctnl, skb, nlh, 1790 return netlink_dump_start(ctnl, skb, nlh,
1795 ctnetlink_exp_dump_table, 1791 ctnetlink_exp_dump_table,
1796 ctnetlink_exp_done); 1792 ctnetlink_exp_done);
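
The dump previously walked the hash under rcu_read_lock() and had to pin each entry with atomic_inc_not_zero() before touching it. Holding nf_conntrack_lock with bottom halves disabled freezes the table instead, so the per-entry refcount dance and the releasect unwind path disappear. The simplified loop shape, sketched (dump_one() stands in for the fill/accounting work):

    spin_lock_bh(&nf_conntrack_lock);
    hlist_nulls_for_each_entry(h, n, head, hnnode) {
            ct = nf_ct_tuplehash_to_ctrack(h);
            /* no atomic_inc_not_zero()/nf_ct_put() per entry:
             * nothing is freed while the lock is held        */
            dump_one(ct);
    }
    spin_unlock_bh(&nf_conntrack_lock);
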
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 80463507420e..c94237631077 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -1325,7 +1325,8 @@ static int __init xt_init(void)
1325 1325
1326 for_each_possible_cpu(i) { 1326 for_each_possible_cpu(i) {
1327 struct xt_info_lock *lock = &per_cpu(xt_info_locks, i); 1327 struct xt_info_lock *lock = &per_cpu(xt_info_locks, i);
1328 spin_lock_init(&lock->lock); 1328
1329 seqlock_init(&lock->lock);
1329 lock->readers = 0; 1330 lock->readers = 0;
1330 } 1331 }
1331 1332
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 1781d99145e2..f83cb370292b 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -519,7 +519,7 @@ static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
519 security_netlink_recv(skb, CAP_NET_ADMIN)) 519 security_netlink_recv(skb, CAP_NET_ADMIN))
520 return -EPERM; 520 return -EPERM;
521 521
522 if (nlh->nlmsg_flags & NLM_F_DUMP) { 522 if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
523 if (ops->dumpit == NULL) 523 if (ops->dumpit == NULL)
524 return -EOPNOTSUPP; 524 return -EOPNOTSUPP;
525 525
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
index fd95beb72f5d..1072b2c19d31 100644
--- a/net/phonet/af_phonet.c
+++ b/net/phonet/af_phonet.c
@@ -37,7 +37,7 @@
37/* Transport protocol registration */ 37/* Transport protocol registration */
38static struct phonet_protocol *proto_tab[PHONET_NPROTO] __read_mostly; 38static struct phonet_protocol *proto_tab[PHONET_NPROTO] __read_mostly;
39 39
40static struct phonet_protocol *phonet_proto_get(int protocol) 40static struct phonet_protocol *phonet_proto_get(unsigned int protocol)
41{ 41{
42 struct phonet_protocol *pp; 42 struct phonet_protocol *pp;
43 43
@@ -458,7 +458,7 @@ static struct packet_type phonet_packet_type __read_mostly = {
458 458
459static DEFINE_MUTEX(proto_tab_lock); 459static DEFINE_MUTEX(proto_tab_lock);
460 460
461int __init_or_module phonet_proto_register(int protocol, 461int __init_or_module phonet_proto_register(unsigned int protocol,
462 struct phonet_protocol *pp) 462 struct phonet_protocol *pp)
463{ 463{
464 int err = 0; 464 int err = 0;
@@ -481,7 +481,7 @@ int __init_or_module phonet_proto_register(int protocol,
481} 481}
482EXPORT_SYMBOL(phonet_proto_register); 482EXPORT_SYMBOL(phonet_proto_register);
483 483
484void phonet_proto_unregister(int protocol, struct phonet_protocol *pp) 484void phonet_proto_unregister(unsigned int protocol, struct phonet_protocol *pp)
485{ 485{
486 mutex_lock(&proto_tab_lock); 486 mutex_lock(&proto_tab_lock);
487 BUG_ON(proto_tab[protocol] != pp); 487 BUG_ON(proto_tab[protocol] != pp);
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 67dc7ce9b63a..83ddfc07e45d 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -508,8 +508,7 @@ static int tcf_csum(struct sk_buff *skb,
508 508
509 spin_lock(&p->tcf_lock); 509 spin_lock(&p->tcf_lock);
510 p->tcf_tm.lastuse = jiffies; 510 p->tcf_tm.lastuse = jiffies;
511 p->tcf_bstats.bytes += qdisc_pkt_len(skb); 511 bstats_update(&p->tcf_bstats, skb);
512 p->tcf_bstats.packets++;
513 action = p->tcf_action; 512 action = p->tcf_action;
514 update_flags = p->update_flags; 513 update_flags = p->update_flags;
515 spin_unlock(&p->tcf_lock); 514 spin_unlock(&p->tcf_lock);
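
Every open-coded pair of the form "bstats.bytes += qdisc_pkt_len(skb); bstats.packets++" in the actions and qdiscs below collapses into bstats_update() or qdisc_bstats_update(). Sketched after the helpers this series presumably adds to include/net/sch_generic.h; note the GSO-aware packet count, which most of the removed open-coded sites (htb excepted) did not have:

    static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
                                     const struct sk_buff *skb)
    {
            bstats->bytes   += qdisc_pkt_len(skb);
            bstats->packets += skb_is_gso(skb) ?
                               skb_shinfo(skb)->gso_segs : 1;
    }

    static inline void qdisc_bstats_update(struct Qdisc *sch,
                                           const struct sk_buff *skb)
    {
            bstats_update(&sch->bstats, skb);
    }
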
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 8daef9632255..c2a7c20e81c1 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -209,8 +209,7 @@ static int tcf_ipt(struct sk_buff *skb, struct tc_action *a,
209 spin_lock(&ipt->tcf_lock); 209 spin_lock(&ipt->tcf_lock);
210 210
211 ipt->tcf_tm.lastuse = jiffies; 211 ipt->tcf_tm.lastuse = jiffies;
212 ipt->tcf_bstats.bytes += qdisc_pkt_len(skb); 212 bstats_update(&ipt->tcf_bstats, skb);
213 ipt->tcf_bstats.packets++;
214 213
215 /* yes, we have to worry about both in and out dev 214 /* yes, we have to worry about both in and out dev
216 worry later - danger - this API seems to have changed 215 worry later - danger - this API seems to have changed
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 0c311be92827..d765067e99db 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -165,8 +165,7 @@ static int tcf_mirred(struct sk_buff *skb, struct tc_action *a,
165 165
166 spin_lock(&m->tcf_lock); 166 spin_lock(&m->tcf_lock);
167 m->tcf_tm.lastuse = jiffies; 167 m->tcf_tm.lastuse = jiffies;
168 m->tcf_bstats.bytes += qdisc_pkt_len(skb); 168 bstats_update(&m->tcf_bstats, skb);
169 m->tcf_bstats.packets++;
170 169
171 dev = m->tcfm_dev; 170 dev = m->tcfm_dev;
172 if (!dev) { 171 if (!dev) {
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 186eb837e600..178a4bd7b7cb 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -125,8 +125,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
125 egress = p->flags & TCA_NAT_FLAG_EGRESS; 125 egress = p->flags & TCA_NAT_FLAG_EGRESS;
126 action = p->tcf_action; 126 action = p->tcf_action;
127 127
128 p->tcf_bstats.bytes += qdisc_pkt_len(skb); 128 bstats_update(&p->tcf_bstats, skb);
129 p->tcf_bstats.packets++;
130 129
131 spin_unlock(&p->tcf_lock); 130 spin_unlock(&p->tcf_lock);
132 131
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index a0593c9640db..445bef716f77 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -187,8 +187,7 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
187bad: 187bad:
188 p->tcf_qstats.overlimits++; 188 p->tcf_qstats.overlimits++;
189done: 189done:
190 p->tcf_bstats.bytes += qdisc_pkt_len(skb); 190 bstats_update(&p->tcf_bstats, skb);
191 p->tcf_bstats.packets++;
192 spin_unlock(&p->tcf_lock); 191 spin_unlock(&p->tcf_lock);
193 return p->tcf_action; 192 return p->tcf_action;
194} 193}
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 7ebf7439b478..e2f08b1e2e58 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -298,8 +298,7 @@ static int tcf_act_police(struct sk_buff *skb, struct tc_action *a,
298 298
299 spin_lock(&police->tcf_lock); 299 spin_lock(&police->tcf_lock);
300 300
301 police->tcf_bstats.bytes += qdisc_pkt_len(skb); 301 bstats_update(&police->tcf_bstats, skb);
302 police->tcf_bstats.packets++;
303 302
304 if (police->tcfp_ewma_rate && 303 if (police->tcfp_ewma_rate &&
305 police->tcf_rate_est.bps >= police->tcfp_ewma_rate) { 304 police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 97e84f3ee775..7287cff7af3e 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -42,8 +42,7 @@ static int tcf_simp(struct sk_buff *skb, struct tc_action *a, struct tcf_result
42 42
43 spin_lock(&d->tcf_lock); 43 spin_lock(&d->tcf_lock);
44 d->tcf_tm.lastuse = jiffies; 44 d->tcf_tm.lastuse = jiffies;
45 d->tcf_bstats.bytes += qdisc_pkt_len(skb); 45 bstats_update(&d->tcf_bstats, skb);
46 d->tcf_bstats.packets++;
47 46
48 /* print policy string followed by _ then packet count 47 /* print policy string followed by _ then packet count
49 * Example if this was the 3rd packet and the string was "hello" 48 * Example if this was the 3rd packet and the string was "hello"
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 66cbf4eb8855..836f5fee9e58 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -46,8 +46,7 @@ static int tcf_skbedit(struct sk_buff *skb, struct tc_action *a,
46 46
47 spin_lock(&d->tcf_lock); 47 spin_lock(&d->tcf_lock);
48 d->tcf_tm.lastuse = jiffies; 48 d->tcf_tm.lastuse = jiffies;
49 d->tcf_bstats.bytes += qdisc_pkt_len(skb); 49 bstats_update(&d->tcf_bstats, skb);
50 d->tcf_bstats.packets++;
51 50
52 if (d->flags & SKBEDIT_F_PRIORITY) 51 if (d->flags & SKBEDIT_F_PRIORITY)
53 skb->priority = d->priority; 52 skb->priority = d->priority;
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 282540778aa8..943d733409d0 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -422,10 +422,8 @@ drop: __maybe_unused
422 } 422 }
423 return ret; 423 return ret;
424 } 424 }
425 sch->bstats.bytes += qdisc_pkt_len(skb); 425 qdisc_bstats_update(sch, skb);
426 sch->bstats.packets++; 426 bstats_update(&flow->bstats, skb);
427 flow->bstats.bytes += qdisc_pkt_len(skb);
428 flow->bstats.packets++;
429 /* 427 /*
430 * Okay, this may seem weird. We pretend we've dropped the packet if 428 * Okay, this may seem weird. We pretend we've dropped the packet if
431 * it goes via ATM. The reason for this is that the outer qdisc 429 * it goes via ATM. The reason for this is that the outer qdisc
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index eb7631590865..c80d1c210c5d 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -390,8 +390,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
390 ret = qdisc_enqueue(skb, cl->q); 390 ret = qdisc_enqueue(skb, cl->q);
391 if (ret == NET_XMIT_SUCCESS) { 391 if (ret == NET_XMIT_SUCCESS) {
392 sch->q.qlen++; 392 sch->q.qlen++;
393 sch->bstats.packets++; 393 qdisc_bstats_update(sch, skb);
394 sch->bstats.bytes += qdisc_pkt_len(skb);
395 cbq_mark_toplevel(q, cl); 394 cbq_mark_toplevel(q, cl);
396 if (!cl->next_alive) 395 if (!cl->next_alive)
397 cbq_activate_class(cl); 396 cbq_activate_class(cl);
@@ -650,8 +649,7 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
650 ret = qdisc_enqueue(skb, cl->q); 649 ret = qdisc_enqueue(skb, cl->q);
651 if (ret == NET_XMIT_SUCCESS) { 650 if (ret == NET_XMIT_SUCCESS) {
652 sch->q.qlen++; 651 sch->q.qlen++;
653 sch->bstats.packets++; 652 qdisc_bstats_update(sch, skb);
654 sch->bstats.bytes += qdisc_pkt_len(skb);
655 if (!cl->next_alive) 653 if (!cl->next_alive)
656 cbq_activate_class(cl); 654 cbq_activate_class(cl);
657 return 0; 655 return 0;
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index aa8b5313f8cf..de55e642eafc 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -351,7 +351,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
351{ 351{
352 struct drr_sched *q = qdisc_priv(sch); 352 struct drr_sched *q = qdisc_priv(sch);
353 struct drr_class *cl; 353 struct drr_class *cl;
354 unsigned int len;
355 int err; 354 int err;
356 355
357 cl = drr_classify(skb, sch, &err); 356 cl = drr_classify(skb, sch, &err);
@@ -362,7 +361,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
362 return err; 361 return err;
363 } 362 }
364 363
365 len = qdisc_pkt_len(skb);
366 err = qdisc_enqueue(skb, cl->qdisc); 364 err = qdisc_enqueue(skb, cl->qdisc);
367 if (unlikely(err != NET_XMIT_SUCCESS)) { 365 if (unlikely(err != NET_XMIT_SUCCESS)) {
368 if (net_xmit_drop_count(err)) { 366 if (net_xmit_drop_count(err)) {
@@ -377,10 +375,8 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
377 cl->deficit = cl->quantum; 375 cl->deficit = cl->quantum;
378 } 376 }
379 377
380 cl->bstats.packets++; 378 bstats_update(&cl->bstats, skb);
381 cl->bstats.bytes += len; 379 qdisc_bstats_update(sch, skb);
382 sch->bstats.packets++;
383 sch->bstats.bytes += len;
384 380
385 sch->q.qlen++; 381 sch->q.qlen++;
386 return err; 382 return err;
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 1d295d62bb5c..60f4bdd4408e 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -260,8 +260,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
260 return err; 260 return err;
261 } 261 }
262 262
263 sch->bstats.bytes += qdisc_pkt_len(skb); 263 qdisc_bstats_update(sch, skb);
264 sch->bstats.packets++;
265 sch->q.qlen++; 264 sch->q.qlen++;
266 265
267 return NET_XMIT_SUCCESS; 266 return NET_XMIT_SUCCESS;
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 069c62b7bb36..2e45791d4f6c 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1599,10 +1599,8 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
1599 if (cl->qdisc->q.qlen == 1) 1599 if (cl->qdisc->q.qlen == 1)
1600 set_active(cl, qdisc_pkt_len(skb)); 1600 set_active(cl, qdisc_pkt_len(skb));
1601 1601
1602 cl->bstats.packets++; 1602 bstats_update(&cl->bstats, skb);
1603 cl->bstats.bytes += qdisc_pkt_len(skb); 1603 qdisc_bstats_update(sch, skb);
1604 sch->bstats.packets++;
1605 sch->bstats.bytes += qdisc_pkt_len(skb);
1606 sch->q.qlen++; 1604 sch->q.qlen++;
1607 1605
1608 return NET_XMIT_SUCCESS; 1606 return NET_XMIT_SUCCESS;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 01b519d6c52d..984c1b0c6836 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -569,15 +569,12 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
569 } 569 }
570 return ret; 570 return ret;
571 } else { 571 } else {
572 cl->bstats.packets += 572 bstats_update(&cl->bstats, skb);
573 skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
574 cl->bstats.bytes += qdisc_pkt_len(skb);
575 htb_activate(q, cl); 573 htb_activate(q, cl);
576 } 574 }
577 575
578 sch->q.qlen++; 576 sch->q.qlen++;
579 sch->bstats.packets += skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1; 577 qdisc_bstats_update(sch, skb);
580 sch->bstats.bytes += qdisc_pkt_len(skb);
581 return NET_XMIT_SUCCESS; 578 return NET_XMIT_SUCCESS;
582} 579}
583 580
@@ -648,12 +645,10 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
648 htb_add_to_wait_tree(q, cl, diff); 645 htb_add_to_wait_tree(q, cl, diff);
649 } 646 }
650 647
651 /* update byte stats except for leaves which are already updated */ 648 /* update basic stats except for leaves which are already updated */
652 if (cl->level) { 649 if (cl->level)
653 cl->bstats.bytes += bytes; 650 bstats_update(&cl->bstats, skb);
654 cl->bstats.packets += skb_is_gso(skb)? 651
655 skb_shinfo(skb)->gso_segs:1;
656 }
657 cl = cl->parent; 652 cl = cl->parent;
658 } 653 }
659} 654}
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index f10e34a68445..bce1665239b8 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -63,8 +63,7 @@ static int ingress_enqueue(struct sk_buff *skb, struct Qdisc *sch)
63 63
64 result = tc_classify(skb, p->filter_list, &res); 64 result = tc_classify(skb, p->filter_list, &res);
65 65
66 sch->bstats.packets++; 66 qdisc_bstats_update(sch, skb);
67 sch->bstats.bytes += qdisc_pkt_len(skb);
68 switch (result) { 67 switch (result) {
69 case TC_ACT_SHOT: 68 case TC_ACT_SHOT:
70 result = TC_ACT_SHOT; 69 result = TC_ACT_SHOT;
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 32690deab5d0..21f13da24763 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -83,8 +83,7 @@ multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
83 83
84 ret = qdisc_enqueue(skb, qdisc); 84 ret = qdisc_enqueue(skb, qdisc);
85 if (ret == NET_XMIT_SUCCESS) { 85 if (ret == NET_XMIT_SUCCESS) {
86 sch->bstats.bytes += qdisc_pkt_len(skb); 86 qdisc_bstats_update(sch, skb);
87 sch->bstats.packets++;
88 sch->q.qlen++; 87 sch->q.qlen++;
89 return NET_XMIT_SUCCESS; 88 return NET_XMIT_SUCCESS;
90 } 89 }
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index e5593c083a78..1c4bce863479 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -240,8 +240,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
240 240
241 if (likely(ret == NET_XMIT_SUCCESS)) { 241 if (likely(ret == NET_XMIT_SUCCESS)) {
242 sch->q.qlen++; 242 sch->q.qlen++;
243 sch->bstats.bytes += qdisc_pkt_len(skb); 243 qdisc_bstats_update(sch, skb);
244 sch->bstats.packets++;
245 } else if (net_xmit_drop_count(ret)) { 244 } else if (net_xmit_drop_count(ret)) {
246 sch->qstats.drops++; 245 sch->qstats.drops++;
247 } 246 }
@@ -477,8 +476,7 @@ static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
477 __skb_queue_after(list, skb, nskb); 476 __skb_queue_after(list, skb, nskb);
478 477
479 sch->qstats.backlog += qdisc_pkt_len(nskb); 478 sch->qstats.backlog += qdisc_pkt_len(nskb);
480 sch->bstats.bytes += qdisc_pkt_len(nskb); 479 qdisc_bstats_update(sch, nskb);
481 sch->bstats.packets++;
482 480
483 return NET_XMIT_SUCCESS; 481 return NET_XMIT_SUCCESS;
484 } 482 }
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index b1c95bce33ce..966158d49dd1 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -84,8 +84,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
84 84
85 ret = qdisc_enqueue(skb, qdisc); 85 ret = qdisc_enqueue(skb, qdisc);
86 if (ret == NET_XMIT_SUCCESS) { 86 if (ret == NET_XMIT_SUCCESS) {
87 sch->bstats.bytes += qdisc_pkt_len(skb); 87 qdisc_bstats_update(sch, skb);
88 sch->bstats.packets++;
89 sch->q.qlen++; 88 sch->q.qlen++;
90 return NET_XMIT_SUCCESS; 89 return NET_XMIT_SUCCESS;
91 } 90 }
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index a67ba3c5a0cc..a6009c5a2c97 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -94,8 +94,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
94 94
95 ret = qdisc_enqueue(skb, child); 95 ret = qdisc_enqueue(skb, child);
96 if (likely(ret == NET_XMIT_SUCCESS)) { 96 if (likely(ret == NET_XMIT_SUCCESS)) {
97 sch->bstats.bytes += qdisc_pkt_len(skb); 97 qdisc_bstats_update(sch, skb);
98 sch->bstats.packets++;
99 sch->q.qlen++; 98 sch->q.qlen++;
100 } else if (net_xmit_drop_count(ret)) { 99 } else if (net_xmit_drop_count(ret)) {
101 q->stats.pdrop++; 100 q->stats.pdrop++;
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index d54ac94066c2..239ec53a634d 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -403,8 +403,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
403 slot->allot = q->scaled_quantum; 403 slot->allot = q->scaled_quantum;
404 } 404 }
405 if (++sch->q.qlen <= q->limit) { 405 if (++sch->q.qlen <= q->limit) {
406 sch->bstats.bytes += qdisc_pkt_len(skb); 406 qdisc_bstats_update(sch, skb);
407 sch->bstats.packets++;
408 return NET_XMIT_SUCCESS; 407 return NET_XMIT_SUCCESS;
409 } 408 }
410 409
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 641a30d64635..77565e721811 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -134,8 +134,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
134 } 134 }
135 135
136 sch->q.qlen++; 136 sch->q.qlen++;
137 sch->bstats.bytes += qdisc_pkt_len(skb); 137 qdisc_bstats_update(sch, skb);
138 sch->bstats.packets++;
139 return NET_XMIT_SUCCESS; 138 return NET_XMIT_SUCCESS;
140} 139}
141 140
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 106479a7c94a..af9360d1f6eb 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -83,8 +83,7 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
83 83
84 if (q->q.qlen < dev->tx_queue_len) { 84 if (q->q.qlen < dev->tx_queue_len) {
85 __skb_queue_tail(&q->q, skb); 85 __skb_queue_tail(&q->q, skb);
86 sch->bstats.bytes += qdisc_pkt_len(skb); 86 qdisc_bstats_update(sch, skb);
87 sch->bstats.packets++;
88 return NET_XMIT_SUCCESS; 87 return NET_XMIT_SUCCESS;
89 } 88 }
90 89
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index afe67849269f..67e31276682a 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -563,8 +563,17 @@ rpcauth_checkverf(struct rpc_task *task, __be32 *p)
563 return cred->cr_ops->crvalidate(task, p); 563 return cred->cr_ops->crvalidate(task, p);
564} 564}
565 565
566static void rpcauth_wrap_req_encode(kxdreproc_t encode, struct rpc_rqst *rqstp,
567 __be32 *data, void *obj)
568{
569 struct xdr_stream xdr;
570
571 xdr_init_encode(&xdr, &rqstp->rq_snd_buf, data);
572 encode(rqstp, &xdr, obj);
573}
574
566int 575int
567rpcauth_wrap_req(struct rpc_task *task, kxdrproc_t encode, void *rqstp, 576rpcauth_wrap_req(struct rpc_task *task, kxdreproc_t encode, void *rqstp,
568 __be32 *data, void *obj) 577 __be32 *data, void *obj)
569{ 578{
570 struct rpc_cred *cred = task->tk_rqstp->rq_cred; 579 struct rpc_cred *cred = task->tk_rqstp->rq_cred;
@@ -574,11 +583,22 @@ rpcauth_wrap_req(struct rpc_task *task, kxdrproc_t encode, void *rqstp,
574 if (cred->cr_ops->crwrap_req) 583 if (cred->cr_ops->crwrap_req)
575 return cred->cr_ops->crwrap_req(task, encode, rqstp, data, obj); 584 return cred->cr_ops->crwrap_req(task, encode, rqstp, data, obj);
576 /* By default, we encode the arguments normally. */ 585 /* By default, we encode the arguments normally. */
577 return encode(rqstp, data, obj); 586 rpcauth_wrap_req_encode(encode, rqstp, data, obj);
587 return 0;
588}
589
590static int
591rpcauth_unwrap_req_decode(kxdrdproc_t decode, struct rpc_rqst *rqstp,
592 __be32 *data, void *obj)
593{
594 struct xdr_stream xdr;
595
596 xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, data);
597 return decode(rqstp, &xdr, obj);
578} 598}
579 599
580int 600int
581rpcauth_unwrap_resp(struct rpc_task *task, kxdrproc_t decode, void *rqstp, 601rpcauth_unwrap_resp(struct rpc_task *task, kxdrdproc_t decode, void *rqstp,
582 __be32 *data, void *obj) 602 __be32 *data, void *obj)
583{ 603{
584 struct rpc_cred *cred = task->tk_rqstp->rq_cred; 604 struct rpc_cred *cred = task->tk_rqstp->rq_cred;
@@ -589,7 +609,7 @@ rpcauth_unwrap_resp(struct rpc_task *task, kxdrproc_t decode, void *rqstp,
589 return cred->cr_ops->crunwrap_resp(task, decode, rqstp, 609 return cred->cr_ops->crunwrap_resp(task, decode, rqstp,
590 data, obj); 610 data, obj);
591 /* By default, we decode the arguments normally. */ 611 /* By default, we decode the arguments normally. */
592 return decode(rqstp, data, obj); 612 return rpcauth_unwrap_req_decode(decode, rqstp, data, obj);
593} 613}
594 614
595int 615int
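
The kxdrproc_t callbacks that took a raw __be32 * are being split into kxdreproc_t encoders and kxdrdproc_t decoders operating on a struct xdr_stream; the wrappers above bridge the old call sites by initializing a stream over rq_snd_buf or rq_rcv_buf first. A minimal encoder in the new style (the struct and function names are hypothetical):

    struct demo_args {
            u32 value;
    };

    static void demo_enc(struct rpc_rqst *req, struct xdr_stream *xdr,
                         const struct demo_args *args)
    {
            __be32 *p = xdr_reserve_space(xdr, 4);  /* one XDR word */

            *p = cpu_to_be32(args->value);
    }
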
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 3835ce35e224..45dbf1521b9a 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1231,9 +1231,19 @@ out_bad:
1231 return NULL; 1231 return NULL;
1232} 1232}
1233 1233
1234static void gss_wrap_req_encode(kxdreproc_t encode, struct rpc_rqst *rqstp,
1235 __be32 *p, void *obj)
1236{
1237 struct xdr_stream xdr;
1238
1239 xdr_init_encode(&xdr, &rqstp->rq_snd_buf, p);
1240 encode(rqstp, &xdr, obj);
1241}
1242
1234static inline int 1243static inline int
1235gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx, 1244gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
1236 kxdrproc_t encode, struct rpc_rqst *rqstp, __be32 *p, void *obj) 1245 kxdreproc_t encode, struct rpc_rqst *rqstp,
1246 __be32 *p, void *obj)
1237{ 1247{
1238 struct xdr_buf *snd_buf = &rqstp->rq_snd_buf; 1248 struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
1239 struct xdr_buf integ_buf; 1249 struct xdr_buf integ_buf;
@@ -1249,9 +1259,7 @@ gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
1249 offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base; 1259 offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
1250 *p++ = htonl(rqstp->rq_seqno); 1260 *p++ = htonl(rqstp->rq_seqno);
1251 1261
1252 status = encode(rqstp, p, obj); 1262 gss_wrap_req_encode(encode, rqstp, p, obj);
1253 if (status)
1254 return status;
1255 1263
1256 if (xdr_buf_subsegment(snd_buf, &integ_buf, 1264 if (xdr_buf_subsegment(snd_buf, &integ_buf,
1257 offset, snd_buf->len - offset)) 1265 offset, snd_buf->len - offset))
@@ -1325,7 +1333,8 @@ out:
1325 1333
1326static inline int 1334static inline int
1327gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx, 1335gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
1328 kxdrproc_t encode, struct rpc_rqst *rqstp, __be32 *p, void *obj) 1336 kxdreproc_t encode, struct rpc_rqst *rqstp,
1337 __be32 *p, void *obj)
1329{ 1338{
1330 struct xdr_buf *snd_buf = &rqstp->rq_snd_buf; 1339 struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
1331 u32 offset; 1340 u32 offset;
@@ -1342,9 +1351,7 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
1342 offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base; 1351 offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
1343 *p++ = htonl(rqstp->rq_seqno); 1352 *p++ = htonl(rqstp->rq_seqno);
1344 1353
1345 status = encode(rqstp, p, obj); 1354 gss_wrap_req_encode(encode, rqstp, p, obj);
1346 if (status)
1347 return status;
1348 1355
1349 status = alloc_enc_pages(rqstp); 1356 status = alloc_enc_pages(rqstp);
1350 if (status) 1357 if (status)
@@ -1394,7 +1401,7 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
1394 1401
1395static int 1402static int
1396gss_wrap_req(struct rpc_task *task, 1403gss_wrap_req(struct rpc_task *task,
1397 kxdrproc_t encode, void *rqstp, __be32 *p, void *obj) 1404 kxdreproc_t encode, void *rqstp, __be32 *p, void *obj)
1398{ 1405{
1399 struct rpc_cred *cred = task->tk_rqstp->rq_cred; 1406 struct rpc_cred *cred = task->tk_rqstp->rq_cred;
1400 struct gss_cred *gss_cred = container_of(cred, struct gss_cred, 1407 struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
@@ -1407,12 +1414,14 @@ gss_wrap_req(struct rpc_task *task,
1407 /* The spec seems a little ambiguous here, but I think that not 1414 /* The spec seems a little ambiguous here, but I think that not
1408 * wrapping context destruction requests makes the most sense. 1415 * wrapping context destruction requests makes the most sense.
1409 */ 1416 */
1410 status = encode(rqstp, p, obj); 1417 gss_wrap_req_encode(encode, rqstp, p, obj);
1418 status = 0;
1411 goto out; 1419 goto out;
1412 } 1420 }
1413 switch (gss_cred->gc_service) { 1421 switch (gss_cred->gc_service) {
1414 case RPC_GSS_SVC_NONE: 1422 case RPC_GSS_SVC_NONE:
1415 status = encode(rqstp, p, obj); 1423 gss_wrap_req_encode(encode, rqstp, p, obj);
1424 status = 0;
1416 break; 1425 break;
1417 case RPC_GSS_SVC_INTEGRITY: 1426 case RPC_GSS_SVC_INTEGRITY:
1418 status = gss_wrap_req_integ(cred, ctx, encode, 1427 status = gss_wrap_req_integ(cred, ctx, encode,
@@ -1494,10 +1503,19 @@ gss_unwrap_resp_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
1494 return 0; 1503 return 0;
1495} 1504}
1496 1505
1506static int
1507gss_unwrap_req_decode(kxdrdproc_t decode, struct rpc_rqst *rqstp,
1508 __be32 *p, void *obj)
1509{
1510 struct xdr_stream xdr;
1511
1512 xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
1513 return decode(rqstp, &xdr, obj);
1514}
1497 1515
1498static int 1516static int
1499gss_unwrap_resp(struct rpc_task *task, 1517gss_unwrap_resp(struct rpc_task *task,
1500 kxdrproc_t decode, void *rqstp, __be32 *p, void *obj) 1518 kxdrdproc_t decode, void *rqstp, __be32 *p, void *obj)
1501{ 1519{
1502 struct rpc_cred *cred = task->tk_rqstp->rq_cred; 1520 struct rpc_cred *cred = task->tk_rqstp->rq_cred;
1503 struct gss_cred *gss_cred = container_of(cred, struct gss_cred, 1521 struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
@@ -1528,7 +1546,7 @@ gss_unwrap_resp(struct rpc_task *task,
1528 cred->cr_auth->au_rslack = cred->cr_auth->au_verfsize + (p - savedp) 1546 cred->cr_auth->au_rslack = cred->cr_auth->au_verfsize + (p - savedp)
1529 + (savedlen - head->iov_len); 1547 + (savedlen - head->iov_len);
1530out_decode: 1548out_decode:
1531 status = decode(rqstp, p, obj); 1549 status = gss_unwrap_req_decode(decode, rqstp, p, obj);
1532out: 1550out:
1533 gss_put_ctx(ctx); 1551 gss_put_ctx(ctx);
1534 dprintk("RPC: %5u gss_unwrap_resp returning %d\n", task->tk_pid, 1552 dprintk("RPC: %5u gss_unwrap_resp returning %d\n", task->tk_pid,
diff --git a/net/sunrpc/bc_svc.c b/net/sunrpc/bc_svc.c
index 7dcfe0cc3500..1dd1a6890007 100644
--- a/net/sunrpc/bc_svc.c
+++ b/net/sunrpc/bc_svc.c
@@ -59,8 +59,8 @@ int bc_send(struct rpc_rqst *req)
59 ret = task->tk_status; 59 ret = task->tk_status;
60 rpc_put_task(task); 60 rpc_put_task(task);
61 } 61 }
62 return ret;
63 dprintk("RPC: bc_send ret= %d\n", ret); 62 dprintk("RPC: bc_send ret= %d\n", ret);
63 return ret;
64} 64}
65 65
66#endif /* CONFIG_NFS_V4_1 */ 66#endif /* CONFIG_NFS_V4_1 */
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 92ce94f5146b..57d344cf2256 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1095,7 +1095,7 @@ static void
1095rpc_xdr_encode(struct rpc_task *task) 1095rpc_xdr_encode(struct rpc_task *task)
1096{ 1096{
1097 struct rpc_rqst *req = task->tk_rqstp; 1097 struct rpc_rqst *req = task->tk_rqstp;
1098 kxdrproc_t encode; 1098 kxdreproc_t encode;
1099 __be32 *p; 1099 __be32 *p;
1100 1100
1101 dprint_status(task); 1101 dprint_status(task);
@@ -1535,7 +1535,7 @@ call_decode(struct rpc_task *task)
1535{ 1535{
1536 struct rpc_clnt *clnt = task->tk_client; 1536 struct rpc_clnt *clnt = task->tk_client;
1537 struct rpc_rqst *req = task->tk_rqstp; 1537 struct rpc_rqst *req = task->tk_rqstp;
1538 kxdrproc_t decode = task->tk_msg.rpc_proc->p_decode; 1538 kxdrdproc_t decode = task->tk_msg.rpc_proc->p_decode;
1539 __be32 *p; 1539 __be32 *p;
1540 1540
1541 dprintk("RPC: %5u call_decode (status %d)\n", 1541 dprintk("RPC: %5u call_decode (status %d)\n",
@@ -1776,12 +1776,11 @@ out_overflow:
1776 goto out_garbage; 1776 goto out_garbage;
1777} 1777}
1778 1778
1779static int rpcproc_encode_null(void *rqstp, __be32 *data, void *obj) 1779static void rpcproc_encode_null(void *rqstp, struct xdr_stream *xdr, void *obj)
1780{ 1780{
1781 return 0;
1782} 1781}
1783 1782
1784static int rpcproc_decode_null(void *rqstp, __be32 *data, void *obj) 1783static int rpcproc_decode_null(void *rqstp, struct xdr_stream *xdr, void *obj)
1785{ 1784{
1786 return 0; 1785 return 0;
1787} 1786}
@@ -1830,23 +1829,15 @@ static void rpc_show_task(const struct rpc_clnt *clnt,
1830 const struct rpc_task *task) 1829 const struct rpc_task *task)
1831{ 1830{
1832 const char *rpc_waitq = "none"; 1831 const char *rpc_waitq = "none";
1833 char *p, action[KSYM_SYMBOL_LEN];
1834 1832
1835 if (RPC_IS_QUEUED(task)) 1833 if (RPC_IS_QUEUED(task))
1836 rpc_waitq = rpc_qname(task->tk_waitqueue); 1834 rpc_waitq = rpc_qname(task->tk_waitqueue);
1837 1835
1838 /* map tk_action pointer to a function name; then trim off 1836 printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%ps q:%s\n",
1839 * the "+0x0 [sunrpc]" */
1840 sprint_symbol(action, (unsigned long)task->tk_action);
1841 p = strchr(action, '+');
1842 if (p)
1843 *p = '\0';
1844
1845 printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%s q:%s\n",
1846 task->tk_pid, task->tk_flags, task->tk_status, 1837 task->tk_pid, task->tk_flags, task->tk_status,
1847 clnt, task->tk_rqstp, task->tk_timeout, task->tk_ops, 1838 clnt, task->tk_rqstp, task->tk_timeout, task->tk_ops,
1848 clnt->cl_protname, clnt->cl_vers, rpc_proc_name(task), 1839 clnt->cl_protname, clnt->cl_vers, rpc_proc_name(task),
1849 action, rpc_waitq); 1840 task->tk_action, rpc_waitq);
1850} 1841}
1851 1842
1852void rpc_show_tasks(void) 1843void rpc_show_tasks(void)
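
The %ps printk format resolves a code pointer to its bare symbol name, which is exactly what the removed sprint_symbol() + strchr() trimming reconstructed by hand (%pS would keep the +offset/size suffix). For example:

    /* prints e.g. "a:call_start q:xprt_pending" */
    printk(KERN_INFO "a:%ps q:%s\n", task->tk_action, rpc_waitq);
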
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 09f01f41e55a..72bc53683965 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -474,7 +474,7 @@ static int __rpc_create_common(struct inode *dir, struct dentry *dentry,
474{ 474{
475 struct inode *inode; 475 struct inode *inode;
476 476
477 BUG_ON(!d_unhashed(dentry)); 477 d_drop(dentry);
478 inode = rpc_get_inode(dir->i_sb, mode); 478 inode = rpc_get_inode(dir->i_sb, mode);
479 if (!inode) 479 if (!inode)
480 goto out_err; 480 goto out_err;
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index fa6d7ca2c851..c652e4cc9fe9 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -57,10 +57,6 @@ enum {
57 RPCBPROC_GETSTAT, 57 RPCBPROC_GETSTAT,
58}; 58};
59 59
60#define RPCB_HIGHPROC_2 RPCBPROC_CALLIT
61#define RPCB_HIGHPROC_3 RPCBPROC_TADDR2UADDR
62#define RPCB_HIGHPROC_4 RPCBPROC_GETSTAT
63
64/* 60/*
65 * r_owner 61 * r_owner
66 * 62 *
@@ -693,46 +689,37 @@ static void rpcb_getport_done(struct rpc_task *child, void *data)
693 * XDR functions for rpcbind 689 * XDR functions for rpcbind
694 */ 690 */
695 691
696static int rpcb_enc_mapping(struct rpc_rqst *req, __be32 *p, 692static void rpcb_enc_mapping(struct rpc_rqst *req, struct xdr_stream *xdr,
697 const struct rpcbind_args *rpcb) 693 const struct rpcbind_args *rpcb)
698{ 694{
699 struct rpc_task *task = req->rq_task; 695 struct rpc_task *task = req->rq_task;
700 struct xdr_stream xdr; 696 __be32 *p;
701 697
702 dprintk("RPC: %5u encoding PMAP_%s call (%u, %u, %d, %u)\n", 698 dprintk("RPC: %5u encoding PMAP_%s call (%u, %u, %d, %u)\n",
703 task->tk_pid, task->tk_msg.rpc_proc->p_name, 699 task->tk_pid, task->tk_msg.rpc_proc->p_name,
 			rpcb->r_prog, rpcb->r_vers, rpcb->r_prot, rpcb->r_port);
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-
-	p = xdr_reserve_space(&xdr, sizeof(__be32) * RPCB_mappingargs_sz);
-	if (unlikely(p == NULL))
-		return -EIO;
-
-	*p++ = htonl(rpcb->r_prog);
-	*p++ = htonl(rpcb->r_vers);
-	*p++ = htonl(rpcb->r_prot);
-	*p = htonl(rpcb->r_port);
-
-	return 0;
+	p = xdr_reserve_space(xdr, RPCB_mappingargs_sz << 2);
+	*p++ = cpu_to_be32(rpcb->r_prog);
+	*p++ = cpu_to_be32(rpcb->r_vers);
+	*p++ = cpu_to_be32(rpcb->r_prot);
+	*p = cpu_to_be32(rpcb->r_port);
 }
 
-static int rpcb_dec_getport(struct rpc_rqst *req, __be32 *p,
+static int rpcb_dec_getport(struct rpc_rqst *req, struct xdr_stream *xdr,
 			    struct rpcbind_args *rpcb)
 {
 	struct rpc_task *task = req->rq_task;
-	struct xdr_stream xdr;
 	unsigned long port;
-
-	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
+	__be32 *p;
 
 	rpcb->r_port = 0;
 
-	p = xdr_inline_decode(&xdr, sizeof(__be32));
+	p = xdr_inline_decode(xdr, 4);
 	if (unlikely(p == NULL))
 		return -EIO;
 
-	port = ntohl(*p);
+	port = be32_to_cpup(p);
 	dprintk("RPC: %5u PMAP_%s result: %lu\n", task->tk_pid,
 			task->tk_msg.rpc_proc->p_name, port);
 	if (unlikely(port > USHRT_MAX))
@@ -742,20 +729,18 @@ static int rpcb_dec_getport(struct rpc_rqst *req, __be32 *p,
 	return 0;
 }
 
-static int rpcb_dec_set(struct rpc_rqst *req, __be32 *p,
+static int rpcb_dec_set(struct rpc_rqst *req, struct xdr_stream *xdr,
 			unsigned int *boolp)
 {
 	struct rpc_task *task = req->rq_task;
-	struct xdr_stream xdr;
-
-	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
+	__be32 *p;
 
-	p = xdr_inline_decode(&xdr, sizeof(__be32));
+	p = xdr_inline_decode(xdr, 4);
 	if (unlikely(p == NULL))
 		return -EIO;
 
 	*boolp = 0;
-	if (*p)
+	if (*p != xdr_zero)
 		*boolp = 1;
 
 	dprintk("RPC: %5u RPCB_%s call %s\n",
@@ -764,73 +749,53 @@ static int rpcb_dec_set(struct rpc_rqst *req, __be32 *p,
 	return 0;
 }
 
-static int encode_rpcb_string(struct xdr_stream *xdr, const char *string,
+static void encode_rpcb_string(struct xdr_stream *xdr, const char *string,
 				const u32 maxstrlen)
 {
-	u32 len;
 	__be32 *p;
+	u32 len;
 
-	if (unlikely(string == NULL))
-		return -EIO;
 	len = strlen(string);
-	if (unlikely(len > maxstrlen))
-		return -EIO;
-
-	p = xdr_reserve_space(xdr, sizeof(__be32) + len);
-	if (unlikely(p == NULL))
-		return -EIO;
+	BUG_ON(len > maxstrlen);
+	p = xdr_reserve_space(xdr, 4 + len);
 	xdr_encode_opaque(p, string, len);
-
-	return 0;
 }
 
-static int rpcb_enc_getaddr(struct rpc_rqst *req, __be32 *p,
+static void rpcb_enc_getaddr(struct rpc_rqst *req, struct xdr_stream *xdr,
 			    const struct rpcbind_args *rpcb)
 {
 	struct rpc_task *task = req->rq_task;
-	struct xdr_stream xdr;
+	__be32 *p;
 
 	dprintk("RPC: %5u encoding RPCB_%s call (%u, %u, '%s', '%s')\n",
 			task->tk_pid, task->tk_msg.rpc_proc->p_name,
 			rpcb->r_prog, rpcb->r_vers,
 			rpcb->r_netid, rpcb->r_addr);
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-
-	p = xdr_reserve_space(&xdr,
-			sizeof(__be32) * (RPCB_program_sz + RPCB_version_sz));
-	if (unlikely(p == NULL))
-		return -EIO;
-	*p++ = htonl(rpcb->r_prog);
-	*p = htonl(rpcb->r_vers);
-
-	if (encode_rpcb_string(&xdr, rpcb->r_netid, RPCBIND_MAXNETIDLEN))
-		return -EIO;
-	if (encode_rpcb_string(&xdr, rpcb->r_addr, RPCBIND_MAXUADDRLEN))
-		return -EIO;
-	if (encode_rpcb_string(&xdr, rpcb->r_owner, RPCB_MAXOWNERLEN))
-		return -EIO;
+	p = xdr_reserve_space(xdr, (RPCB_program_sz + RPCB_version_sz) << 2);
+	*p++ = cpu_to_be32(rpcb->r_prog);
+	*p = cpu_to_be32(rpcb->r_vers);
 
-	return 0;
+	encode_rpcb_string(xdr, rpcb->r_netid, RPCBIND_MAXNETIDLEN);
+	encode_rpcb_string(xdr, rpcb->r_addr, RPCBIND_MAXUADDRLEN);
+	encode_rpcb_string(xdr, rpcb->r_owner, RPCB_MAXOWNERLEN);
 }
 
-static int rpcb_dec_getaddr(struct rpc_rqst *req, __be32 *p,
+static int rpcb_dec_getaddr(struct rpc_rqst *req, struct xdr_stream *xdr,
 			    struct rpcbind_args *rpcb)
 {
 	struct sockaddr_storage address;
 	struct sockaddr *sap = (struct sockaddr *)&address;
 	struct rpc_task *task = req->rq_task;
-	struct xdr_stream xdr;
+	__be32 *p;
 	u32 len;
 
 	rpcb->r_port = 0;
 
-	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
-
-	p = xdr_inline_decode(&xdr, sizeof(__be32));
+	p = xdr_inline_decode(xdr, 4);
 	if (unlikely(p == NULL))
 		goto out_fail;
-	len = ntohl(*p);
+	len = be32_to_cpup(p);
 
 	/*
 	 * If the returned universal address is a null string,
@@ -845,7 +810,7 @@ static int rpcb_dec_getaddr(struct rpc_rqst *req, __be32 *p,
 	if (unlikely(len > RPCBIND_MAXUADDRLEN))
 		goto out_fail;
 
-	p = xdr_inline_decode(&xdr, len);
+	p = xdr_inline_decode(xdr, len);
 	if (unlikely(p == NULL))
 		goto out_fail;
 	dprintk("RPC: %5u RPCB_%s reply: %s\n", task->tk_pid,
@@ -871,8 +836,8 @@ out_fail:
 static struct rpc_procinfo rpcb_procedures2[] = {
 	[RPCBPROC_SET] = {
 		.p_proc = RPCBPROC_SET,
-		.p_encode = (kxdrproc_t)rpcb_enc_mapping,
-		.p_decode = (kxdrproc_t)rpcb_dec_set,
+		.p_encode = (kxdreproc_t)rpcb_enc_mapping,
+		.p_decode = (kxdrdproc_t)rpcb_dec_set,
 		.p_arglen = RPCB_mappingargs_sz,
 		.p_replen = RPCB_setres_sz,
 		.p_statidx = RPCBPROC_SET,
@@ -881,8 +846,8 @@ static struct rpc_procinfo rpcb_procedures2[] = {
 	},
 	[RPCBPROC_UNSET] = {
 		.p_proc = RPCBPROC_UNSET,
-		.p_encode = (kxdrproc_t)rpcb_enc_mapping,
-		.p_decode = (kxdrproc_t)rpcb_dec_set,
+		.p_encode = (kxdreproc_t)rpcb_enc_mapping,
+		.p_decode = (kxdrdproc_t)rpcb_dec_set,
 		.p_arglen = RPCB_mappingargs_sz,
 		.p_replen = RPCB_setres_sz,
 		.p_statidx = RPCBPROC_UNSET,
@@ -891,8 +856,8 @@ static struct rpc_procinfo rpcb_procedures2[] = {
 	},
 	[RPCBPROC_GETPORT] = {
 		.p_proc = RPCBPROC_GETPORT,
-		.p_encode = (kxdrproc_t)rpcb_enc_mapping,
-		.p_decode = (kxdrproc_t)rpcb_dec_getport,
+		.p_encode = (kxdreproc_t)rpcb_enc_mapping,
+		.p_decode = (kxdrdproc_t)rpcb_dec_getport,
 		.p_arglen = RPCB_mappingargs_sz,
 		.p_replen = RPCB_getportres_sz,
 		.p_statidx = RPCBPROC_GETPORT,
@@ -904,8 +869,8 @@ static struct rpc_procinfo rpcb_procedures2[] = {
 static struct rpc_procinfo rpcb_procedures3[] = {
 	[RPCBPROC_SET] = {
 		.p_proc = RPCBPROC_SET,
-		.p_encode = (kxdrproc_t)rpcb_enc_getaddr,
-		.p_decode = (kxdrproc_t)rpcb_dec_set,
+		.p_encode = (kxdreproc_t)rpcb_enc_getaddr,
+		.p_decode = (kxdrdproc_t)rpcb_dec_set,
 		.p_arglen = RPCB_getaddrargs_sz,
 		.p_replen = RPCB_setres_sz,
 		.p_statidx = RPCBPROC_SET,
@@ -914,8 +879,8 @@ static struct rpc_procinfo rpcb_procedures3[] = {
 	},
 	[RPCBPROC_UNSET] = {
 		.p_proc = RPCBPROC_UNSET,
-		.p_encode = (kxdrproc_t)rpcb_enc_getaddr,
-		.p_decode = (kxdrproc_t)rpcb_dec_set,
+		.p_encode = (kxdreproc_t)rpcb_enc_getaddr,
+		.p_decode = (kxdrdproc_t)rpcb_dec_set,
 		.p_arglen = RPCB_getaddrargs_sz,
 		.p_replen = RPCB_setres_sz,
 		.p_statidx = RPCBPROC_UNSET,
@@ -924,8 +889,8 @@ static struct rpc_procinfo rpcb_procedures3[] = {
 	},
 	[RPCBPROC_GETADDR] = {
 		.p_proc = RPCBPROC_GETADDR,
-		.p_encode = (kxdrproc_t)rpcb_enc_getaddr,
-		.p_decode = (kxdrproc_t)rpcb_dec_getaddr,
+		.p_encode = (kxdreproc_t)rpcb_enc_getaddr,
+		.p_decode = (kxdrdproc_t)rpcb_dec_getaddr,
 		.p_arglen = RPCB_getaddrargs_sz,
 		.p_replen = RPCB_getaddrres_sz,
 		.p_statidx = RPCBPROC_GETADDR,
@@ -937,8 +902,8 @@ static struct rpc_procinfo rpcb_procedures3[] = {
 static struct rpc_procinfo rpcb_procedures4[] = {
 	[RPCBPROC_SET] = {
 		.p_proc = RPCBPROC_SET,
-		.p_encode = (kxdrproc_t)rpcb_enc_getaddr,
-		.p_decode = (kxdrproc_t)rpcb_dec_set,
+		.p_encode = (kxdreproc_t)rpcb_enc_getaddr,
+		.p_decode = (kxdrdproc_t)rpcb_dec_set,
 		.p_arglen = RPCB_getaddrargs_sz,
 		.p_replen = RPCB_setres_sz,
 		.p_statidx = RPCBPROC_SET,
@@ -947,8 +912,8 @@ static struct rpc_procinfo rpcb_procedures4[] = {
 	},
 	[RPCBPROC_UNSET] = {
 		.p_proc = RPCBPROC_UNSET,
-		.p_encode = (kxdrproc_t)rpcb_enc_getaddr,
-		.p_decode = (kxdrproc_t)rpcb_dec_set,
+		.p_encode = (kxdreproc_t)rpcb_enc_getaddr,
+		.p_decode = (kxdrdproc_t)rpcb_dec_set,
 		.p_arglen = RPCB_getaddrargs_sz,
 		.p_replen = RPCB_setres_sz,
 		.p_statidx = RPCBPROC_UNSET,
@@ -957,8 +922,8 @@ static struct rpc_procinfo rpcb_procedures4[] = {
 	},
 	[RPCBPROC_GETADDR] = {
 		.p_proc = RPCBPROC_GETADDR,
-		.p_encode = (kxdrproc_t)rpcb_enc_getaddr,
-		.p_decode = (kxdrproc_t)rpcb_dec_getaddr,
+		.p_encode = (kxdreproc_t)rpcb_enc_getaddr,
+		.p_decode = (kxdrdproc_t)rpcb_dec_getaddr,
 		.p_arglen = RPCB_getaddrargs_sz,
 		.p_replen = RPCB_getaddrres_sz,
 		.p_statidx = RPCBPROC_GETADDR,
@@ -993,19 +958,19 @@ static struct rpcb_info rpcb_next_version6[] = {
 
 static struct rpc_version rpcb_version2 = {
 	.number = RPCBVERS_2,
-	.nrprocs = RPCB_HIGHPROC_2,
+	.nrprocs = ARRAY_SIZE(rpcb_procedures2),
 	.procs = rpcb_procedures2
 };
 
 static struct rpc_version rpcb_version3 = {
 	.number = RPCBVERS_3,
-	.nrprocs = RPCB_HIGHPROC_3,
+	.nrprocs = ARRAY_SIZE(rpcb_procedures3),
 	.procs = rpcb_procedures3
 };
 
 static struct rpc_version rpcb_version4 = {
 	.number = RPCBVERS_4,
-	.nrprocs = RPCB_HIGHPROC_4,
+	.nrprocs = ARRAY_SIZE(rpcb_procedures4),
 	.procs = rpcb_procedures4
 };
 
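The conversion above moves rpcbind to the shared xdr_stream calling convention: the RPC client core now initializes the stream before invoking the method, so encoders simply reserve space and store big-endian words, and decoders pull 4-byte quantities through xdr_inline_decode(). A stand-alone userspace sketch of that reserve-then-store pattern (illustrative names only, not the kernel API; htonl() standing in for cpu_to_be32()):

/*
 * Minimal userspace sketch of the XDR encoding pattern the new
 * kxdreproc_t helpers follow: reserve a run of 32-bit XDR words,
 * then store each field in big-endian order. The struct, buffer,
 * and reserve_words() helper are hypothetical stand-ins.
 */
#include <arpa/inet.h>	/* htonl()/ntohl() play the role of cpu_to_be32() */
#include <stdint.h>
#include <stdio.h>

struct mapping { uint32_t prog, vers, prot, port; };

/* "Reserve" nwords 32-bit slots in buf, like xdr_reserve_space(). */
static uint32_t *reserve_words(uint32_t *buf, size_t *off, size_t nwords)
{
	uint32_t *p = buf + *off;
	*off += nwords;
	return p;
}

int main(void)
{
	uint32_t wire[4];
	size_t off = 0;
	struct mapping m = { 100000, 2, 6, 111 };

	uint32_t *p = reserve_words(wire, &off, 4);
	*p++ = htonl(m.prog);
	*p++ = htonl(m.vers);
	*p++ = htonl(m.prot);
	*p = htonl(m.port);

	for (size_t i = 0; i < 4; i++)
		printf("%08x\n", ntohl(wire[i]));
	return 0;
}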
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 6359c42c4941..0e659c665a8d 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -488,10 +488,6 @@ svc_destroy(struct svc_serv *serv)
 	if (svc_serv_is_pooled(serv))
 		svc_pool_map_put();
 
-#if defined(CONFIG_NFS_V4_1)
-	svc_sock_destroy(serv->bc_xprt);
-#endif /* CONFIG_NFS_V4_1 */
-
 	svc_unregister(serv);
 	kfree(serv->sv_pools);
 	kfree(serv);
@@ -1147,7 +1143,6 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
 dropit:
 	svc_authorise(rqstp);	/* doesn't hurt to call this twice */
 	dprintk("svc: svc_process dropit\n");
-	svc_drop(rqstp);
 	return 0;
 
 err_short_len:
@@ -1218,7 +1213,6 @@ svc_process(struct svc_rqst *rqstp)
 	struct kvec *resv = &rqstp->rq_res.head[0];
 	struct svc_serv *serv = rqstp->rq_server;
 	u32 dir;
-	int error;
 
 	/*
 	 * Setup response xdr_buf.
@@ -1246,11 +1240,13 @@ svc_process(struct svc_rqst *rqstp)
 		return 0;
 	}
 
-	error = svc_process_common(rqstp, argv, resv);
-	if (error <= 0)
-		return error;
-
-	return svc_send(rqstp);
+	/* Returns 1 for send, 0 for drop */
+	if (svc_process_common(rqstp, argv, resv))
+		return svc_send(rqstp);
+	else {
+		svc_drop(rqstp);
+		return 0;
+	}
 }
 
 #if defined(CONFIG_NFS_V4_1)
@@ -1264,10 +1260,9 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
 {
 	struct kvec *argv = &rqstp->rq_arg.head[0];
 	struct kvec *resv = &rqstp->rq_res.head[0];
-	int error;
 
 	/* Build the svc_rqst used by the common processing routine */
-	rqstp->rq_xprt = serv->bc_xprt;
+	rqstp->rq_xprt = serv->sv_bc_xprt;
 	rqstp->rq_xid = req->rq_xid;
 	rqstp->rq_prot = req->rq_xprt->prot;
 	rqstp->rq_server = serv;
@@ -1292,12 +1287,15 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
 	svc_getu32(argv);	/* XID */
 	svc_getnl(argv);	/* CALLDIR */
 
-	error = svc_process_common(rqstp, argv, resv);
-	if (error <= 0)
-		return error;
-
-	memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf));
-	return bc_send(req);
+	/* Returns 1 for send, 0 for drop */
+	if (svc_process_common(rqstp, argv, resv)) {
+		memcpy(&req->rq_snd_buf, &rqstp->rq_res,
+		       sizeof(req->rq_snd_buf));
+		return bc_send(req);
+	} else {
+		/* Nothing to do to drop request */
+		return 0;
+	}
 }
 EXPORT_SYMBOL(bc_svc_process);
 #endif /* CONFIG_NFS_V4_1 */
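With svc_drop() gone from svc_process_common(), the helper only reports whether a reply should be sent, and each caller completes the request in its own transport-specific way. A tiny stand-alone sketch of that control flow (illustrative names, not the kernel API):

/*
 * Sketch of the send/drop split above: the common routine decides,
 * the caller acts. puts() stands in for svc_send()/svc_drop().
 */
#include <stdio.h>

static int process_common(int valid_request)
{
	/* Returns 1 for send, 0 for drop */
	return valid_request ? 1 : 0;
}

static int handle_request(int valid_request)
{
	if (process_common(valid_request)) {
		puts("send reply");	/* svc_send() on the fore channel */
		return 1;
	}
	puts("drop request");		/* svc_drop(), or nothing for bc */
	return 0;
}

int main(void)
{
	handle_request(1);
	handle_request(0);
	return 0;
}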
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 07919e16be3e..d265aa700bb3 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -66,6 +66,13 @@ static void svc_sock_free(struct svc_xprt *);
 static struct svc_xprt *svc_create_socket(struct svc_serv *, int,
 					  struct net *, struct sockaddr *,
 					  int, int);
+#if defined(CONFIG_NFS_V4_1)
+static struct svc_xprt *svc_bc_create_socket(struct svc_serv *, int,
+					     struct net *, struct sockaddr *,
+					     int, int);
+static void svc_bc_sock_free(struct svc_xprt *xprt);
+#endif /* CONFIG_NFS_V4_1 */
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 static struct lock_class_key svc_key[2];
 static struct lock_class_key svc_slock_key[2];
@@ -1184,6 +1191,57 @@ static struct svc_xprt *svc_tcp_create(struct svc_serv *serv,
 	return svc_create_socket(serv, IPPROTO_TCP, net, sa, salen, flags);
 }
 
+#if defined(CONFIG_NFS_V4_1)
+static struct svc_xprt *svc_bc_create_socket(struct svc_serv *, int,
+					     struct net *, struct sockaddr *,
+					     int, int);
+static void svc_bc_sock_free(struct svc_xprt *xprt);
+
+static struct svc_xprt *svc_bc_tcp_create(struct svc_serv *serv,
+					  struct net *net,
+					  struct sockaddr *sa, int salen,
+					  int flags)
+{
+	return svc_bc_create_socket(serv, IPPROTO_TCP, net, sa, salen, flags);
+}
+
+static void svc_bc_tcp_sock_detach(struct svc_xprt *xprt)
+{
+}
+
+static struct svc_xprt_ops svc_tcp_bc_ops = {
+	.xpo_create = svc_bc_tcp_create,
+	.xpo_detach = svc_bc_tcp_sock_detach,
+	.xpo_free = svc_bc_sock_free,
+	.xpo_prep_reply_hdr = svc_tcp_prep_reply_hdr,
+};
+
+static struct svc_xprt_class svc_tcp_bc_class = {
+	.xcl_name = "tcp-bc",
+	.xcl_owner = THIS_MODULE,
+	.xcl_ops = &svc_tcp_bc_ops,
+	.xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
+};
+
+static void svc_init_bc_xprt_sock(void)
+{
+	svc_reg_xprt_class(&svc_tcp_bc_class);
+}
+
+static void svc_cleanup_bc_xprt_sock(void)
+{
+	svc_unreg_xprt_class(&svc_tcp_bc_class);
+}
+#else /* CONFIG_NFS_V4_1 */
+static void svc_init_bc_xprt_sock(void)
+{
+}
+
+static void svc_cleanup_bc_xprt_sock(void)
+{
+}
+#endif /* CONFIG_NFS_V4_1 */
+
 static struct svc_xprt_ops svc_tcp_ops = {
 	.xpo_create = svc_tcp_create,
 	.xpo_recvfrom = svc_tcp_recvfrom,
@@ -1207,12 +1265,14 @@ void svc_init_xprt_sock(void)
 {
 	svc_reg_xprt_class(&svc_tcp_class);
 	svc_reg_xprt_class(&svc_udp_class);
+	svc_init_bc_xprt_sock();
 }
 
 void svc_cleanup_xprt_sock(void)
 {
 	svc_unreg_xprt_class(&svc_tcp_class);
 	svc_unreg_xprt_class(&svc_udp_class);
+	svc_cleanup_bc_xprt_sock();
 }
 
 static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
@@ -1509,41 +1569,45 @@ static void svc_sock_free(struct svc_xprt *xprt)
 	kfree(svsk);
 }
 
+#if defined(CONFIG_NFS_V4_1)
 /*
- * Create a svc_xprt.
- *
- * For internal use only (e.g. nfsv4.1 backchannel).
- * Callers should typically use the xpo_create() method.
+ * Create a back channel svc_xprt which shares the fore channel socket.
  */
-struct svc_xprt *svc_sock_create(struct svc_serv *serv, int prot)
+static struct svc_xprt *svc_bc_create_socket(struct svc_serv *serv,
+					     int protocol,
+					     struct net *net,
+					     struct sockaddr *sin, int len,
+					     int flags)
 {
 	struct svc_sock *svsk;
-	struct svc_xprt *xprt = NULL;
+	struct svc_xprt *xprt;
+
+	if (protocol != IPPROTO_TCP) {
+		printk(KERN_WARNING "svc: only TCP sockets"
+			" supported on shared back channel\n");
+		return ERR_PTR(-EINVAL);
+	}
 
-	dprintk("svc: %s\n", __func__);
 	svsk = kzalloc(sizeof(*svsk), GFP_KERNEL);
 	if (!svsk)
-		goto out;
+		return ERR_PTR(-ENOMEM);
 
 	xprt = &svsk->sk_xprt;
-	if (prot == IPPROTO_TCP)
-		svc_xprt_init(&svc_tcp_class, xprt, serv);
-	else if (prot == IPPROTO_UDP)
-		svc_xprt_init(&svc_udp_class, xprt, serv);
-	else
-		BUG();
-out:
-	dprintk("svc: %s return %p\n", __func__, xprt);
+	svc_xprt_init(&svc_tcp_bc_class, xprt, serv);
+
+	serv->sv_bc_xprt = xprt;
+
 	return xprt;
 }
-EXPORT_SYMBOL_GPL(svc_sock_create);
 
 /*
- * Destroy a svc_sock.
+ * Free a back channel svc_sock.
 */
-void svc_sock_destroy(struct svc_xprt *xprt)
+static void svc_bc_sock_free(struct svc_xprt *xprt)
 {
-	if (xprt)
+	if (xprt) {
+		kfree(xprt->xpt_bc_sid);
 		kfree(container_of(xprt, struct svc_sock, sk_xprt));
+	}
 }
-EXPORT_SYMBOL_GPL(svc_sock_destroy);
+#endif /* CONFIG_NFS_V4_1 */
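The back channel reuses the fore channel's TCP socket, so its svc_xprt_class needs only create/detach/free ops, and the init/cleanup hooks compile down to empty stubs when NFSv4.1 is configured out. A stand-alone sketch of that conditional class-registration pattern (illustrative names, not the kernel API):

/*
 * Userspace sketch: a transport class is a table of ops registered at
 * init time; builds without the feature get no-op init/cleanup stubs.
 */
#include <stdio.h>

struct xprt_ops {
	const char *name;
	void (*create)(void);
};

#define HAVE_BACKCHANNEL 1	/* stands in for CONFIG_NFS_V4_1 */

#if HAVE_BACKCHANNEL
static void bc_create(void) { puts("create back channel xprt"); }

static struct xprt_ops tcp_bc_ops = { "tcp-bc", bc_create };

static void init_bc(void)    { printf("register %s\n", tcp_bc_ops.name); }
static void cleanup_bc(void) { printf("unregister %s\n", tcp_bc_ops.name); }
#else
static void init_bc(void)    { }	/* no-op when the feature is off */
static void cleanup_bc(void) { }
#endif

int main(void)
{
	init_bc();
	cleanup_bc();
	return 0;
}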
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index cd9e841e7492..679cd674b81d 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -552,6 +552,74 @@ void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int b
 }
 EXPORT_SYMBOL_GPL(xdr_write_pages);
 
+static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
+		__be32 *p, unsigned int len)
+{
+	if (len > iov->iov_len)
+		len = iov->iov_len;
+	if (p == NULL)
+		p = (__be32*)iov->iov_base;
+	xdr->p = p;
+	xdr->end = (__be32*)(iov->iov_base + len);
+	xdr->iov = iov;
+	xdr->page_ptr = NULL;
+}
+
+static int xdr_set_page_base(struct xdr_stream *xdr,
+		unsigned int base, unsigned int len)
+{
+	unsigned int pgnr;
+	unsigned int maxlen;
+	unsigned int pgoff;
+	unsigned int pgend;
+	void *kaddr;
+
+	maxlen = xdr->buf->page_len;
+	if (base >= maxlen)
+		return -EINVAL;
+	maxlen -= base;
+	if (len > maxlen)
+		len = maxlen;
+
+	base += xdr->buf->page_base;
+
+	pgnr = base >> PAGE_SHIFT;
+	xdr->page_ptr = &xdr->buf->pages[pgnr];
+	kaddr = page_address(*xdr->page_ptr);
+
+	pgoff = base & ~PAGE_MASK;
+	xdr->p = (__be32*)(kaddr + pgoff);
+
+	pgend = pgoff + len;
+	if (pgend > PAGE_SIZE)
+		pgend = PAGE_SIZE;
+	xdr->end = (__be32*)(kaddr + pgend);
+	xdr->iov = NULL;
+	return 0;
+}
+
+static void xdr_set_next_page(struct xdr_stream *xdr)
+{
+	unsigned int newbase;
+
+	newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
+	newbase -= xdr->buf->page_base;
+
+	if (xdr_set_page_base(xdr, newbase, PAGE_SIZE) < 0)
+		xdr_set_iov(xdr, xdr->buf->tail, NULL, xdr->buf->len);
+}
+
+static bool xdr_set_next_buffer(struct xdr_stream *xdr)
+{
+	if (xdr->page_ptr != NULL)
+		xdr_set_next_page(xdr);
+	else if (xdr->iov == xdr->buf->head) {
+		if (xdr_set_page_base(xdr, 0, PAGE_SIZE) < 0)
+			xdr_set_iov(xdr, xdr->buf->tail, NULL, xdr->buf->len);
+	}
+	return xdr->p != xdr->end;
+}
+
 /**
  * xdr_init_decode - Initialize an xdr_stream for decoding data.
  * @xdr: pointer to xdr_stream struct
@@ -560,41 +628,67 @@ EXPORT_SYMBOL_GPL(xdr_write_pages);
  */
 void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
 {
-	struct kvec *iov = buf->head;
-	unsigned int len = iov->iov_len;
-
-	if (len > buf->len)
-		len = buf->len;
 	xdr->buf = buf;
-	xdr->iov = iov;
-	xdr->p = p;
-	xdr->end = (__be32 *)((char *)iov->iov_base + len);
+	xdr->scratch.iov_base = NULL;
+	xdr->scratch.iov_len = 0;
+	if (buf->head[0].iov_len != 0)
+		xdr_set_iov(xdr, buf->head, p, buf->len);
+	else if (buf->page_len != 0)
+		xdr_set_page_base(xdr, 0, buf->len);
 }
 EXPORT_SYMBOL_GPL(xdr_init_decode);
 
-/**
- * xdr_inline_peek - Allow read-ahead in the XDR data stream
- * @xdr: pointer to xdr_stream struct
- * @nbytes: number of bytes of data to decode
- *
- * Check if the input buffer is long enough to enable us to decode
- * 'nbytes' more bytes of data starting at the current position.
- * If so return the current pointer without updating the current
 * pointer position.
- */
-__be32 * xdr_inline_peek(struct xdr_stream *xdr, size_t nbytes)
+static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
 {
 	__be32 *p = xdr->p;
 	__be32 *q = p + XDR_QUADLEN(nbytes);
 
 	if (unlikely(q > xdr->end || q < p))
 		return NULL;
+	xdr->p = q;
 	return p;
 }
-EXPORT_SYMBOL_GPL(xdr_inline_peek);
 
 /**
- * xdr_inline_decode - Retrieve non-page XDR data to decode
+ * xdr_set_scratch_buffer - Attach a scratch buffer for decoding data.
+ * @xdr: pointer to xdr_stream struct
+ * @buf: pointer to an empty buffer
+ * @buflen: size of 'buf'
+ *
+ * The scratch buffer is used when decoding from an array of pages.
+ * If an xdr_inline_decode() call spans across page boundaries, then
+ * we copy the data into the scratch buffer in order to allow linear
+ * access.
+ */
+void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen)
+{
+	xdr->scratch.iov_base = buf;
+	xdr->scratch.iov_len = buflen;
+}
+EXPORT_SYMBOL_GPL(xdr_set_scratch_buffer);
+
+static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
+{
+	__be32 *p;
+	void *cpdest = xdr->scratch.iov_base;
+	size_t cplen = (char *)xdr->end - (char *)xdr->p;
+
+	if (nbytes > xdr->scratch.iov_len)
+		return NULL;
+	memcpy(cpdest, xdr->p, cplen);
+	cpdest += cplen;
+	nbytes -= cplen;
+	if (!xdr_set_next_buffer(xdr))
+		return NULL;
+	p = __xdr_inline_decode(xdr, nbytes);
+	if (p == NULL)
+		return NULL;
+	memcpy(cpdest, p, nbytes);
+	return xdr->scratch.iov_base;
+}
+
+/**
+ * xdr_inline_decode - Retrieve XDR data to decode
  * @xdr: pointer to xdr_stream struct
  * @nbytes: number of bytes of data to decode
 *
@@ -605,13 +699,16 @@ EXPORT_SYMBOL_GPL(xdr_inline_peek);
 */
 __be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
 {
-	__be32 *p = xdr->p;
-	__be32 *q = p + XDR_QUADLEN(nbytes);
+	__be32 *p;
 
-	if (unlikely(q > xdr->end || q < p))
+	if (nbytes == 0)
+		return xdr->p;
+	if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
 		return NULL;
-	xdr->p = q;
-	return p;
+	p = __xdr_inline_decode(xdr, nbytes);
+	if (p != NULL)
+		return p;
+	return xdr_copy_to_scratch(xdr, nbytes);
 }
 EXPORT_SYMBOL_GPL(xdr_inline_decode);
 
@@ -671,16 +768,12 @@ EXPORT_SYMBOL_GPL(xdr_read_pages);
 */
 void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
 {
-	char * kaddr = page_address(xdr->buf->pages[0]);
 	xdr_read_pages(xdr, len);
 	/*
 	 * Position current pointer at beginning of tail, and
 	 * set remaining message length.
 	 */
-	if (len > PAGE_CACHE_SIZE - xdr->buf->page_base)
-		len = PAGE_CACHE_SIZE - xdr->buf->page_base;
-	xdr->p = (__be32 *)(kaddr + xdr->buf->page_base);
-	xdr->end = (__be32 *)((char *)xdr->p + len);
+	xdr_set_page_base(xdr, 0, len);
 }
 EXPORT_SYMBOL_GPL(xdr_enter_page);
 
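The decode-side rewrite lets xdr_inline_decode() walk from the head kvec into the page array and on to the tail, copying through the caller-supplied scratch buffer whenever a request straddles a segment boundary. A stand-alone userspace sketch of the straddle case (simplified types and names, not the kernel API):

/*
 * Userspace sketch of the scratch-buffer idea: when a requested run
 * of bytes crosses the end of the current segment, copy the head of
 * the run out of the old segment, step to the next segment, and copy
 * the remainder so the caller still sees one linear object.
 */
#include <stdio.h>
#include <string.h>

struct stream {
	const char *p, *end;	/* current segment window */
	const char *next;	/* start of the following segment */
	size_t next_len;
	char scratch[32];
};

static const char *inline_decode(struct stream *s, size_t n)
{
	size_t avail = (size_t)(s->end - s->p);

	if (n <= avail) {		/* fast path: fully in-segment */
		const char *p = s->p;
		s->p += n;
		return p;
	}
	if (n > sizeof(s->scratch) || n - avail > s->next_len)
		return NULL;
	memcpy(s->scratch, s->p, avail);		/* head */
	memcpy(s->scratch + avail, s->next, n - avail);	/* tail */
	s->p = s->next + (n - avail);			/* advance */
	s->end = s->next + s->next_len;
	return s->scratch;
}

int main(void)
{
	static const char seg1[] = "hello ";
	static const char seg2[] = "world";
	struct stream s = { seg1, seg1 + 6, seg2, 5, {0} };
	const char *q = inline_decode(&s, 11);	/* spans both segments */

	printf("%.11s\n", q ? q : "(null)");
	return 0;
}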
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 8eb889510916..d5e1e0b08890 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -26,6 +26,7 @@
 #include <net/sock.h>
 #include <net/xfrm.h>
 #include <net/netlink.h>
+#include <net/ah.h>
 #include <asm/uaccess.h>
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 #include <linux/in6.h>
@@ -302,7 +303,8 @@ static int attach_auth_trunc(struct xfrm_algo_auth **algpp, u8 *props,
 	algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
 	if (!algo)
 		return -ENOSYS;
-	if (ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits)
+	if ((ualg->alg_trunc_len / 8) > MAX_AH_AUTH_LEN ||
+	    ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits)
 		return -EINVAL;
 	*props = algo->desc.sadb_alg_id;
 
@@ -2187,7 +2189,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 
 	if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
 	     type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) &&
-	    (nlh->nlmsg_flags & NLM_F_DUMP)) {
+	    (nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
 		if (link->dump == NULL)
 			return -EINVAL;
 
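NLM_F_DUMP is a composite flag (NLM_F_ROOT | NLM_F_MATCH), so the old plain bitwise test matched messages with only one of the two bits set; comparing the masked value against the mask requires both. A stand-alone illustration (flag values as in linux/netlink.h):

/*
 * Why the NLM_F_DUMP test above was tightened: a bitwise-AND is true
 * when *either* bit of a composite flag is set, while comparing the
 * masked value against the mask requires *both*.
 */
#include <stdio.h>

#define NLM_F_ROOT	0x100
#define NLM_F_MATCH	0x200
#define NLM_F_DUMP	(NLM_F_ROOT | NLM_F_MATCH)

int main(void)
{
	unsigned int flags = NLM_F_ROOT;	/* only one of the two bits */

	printf("loose test:  %d\n", (flags & NLM_F_DUMP) != 0);		 /* 1 */
	printf("strict test: %d\n", (flags & NLM_F_DUMP) == NLM_F_DUMP); /* 0 */
	return 0;
}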
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 1b9b13ee2a72..2b5387d53ba5 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -227,7 +227,7 @@ ifndef PERF_DEBUG
 	CFLAGS_OPTIMIZE = -O6
 endif
 
-CFLAGS = -ggdb3 -Wall -Wextra -std=gnu99 -Werror $(CFLAGS_OPTIMIZE) -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
+CFLAGS = -fno-omit-frame-pointer -ggdb3 -Wall -Wextra -std=gnu99 -Werror $(CFLAGS_OPTIMIZE) -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
 EXTLIBS = -lpthread -lrt -lelf -lm
 ALL_CFLAGS = $(CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64
 ALL_LDFLAGS = $(LDFLAGS)
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 7bc049035484..7069bd3e90b3 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -331,6 +331,9 @@ try_again:
 	else if (err == ENODEV && cpu_list) {
 		die("No such device - did you specify"
 			" an out-of-range profile CPU?\n");
+	} else if (err == ENOENT) {
+		die("%s event is not supported. ",
+		    event_name(evsel));
 	} else if (err == EINVAL && sample_id_all_avail) {
 		/*
 		 * Old kernel, no attr->sample_id_type_all field
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 7a4ebeb8b016..abd4b8497bc4 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -489,7 +489,8 @@ static void create_tasks(void)
 
 	err = pthread_attr_init(&attr);
 	BUG_ON(err);
-	err = pthread_attr_setstacksize(&attr, (size_t)(16*1024));
+	err = pthread_attr_setstacksize(&attr,
+			(size_t) max(16 * 1024, PTHREAD_STACK_MIN));
 	BUG_ON(err);
 	err = pthread_mutex_lock(&start_work_mutex);
 	BUG_ON(err);
@@ -1861,7 +1862,7 @@ static int __cmd_record(int argc, const char **argv)
 	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
 	rec_argv = calloc(rec_argc + 1, sizeof(char *));
 
-	if (rec_argv)
+	if (rec_argv == NULL)
 		return -ENOMEM;
 
 	for (i = 0; i < ARRAY_SIZE(record_args); i++)
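Some C libraries define PTHREAD_STACK_MIN above 16 KiB, in which case pthread_attr_setstacksize() rejects the old hard-coded request with EINVAL; clamping to the platform minimum keeps thread creation working. A stand-alone sketch of the same clamp (compile with -lpthread):

/*
 * Request a small thread stack, but never less than the platform's
 * PTHREAD_STACK_MIN, mirroring the builtin-sched fix above.
 */
#include <limits.h>	/* PTHREAD_STACK_MIN */
#include <pthread.h>
#include <stdio.h>

static void *worker(void *arg)
{
	(void)arg;
	return NULL;
}

int main(void)
{
	pthread_attr_t attr;
	pthread_t tid;
	size_t want = 16 * 1024;
	size_t size = want > PTHREAD_STACK_MIN ? want : PTHREAD_STACK_MIN;

	pthread_attr_init(&attr);
	if (pthread_attr_setstacksize(&attr, size))
		perror("pthread_attr_setstacksize");
	pthread_create(&tid, &attr, worker, NULL);
	pthread_join(tid, NULL);
	printf("ran worker with a %zu-byte stack\n", size);
	return 0;
}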
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 02b2d8013a61..c385a63ebfd1 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -316,6 +316,8 @@ static int run_perf_stat(int argc __used, const char **argv)
 			"\t Consider tweaking"
 			" /proc/sys/kernel/perf_event_paranoid or running as root.",
 			system_wide ? "system-wide " : "");
+	} else if (errno == ENOENT) {
+		error("%s event is not supported. ", event_name(counter));
 	} else {
 		error("open_counter returned with %d (%s). "
 			"/bin/dmesg may provide additional information.\n",
@@ -683,8 +685,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
 	nr_counters = ARRAY_SIZE(default_attrs);
 
 	for (c = 0; c < ARRAY_SIZE(default_attrs); ++c) {
-		pos = perf_evsel__new(default_attrs[c].type,
-				      default_attrs[c].config,
+		pos = perf_evsel__new(&default_attrs[c],
 				      nr_counters);
 		if (pos == NULL)
 			goto out;
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
index 1c984342a579..ed5696198d3d 100644
--- a/tools/perf/builtin-test.c
+++ b/tools/perf/builtin-test.c
@@ -234,6 +234,7 @@ out:
 	return err;
 }
 
+#include "util/cpumap.h"
 #include "util/evsel.h"
 #include <sys/types.h>
 
@@ -264,6 +265,7 @@ static int test__open_syscall_event(void)
 	int err = -1, fd;
 	struct thread_map *threads;
 	struct perf_evsel *evsel;
+	struct perf_event_attr attr;
 	unsigned int nr_open_calls = 111, i;
 	int id = trace_event__id("sys_enter_open");
 
@@ -278,7 +280,10 @@ static int test__open_syscall_event(void)
 		return -1;
 	}
 
-	evsel = perf_evsel__new(PERF_TYPE_TRACEPOINT, id, 0);
+	memset(&attr, 0, sizeof(attr));
+	attr.type = PERF_TYPE_TRACEPOINT;
+	attr.config = id;
+	evsel = perf_evsel__new(&attr, 0);
 	if (evsel == NULL) {
 		pr_debug("perf_evsel__new\n");
 		goto out_thread_map_delete;
@@ -317,6 +322,111 @@ out_thread_map_delete:
 	return err;
 }
 
+#include <sched.h>
+
+static int test__open_syscall_event_on_all_cpus(void)
+{
+	int err = -1, fd, cpu;
+	struct thread_map *threads;
+	struct cpu_map *cpus;
+	struct perf_evsel *evsel;
+	struct perf_event_attr attr;
+	unsigned int nr_open_calls = 111, i;
+	cpu_set_t *cpu_set;
+	size_t cpu_set_size;
+	int id = trace_event__id("sys_enter_open");
+
+	if (id < 0) {
+		pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
+		return -1;
+	}
+
+	threads = thread_map__new(-1, getpid());
+	if (threads == NULL) {
+		pr_debug("thread_map__new\n");
+		return -1;
+	}
+
+	cpus = cpu_map__new(NULL);
+	if (cpus == NULL) {
+		pr_debug("cpu_map__new\n");
+		goto out_thread_map_delete;
+	}
+
+	cpu_set = CPU_ALLOC(cpus->nr);
+
+	if (cpu_set == NULL)
+		goto out_thread_map_delete;
+
+	cpu_set_size = CPU_ALLOC_SIZE(cpus->nr);
+	CPU_ZERO_S(cpu_set_size, cpu_set);
+
+	memset(&attr, 0, sizeof(attr));
+	attr.type = PERF_TYPE_TRACEPOINT;
+	attr.config = id;
+	evsel = perf_evsel__new(&attr, 0);
+	if (evsel == NULL) {
+		pr_debug("perf_evsel__new\n");
+		goto out_cpu_free;
+	}
+
+	if (perf_evsel__open(evsel, cpus, threads) < 0) {
+		pr_debug("failed to open counter: %s, "
+			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
+			 strerror(errno));
+		goto out_evsel_delete;
+	}
+
+	for (cpu = 0; cpu < cpus->nr; ++cpu) {
+		unsigned int ncalls = nr_open_calls + cpu;
+
+		CPU_SET(cpu, cpu_set);
+		sched_setaffinity(0, cpu_set_size, cpu_set);
+		for (i = 0; i < ncalls; ++i) {
+			fd = open("/etc/passwd", O_RDONLY);
+			close(fd);
+		}
+		CPU_CLR(cpu, cpu_set);
+	}
+
+	/*
+	 * Here we need to explicitly preallocate the counts, as if
+	 * we use the auto allocation it will allocate just for 1 cpu,
+	 * as we start by cpu 0.
+	 */
+	if (perf_evsel__alloc_counts(evsel, cpus->nr) < 0) {
+		pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
+		goto out_close_fd;
+	}
+
+	for (cpu = 0; cpu < cpus->nr; ++cpu) {
+		unsigned int expected;
+
+		if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
+			pr_debug("perf_evsel__open_read_on_cpu\n");
+			goto out_close_fd;
+		}
+
+		expected = nr_open_calls + cpu;
+		if (evsel->counts->cpu[cpu].val != expected) {
+			pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %Ld\n",
+				 expected, cpu, evsel->counts->cpu[cpu].val);
+			goto out_close_fd;
+		}
+	}
+
+	err = 0;
+out_close_fd:
+	perf_evsel__close_fd(evsel, 1, threads->nr);
+out_evsel_delete:
+	perf_evsel__delete(evsel);
+out_cpu_free:
+	CPU_FREE(cpu_set);
+out_thread_map_delete:
+	thread_map__delete(threads);
+	return err;
+}
+
 static struct test {
 	const char *desc;
 	int (*func)(void);
@@ -330,6 +440,10 @@ static struct test {
 		.func = test__open_syscall_event,
 	},
 	{
+		.desc = "detect open syscall event on all cpus",
+		.func = test__open_syscall_event_on_all_cpus,
+	},
+	{
 		.func = NULL,
 	},
 };
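The new test pins itself to each CPU with a dynamically sized cpu_set_t before issuing its open(2) calls, so every per-CPU counter sees a known workload. A stand-alone sketch of that affinity walk (glibc CPU_* macros; needs _GNU_SOURCE):

/*
 * Pin the current thread to each online CPU in turn, do some work
 * there, then unpin. Mirrors the loop in the test above.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int ncpus = (int)sysconf(_SC_NPROCESSORS_ONLN);
	cpu_set_t *set = CPU_ALLOC(ncpus);
	size_t size = CPU_ALLOC_SIZE(ncpus);

	if (set == NULL)
		return 1;
	for (int cpu = 0; cpu < ncpus; cpu++) {
		CPU_ZERO_S(size, set);
		CPU_SET_S(cpu, size, set);
		if (sched_setaffinity(0, size, set) == 0)
			printf("now running on CPU %d\n", sched_getcpu());
		CPU_CLR_S(cpu, size, set);
	}
	CPU_FREE(set);
	return 0;
}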
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 1e67ab9c7ebc..6ce4042421bd 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -1247,6 +1247,8 @@ try_again:
 			die("Permission error - are you root?\n"
 				"\t Consider tweaking"
 				" /proc/sys/kernel/perf_event_paranoid.\n");
+		if (err == ENOENT)
+			die("%s event is not supported. ", event_name(evsel));
 		/*
 		 * If it's cycles then fall back to hrtimer
 		 * based cpu-clock-tick sw counter, which
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index c95267e63c5b..f5cfed60af98 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -6,14 +6,13 @@
 
 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
 
-struct perf_evsel *perf_evsel__new(u32 type, u64 config, int idx)
+struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
 {
 	struct perf_evsel *evsel = zalloc(sizeof(*evsel));
 
 	if (evsel != NULL) {
 		evsel->idx = idx;
-		evsel->attr.type = type;
-		evsel->attr.config = config;
+		evsel->attr = *attr;
 		INIT_LIST_HEAD(&evsel->node);
 	}
 
@@ -128,59 +127,75 @@ int __perf_evsel__read(struct perf_evsel *evsel,
 	return 0;
 }
 
-int perf_evsel__open_per_cpu(struct perf_evsel *evsel, struct cpu_map *cpus)
+static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
+			      struct thread_map *threads)
 {
-	int cpu;
+	int cpu, thread;
 
-	if (evsel->fd == NULL && perf_evsel__alloc_fd(evsel, cpus->nr, 1) < 0)
+	if (evsel->fd == NULL &&
+	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
 		return -1;
 
 	for (cpu = 0; cpu < cpus->nr; cpu++) {
-		FD(evsel, cpu, 0) = sys_perf_event_open(&evsel->attr, -1,
-							cpus->map[cpu], -1, 0);
-		if (FD(evsel, cpu, 0) < 0)
-			goto out_close;
+		for (thread = 0; thread < threads->nr; thread++) {
+			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
+								     threads->map[thread],
+								     cpus->map[cpu], -1, 0);
+			if (FD(evsel, cpu, thread) < 0)
+				goto out_close;
+		}
 	}
 
 	return 0;
 
 out_close:
-	while (--cpu >= 0) {
-		close(FD(evsel, cpu, 0));
-		FD(evsel, cpu, 0) = -1;
-	}
+	do {
+		while (--thread >= 0) {
+			close(FD(evsel, cpu, thread));
+			FD(evsel, cpu, thread) = -1;
+		}
+		thread = threads->nr;
+	} while (--cpu >= 0);
 	return -1;
 }
 
-int perf_evsel__open_per_thread(struct perf_evsel *evsel, struct thread_map *threads)
+static struct {
+	struct cpu_map map;
+	int cpus[1];
+} empty_cpu_map = {
+	.map.nr = 1,
+	.cpus = { -1, },
+};
+
+static struct {
+	struct thread_map map;
+	int threads[1];
+} empty_thread_map = {
+	.map.nr = 1,
+	.threads = { -1, },
+};
+
+int perf_evsel__open(struct perf_evsel *evsel,
+		     struct cpu_map *cpus, struct thread_map *threads)
 {
-	int thread;
-
-	if (evsel->fd == NULL && perf_evsel__alloc_fd(evsel, 1, threads->nr))
-		return -1;
 
-	for (thread = 0; thread < threads->nr; thread++) {
-		FD(evsel, 0, thread) = sys_perf_event_open(&evsel->attr,
-							   threads->map[thread], -1, -1, 0);
-		if (FD(evsel, 0, thread) < 0)
-			goto out_close;
+	if (cpus == NULL) {
+		/* Work around old compiler warnings about strict aliasing */
+		cpus = &empty_cpu_map.map;
 	}
 
-	return 0;
+	if (threads == NULL)
+		threads = &empty_thread_map.map;
 
-out_close:
-	while (--thread >= 0) {
-		close(FD(evsel, 0, thread));
-		FD(evsel, 0, thread) = -1;
-	}
-	return -1;
+	return __perf_evsel__open(evsel, cpus, threads);
 }
 
-int perf_evsel__open(struct perf_evsel *evsel,
-		     struct cpu_map *cpus, struct thread_map *threads)
+int perf_evsel__open_per_cpu(struct perf_evsel *evsel, struct cpu_map *cpus)
 {
-	if (threads == NULL)
-		return perf_evsel__open_per_cpu(evsel, cpus);
+	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map);
+}
 
-	return perf_evsel__open_per_thread(evsel, threads);
+int perf_evsel__open_per_thread(struct perf_evsel *evsel, struct thread_map *threads)
+{
+	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads);
 }
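empty_cpu_map and empty_thread_map give __perf_evsel__open() a static one-entry "-1" map to fall back on when the caller passes NULL, instead of branching into separate per-cpu and per-thread code paths. A stand-alone sketch of the wrapper-struct trick behind those sentinels (simplified types; relies on the same GNU C layout guarantee the original does):

/*
 * A map type whose entries trail the header can be given a static
 * one-entry sentinel by wrapping it in an anonymous struct so the
 * flexible array has real storage behind it. "-1" conventionally
 * means "whole system" / "any thread".
 */
#include <stdio.h>

struct cpu_map {
	int nr;
	int map[];	/* flexible array, normally heap-allocated */
};

static struct {
	struct cpu_map map;
	int cpus[1];	/* storage that map.map[0] aliases (GNU extension) */
} empty_cpu_map = {
	.map.nr = 1,
	.cpus = { -1 },
};

static void open_on(struct cpu_map *cpus)
{
	if (cpus == NULL)
		cpus = &empty_cpu_map.map;	/* default: one "-1" entry */
	for (int i = 0; i < cpus->nr; i++)
		printf("open on cpu %d\n", cpus->map[i]);
}

int main(void)
{
	open_on(NULL);
	return 0;
}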
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index a0ccd69c3fc2..b2d755fe88a5 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -37,7 +37,7 @@ struct perf_evsel {
 struct cpu_map;
 struct thread_map;
 
-struct perf_evsel *perf_evsel__new(u32 type, u64 config, int idx);
+struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx);
 void perf_evsel__delete(struct perf_evsel *evsel);
 
 int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 649083f27e08..5cb6f4bde905 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -490,6 +490,31 @@ parse_multiple_tracepoint_event(char *sys_name, const char *evt_exp,
 	return EVT_HANDLED_ALL;
 }
 
+static int store_event_type(const char *orgname)
+{
+	char filename[PATH_MAX], *c;
+	FILE *file;
+	int id, n;
+
+	sprintf(filename, "%s/", debugfs_path);
+	strncat(filename, orgname, strlen(orgname));
+	strcat(filename, "/id");
+
+	c = strchr(filename, ':');
+	if (c)
+		*c = '/';
+
+	file = fopen(filename, "r");
+	if (!file)
+		return 0;
+	n = fscanf(file, "%i", &id);
+	fclose(file);
+	if (n < 1) {
+		pr_err("cannot store event ID\n");
+		return -EINVAL;
+	}
+	return perf_header__push_event(id, orgname);
+}
 
 static enum event_result parse_tracepoint_event(const char **strp,
 					    struct perf_event_attr *attr)
@@ -533,9 +558,13 @@ static enum event_result parse_tracepoint_event(const char **strp,
 		*strp += strlen(sys_name) + evt_length;
 		return parse_multiple_tracepoint_event(sys_name, evt_name,
 						       flags);
-	} else
+	} else {
+		if (store_event_type(evt_name) < 0)
+			return EVT_FAILED;
+
 		return parse_single_tracepoint_event(sys_name, evt_name,
 						     evt_length, attr, strp);
+	}
 }
 
 static enum event_result
@@ -778,41 +807,11 @@ modifier:
 	return ret;
 }
 
-static int store_event_type(const char *orgname)
-{
-	char filename[PATH_MAX], *c;
-	FILE *file;
-	int id, n;
-
-	sprintf(filename, "%s/", debugfs_path);
-	strncat(filename, orgname, strlen(orgname));
-	strcat(filename, "/id");
-
-	c = strchr(filename, ':');
-	if (c)
-		*c = '/';
-
-	file = fopen(filename, "r");
-	if (!file)
-		return 0;
-	n = fscanf(file, "%i", &id);
-	fclose(file);
-	if (n < 1) {
-		pr_err("cannot store event ID\n");
-		return -EINVAL;
-	}
-	return perf_header__push_event(id, orgname);
-}
-
 int parse_events(const struct option *opt __used, const char *str, int unset __used)
 {
 	struct perf_event_attr attr;
 	enum event_result ret;
 
-	if (strchr(str, ':'))
-		if (store_event_type(str) < 0)
-			return -1;
-
 	for (;;) {
 		memset(&attr, 0, sizeof(attr));
 		ret = parse_event_symbols(&str, &attr);
@@ -824,7 +823,7 @@ int parse_events(const struct option *opt __used, const char *str, int unset __u
 
 		if (ret != EVT_HANDLED_ALL) {
 			struct perf_evsel *evsel;
-			evsel = perf_evsel__new(attr.type, attr.config,
+			evsel = perf_evsel__new(&attr,
 						nr_counters);
 			if (evsel == NULL)
 				return -1;
@@ -1014,8 +1013,15 @@ void print_events(void)
 
 int perf_evsel_list__create_default(void)
 {
-	struct perf_evsel *evsel = perf_evsel__new(PERF_TYPE_HARDWARE,
-						   PERF_COUNT_HW_CPU_CYCLES, 0);
+	struct perf_evsel *evsel;
+	struct perf_event_attr attr;
+
+	memset(&attr, 0, sizeof(attr));
+	attr.type = PERF_TYPE_HARDWARE;
+	attr.config = PERF_COUNT_HW_CPU_CYCLES;
+
+	evsel = perf_evsel__new(&attr, 0);
+
 	if (evsel == NULL)
 		return -ENOMEM;
 
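store_event_type() resolves a "subsystem:event" name to its numeric tracepoint id by rewriting the colon to a slash and reading the id file under the events directory in debugfs. A stand-alone sketch of that lookup (the mount point below is an assumption; perf discovers debugfs_path at runtime):

/*
 * Turn "syscalls:sys_enter_open" into
 * /sys/kernel/debug/tracing/events/syscalls/sys_enter_open/id
 * and read the numeric tracepoint id from it.
 */
#include <limits.h>
#include <stdio.h>
#include <string.h>

static int tracepoint_id(const char *event)	/* "sys:name" */
{
	char path[PATH_MAX], *c;
	FILE *f;
	int id = -1;

	snprintf(path, sizeof(path),
		 "/sys/kernel/debug/tracing/events/%s/id", event);
	c = strchr(path, ':');
	if (c)
		*c = '/';		/* "sys:name" -> "sys/name" */

	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%i", &id) != 1)
		id = -1;
	fclose(f);
	return id;
}

int main(void)
{
	printf("id = %d\n", tracepoint_id("syscalls:sys_enter_open"));
	return 0;
}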
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 6fb4694d05fa..313dac2d94ce 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -1007,7 +1007,7 @@ more:
 	if (size == 0)
 		size = 8;
 
-	if (head + event->header.size >= mmap_size) {
+	if (head + event->header.size > mmap_size) {
 		if (mmaps[map_idx]) {
 			munmap(mmaps[map_idx], mmap_size);
 			mmaps[map_idx] = NULL;
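A record whose last byte lands exactly on the end of the mapped window still fits, so only a strictly-greater comparison should force a remap; the old ">=" remapped one event too early. A minimal illustration:

/*
 * Boundary check from the session.c fix: head + size == mmap_size
 * means the record fits entirely, so no remap is needed.
 */
#include <assert.h>

int main(void)
{
	unsigned long mmap_size = 4096, head = 4000, size = 96;

	assert(!(head + size > mmap_size));	/* new test: no remap */
	assert(head + size >= mmap_size);	/* old test forced one */
	return 0;
}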